diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 8ceecb3abb4a2..1aefeee710f47 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -24,4 +24,4 @@
 /.github/ @peternied
-/MAINTAINERS.md @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @peternied @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah
+/MAINTAINERS.md @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gaobinlong @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @peternied @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah
diff --git a/.github/benchmark-configs.json b/.github/benchmark-configs.json
new file mode 100644
index 0000000000000..a5b1951d2240c
--- /dev/null
+++ b/.github/benchmark-configs.json
@@ -0,0 +1,155 @@
+{
+  "name": "Cluster and opensearch-benchmark configurations",
+  "id_1": {
+    "description": "Indexing only configuration for NYC_TAXIS workload",
+    "supported_major_versions": ["2", "3"],
+    "cluster-benchmark-configs": {
+      "SINGLE_NODE_CLUSTER": "true",
+      "MIN_DISTRIBUTION": "true",
+      "TEST_WORKLOAD": "nyc_taxis",
+      "WORKLOAD_PARAMS": "{\"number_of_replicas\":\"0\",\"number_of_shards\":\"1\"}",
+      "EXCLUDE_TASKS": "type:search",
+      "CAPTURE_NODE_STAT": "true"
+    },
+    "cluster_configuration": {
+      "size": "Single-Node",
+      "data_instance_config": "4vCPU, 32G Mem, 16G Heap"
+    }
+  },
+  "id_2": {
+    "description": "Indexing only configuration for HTTP_LOGS workload",
+    "supported_major_versions": ["2", "3"],
+    "cluster-benchmark-configs": {
+      "SINGLE_NODE_CLUSTER": "true",
+      "MIN_DISTRIBUTION": "true",
+      "TEST_WORKLOAD": "http_logs",
+      "WORKLOAD_PARAMS": "{\"number_of_replicas\":\"0\",\"number_of_shards\":\"1\"}",
+      "EXCLUDE_TASKS": "type:search",
+      "CAPTURE_NODE_STAT": "true"
+    },
+    "cluster_configuration": {
+      "size": "Single-Node",
+      "data_instance_config": "4vCPU, 32G Mem, 16G Heap"
+    }
+  },
+  "id_3": {
+    "description": "Search only test-procedure for NYC_TAXIS, uses snapshot to restore the data for OS-3.0.0",
+    "supported_major_versions": ["3"],
+    "cluster-benchmark-configs": {
+      "SINGLE_NODE_CLUSTER": "true",
+      "MIN_DISTRIBUTION": "true",
+      "TEST_WORKLOAD": "nyc_taxis",
+      "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo-300\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots-300\",\"snapshot_name\":\"nyc_taxis_1_shard\"}",
+      "CAPTURE_NODE_STAT": "true"
+    },
+    "cluster_configuration": {
+      "size": "Single-Node",
+      "data_instance_config": "4vCPU, 32G Mem, 16G Heap"
+    }
+  },
+  "id_4": {
+    "description": "Search only test-procedure for HTTP_LOGS, uses snapshot to restore the data for OS-3.0.0",
+    "supported_major_versions": ["3"],
+    "cluster-benchmark-configs": {
+      "SINGLE_NODE_CLUSTER": "true",
+      "MIN_DISTRIBUTION": "true",
+      "TEST_WORKLOAD": "http_logs",
+      "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo-300\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots-300\",\"snapshot_name\":\"http_logs_1_shard\"}",
+      "CAPTURE_NODE_STAT": "true"
+    },
+    "cluster_configuration": {
+      "size": "Single-Node",
+      "data_instance_config": "4vCPU, 32G Mem, 16G Heap"
+    }
+  },
+  "id_5": {
+    "description": "Search only test-procedure for BIG5, uses snapshot to restore the data for OS-3.0.0",
"supported_major_versions": ["3"], + "cluster-benchmark-configs": { + "SINGLE_NODE_CLUSTER": "true", + "MIN_DISTRIBUTION": "true", + "TEST_WORKLOAD": "big5", + "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo-300\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots-300\",\"snapshot_name\":\"big5_1_shard\"}", + "CAPTURE_NODE_STAT": "true" + }, + "cluster_configuration": { + "size": "Single-Node", + "data_instance_config": "4vCPU, 32G Mem, 16G Heap" + } + }, + "id_6": { + "description": "Search only test-procedure for NYC_TAXIS, uses snapshot to restore the data for OS-2.x", + "supported_major_versions": ["2"], + "cluster-benchmark-configs": { + "SINGLE_NODE_CLUSTER": "true", + "MIN_DISTRIBUTION": "true", + "TEST_WORKLOAD": "nyc_taxis", + "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots\",\"snapshot_name\":\"nyc_taxis_1_shard\"}", + "CAPTURE_NODE_STAT": "true" + }, + "cluster_configuration": { + "size": "Single-Node", + "data_instance_config": "4vCPU, 32G Mem, 16G Heap" + } + }, + "id_7": { + "description": "Search only test-procedure for HTTP_LOGS, uses snapshot to restore the data for OS-2.x", + "supported_major_versions": ["2"], + "cluster-benchmark-configs": { + "SINGLE_NODE_CLUSTER": "true", + "MIN_DISTRIBUTION": "true", + "TEST_WORKLOAD": "http_logs", + "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots\",\"snapshot_name\":\"http_logs_1_shard\"}", + "CAPTURE_NODE_STAT": "true" + }, + "cluster_configuration": { + "size": "Single-Node", + "data_instance_config": "4vCPU, 32G Mem, 16G Heap" + } + }, + "id_8": { + "description": "Search only test-procedure for HTTP_LOGS, uses snapshot to restore the data for OS-2.x", + "supported_major_versions": ["2"], + "cluster-benchmark-configs": { + "SINGLE_NODE_CLUSTER": "true", + "MIN_DISTRIBUTION": "true", + "TEST_WORKLOAD": "big5", + "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots\",\"snapshot_name\":\"big5_1_shard\"}", + "CAPTURE_NODE_STAT": "true" + }, + "cluster_configuration": { + "size": "Single-Node", + "data_instance_config": "4vCPU, 32G Mem, 16G Heap" + } + }, + "id_9": { + "description": "Indexing and search configuration for pmc workload", + "supported_major_versions": ["2", "3"], + "cluster-benchmark-configs": { + "SINGLE_NODE_CLUSTER": "true", + "MIN_DISTRIBUTION": "true", + "TEST_WORKLOAD": "pmc", + "WORKLOAD_PARAMS": "{\"number_of_replicas\":\"0\",\"number_of_shards\":\"1\"}", + "CAPTURE_NODE_STAT": "true" + }, + "cluster_configuration": { + "size": "Single-Node", + "data_instance_config": "4vCPU, 32G Mem, 16G Heap" + } + }, + "id_10": { + "description": "Indexing only configuration for stack-overflow workload", + "supported_major_versions": ["2", "3"], + "cluster-benchmark-configs": { + "SINGLE_NODE_CLUSTER": "true", + "MIN_DISTRIBUTION": "true", + "TEST_WORKLOAD": "so", + "WORKLOAD_PARAMS": "{\"number_of_replicas\":\"0\",\"number_of_shards\":\"1\"}", + "CAPTURE_NODE_STAT": "true" + }, + "cluster_configuration": { + "size": "Single-Node", + 
"data_instance_config": "4vCPU, 32G Mem, 16G Heap" + } + } +} diff --git a/.github/workflows/add-performance-comment.yml b/.github/workflows/add-performance-comment.yml new file mode 100644 index 0000000000000..b522d348c84b2 --- /dev/null +++ b/.github/workflows/add-performance-comment.yml @@ -0,0 +1,25 @@ +name: Performance Label Action + +on: + pull_request_target: + types: [labeled] + +jobs: + add-comment: + if: github.event.label.name == 'Performance' + runs-on: ubuntu-latest + permissions: + pull-requests: write + + steps: + - name: Add comment to PR + uses: actions/github-script@v6 + with: + github-token: ${{secrets.GITHUB_TOKEN}} + script: | + github.rest.issues.createComment({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + body: "Hello!\nWe have added a performance benchmark workflow that runs by adding a comment on the PR.\n Please refer https://github.com/opensearch-project/OpenSearch/blob/main/PERFORMANCE_BENCHMARKS.md on how to run benchmarks on pull requests." + }) diff --git a/.github/workflows/benchmark-pull-request.yml b/.github/workflows/benchmark-pull-request.yml new file mode 100644 index 0000000000000..1aa2b6271719b --- /dev/null +++ b/.github/workflows/benchmark-pull-request.yml @@ -0,0 +1,163 @@ +name: Run performance benchmark on pull request +on: + issue_comment: + types: [created] +jobs: + run-performance-benchmark-on-pull-request: + if: ${{ (github.event.issue.pull_request) && (contains(github.event.comment.body, '"run-benchmark-test"')) }} + runs-on: ubuntu-latest + permissions: + id-token: write + contents: read + issues: write + pull-requests: write + steps: + - name: Checkout Repository + uses: actions/checkout@v3 + - name: Set up required env vars + run: | + echo "PR_NUMBER=${{ github.event.issue.number }}" >> $GITHUB_ENV + echo "REPOSITORY=${{ github.event.repository.full_name }}" >> $GITHUB_ENV + OPENSEARCH_VERSION=$(awk -F '=' '/^opensearch[[:space:]]*=/ {gsub(/[[:space:]]/, "", $2); print $2}' buildSrc/version.properties) + echo "OPENSEARCH_VERSION=$OPENSEARCH_VERSION" >> $GITHUB_ENV + major_version=$(echo $OPENSEARCH_VERSION | cut -d'.' 
-f1) + echo "OPENSEARCH_MAJOR_VERSION=$major_version" >> $GITHUB_ENV + echo "USER_TAGS=pull_request_number:${{ github.event.issue.number }},repository:OpenSearch" >> $GITHUB_ENV + - name: Check comment format + id: check_comment + uses: actions/github-script@v6 + with: + script: | + const fs = require('fs'); + const comment = context.payload.comment.body; + let commentJson; + try { + commentJson = JSON.parse(comment); + } catch (error) { + core.setOutput('invalid', 'true'); + return; + } + if (!commentJson.hasOwnProperty('run-benchmark-test')) { + core.setOutput('invalid', 'true'); + return; + } + const configId = commentJson['run-benchmark-test']; + let benchmarkConfigs; + try { + benchmarkConfigs = JSON.parse(fs.readFileSync('.github/benchmark-configs.json', 'utf8')); + } catch (error) { + core.setFailed('Failed to read benchmark-configs.json'); + return; + } + const openSearchMajorVersion = process.env.OPENSEARCH_MAJOR_VERSION; + console.log('MAJOR_VERSION', openSearchMajorVersion) + if (!benchmarkConfigs.hasOwnProperty(configId) || + !benchmarkConfigs[configId].supported_major_versions.includes(openSearchMajorVersion)) { + core.setOutput('invalid', 'true'); + return; + } + const clusterBenchmarkConfigs = benchmarkConfigs[configId]['cluster-benchmark-configs']; + for (const [key, value] of Object.entries(clusterBenchmarkConfigs)) { + core.exportVariable(key, value); + } + - name: Post invalid format comment + if: steps.check_comment.outputs.invalid == 'true' + uses: actions/github-script@v6 + with: + github-token: ${{secrets.GITHUB_TOKEN}} + script: | + github.rest.issues.createComment({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + body: 'Invalid comment format or config id. Please refer to https://github.com/opensearch-project/OpenSearch/blob/main/PERFORMANCE_BENCHMARKS.md on how to run benchmarks on pull requests.' + }) + - name: Fail workflow for invalid comment + if: steps.check_comment.outputs.invalid == 'true' + run: | + echo "Invalid comment format detected. Failing the workflow." 
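+          # Exit non-zero so this workflow run is marked as failed on the PR checks tab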
+          exit 1
+      - id: get_approvers
+        run: |
+          echo "approvers=$(cat .github/CODEOWNERS | grep '^\*' | tr -d '* ' | sed 's/@/,/g' | sed 's/,//1')" >> $GITHUB_OUTPUT
+      - uses: trstringer/manual-approval@v1
+        if: (!contains(steps.get_approvers.outputs.approvers, github.event.comment.user.login))
+        with:
+          secret: ${{ github.TOKEN }}
+          approvers: ${{ steps.get_approvers.outputs.approvers }}
+          minimum-approvals: 1
+          issue-title: 'Request to approve/deny benchmark run for PR #${{ env.PR_NUMBER }}'
+          issue-body: "Please approve or deny the benchmark run for PR #${{ env.PR_NUMBER }}"
+          exclude-workflow-initiator-as-approver: false
+      - name: Get PR Details
+        id: get_pr
+        uses: actions/github-script@v7
+        with:
+          script: |
+            const issue = context.payload.issue;
+            const prNumber = issue.number;
+            console.log(`Pull Request Number: ${prNumber}`);
+
+            const { data: pull_request } = await github.rest.pulls.get({
+              owner: context.repo.owner,
+              repo: context.repo.repo,
+              pull_number: prNumber,
+            });
+
+            return {
+              "headRepoFullName": pull_request.head.repo.full_name,
+              "headRef": pull_request.head.ref
+            };
+      - name: Set PR details env vars
+        run: |
+          echo '${{ steps.get_pr.outputs.result }}' | jq -r '.headRepoFullName'
+          echo '${{ steps.get_pr.outputs.result }}' | jq -r '.headRef'
+          headRepo=$(echo '${{ steps.get_pr.outputs.result }}' | jq -r '.headRepoFullName')
+          headRef=$(echo '${{ steps.get_pr.outputs.result }}' | jq -r '.headRef')
+          echo "prHeadRepo=$headRepo" >> $GITHUB_ENV
+          echo "prHeadRef=$headRef" >> $GITHUB_ENV
+      - name: Checkout PR Repo
+        uses: actions/checkout@v2
+        with:
+          repository: ${{ env.prHeadRepo }}
+          ref: ${{ env.prHeadRef }}
+          token: ${{ secrets.GITHUB_TOKEN }}
+      - name: Setup Java
+        uses: actions/setup-java@v1
+        with:
+          java-version: 21
+      - name: Build and Assemble OpenSearch from PR
+        run: |
+          ./gradlew :distribution:archives:linux-tar:assemble -Dbuild.snapshot=false
+      - name: Configure AWS credentials
+        uses: aws-actions/configure-aws-credentials@v4
+        with:
+          role-to-assume: ${{ secrets.UPLOAD_ARCHIVE_ARTIFACT_ROLE }}
+          role-session-name: publish-to-s3
+          aws-region: us-west-2
+      - name: Push to S3
+        run: |
+          aws s3 cp distribution/archives/linux-tar/build/distributions/opensearch-min-$OPENSEARCH_VERSION-linux-x64.tar.gz s3://${{ secrets.ARCHIVE_ARTIFACT_BUCKET_NAME }}/PR-$PR_NUMBER/
+          echo "DISTRIBUTION_URL=${{ secrets.ARTIFACT_BUCKET_CLOUDFRONT_URL }}/PR-$PR_NUMBER/opensearch-min-$OPENSEARCH_VERSION-linux-x64.tar.gz" >> $GITHUB_ENV
+      - name: Checkout opensearch-build repo
+        uses: actions/checkout@v4
+        with:
+          repository: opensearch-project/opensearch-build
+          ref: main
+          path: opensearch-build
+      - name: Trigger jenkins workflow to run benchmark
+        run: |
+          cat $GITHUB_ENV
+          bash opensearch-build/scripts/benchmark/benchmark-pull-request.sh ${{ secrets.JENKINS_PR_BENCHMARK_GENERIC_WEBHOOK_TOKEN }}
+      - name: Update PR with Job URL
+        uses: actions/github-script@v6
+        with:
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          script: |
+            const workflowUrl = process.env.WORKFLOW_URL;
+            github.rest.issues.createComment({
+              issue_number: context.issue.number,
+              owner: context.repo.owner,
+              repo: context.repo.repo,
+              body: `The Jenkins job URL is ${workflowUrl}.
+            Final results will be published once the job is completed.`
+            })
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 91020dc6a05c9..f53c96e4ea224 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -19,6 +19,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Print reason why parent task was cancelled ([#14604](https://github.com/opensearch-project/OpenSearch/issues/14604))
 - Add matchesPluginSystemIndexPattern to SystemIndexRegistry ([#14750](https://github.com/opensearch-project/OpenSearch/pull/14750))
 - Add Plugin interface for loading application based configuration templates (([#14659](https://github.com/opensearch-project/OpenSearch/issues/14659)))
+- Refactor remote-routing-table service inline with remote state interfaces ([#14668](https://github.com/opensearch-project/OpenSearch/pull/14668))
+- Add prefix mode verification setting for repository verification ([#14790](https://github.com/opensearch-project/OpenSearch/pull/14790))
 - [Range Queries] Add new approximateable query framework to short-circuit range queries ([#13788](https://github.com/opensearch-project/OpenSearch/pull/13788))
 ### Dependencies
@@ -74,6 +76,9 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Refactoring FilterPath.parse by using an iterative approach ([#14200](https://github.com/opensearch-project/OpenSearch/pull/14200))
 - Refactoring Grok.validatePatternBank by using an iterative approach ([#14206](https://github.com/opensearch-project/OpenSearch/pull/14206))
 - Update help output for _cat ([#14722](https://github.com/opensearch-project/OpenSearch/pull/14722))
+- Fix bulk upsert ignoring the default_pipeline and final_pipeline when the auto-created index matches an index template ([#12891](https://github.com/opensearch-project/OpenSearch/pull/12891))
+- Fix NPE in ReplicaShardAllocator ([#14385](https://github.com/opensearch-project/OpenSearch/pull/14385))
+- Fix constant_keyword field type used when creating index ([#14807](https://github.com/opensearch-project/OpenSearch/pull/14807))
 ### Security
diff --git a/MAINTAINERS.md b/MAINTAINERS.md
index 3298ceb15463c..f77c69ddeff2a 100644
--- a/MAINTAINERS.md
+++ b/MAINTAINERS.md
@@ -14,6 +14,7 @@ This document contains a list of maintainers in this repo. See [opensearch-proje
 | Charlotte Henkle | [CEHENKLE](https://github.com/CEHENKLE) | Amazon |
 | Dan Widdis | [dbwiddis](https://github.com/dbwiddis) | Amazon |
 | Daniel "dB." Doubrovkine | [dblock](https://github.com/dblock) | Amazon |
+| Gao Binlong | [gaobinlong](https://github.com/gaobinlong) | Amazon |
 | Gaurav Bafna | [gbbafna](https://github.com/gbbafna) | Amazon |
 | Jay Deng | [jed326](https://github.com/jed326) | Amazon |
 | Kunal Kotwani | [kotwanikunal](https://github.com/kotwanikunal) | Amazon |
diff --git a/PERFORMANCE_BENCHMARKS.md b/PERFORMANCE_BENCHMARKS.md
new file mode 100644
index 0000000000000..252c4ae312136
--- /dev/null
+++ b/PERFORMANCE_BENCHMARKS.md
@@ -0,0 +1,112 @@
+# README: Running Performance Benchmarks on Pull Requests
+
+## Overview
+
+The `benchmark-pull-request` GitHub Actions workflow is designed to automatically run performance benchmarks on a pull request when a specific comment is made on the pull request. This ensures that performance benchmarks are consistently and accurately applied to code changes, helping maintain the performance standards of the repository.
+
+## Workflow Trigger
+
+The workflow is triggered when a new comment is created on a pull request.
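+For example, the following invocation posts a triggering comment via the GitHub CLI (a sketch assuming `gh` is installed; the PR number is illustrative):
+```bash
+# Post a trigger comment on a pull request (PR number is hypothetical)
+gh pr comment 1234 --repo opensearch-project/OpenSearch --body '{"run-benchmark-test": "id_1"}'
+```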
+The workflow checks for the presence of the `"run-benchmark-test"` keyword in the comment body. If this keyword is detected, the workflow proceeds to run the performance benchmarks.
+
+## Key Steps in the Workflow
+
+1. **Check Comment Format and Configuration:**
+   - Validates the format of the comment to ensure it contains the required `"run-benchmark-test"` keyword and is valid JSON.
+   - Extracts the benchmark configuration ID from the comment and verifies that it exists in the `benchmark-configs.json` file.
+   - Checks whether the extracted configuration ID is supported for the current OpenSearch major version.
+
+2. **Post Invalid Format Comment:**
+   - If the comment format is invalid or the configuration ID is not supported, a comment is posted on the pull request indicating the problem, and the workflow fails.
+
+3. **Manual Approval (if necessary):**
+   - Fetches the list of approvers from the `.github/CODEOWNERS` file.
+   - If the commenter is not one of the maintainers, a manual approval request is created. The workflow pauses until an approver approves or denies the benchmark run by commenting the appropriate word on the issue.
+   - The approval-request issue is auto-closed once the approver has added the appropriate comment.
+
+4. **Build and Assemble OpenSearch:**
+   - Builds and assembles the OpenSearch distribution (linux-x64 tar) from the pull request code changes.
+
+5. **Upload to S3:**
+   - Configures AWS credentials and uploads the assembled OpenSearch distribution to an S3 bucket for further use in benchmarking.
+   - The S3 bucket is fronted by CloudFront so that it only allows downloads.
+   - The lifecycle policy on the S3 bucket deletes the uploaded artifacts after 30 days.
+
+6. **Trigger Jenkins Workflow:**
+   - Triggers a Jenkins workflow to run the benchmark tests using a webhook token.
+
+7. **Update Pull Request with Job URL:**
+   - Posts a comment on the pull request with the URL of the Jenkins job. The final benchmark results will be posted once the job completes.
+   - To learn how the benchmark job works, see https://github.com/opensearch-project/opensearch-build/tree/main/src/test_workflow#benchmarking-tests
+
+## How to Use This Workflow
+
+1. **Ensure `benchmark-configs.json` is Up-to-Date:**
+   - The `benchmark-configs.json` file should contain valid benchmark configurations with supported major versions and cluster-benchmark configurations.
+
+2. **Add the Workflow to Your Repository:**
+   - Save the workflow YAML file (named `benchmark-pull-request.yml` in this repository) in the `.github/workflows` directory of your repository.
+
+3. **Make a Comment to Trigger the Workflow:**
+   - On any pull request, post a comment containing the keyword `"run-benchmark-test"` along with the configuration ID. For example:
+   ```json
+   {"run-benchmark-test": "id_1"}
+   ```
+
+4. **Monitor Workflow Progress:**
+   - The workflow will validate the comment, check for approval (if necessary), build the OpenSearch distribution, and trigger the Jenkins job.
+   - A comment will be posted on the pull request with the URL of the Jenkins job. You can monitor the progress and final results there as well.
+
+## Example Comment Format
+
+To run the benchmark with configuration ID `id_1`, post the following comment on the pull request:
+```json
+{"run-benchmark-test": "id_1"}
+```
+
+## How to add a new benchmark configuration
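+When adding a new entry, it can help to first sanity-check that the file is still valid JSON and that the new ID is visible (a sketch assuming `jq` is installed; `id_11` is a hypothetical new ID):
+```bash
+# Validate the JSON and confirm the new config ID is present
+jq -e 'has("id_11")' .github/benchmark-configs.json
+```
+
+The `benchmark-configs.json` file accepts the following schema: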
+```json
+{
+  "id_<number>": {
+    "description": "Short description of the configuration",
+    "supported_major_versions": ["2", "3"],
+    "cluster-benchmark-configs": {
+      "SINGLE_NODE_CLUSTER": "Use single node cluster for benchmarking, accepted values are \"true\" or \"false\"",
+      "MIN_DISTRIBUTION": "Use OpenSearch min distribution, should always be \"true\"",
+      "MANAGER_NODE_COUNT": "For multi-node cluster tests, number of cluster manager nodes, empty value defaults to 3.",
+      "DATA_NODE_COUNT": "For multi-node cluster tests, number of data nodes, empty value defaults to 2.",
+      "DATA_INSTANCE_TYPE": "EC2 instance type for data node, empty defaults to r5.xlarge.",
+      "DATA_NODE_STORAGE": "Data node EBS block storage size, empty value defaults to 100GB",
+      "JVM_SYS_PROPS": "A comma-separated list of key=value pairs that will be added to jvm.options as JVM system properties",
+      "ADDITIONAL_CONFIG": "Additional space delimited opensearch.yml config parameters. e.g., `search.concurrent_segment_search.enabled:true`",
+      "TEST_WORKLOAD": "The workload name from OpenSearch Benchmark Workloads. https://github.com/opensearch-project/opensearch-benchmark-workloads. Default is nyc_taxis",
+      "WORKLOAD_PARAMS": "With this parameter you can inject variables into workloads, e.g. {\"number_of_replicas\":\"0\",\"number_of_shards\":\"3\"}. See https://opensearch.org/docs/latest/benchmark/reference/commands/command-flags/#workload-params",
+      "EXCLUDE_TASKS": "Defines a comma-separated list of test procedure tasks not to run. e.g. type:search, see https://opensearch.org/docs/latest/benchmark/reference/commands/command-flags/#exclude-tasks",
+      "INCLUDE_TASKS": "Defines a comma-separated list of test procedure tasks to run. By default, all tasks listed in a test procedure array are run. See https://opensearch.org/docs/latest/benchmark/reference/commands/command-flags/#include-tasks",
+      "TEST_PROCEDURE": "Defines a test procedure to use. e.g., `append-no-conflicts,significant-text`. Uses default if none provided. See https://opensearch.org/docs/latest/benchmark/reference/commands/command-flags/#test-procedure",
+      "CAPTURE_NODE_STAT": "Enable opensearch-benchmark node-stats telemetry to capture system level metrics like cpu, jvm etc., see https://opensearch.org/docs/latest/benchmark/reference/telemetry/#node-stats"
+    },
+    "cluster_configuration": {
+      "size": "Single-Node/Multi-Node",
+      "data_instance_config": "data-instance-config, e.g., 4vCPU, 32G Mem, 16G Heap"
+    }
+  }
+}
+```
+To add a new test configuration suitable to your changes, please create a new PR adding the desired cluster and benchmark configurations.
+
+## How to compare my results against the baseline?
+
+Apart from just running benchmarks, the user will also be interested in how their change performs against the current OpenSearch distribution with exactly the same cluster and benchmark configurations.
+The user can refer to https://s12d.com/basline-dashboards (WIP) to access baseline data for their workload; this data is generated by our nightly benchmark runs on the latest build distribution artifacts for 3.0 and 2.x.
+In the future, we will add the [compare](https://opensearch.org/docs/latest/benchmark/reference/commands/compare/) feature of opensearch-benchmark to run the comparison and publish the data on the PR as well.
+
+## Notes
+
+- Ensure all required secrets (e.g., `GITHUB_TOKEN`, `UPLOAD_ARCHIVE_ARTIFACT_ROLE`, `ARCHIVE_ARTIFACT_BUCKET_NAME`, `JENKINS_PR_BENCHMARK_GENERIC_WEBHOOK_TOKEN`) are properly set in the repository secrets.
+- The `CODEOWNERS` file should list the GitHub usernames of approvers for the benchmark process. + +By following these instructions, repository maintainers can ensure consistent and automated performance benchmarking for all code changes introduced via pull requests. + + diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/70_bulk.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/70_bulk.yml index edb7b77eb8d28..36b2b5351dcad 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/70_bulk.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/70_bulk.yml @@ -41,6 +41,10 @@ teardown: ingest.delete_pipeline: id: "pipeline2" ignore: 404 + - do: + indices.delete_index_template: + name: test_index_template_for_bulk + ignore: 404 --- "Test bulk request without default pipeline": @@ -168,6 +172,40 @@ teardown: id: test_id3 - match: { _source: {"f1": "v2", "f2": 47, "field1": "value1"}} +# related issue: https://github.com/opensearch-project/OpenSearch/issues/12888 +--- +"Test bulk upsert honors default_pipeline and final_pipeline when the auto-created index matches with the index template": + - skip: + version: " - 2.15.99" + reason: "fixed in 2.16.0" + - do: + indices.put_index_template: + name: test_for_bulk_upsert_index_template + body: + index_patterns: test_bulk_upsert_* + template: + settings: + number_of_shards: 1 + number_of_replicas: 0 + default_pipeline: pipeline1 + final_pipeline: pipeline2 + + - do: + bulk: + refresh: true + body: + - '{"update": {"_index": "test_bulk_upsert_index", "_id": "test_id3"}}' + - '{"upsert": {"f1": "v2", "f2": 47}, "doc": {"x": 1}}' + + - match: { errors: false } + - match: { items.0.update.result: created } + + - do: + get: + index: test_bulk_upsert_index + id: test_id3 + - match: { _source: {"f1": "v2", "f2": 47, "field1": "value1", "field2": "value2"}} + --- "Test bulk API with batch enabled happy case": - skip: diff --git a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java index b4c62fbf85cb8..4a95b04de3952 100644 --- a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java +++ b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java @@ -60,7 +60,6 @@ import java.util.function.ToLongBiFunction; import org.ehcache.Cache; -import org.ehcache.CachePersistenceException; import org.ehcache.PersistentCacheManager; import org.ehcache.config.builders.CacheConfigurationBuilder; import org.ehcache.config.builders.CacheEventListenerConfigurationBuilder; @@ -104,8 +103,6 @@ public class EhcacheDiskCache implements ICache { // Unique id associated with this cache. private final static String UNIQUE_ID = UUID.randomUUID().toString(); private final static String THREAD_POOL_ALIAS_PREFIX = "ehcachePool"; - private final static int MINIMUM_MAX_SIZE_IN_BYTES = 1024 * 100; // 100KB - // A Cache manager can create many caches. private final PersistentCacheManager cacheManager; @@ -127,13 +124,18 @@ public class EhcacheDiskCache implements ICache { private final Serializer keySerializer; private final Serializer valueSerializer; + final static int MINIMUM_MAX_SIZE_IN_BYTES = 1024 * 100; // 100KB + final static String CACHE_DATA_CLEANUP_DURING_INITIALIZATION_EXCEPTION = "Failed to delete ehcache disk cache under " + + "path: %s during initialization. 
Please clean this up manually and restart the process"; + /** * Used in computeIfAbsent to synchronize loading of a given key. This is needed as ehcache doesn't provide a * computeIfAbsent method. */ Map, CompletableFuture, V>>> completableFutureMap = new ConcurrentHashMap<>(); - private EhcacheDiskCache(Builder builder) { + @SuppressForbidden(reason = "Ehcache uses File.io") + EhcacheDiskCache(Builder builder) { this.keyType = Objects.requireNonNull(builder.keyType, "Key type shouldn't be null"); this.valueType = Objects.requireNonNull(builder.valueType, "Value type shouldn't be null"); this.expireAfterAccess = Objects.requireNonNull(builder.getExpireAfterAcess(), "ExpireAfterAccess value shouldn't " + "be null"); @@ -151,6 +153,18 @@ private EhcacheDiskCache(Builder builder) { if (this.storagePath == null || this.storagePath.isBlank()) { throw new IllegalArgumentException("Storage path shouldn't be null or empty"); } + // Delete all the previous disk cache related files/data. We don't persist data between process restart for + // now which is why need to do this. Clean up in case there was a non graceful restart and we had older disk + // cache data still lying around. + Path ehcacheDirectory = Paths.get(this.storagePath); + if (Files.exists(ehcacheDirectory)) { + try { + logger.info("Found older disk cache data lying around during initialization under path: {}", this.storagePath); + IOUtils.rm(ehcacheDirectory); + } catch (IOException e) { + throw new OpenSearchException(String.format(CACHE_DATA_CLEANUP_DURING_INITIALIZATION_EXCEPTION, this.storagePath), e); + } + } if (builder.threadPoolAlias == null || builder.threadPoolAlias.isBlank()) { this.threadPoolAlias = THREAD_POOL_ALIAS_PREFIX + "DiskWrite#" + UNIQUE_ID; } else { @@ -175,6 +189,11 @@ private EhcacheDiskCache(Builder builder) { } } + // Package private for testing + PersistentCacheManager getCacheManager() { + return this.cacheManager; + } + @SuppressWarnings({ "rawtypes" }) private Cache buildCache(Duration expireAfterAccess, Builder builder) { // Creating the cache requires permissions specified in plugin-security.policy @@ -255,7 +274,7 @@ Map, CompletableFuture, V>>> getCompletableFutur } @SuppressForbidden(reason = "Ehcache uses File.io") - private PersistentCacheManager buildCacheManager() { + PersistentCacheManager buildCacheManager() { // In case we use multiple ehCaches, we can define this cache manager at a global level. 
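+ // Package-private (rather than private) so unit tests can override this method and supply a mock cache manager.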
// Creating the cache manager also requires permissions specified in plugin-security.policy return AccessController.doPrivileged((PrivilegedAction) () -> { @@ -444,20 +463,21 @@ public void refresh() { @Override @SuppressForbidden(reason = "Ehcache uses File.io") public void close() { - cacheManager.removeCache(this.diskCacheAlias); - cacheManager.close(); try { - cacheManager.destroyCache(this.diskCacheAlias); - // Delete all the disk cache related files/data - Path ehcacheDirectory = Paths.get(this.storagePath); - if (Files.exists(ehcacheDirectory)) { + cacheManager.close(); + } catch (Exception e) { + logger.error(() -> new ParameterizedMessage("Exception occurred while trying to close ehcache manager"), e); + } + // Delete all the disk cache related files/data in case it is present + Path ehcacheDirectory = Paths.get(this.storagePath); + if (Files.exists(ehcacheDirectory)) { + try { IOUtils.rm(ehcacheDirectory); + } catch (IOException e) { + logger.error(() -> new ParameterizedMessage("Failed to delete ehcache disk cache data under path: {}", this.storagePath)); } - } catch (CachePersistenceException e) { - throw new OpenSearchException("Exception occurred while destroying ehcache and associated data", e); - } catch (IOException e) { - logger.error(() -> new ParameterizedMessage("Failed to delete ehcache disk cache data under path: {}", this.storagePath)); } + } /** diff --git a/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java b/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java index 29551befd3e9f..2bc24227bb513 100644 --- a/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java +++ b/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java @@ -25,6 +25,7 @@ import org.opensearch.common.metrics.CounterMetric; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.bytes.CompositeBytesReference; @@ -34,6 +35,8 @@ import java.io.IOException; import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; @@ -47,10 +50,17 @@ import java.util.concurrent.Phaser; import java.util.function.ToLongBiFunction; +import org.ehcache.PersistentCacheManager; + import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_LISTENER_MODE_SYNC_KEY; import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_MAX_SIZE_IN_BYTES_KEY; import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_STORAGE_PATH_KEY; +import static org.opensearch.cache.store.disk.EhcacheDiskCache.MINIMUM_MAX_SIZE_IN_BYTES; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; @ThreadLeakFilters(filters = { EhcacheThreadLeakFilter.class }) public class EhCacheDiskCacheTests extends OpenSearchSingleNodeTestCase { @@ -882,6 +892,289 @@ public void testStatsTrackingDisabled() throws Exception { } } + public void testDiskCacheFilesAreClearedUpDuringCloseAndInitialization() throws Exception { + Settings 
settings = Settings.builder().build(); + MockRemovalListener removalListener = new MockRemovalListener<>(); + ToLongBiFunction, String> weigher = getWeigher(); + try (NodeEnvironment env = newNodeEnvironment(settings)) { + String path = env.nodePaths()[0].path.toString() + "/request_cache"; + // Create a dummy file to simulate a scenario where the data is already in the disk cache storage path + // beforehand. + Files.createDirectory(Path.of(path)); + Path dummyFilePath = Files.createFile(Path.of(path + "/testing.txt")); + assertTrue(Files.exists(dummyFilePath)); + ICache ehcacheTest = new EhcacheDiskCache.Builder().setThreadPoolAlias("ehcacheTest") + .setStoragePath(path) + .setIsEventListenerModeSync(true) + .setKeyType(String.class) + .setValueType(String.class) + .setKeySerializer(new StringSerializer()) + .setDiskCacheAlias("test1") + .setValueSerializer(new StringSerializer()) + .setDimensionNames(List.of(dimensionName)) + .setCacheType(CacheType.INDICES_REQUEST_CACHE) + .setThreadPoolAlias("") + .setSettings(settings) + .setExpireAfterAccess(TimeValue.MAX_VALUE) + .setMaximumWeightInBytes(CACHE_SIZE_IN_BYTES) + .setRemovalListener(removalListener) + .setWeigher(weigher) + .setStatsTrackingEnabled(false) + .build(); + int randomKeys = randomIntBetween(10, 100); + for (int i = 0; i < randomKeys; i++) { + ICacheKey iCacheKey = getICacheKey(UUID.randomUUID().toString()); + ehcacheTest.put(iCacheKey, UUID.randomUUID().toString()); + assertEquals(0, ehcacheTest.count()); // Expect count of 0 if NoopCacheStatsHolder is used + assertEquals(new ImmutableCacheStats(0, 0, 0, 0, 0), ehcacheTest.stats().getTotalStats()); + } + // Verify that older data was wiped out after initialization + assertFalse(Files.exists(dummyFilePath)); + + // Verify that there is data present under desired path by explicitly verifying the folder name by prefix + // (used from disk cache alias) + assertTrue(Files.exists(Path.of(path))); + boolean folderExists = Files.walk(Path.of(path)) + .filter(Files::isDirectory) + .anyMatch(path1 -> path1.getFileName().toString().startsWith("test1")); + assertTrue(folderExists); + ehcacheTest.close(); + assertFalse(Files.exists(Path.of(path))); // Verify everything is cleared up now after close() + } + } + + public void testDiskCacheCloseCalledTwiceAndVerifyDiskDataIsCleanedUp() throws Exception { + Settings settings = Settings.builder().build(); + MockRemovalListener removalListener = new MockRemovalListener<>(); + ToLongBiFunction, String> weigher = getWeigher(); + try (NodeEnvironment env = newNodeEnvironment(settings)) { + String path = env.nodePaths()[0].path.toString() + "/request_cache"; + ICache ehcacheTest = new EhcacheDiskCache.Builder().setThreadPoolAlias(null) + .setStoragePath(path) + .setIsEventListenerModeSync(true) + .setKeyType(String.class) + .setValueType(String.class) + .setKeySerializer(new StringSerializer()) + .setDiskCacheAlias("test1") + .setValueSerializer(new StringSerializer()) + .setDimensionNames(List.of(dimensionName)) + .setCacheType(CacheType.INDICES_REQUEST_CACHE) + .setSettings(settings) + .setExpireAfterAccess(TimeValue.MAX_VALUE) + .setMaximumWeightInBytes(CACHE_SIZE_IN_BYTES) + .setRemovalListener(removalListener) + .setWeigher(weigher) + .setStatsTrackingEnabled(false) + .build(); + int randomKeys = randomIntBetween(10, 100); + for (int i = 0; i < randomKeys; i++) { + ICacheKey iCacheKey = getICacheKey(UUID.randomUUID().toString()); + ehcacheTest.put(iCacheKey, UUID.randomUUID().toString()); + assertEquals(0, ehcacheTest.count()); // Expect 
count of 0 if NoopCacheStatsHolder is used
+ assertEquals(new ImmutableCacheStats(0, 0, 0, 0, 0), ehcacheTest.stats().getTotalStats());
+ }
+ ehcacheTest.close();
+ assertFalse(Files.exists(Path.of(path))); // Verify everything is cleared up now after close()
+ // Call it again. This should not throw an exception.
+ ehcacheTest.close();
+ }
+ }
+
+ public void testDiskCacheCloseAfterCleaningUpFilesManually() throws Exception {
+ Settings settings = Settings.builder().build();
+ MockRemovalListener removalListener = new MockRemovalListener<>();
+ ToLongBiFunction, String> weigher = getWeigher();
+ try (NodeEnvironment env = newNodeEnvironment(settings)) {
+ String path = env.nodePaths()[0].path.toString() + "/request_cache";
+ ICache ehcacheTest = new EhcacheDiskCache.Builder().setThreadPoolAlias(null)
+ .setStoragePath(path)
+ .setIsEventListenerModeSync(true)
+ .setKeyType(String.class)
+ .setValueType(String.class)
+ .setKeySerializer(new StringSerializer())
+ .setDiskCacheAlias("test1")
+ .setValueSerializer(new StringSerializer())
+ .setDimensionNames(List.of(dimensionName))
+ .setCacheType(CacheType.INDICES_REQUEST_CACHE)
+ .setSettings(settings)
+ .setExpireAfterAccess(TimeValue.MAX_VALUE)
+ .setMaximumWeightInBytes(CACHE_SIZE_IN_BYTES)
+ .setRemovalListener(removalListener)
+ .setWeigher(weigher)
+ .setStatsTrackingEnabled(false)
+ .build();
+ int randomKeys = randomIntBetween(10, 100);
+ for (int i = 0; i < randomKeys; i++) {
+ ICacheKey iCacheKey = getICacheKey(UUID.randomUUID().toString());
+ ehcacheTest.put(iCacheKey, UUID.randomUUID().toString());
+ assertEquals(0, ehcacheTest.count()); // Expect count of 0 if NoopCacheStatsHolder is used
+ assertEquals(new ImmutableCacheStats(0, 0, 0, 0, 0), ehcacheTest.stats().getTotalStats());
+ }
+ IOUtils.rm(Path.of(path));
+ ehcacheTest.close();
+ }
+ }
+
+ public void testEhcacheDiskCacheWithoutStoragePathDefined() throws Exception {
+ Settings settings = Settings.builder().build();
+ MockRemovalListener removalListener = new MockRemovalListener<>();
+ ToLongBiFunction, String> weigher = getWeigher();
+ try (NodeEnvironment env = newNodeEnvironment(settings)) {
+ assertThrows(
+ IllegalArgumentException.class,
+ () -> new EhcacheDiskCache.Builder().setThreadPoolAlias("ehcacheTest")
+ .setIsEventListenerModeSync(true)
+ .setKeyType(String.class)
+ .setValueType(String.class)
+ .setKeySerializer(new StringSerializer())
+ .setDiskCacheAlias("test1")
+ .setValueSerializer(new StringSerializer())
+ .setDimensionNames(List.of(dimensionName))
+ .setCacheType(CacheType.INDICES_REQUEST_CACHE)
+ .setSettings(settings)
+ .setExpireAfterAccess(TimeValue.MAX_VALUE)
+ .setMaximumWeightInBytes(CACHE_SIZE_IN_BYTES)
+ .setRemovalListener(removalListener)
+ .setWeigher(weigher)
+ .setStatsTrackingEnabled(false)
+ .build()
+ );
+ }
+ }
+
+ public void testEhcacheDiskCacheWithoutStoragePathNull() throws Exception {
+ Settings settings = Settings.builder().build();
+ MockRemovalListener removalListener = new MockRemovalListener<>();
+ ToLongBiFunction, String> weigher = getWeigher();
+ try (NodeEnvironment env = newNodeEnvironment(settings)) {
+ assertThrows(
+ IllegalArgumentException.class,
+ () -> new EhcacheDiskCache.Builder().setThreadPoolAlias("ehcacheTest")
+ .setStoragePath(null)
+ .setIsEventListenerModeSync(true)
+ .setKeyType(String.class)
+ .setValueType(String.class)
+ .setKeySerializer(new StringSerializer())
+ .setDiskCacheAlias("test1")
+ .setValueSerializer(new StringSerializer())
+ .setDimensionNames(List.of(dimensionName))
.setCacheType(CacheType.INDICES_REQUEST_CACHE) + .setSettings(settings) + .setExpireAfterAccess(TimeValue.MAX_VALUE) + .setMaximumWeightInBytes(CACHE_SIZE_IN_BYTES) + .setRemovalListener(removalListener) + .setWeigher(weigher) + .setStatsTrackingEnabled(false) + .build() + ); + } + } + + public void testEhcacheWithStorageSizeLowerThanMinimumExpected() throws Exception { + Settings settings = Settings.builder().build(); + MockRemovalListener removalListener = new MockRemovalListener<>(); + ToLongBiFunction, String> weigher = getWeigher(); + try (NodeEnvironment env = newNodeEnvironment(settings)) { + assertThrows( + IllegalArgumentException.class, + () -> new EhcacheDiskCache.Builder().setThreadPoolAlias("ehcacheTest") + .setIsEventListenerModeSync(true) + .setKeyType(String.class) + .setValueType(String.class) + .setKeySerializer(new StringSerializer()) + .setDiskCacheAlias("test1") + .setValueSerializer(new StringSerializer()) + .setDimensionNames(List.of(dimensionName)) + .setCacheType(CacheType.INDICES_REQUEST_CACHE) + .setSettings(settings) + .setExpireAfterAccess(TimeValue.MAX_VALUE) + .setMaximumWeightInBytes(MINIMUM_MAX_SIZE_IN_BYTES) + .setRemovalListener(removalListener) + .setWeigher(weigher) + .setStatsTrackingEnabled(false) + .build() + ); + } + } + + public void testEhcacheWithStorageSizeZero() throws Exception { + Settings settings = Settings.builder().build(); + MockRemovalListener removalListener = new MockRemovalListener<>(); + ToLongBiFunction, String> weigher = getWeigher(); + try (NodeEnvironment env = newNodeEnvironment(settings)) { + assertThrows( + IllegalArgumentException.class, + () -> new EhcacheDiskCache.Builder().setThreadPoolAlias("ehcacheTest") + .setIsEventListenerModeSync(true) + .setKeyType(String.class) + .setValueType(String.class) + .setKeySerializer(new StringSerializer()) + .setDiskCacheAlias("test1") + .setValueSerializer(new StringSerializer()) + .setDimensionNames(List.of(dimensionName)) + .setCacheType(CacheType.INDICES_REQUEST_CACHE) + .setSettings(settings) + .setExpireAfterAccess(TimeValue.MAX_VALUE) + .setMaximumWeightInBytes(0) + .setRemovalListener(removalListener) + .setWeigher(weigher) + .setStatsTrackingEnabled(false) + .build() + ); + } + } + + public void testEhcacheCloseWithDestroyCacheMethodThrowingException() throws Exception { + EhcacheDiskCache ehcacheDiskCache = new MockEhcahceDiskCache(createDummyBuilder(null)); + PersistentCacheManager cacheManager = ehcacheDiskCache.getCacheManager(); + doNothing().when(cacheManager).removeCache(anyString()); + doNothing().when(cacheManager).close(); + doThrow(new RuntimeException("test")).when(cacheManager).destroyCache(anyString()); + ehcacheDiskCache.close(); + } + + static class MockEhcahceDiskCache extends EhcacheDiskCache { + + public MockEhcahceDiskCache(Builder builder) { + super(builder); + } + + @Override + PersistentCacheManager buildCacheManager() { + PersistentCacheManager cacheManager = mock(PersistentCacheManager.class); + return cacheManager; + } + } + + private EhcacheDiskCache.Builder createDummyBuilder(String storagePath) throws IOException { + Settings settings = Settings.builder().build(); + MockRemovalListener removalListener = new MockRemovalListener<>(); + ToLongBiFunction, String> weigher = getWeigher(); + try (NodeEnvironment env = newNodeEnvironment(settings)) { + if (storagePath == null || storagePath.isBlank()) { + storagePath = env.nodePaths()[0].path.toString() + "/request_cache"; + } + return (EhcacheDiskCache.Builder) new 
EhcacheDiskCache.Builder().setThreadPoolAlias( + "ehcacheTest" + ) + .setIsEventListenerModeSync(true) + .setStoragePath(storagePath) + .setKeyType(String.class) + .setValueType(String.class) + .setKeySerializer(new StringSerializer()) + .setDiskCacheAlias("test1") + .setValueSerializer(new StringSerializer()) + .setDimensionNames(List.of(dimensionName)) + .setCacheType(CacheType.INDICES_REQUEST_CACHE) + .setSettings(settings) + .setExpireAfterAccess(TimeValue.MAX_VALUE) + .setMaximumWeightInBytes(CACHE_SIZE_IN_BYTES) + .setRemovalListener(removalListener) + .setWeigher(weigher) + .setStatsTrackingEnabled(false); + } + } + private List getRandomDimensions(List dimensionNames) { Random rand = Randomness.get(); int bound = 3; diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/110_constant_keyword.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/110_constant_keyword.yml new file mode 100644 index 0000000000000..9864bfbbb26e9 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/110_constant_keyword.yml @@ -0,0 +1,70 @@ +--- +# The test setup includes: +# - Create index with constant_keyword field type +# - Check mapping +# - Index two example documents +# - Search +# - Delete Index when connection is teardown + +"Mappings and Supported queries": + - skip: + version: " - 2.99.99" + reason: "fixed in 3.0.0" + + # Create index with constant_keyword field type + - do: + indices.create: + index: test + body: + mappings: + properties: + genre: + type: "constant_keyword" + value: "1" + + # Index document + - do: + index: + index: test + id: 1 + body: { + "genre": "1" + } + + - do: + index: + index: test + id: 2 + body: { + "genre": 1 + } + + - do: + indices.refresh: + index: test + + # Check mapping + - do: + indices.get_mapping: + index: test + - is_true: test.mappings + - match: { test.mappings.properties.genre.type: constant_keyword } + - length: { test.mappings.properties.genre: 2 } + + # Verify Document Count + - do: + search: + body: { + query: { + match_all: {} + } + } + + - length: { hits.hits: 2 } + - match: { hits.hits.0._source.genre: "1" } + - match: { hits.hits.1._source.genre: 1 } + + # Delete Index when connection is teardown + - do: + indices.delete: + index: test diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_alias/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_alias/10_basic.yml index e78a5cf93c666..41f87c1df28ed 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_alias/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_alias/10_basic.yml @@ -273,8 +273,8 @@ --- "Can set is_hidden": - skip: - version: " - 2.99.99" - reason: "Fix was introduced in 3.0.0" + version: " - 2.15.99" + reason: "Fix was introduced in 2.16.0" - do: indices.create: index: test_index @@ -295,8 +295,8 @@ --- "Throws exception with invalid parameters": - skip: - version: " - 2.99.99" - reason: "Fix was introduced in 3.0.0" + version: " - 2.15.99" + reason: "Fix was introduced in 2.16.0" - do: indices.create: diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java index 2bea36ed80c9f..e4e681a5433b5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java @@ -46,6 +46,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING; import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING; import static org.opensearch.node.remotestore.RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING; @@ -277,4 +278,30 @@ protected IndexShard getIndexShard(String dataNode, String indexName) throws Exe IndexService indexService = indicesService.indexService(new Index(indexName, uuid)); return indexService.getShard(0); } + + public void changeReplicaCountAndEnsureGreen(int replicaCount, String indexName) { + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings(indexName) + .setSettings(Settings.builder().put(SETTING_NUMBER_OF_REPLICAS, replicaCount)) + ); + ensureGreen(indexName); + } + + public void completeDocRepToRemoteMigration() { + assertTrue( + internalCluster().client() + .admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings( + Settings.builder() + .putNull(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey()) + .putNull(MIGRATION_DIRECTION_SETTING.getKey()) + ) + .get() + .isAcknowledged() + ); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationIndexMetadataUpdateIT.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationIndexMetadataUpdateIT.java index 216c104dfecc1..b55219e1cb37f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationIndexMetadataUpdateIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationIndexMetadataUpdateIT.java @@ -546,6 +546,73 @@ public void testRemoteIndexPathFileExistsAfterMigration() throws Exception { assertTrue(Arrays.stream(files).anyMatch(file -> file.toString().contains(fileNamePrefix))); } + /** + * Scenario: + * Creates an index with 1 pri 1 rep setup with 3 docrep nodes (1 cluster manager + 2 data nodes), + * initiate migration and create 3 remote nodes (1 cluster manager + 2 data nodes) and moves over + * only primary shard copy of the index + * After the primary shard copy is relocated, decrease replica count to 0, stop all docrep nodes + * and conclude migration. Remote store index settings should be applied to the index at this point. 
+ */ + public void testIndexSettingsUpdateDuringReplicaCountDecrement() throws Exception { + String indexName = "migration-index-replica-decrement"; + String docrepClusterManager = internalCluster().startClusterManagerOnlyNode(); + + logger.info("---> Starting 2 docrep nodes"); + List docrepNodeNames = internalCluster().startDataOnlyNodes(2); + internalCluster().validateClusterFormed(); + + logger.info("---> Creating index with 1 primary and 1 replica"); + Settings oneReplica = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .build(); + createIndexAndAssertDocrepProperties(indexName, oneReplica); + + int docsToIndex = randomIntBetween(10, 100); + logger.info("---> Indexing {} on both indices", docsToIndex); + indexBulk(indexName, docsToIndex); + + logger.info( + "---> Stopping shard rebalancing to ensure shards do not automatically move over to newer nodes after they are launched" + ); + stopShardRebalancing(); + + logger.info("---> Starting 3 remote store enabled nodes"); + initDocRepToRemoteMigration(); + setAddRemote(true); + internalCluster().startClusterManagerOnlyNode(); + List remoteNodeNames = internalCluster().startDataOnlyNodes(2); + internalCluster().validateClusterFormed(); + + String primaryNode = primaryNodeName(indexName); + + logger.info("---> Moving over primary to remote store enabled nodes"); + assertAcked( + client().admin() + .cluster() + .prepareReroute() + .add(new MoveAllocationCommand(indexName, 0, primaryNode, remoteNodeNames.get(0))) + .execute() + .actionGet() + ); + waitForRelocation(); + waitNoPendingTasksOnAll(); + + logger.info("---> Reducing replica count to 0 for the index"); + changeReplicaCountAndEnsureGreen(0, indexName); + + logger.info("---> Stopping all docrep nodes"); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(docrepClusterManager)); + for (String node : docrepNodeNames) { + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node)); + } + internalCluster().validateClusterFormed(); + completeDocRepToRemoteMigration(); + waitNoPendingTasksOnAll(); + assertRemoteProperties(indexName); + } + private void createIndexAndAssertDocrepProperties(String index, Settings settings) { createIndexAssertHealthAndDocrepProperties(index, settings, this::ensureGreen); } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java index 216e1fb2ed1cc..3988d50b2ce1e 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java @@ -42,7 +42,6 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; -import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; @@ -59,17 +58,13 @@ import org.opensearch.common.settings.SettingsException; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.index.remote.RemoteMigrationIndexMetadataUpdater; import 
org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; import java.io.IOException; -import java.util.Collection; -import java.util.Set; -import java.util.stream.Collectors; -import static org.opensearch.index.remote.RemoteMigrationIndexMetadataUpdater.indexHasAllRemoteStoreRelatedMetadata; +import static org.opensearch.index.remote.RemoteStoreUtils.checkAndFinalizeRemoteStoreMigration; /** * Transport action for updating cluster settings @@ -262,13 +257,14 @@ public void onFailure(String source, Exception e) { @Override public ClusterState execute(final ClusterState currentState) { - validateCompatibilityModeSettingRequest(request, state); - final ClusterState clusterState = updater.updateSettings( + boolean isCompatibilityModeChanging = validateCompatibilityModeSettingRequest(request, state); + ClusterState clusterState = updater.updateSettings( currentState, clusterSettings.upgradeSettings(request.transientSettings()), clusterSettings.upgradeSettings(request.persistentSettings()), logger ); + clusterState = checkAndFinalizeRemoteStoreMigration(isCompatibilityModeChanging, request, clusterState, logger); changed = clusterState != currentState; return clusterState; } @@ -278,19 +274,23 @@ public ClusterState execute(final ClusterState currentState) { /** * Runs various checks associated with changing cluster compatibility mode + * * @param request cluster settings update request, for settings to be updated and new values * @param clusterState current state of cluster, for information on nodes + * @return true if the incoming cluster settings update request is switching compatibility modes */ - public void validateCompatibilityModeSettingRequest(ClusterUpdateSettingsRequest request, ClusterState clusterState) { + public boolean validateCompatibilityModeSettingRequest(ClusterUpdateSettingsRequest request, ClusterState clusterState) { Settings settings = Settings.builder().put(request.persistentSettings()).put(request.transientSettings()).build(); if (RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING.exists(settings)) { - String value = RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING.get(settings).mode; validateAllNodesOfSameVersion(clusterState.nodes()); - if (RemoteStoreNodeService.CompatibilityMode.STRICT.mode.equals(value)) { + if (RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING.get( + settings + ) == RemoteStoreNodeService.CompatibilityMode.STRICT) { validateAllNodesOfSameType(clusterState.nodes()); - validateIndexSettings(clusterState); } + return true; } + return false; } /** @@ -310,31 +310,18 @@ private void validateAllNodesOfSameVersion(DiscoveryNodes discoveryNodes) { * @param discoveryNodes current discovery nodes in the cluster */ private void validateAllNodesOfSameType(DiscoveryNodes discoveryNodes) { - Set nodeTypes = discoveryNodes.getNodes() + boolean allNodesDocrepEnabled = discoveryNodes.getNodes() .values() .stream() - .map(DiscoveryNode::isRemoteStoreNode) - .collect(Collectors.toSet()); - if (nodeTypes.size() != 1) { + .allMatch(discoveryNode -> discoveryNode.isRemoteStoreNode() == false); + boolean allNodesRemoteStoreEnabled = discoveryNodes.getNodes() + .values() + .stream() + .allMatch(discoveryNode -> discoveryNode.isRemoteStoreNode()); + if (allNodesDocrepEnabled == false && allNodesRemoteStoreEnabled == false) { throw new SettingsException( "can not switch to STRICT compatibility mode when the cluster contains both remote and 
non-remote nodes" ); } } - - /** - * Verifies that while trying to switch to STRICT compatibility mode, - * all indices in the cluster have {@link RemoteMigrationIndexMetadataUpdater#indexHasAllRemoteStoreRelatedMetadata(IndexMetadata)} as true. - * If not, throws {@link SettingsException} - * @param clusterState current cluster state - */ - private void validateIndexSettings(ClusterState clusterState) { - Collection allIndicesMetadata = clusterState.metadata().indices().values(); - if (allIndicesMetadata.isEmpty() == false - && allIndicesMetadata.stream().anyMatch(indexMetadata -> indexHasAllRemoteStoreRelatedMetadata(indexMetadata) == false)) { - throw new SettingsException( - "can not switch to STRICT compatibility mode since all indices in the cluster does not have remote store based index settings" - ); - } - } } diff --git a/server/src/main/java/org/opensearch/action/update/UpdateRequest.java b/server/src/main/java/org/opensearch/action/update/UpdateRequest.java index 9654bd1c114ba..6cb5e049e0f1e 100644 --- a/server/src/main/java/org/opensearch/action/update/UpdateRequest.java +++ b/server/src/main/java/org/opensearch/action/update/UpdateRequest.java @@ -717,7 +717,7 @@ public IndexRequest doc() { private IndexRequest safeDoc() { if (doc == null) { - doc = new IndexRequest(); + doc = new IndexRequest(index); } return doc; } @@ -803,7 +803,7 @@ public IndexRequest upsertRequest() { private IndexRequest safeUpsertRequest() { if (upsertRequest == null) { - upsertRequest = new IndexRequest(); + upsertRequest = new IndexRequest(index); } return upsertRequest; } diff --git a/server/src/main/java/org/opensearch/cluster/routing/remote/InternalRemoteRoutingTableService.java b/server/src/main/java/org/opensearch/cluster/routing/remote/InternalRemoteRoutingTableService.java index cc1b0713393f3..f3f245ee9f8f0 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/remote/InternalRemoteRoutingTableService.java +++ b/server/src/main/java/org/opensearch/cluster/routing/remote/InternalRemoteRoutingTableService.java @@ -11,35 +11,24 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.lucene.store.IndexInput; import org.opensearch.action.LatchedActionListener; -import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.DiffableUtils; import org.opensearch.cluster.routing.IndexRoutingTable; import org.opensearch.cluster.routing.RoutingTable; import org.opensearch.common.CheckedRunnable; -import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; -import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobPath; -import org.opensearch.common.blobstore.stream.write.WritePriority; -import org.opensearch.common.blobstore.transfer.RemoteTransferContainer; -import org.opensearch.common.blobstore.transfer.stream.OffsetRangeIndexInputStream; -import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.lifecycle.AbstractLifecycleComponent; -import org.opensearch.common.lucene.store.ByteArrayIndexInput; +import org.opensearch.common.remote.RemoteWritableEntityStore; import org.opensearch.common.settings.ClusterSettings; -import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.action.ActionListener; -import org.opensearch.core.common.bytes.BytesReference; -import 
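
The reworked validateAllNodesOfSameType above drops the set-collection in favour of two allMatch passes: the cluster may enter STRICT mode only if every node is docrep or every node is remote-store enabled. A minimal self-contained sketch of that invariant (Node is an illustrative stand-in, not the DiscoveryNode API):

    import java.util.List;

    // Stand-in for DiscoveryNode; illustrative only.
    record Node(String name, boolean remoteStore) {}

    final class StrictModeCheckSketch {
        static void validateAllNodesOfSameType(List<Node> nodes) {
            boolean allDocrep = nodes.stream().allMatch(n -> n.remoteStore() == false);
            boolean allRemote = nodes.stream().allMatch(Node::remoteStore);
            // A mixed cluster satisfies neither predicate, so the switch is rejected.
            if (allDocrep == false && allRemote == false) {
                throw new IllegalStateException(
                    "can not switch to STRICT compatibility mode when the cluster contains both remote and non-remote nodes"
                );
            }
        }

        public static void main(String[] args) {
            validateAllNodesOfSameType(List.of(new Node("a", true), new Node("b", true)));  // passes
            validateAllNodesOfSameType(List.of(new Node("c", true), new Node("d", false))); // throws
        }
    }
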
org.opensearch.core.index.Index; +import org.opensearch.core.compress.Compressor; import org.opensearch.gateway.remote.ClusterMetadataManifest; import org.opensearch.gateway.remote.RemoteStateTransferException; +import org.opensearch.gateway.remote.model.RemoteRoutingTableBlobStore; import org.opensearch.gateway.remote.routingtable.RemoteIndexRoutingTable; -import org.opensearch.index.remote.RemoteStoreEnums; -import org.opensearch.index.remote.RemoteStorePathStrategy; -import org.opensearch.index.remote.RemoteStoreUtils; +import org.opensearch.index.translog.transfer.BlobStoreTransferService; import org.opensearch.node.Node; import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; import org.opensearch.repositories.RepositoriesService; @@ -52,12 +41,10 @@ import java.util.List; import java.util.Map; import java.util.Optional; -import java.util.concurrent.ExecutorService; import java.util.function.Function; import java.util.function.Supplier; import java.util.stream.Collectors; -import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteRoutingTableEnabled; /** @@ -67,64 +54,29 @@ */ public class InternalRemoteRoutingTableService extends AbstractLifecycleComponent implements RemoteRoutingTableService { - /** - * This setting is used to set the remote routing table store blob store path type strategy. - */ - public static final Setting REMOTE_ROUTING_TABLE_PATH_TYPE_SETTING = new Setting<>( - "cluster.remote_store.routing_table.path_type", - RemoteStoreEnums.PathType.HASHED_PREFIX.toString(), - RemoteStoreEnums.PathType::parseString, - Setting.Property.NodeScope, - Setting.Property.Dynamic - ); - - /** - * This setting is used to set the remote routing table store blob store path hash algorithm strategy. - * This setting will come to effect if the {@link #REMOTE_ROUTING_TABLE_PATH_TYPE_SETTING} - * is either {@code HASHED_PREFIX} or {@code HASHED_INFIX}. 
- */ - public static final Setting REMOTE_ROUTING_TABLE_PATH_HASH_ALGO_SETTING = new Setting<>( - "cluster.remote_store.routing_table.path_hash_algo", - RemoteStoreEnums.PathHashAlgorithm.FNV_1A_BASE64.toString(), - RemoteStoreEnums.PathHashAlgorithm::parseString, - Setting.Property.NodeScope, - Setting.Property.Dynamic - ); - - public static final String INDEX_ROUTING_PATH_TOKEN = "index-routing"; - public static final String INDEX_ROUTING_FILE_PREFIX = "index_routing"; - public static final String INDEX_ROUTING_METADATA_PREFIX = "indexRouting--"; - private static final Logger logger = LogManager.getLogger(InternalRemoteRoutingTableService.class); private final Settings settings; private final Supplier repositoriesService; + private Compressor compressor; + private RemoteWritableEntityStore remoteIndexRoutingTableStore; + private final ClusterSettings clusterSettings; private BlobStoreRepository blobStoreRepository; - private RemoteStoreEnums.PathType pathType; - private RemoteStoreEnums.PathHashAlgorithm pathHashAlgo; - private ThreadPool threadPool; + private final ThreadPool threadPool; + private final String clusterName; public InternalRemoteRoutingTableService( Supplier repositoriesService, Settings settings, ClusterSettings clusterSettings, - ThreadPool threadpool + ThreadPool threadpool, + String clusterName ) { assert isRemoteRoutingTableEnabled(settings) : "Remote routing table is not enabled"; this.repositoriesService = repositoriesService; this.settings = settings; - this.pathType = clusterSettings.get(REMOTE_ROUTING_TABLE_PATH_TYPE_SETTING); - this.pathHashAlgo = clusterSettings.get(REMOTE_ROUTING_TABLE_PATH_HASH_ALGO_SETTING); - clusterSettings.addSettingsUpdateConsumer(REMOTE_ROUTING_TABLE_PATH_TYPE_SETTING, this::setPathTypeSetting); - clusterSettings.addSettingsUpdateConsumer(REMOTE_ROUTING_TABLE_PATH_HASH_ALGO_SETTING, this::setPathHashAlgoSetting); this.threadPool = threadpool; - } - - private void setPathTypeSetting(RemoteStoreEnums.PathType pathType) { - this.pathType = pathType; - } - - private void setPathHashAlgoSetting(RemoteStoreEnums.PathHashAlgorithm pathHashAlgo) { - this.pathHashAlgo = pathHashAlgo; + this.clusterName = clusterName; + this.clusterSettings = clusterSettings; } public List getIndicesRouting(RoutingTable routingTable) { @@ -151,43 +103,32 @@ public DiffableUtils.MapDiff getIndexRoutingAsyncAction( - ClusterState clusterState, + @Override + public CheckedRunnable getAsyncIndexRoutingWriteAction( + String clusterUUID, + long term, + long version, IndexRoutingTable indexRouting, - LatchedActionListener latchedActionListener, - BlobPath clusterBasePath + LatchedActionListener latchedActionListener ) { - BlobPath indexRoutingPath = clusterBasePath.add(INDEX_ROUTING_PATH_TOKEN); - BlobPath path = pathType.path( - RemoteStorePathStrategy.PathInput.builder().basePath(indexRoutingPath).indexUUID(indexRouting.getIndex().getUUID()).build(), - pathHashAlgo - ); - final BlobContainer blobContainer = blobStoreRepository.blobStore().blobContainer(path); - - final String fileName = getIndexRoutingFileName(clusterState.term(), clusterState.version()); + RemoteIndexRoutingTable remoteIndexRoutingTable = new RemoteIndexRoutingTable(indexRouting, clusterUUID, compressor, term, version); ActionListener completionListener = ActionListener.wrap( - resp -> latchedActionListener.onResponse( - new ClusterMetadataManifest.UploadedIndexMetadata( - indexRouting.getIndex().getName(), - indexRouting.getIndex().getUUID(), - path.buildAsString() + fileName, - 
INDEX_ROUTING_METADATA_PREFIX - ) - ), + resp -> latchedActionListener.onResponse(remoteIndexRoutingTable.getUploadedMetadata()), ex -> latchedActionListener.onFailure( new RemoteStateTransferException("Exception in writing index to remote store: " + indexRouting.getIndex().toString(), ex) ) ); - return () -> uploadIndex(indexRouting, fileName, blobContainer, completionListener); + return () -> remoteIndexRoutingTableStore.writeAsync(remoteIndexRoutingTable, completionListener); } /** @@ -214,111 +155,21 @@ public List getAllUploadedIndices return new ArrayList<>(allUploadedIndicesRouting.values()); } - private void uploadIndex( - IndexRoutingTable indexRouting, - String fileName, - BlobContainer blobContainer, - ActionListener completionListener - ) { - RemoteIndexRoutingTable indexRoutingInput = new RemoteIndexRoutingTable(indexRouting); - BytesReference bytesInput = null; - try (BytesStreamOutput streamOutput = new BytesStreamOutput()) { - indexRoutingInput.writeTo(streamOutput); - bytesInput = streamOutput.bytes(); - } catch (IOException e) { - logger.error("Failed to serialize IndexRoutingTable for [{}]: [{}]", indexRouting, e); - completionListener.onFailure(e); - return; - } - - if (blobContainer instanceof AsyncMultiStreamBlobContainer == false) { - try { - blobContainer.writeBlob(fileName, bytesInput.streamInput(), bytesInput.length(), true); - completionListener.onResponse(null); - } catch (IOException e) { - logger.error("Failed to write IndexRoutingTable to remote store for indexRouting [{}]: [{}]", indexRouting, e); - completionListener.onFailure(e); - } - return; - } - - try (IndexInput input = new ByteArrayIndexInput("indexrouting", BytesReference.toBytes(bytesInput))) { - try ( - RemoteTransferContainer remoteTransferContainer = new RemoteTransferContainer( - fileName, - fileName, - input.length(), - true, - WritePriority.URGENT, - (size, position) -> new OffsetRangeIndexInputStream(input, size, position), - null, - false - ) - ) { - ((AsyncMultiStreamBlobContainer) blobContainer).asyncBlobUpload( - remoteTransferContainer.createWriteContext(), - completionListener - ); - } catch (IOException e) { - logger.error("Failed to write IndexRoutingTable to remote store for indexRouting [{}]: [{}]", indexRouting, e); - completionListener.onFailure(e); - } - } catch (IOException e) { - logger.error( - "Failed to create transfer object for IndexRoutingTable for remote store upload for indexRouting [{}]: [{}]", - indexRouting, - e - ); - completionListener.onFailure(e); - } - } - @Override public CheckedRunnable getAsyncIndexRoutingReadAction( + String clusterUUID, String uploadedFilename, - Index index, LatchedActionListener latchedActionListener ) { - int idx = uploadedFilename.lastIndexOf("/"); - String blobFileName = uploadedFilename.substring(idx + 1); - BlobContainer blobContainer = blobStoreRepository.blobStore() - .blobContainer(BlobPath.cleanPath().add(uploadedFilename.substring(0, idx))); - return () -> readAsync( - blobContainer, - blobFileName, - index, - threadPool.executor(ThreadPool.Names.REMOTE_STATE_READ), - ActionListener.wrap( - response -> latchedActionListener.onResponse(response.getIndexRoutingTable()), - latchedActionListener::onFailure - ) + ActionListener actionListener = ActionListener.wrap( + latchedActionListener::onResponse, + latchedActionListener::onFailure ); - } - private void readAsync( - BlobContainer blobContainer, - String name, - Index index, - ExecutorService executorService, - ActionListener listener - ) { - executorService.execute(() -> { - try 
{ - listener.onResponse(read(blobContainer, name, index)); - } catch (Exception e) { - listener.onFailure(e); - } - }); - } + RemoteIndexRoutingTable remoteIndexRoutingTable = new RemoteIndexRoutingTable(uploadedFilename, clusterUUID, compressor); - private RemoteIndexRoutingTable read(BlobContainer blobContainer, String path, Index index) { - try { - return new RemoteIndexRoutingTable(blobContainer.readBlob(path), index); - } catch (IOException | AssertionError e) { - logger.error(() -> new ParameterizedMessage("RoutingTable read failed for path {}", path), e); - throw new RemoteStateTransferException("Failed to read RemoteRoutingTable from Manifest with error ", e); - } + return () -> remoteIndexRoutingTableStore.readAsync(remoteIndexRoutingTable, actionListener); } @Override @@ -335,16 +186,6 @@ public List getUpdatedIndexRoutin }).collect(Collectors.toList()); } - private String getIndexRoutingFileName(long term, long version) { - return String.join( - DELIMITER, - INDEX_ROUTING_FILE_PREFIX, - RemoteStoreUtils.invertLong(term), - RemoteStoreUtils.invertLong(version), - RemoteStoreUtils.invertLong(System.currentTimeMillis()) - ); - } - @Override protected void doClose() throws IOException { if (blobStoreRepository != null) { @@ -362,6 +203,16 @@ protected void doStart() { final Repository repository = repositoriesService.get().repository(remoteStoreRepo); assert repository instanceof BlobStoreRepository : "Repository should be instance of BlobStoreRepository"; blobStoreRepository = (BlobStoreRepository) repository; + compressor = blobStoreRepository.getCompressor(); + + this.remoteIndexRoutingTableStore = new RemoteRoutingTableBlobStore<>( + new BlobStoreTransferService(blobStoreRepository.blobStore(), threadPool), + blobStoreRepository, + clusterName, + threadPool, + ThreadPool.Names.REMOTE_STATE_READ, + clusterSettings + ); } @Override @@ -377,5 +228,4 @@ public void deleteStaleIndexRoutingPaths(List stalePaths) throws IOExcep throw e; } } - } diff --git a/server/src/main/java/org/opensearch/cluster/routing/remote/NoopRemoteRoutingTableService.java b/server/src/main/java/org/opensearch/cluster/routing/remote/NoopRemoteRoutingTableService.java index 6236d107d0220..4636e492df28f 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/remote/NoopRemoteRoutingTableService.java +++ b/server/src/main/java/org/opensearch/cluster/routing/remote/NoopRemoteRoutingTableService.java @@ -9,14 +9,11 @@ package org.opensearch.cluster.routing.remote; import org.opensearch.action.LatchedActionListener; -import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.DiffableUtils; import org.opensearch.cluster.routing.IndexRoutingTable; import org.opensearch.cluster.routing.RoutingTable; import org.opensearch.common.CheckedRunnable; -import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.lifecycle.AbstractLifecycleComponent; -import org.opensearch.core.index.Index; import org.opensearch.gateway.remote.ClusterMetadataManifest; import java.io.IOException; @@ -42,11 +39,12 @@ public DiffableUtils.MapDiff getIndexRoutingAsyncAction( - ClusterState clusterState, + public CheckedRunnable getAsyncIndexRoutingWriteAction( + String clusterUUID, + long term, + long version, IndexRoutingTable indexRouting, - LatchedActionListener latchedActionListener, - BlobPath clusterBasePath + LatchedActionListener latchedActionListener ) { // noop return () -> {}; @@ -64,8 +62,8 @@ public List getAllUploadedIndices @Override public CheckedRunnable getAsyncIndexRoutingReadAction( + 
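
The read path above no longer splits blob paths by hand; it wraps the uploaded filename in a RemoteIndexRoutingTable and defers the actual fetch to the entity store. A sketch of that defer-then-read shape, under simplified stand-in types (Store, Entity and the consumers are not the RemoteWritableEntityStore / ActionListener API):

    import java.util.function.Consumer;

    final class DeferredReadSketch {
        interface CheckedRunnable<E extends Exception> { void run() throws E; }

        static final class Entity {
            final String blobName;
            Entity(String blobName) { this.blobName = blobName; }
        }

        static final class Store {
            void readAsync(Entity entity, Consumer<String> onResponse, Consumer<Exception> onFailure) {
                try {
                    onResponse.accept("routing-table-read-from-" + entity.blobName); // pretend blob fetch
                } catch (Exception e) {
                    onFailure.accept(e);
                }
            }
        }

        // Nothing is read here; the returned runnable starts the read when invoked.
        static CheckedRunnable<java.io.IOException> getAsyncReadAction(Store store, String uploadedFilename, Consumer<String> listener) {
            Entity entity = new Entity(uploadedFilename); // wrap the blob name, as the service does
            return () -> store.readAsync(entity, listener, ex -> System.err.println("read failed: " + ex));
        }

        public static void main(String[] args) throws java.io.IOException {
            getAsyncReadAction(new Store(), "indexRouting--myindex", System.out::println).run();
        }
    }
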
String clusterUUID, String uploadedFilename, - Index index, LatchedActionListener latchedActionListener ) { // noop diff --git a/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableService.java b/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableService.java index d455dfb58eabc..d319123bc2cee 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableService.java +++ b/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableService.java @@ -9,16 +9,13 @@ package org.opensearch.cluster.routing.remote; import org.opensearch.action.LatchedActionListener; -import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.DiffableUtils; import org.opensearch.cluster.routing.IndexRoutingTable; import org.opensearch.cluster.routing.RoutingTable; import org.opensearch.common.CheckedRunnable; -import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.lifecycle.LifecycleComponent; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.index.Index; import org.opensearch.gateway.remote.ClusterMetadataManifest; import java.io.IOException; @@ -47,8 +44,8 @@ public IndexRoutingTable read(StreamInput in, String key) throws IOException { List getIndicesRouting(RoutingTable routingTable); CheckedRunnable getAsyncIndexRoutingReadAction( + String clusterUUID, String uploadedFilename, - Index index, LatchedActionListener latchedActionListener ); @@ -62,11 +59,12 @@ DiffableUtils.MapDiff> RoutingTable after ); - CheckedRunnable getIndexRoutingAsyncAction( - ClusterState clusterState, + CheckedRunnable getAsyncIndexRoutingWriteAction( + String clusterUUID, + long term, + long version, IndexRoutingTable indexRouting, - LatchedActionListener latchedActionListener, - BlobPath clusterBasePath + LatchedActionListener latchedActionListener ); List getAllUploadedIndicesRouting( diff --git a/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceFactory.java b/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceFactory.java index 82837191a30b7..56dfa03215a64 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceFactory.java +++ b/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceFactory.java @@ -34,10 +34,11 @@ public static RemoteRoutingTableService getService( Supplier repositoriesService, Settings settings, ClusterSettings clusterSettings, - ThreadPool threadPool + ThreadPool threadPool, + String clusterName ) { if (isRemoteRoutingTableEnabled(settings)) { - return new InternalRemoteRoutingTableService(repositoriesService, settings, clusterSettings, threadPool); + return new InternalRemoteRoutingTableService(repositoriesService, settings, clusterSettings, threadPool, clusterName); } return new NoopRemoteRoutingTableService(); } diff --git a/server/src/main/java/org/opensearch/common/remote/AbstractRemoteWritableBlobEntity.java b/server/src/main/java/org/opensearch/common/remote/AbstractRemoteWritableBlobEntity.java index 23fc9d3ad77cb..237c077cb673c 100644 --- a/server/src/main/java/org/opensearch/common/remote/AbstractRemoteWritableBlobEntity.java +++ b/server/src/main/java/org/opensearch/common/remote/AbstractRemoteWritableBlobEntity.java @@ -40,6 +40,10 @@ public AbstractRemoteWritableBlobEntity( this.namedXContentRegistry = namedXContentRegistry; } + public 
AbstractRemoteWritableBlobEntity(final String clusterUUID, final Compressor compressor) { + this(clusterUUID, compressor, null); + } + public abstract BlobPathParameters getBlobPathParameters(); public abstract String getType(); diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index b4826e1a59428..49801fd3834b8 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -78,7 +78,6 @@ import org.opensearch.cluster.routing.allocation.decider.SameShardAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; -import org.opensearch.cluster.routing.remote.InternalRemoteRoutingTableService; import org.opensearch.cluster.service.ClusterApplierService; import org.opensearch.cluster.service.ClusterManagerService; import org.opensearch.cluster.service.ClusterManagerTaskThrottler; @@ -108,6 +107,7 @@ import org.opensearch.gateway.ShardsBatchGatewayAllocator; import org.opensearch.gateway.remote.RemoteClusterStateCleanupManager; import org.opensearch.gateway.remote.RemoteClusterStateService; +import org.opensearch.gateway.remote.model.RemoteRoutingTableBlobStore; import org.opensearch.http.HttpTransportSettings; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; @@ -730,8 +730,8 @@ public void apply(Settings value, Settings current, Settings previous) { RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING, IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING, IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING, - InternalRemoteRoutingTableService.REMOTE_ROUTING_TABLE_PATH_TYPE_SETTING, - InternalRemoteRoutingTableService.REMOTE_ROUTING_TABLE_PATH_HASH_ALGO_SETTING, + RemoteRoutingTableBlobStore.REMOTE_ROUTING_TABLE_PATH_TYPE_SETTING, + RemoteRoutingTableBlobStore.REMOTE_ROUTING_TABLE_PATH_HASH_ALGO_SETTING, // Admission Control Settings AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE, diff --git a/server/src/main/java/org/opensearch/gateway/ReplicaShardAllocator.java b/server/src/main/java/org/opensearch/gateway/ReplicaShardAllocator.java index d9474b32bdbf6..aaf0d696e1444 100644 --- a/server/src/main/java/org/opensearch/gateway/ReplicaShardAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/ReplicaShardAllocator.java @@ -100,7 +100,10 @@ protected Runnable cancelExistingRecoveryForBetterMatch( Metadata metadata = allocation.metadata(); RoutingNodes routingNodes = allocation.routingNodes(); ShardRouting primaryShard = allocation.routingNodes().activePrimary(shard.shardId()); - assert primaryShard != null : "the replica shard can be allocated on at least one node, so there must be an active primary"; + if (primaryShard == null) { + logger.trace("{}: no active primary shard found or allocated, letting actual allocation figure it out", shard); + return null; + } assert primaryShard.currentNodeId() != null; final DiscoveryNode primaryNode = allocation.nodes().get(primaryShard.currentNodeId()); diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java index 3e63f9114ea16..e7ca3e8aa7594 100644 --- 
a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java @@ -26,7 +26,6 @@ import org.opensearch.cluster.node.DiscoveryNodes.Builder; import org.opensearch.cluster.routing.IndexRoutingTable; import org.opensearch.cluster.routing.RoutingTable; -import org.opensearch.cluster.routing.remote.InternalRemoteRoutingTableService; import org.opensearch.cluster.routing.remote.RemoteRoutingTableService; import org.opensearch.cluster.routing.remote.RemoteRoutingTableServiceFactory; import org.opensearch.cluster.service.ClusterService; @@ -43,7 +42,6 @@ import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; -import org.opensearch.core.index.Index; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedIndexMetadata; import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadataAttribute; @@ -98,7 +96,6 @@ import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER; import static org.opensearch.gateway.remote.RemoteClusterStateUtils.UploadedMetadataResults; import static org.opensearch.gateway.remote.RemoteClusterStateUtils.clusterUUIDContainer; -import static org.opensearch.gateway.remote.RemoteClusterStateUtils.getClusterMetadataBasePath; import static org.opensearch.gateway.remote.model.RemoteClusterStateCustoms.CLUSTER_STATE_CUSTOM; import static org.opensearch.gateway.remote.model.RemoteCoordinationMetadata.COORDINATION_METADATA; import static org.opensearch.gateway.remote.model.RemoteCustomMetadata.CUSTOM_DELIMITER; @@ -107,6 +104,7 @@ import static org.opensearch.gateway.remote.model.RemotePersistentSettingsMetadata.SETTING_METADATA; import static org.opensearch.gateway.remote.model.RemoteTemplatesMetadata.TEMPLATES_METADATA; import static org.opensearch.gateway.remote.model.RemoteTransientSettingsMetadata.TRANSIENT_SETTING_METADATA; +import static org.opensearch.gateway.remote.routingtable.RemoteIndexRoutingTable.INDEX_ROUTING_METADATA_PREFIX; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteStoreClusterStateEnabled; /** @@ -146,7 +144,7 @@ public class RemoteClusterStateService implements Closeable { private final List indexMetadataUploadListeners; private BlobStoreRepository blobStoreRepository; private BlobStoreTransferService blobStoreTransferService; - private final RemoteRoutingTableService remoteRoutingTableService; + private RemoteRoutingTableService remoteRoutingTableService; private volatile TimeValue slowWriteLoggingThreshold; private final RemotePersistenceStats remoteStateStats; @@ -197,16 +195,17 @@ public RemoteClusterStateService( this.remoteStateStats = new RemotePersistenceStats(); this.namedWriteableRegistry = namedWriteableRegistry; this.indexMetadataUploadListeners = indexMetadataUploadListeners; + this.isPublicationEnabled = FeatureFlags.isEnabled(REMOTE_PUBLICATION_EXPERIMENTAL) + && RemoteStoreNodeAttribute.isRemoteStoreClusterStateEnabled(settings) + && RemoteStoreNodeAttribute.isRemoteRoutingTableEnabled(settings); this.remoteRoutingTableService = RemoteRoutingTableServiceFactory.getService( repositoriesService, settings, clusterSettings, - threadPool + threadpool, + ClusterName.CLUSTER_NAME_SETTING.get(settings).value() ); - this.remoteClusterStateCleanupManager = new RemoteClusterStateCleanupManager(this, clusterService, 
remoteRoutingTableService); - this.isPublicationEnabled = FeatureFlags.isEnabled(REMOTE_PUBLICATION_EXPERIMENTAL) - && RemoteStoreNodeAttribute.isRemoteStoreClusterStateEnabled(settings) - && RemoteStoreNodeAttribute.isRemoteRoutingTableEnabled(settings); + remoteClusterStateCleanupManager = new RemoteClusterStateCleanupManager(this, clusterService, remoteRoutingTableService); } /** @@ -259,7 +258,7 @@ public RemoteClusterStateManifestInfo writeFullMetadata(ClusterState clusterStat uploadedMetadataResults.uploadedIndicesRoutingMetadata.size() ); } else { - logger.info( + logger.debug( "writing cluster state took [{}ms]; " + "wrote full state with [{}] indices, [{}] indicesRouting and global metadata", durationMillis, uploadedMetadataResults.uploadedIndexMetadata.size(), @@ -458,8 +457,8 @@ public RemoteClusterStateManifestInfo writeIncrementalMetadata( customsDiff.getUpserts().size() ); } else { - logger.info("{}; {}", clusterStateUploadTimeMessage, metadataUpdateMessage); - logger.info( + logger.debug("{}; {}", clusterStateUploadTimeMessage, metadataUpdateMessage); + logger.debug( "writing cluster state for version [{}] took [{}ms]; " + "wrote metadata for [{}] indices and skipped [{}] unchanged indices, coordination metadata updated : [{}], " + "settings metadata updated : [{}], templates metadata updated : [{}], custom metadata updated : [{}]", @@ -663,16 +662,13 @@ UploadedMetadataResults writeMetadataInParallel( }); indicesRoutingToUpload.forEach(indexRoutingTable -> { uploadTasks.put( - InternalRemoteRoutingTableService.INDEX_ROUTING_METADATA_PREFIX + indexRoutingTable.getIndex().getName(), - remoteRoutingTableService.getIndexRoutingAsyncAction( - clusterState, + INDEX_ROUTING_METADATA_PREFIX + indexRoutingTable.getIndex().getName(), + remoteRoutingTableService.getAsyncIndexRoutingWriteAction( + clusterState.metadata().clusterUUID(), + clusterState.term(), + clusterState.version(), indexRoutingTable, - listener, - getClusterMetadataBasePath( - blobStoreRepository, - clusterState.getClusterName().value(), - clusterState.metadata().clusterUUID() - ) + listener ) ); }); @@ -723,7 +719,7 @@ UploadedMetadataResults writeMetadataInParallel( UploadedMetadataResults response = new UploadedMetadataResults(); results.forEach((name, uploadedMetadata) -> { if (uploadedMetadata.getClass().equals(UploadedIndexMetadata.class) - && uploadedMetadata.getComponent().contains(InternalRemoteRoutingTableService.INDEX_ROUTING_METADATA_PREFIX)) { + && uploadedMetadata.getComponent().contains(INDEX_ROUTING_METADATA_PREFIX)) { response.uploadedIndicesRoutingMetadata.add((UploadedIndexMetadata) uploadedMetadata); } else if (name.startsWith(CUSTOM_METADATA)) { // component name for custom metadata will look like custom-- @@ -897,9 +893,8 @@ public void start() { final Repository repository = repositoriesService.get().repository(remoteStoreRepo); assert repository instanceof BlobStoreRepository : "Repository should be instance of BlobStoreRepository"; blobStoreRepository = (BlobStoreRepository) repository; - this.remoteRoutingTableService.start(); - blobStoreTransferService = new BlobStoreTransferService(getBlobStore(), threadpool); String clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings).value(); + blobStoreTransferService = new BlobStoreTransferService(getBlobStore(), threadpool); remoteGlobalMetadataManager = new RemoteGlobalMetadataManager( clusterSettings, @@ -931,6 +926,8 @@ public void start() { namedWriteableRegistry, threadpool ); + + remoteRoutingTableService.start(); 
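
Both writeMetadataInParallel above and readClusterStateInParallel below rely on the same fan-out pattern: each async action is handed a latched listener, so the caller can await all uploads or reads at once while successes and failures are collected separately. A runnable sketch of the idea with a plain CountDownLatch:

    import java.util.List;
    import java.util.concurrent.ConcurrentLinkedQueue;
    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;

    final class LatchedFanOutSketch {
        public static void main(String[] args) throws InterruptedException {
            List<String> blobs = List.of("indexRouting--idx1", "indexRouting--idx2");
            CountDownLatch latch = new CountDownLatch(blobs.size());
            ConcurrentLinkedQueue<String> results = new ConcurrentLinkedQueue<>();
            ConcurrentLinkedQueue<Exception> failures = new ConcurrentLinkedQueue<>();
            ExecutorService executor = Executors.newFixedThreadPool(2);
            for (String blob : blobs) {
                executor.execute(() -> {
                    try {
                        results.add("routing-table-from-" + blob); // pretend remote read
                    } catch (Exception e) {
                        failures.add(e);
                    } finally {
                        latch.countDown(); // LatchedActionListener does this in the real code
                    }
                });
            }
            latch.await(30, TimeUnit.SECONDS);
            executor.shutdown();
            System.out.println(results + " " + failures);
        }
    }
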
remoteClusterStateCleanupManager.start(); } @@ -1022,10 +1019,10 @@ ClusterState readClusterStateInParallel( LatchedActionListener routingTableLatchedActionListener = new LatchedActionListener<>( ActionListener.wrap(response -> { - logger.debug("Successfully read cluster state component from remote"); + logger.debug(() -> new ParameterizedMessage("Successfully read index-routing for index {}", response.getIndex().getName())); readIndexRoutingTableResults.add(response); }, ex -> { - logger.error("Failed to read cluster state from remote", ex); + logger.error(() -> new ParameterizedMessage("Failed to read index-routing from remote"), ex); exceptionList.add(ex); }), latch @@ -1034,8 +1031,8 @@ ClusterState readClusterStateInParallel( for (UploadedIndexMetadata indexRouting : indicesRoutingToRead) { asyncMetadataReadActions.add( remoteRoutingTableService.getAsyncIndexRoutingReadAction( + clusterUUID, indexRouting.getUploadedFilename(), - new Index(indexRouting.getIndexName(), indexRouting.getIndexUUID()), routingTableLatchedActionListener ) ); diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterStateBlobStore.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterStateBlobStore.java index 1dd23443f1252..cd8b8aa41ad65 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterStateBlobStore.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterStateBlobStore.java @@ -23,6 +23,8 @@ import java.io.InputStream; import java.util.concurrent.ExecutorService; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN; + /** * Abstract class for a blob type storage * @@ -88,18 +90,26 @@ public void readAsync(final U entity, final ActionListener listener) { }); } - private BlobPath getBlobPathForUpload(final AbstractRemoteWritableBlobEntity obj) { - BlobPath blobPath = blobStoreRepository.basePath() - .add(RemoteClusterStateUtils.encodeString(clusterName)) - .add("cluster-state") - .add(obj.clusterUUID()); + public String getClusterName() { + return clusterName; + } + + public BlobPath getBlobPathPrefix(String clusterUUID) { + return blobStoreRepository.basePath() + .add(RemoteClusterStateUtils.encodeString(getClusterName())) + .add(CLUSTER_STATE_PATH_TOKEN) + .add(clusterUUID); + } + + public BlobPath getBlobPathForUpload(final AbstractRemoteWritableBlobEntity obj) { + BlobPath blobPath = getBlobPathPrefix(obj.clusterUUID()); for (String token : obj.getBlobPathParameters().getPathTokens()) { blobPath = blobPath.add(token); } return blobPath; } - private BlobPath getBlobPathForDownload(final AbstractRemoteWritableBlobEntity obj) { + public BlobPath getBlobPathForDownload(final AbstractRemoteWritableBlobEntity obj) { String[] pathTokens = obj.getBlobPathTokens(); BlobPath blobPath = new BlobPath(); if (pathTokens == null || pathTokens.length < 1) { diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteRoutingTableBlobStore.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteRoutingTableBlobStore.java new file mode 100644 index 0000000000000..7c4a5bf2236a1 --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteRoutingTableBlobStore.java @@ -0,0 +1,108 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.gateway.remote.model;
+
+import org.opensearch.common.blobstore.BlobPath;
+import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity;
+import org.opensearch.common.remote.RemoteWriteableEntity;
+import org.opensearch.common.settings.ClusterSettings;
+import org.opensearch.common.settings.Setting;
+import org.opensearch.gateway.remote.routingtable.RemoteIndexRoutingTable;
+import org.opensearch.index.remote.RemoteStoreEnums;
+import org.opensearch.index.remote.RemoteStorePathStrategy;
+import org.opensearch.index.translog.transfer.BlobStoreTransferService;
+import org.opensearch.repositories.blobstore.BlobStoreRepository;
+import org.opensearch.threadpool.ThreadPool;
+
+import static org.opensearch.gateway.remote.routingtable.RemoteIndexRoutingTable.INDEX_ROUTING_TABLE;
+
+/**
+ * Extends the RemoteClusterStateBlobStore to support {@link RemoteIndexRoutingTable}
+ *
+ * @param <IndexRoutingTable> which can be uploaded to / downloaded from blob store
+ * @param <U> The concrete class implementing {@link RemoteWriteableEntity} which is used as a wrapper for IndexRoutingTable entity.
+ */
+public class RemoteRoutingTableBlobStore<IndexRoutingTable, U extends AbstractRemoteWritableBlobEntity<IndexRoutingTable>> extends
+    RemoteClusterStateBlobStore<IndexRoutingTable, U> {
+
+    /**
+     * This setting is used to set the remote routing table store blob store path type strategy.
+     */
+    public static final Setting<RemoteStoreEnums.PathType> REMOTE_ROUTING_TABLE_PATH_TYPE_SETTING = new Setting<>(
+        "cluster.remote_store.routing_table.path_type",
+        RemoteStoreEnums.PathType.HASHED_PREFIX.toString(),
+        RemoteStoreEnums.PathType::parseString,
+        Setting.Property.NodeScope,
+        Setting.Property.Dynamic
+    );
+
+    /**
+     * This setting is used to set the remote routing table store blob store path hash algorithm strategy.
+     * This setting will come to effect if the {@link #REMOTE_ROUTING_TABLE_PATH_TYPE_SETTING}
+     * is either {@code HASHED_PREFIX} or {@code HASHED_INFIX}.
+ */ + public static final Setting REMOTE_ROUTING_TABLE_PATH_HASH_ALGO_SETTING = new Setting<>( + "cluster.remote_store.routing_table.path_hash_algo", + RemoteStoreEnums.PathHashAlgorithm.FNV_1A_BASE64.toString(), + RemoteStoreEnums.PathHashAlgorithm::parseString, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + + private RemoteStoreEnums.PathType pathType; + private RemoteStoreEnums.PathHashAlgorithm pathHashAlgo; + + public RemoteRoutingTableBlobStore( + BlobStoreTransferService blobStoreTransferService, + BlobStoreRepository blobStoreRepository, + String clusterName, + ThreadPool threadPool, + String executor, + ClusterSettings clusterSettings + ) { + super(blobStoreTransferService, blobStoreRepository, clusterName, threadPool, executor); + this.pathType = clusterSettings.get(REMOTE_ROUTING_TABLE_PATH_TYPE_SETTING); + this.pathHashAlgo = clusterSettings.get(REMOTE_ROUTING_TABLE_PATH_HASH_ALGO_SETTING); + clusterSettings.addSettingsUpdateConsumer(REMOTE_ROUTING_TABLE_PATH_TYPE_SETTING, this::setPathTypeSetting); + clusterSettings.addSettingsUpdateConsumer(REMOTE_ROUTING_TABLE_PATH_HASH_ALGO_SETTING, this::setPathHashAlgoSetting); + } + + @Override + public BlobPath getBlobPathForUpload(final AbstractRemoteWritableBlobEntity obj) { + assert obj.getBlobPathParameters().getPathTokens().size() == 1 : "Unexpected tokens in RemoteRoutingTableObject"; + BlobPath indexRoutingPath = getBlobPathPrefix(obj.clusterUUID()).add(INDEX_ROUTING_TABLE); + + BlobPath path = pathType.path( + RemoteStorePathStrategy.PathInput.builder() + .basePath(indexRoutingPath) + .indexUUID(String.join("", obj.getBlobPathParameters().getPathTokens())) + .build(), + pathHashAlgo + ); + return path; + } + + private void setPathTypeSetting(RemoteStoreEnums.PathType pathType) { + this.pathType = pathType; + } + + private void setPathHashAlgoSetting(RemoteStoreEnums.PathHashAlgorithm pathHashAlgo) { + this.pathHashAlgo = pathHashAlgo; + } + + // For testing only + protected RemoteStoreEnums.PathType getPathTypeSetting() { + return pathType; + } + + // For testing only + protected RemoteStoreEnums.PathHashAlgorithm getPathHashAlgoSetting() { + return pathHashAlgo; + } +} diff --git a/server/src/main/java/org/opensearch/gateway/remote/routingtable/IndexRoutingTableHeader.java b/server/src/main/java/org/opensearch/gateway/remote/routingtable/IndexRoutingTableHeader.java deleted file mode 100644 index 5baea6adba0c7..0000000000000 --- a/server/src/main/java/org/opensearch/gateway/remote/routingtable/IndexRoutingTableHeader.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
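
getBlobPathForUpload above first builds base/cluster-state/<clusterUUID>/index-routing and then lets the configured path strategy rewrite it; with HASHED_PREFIX, a hash of the path input is prepended so blobs fan out across keyspace prefixes. A toy illustration of the layout (the hash function here is a stand-in, not the FNV_1A_BASE64 algorithm):

    final class HashedPrefixSketch {
        static String hashedPrefixPath(String basePath, String indexUUID) {
            String input = basePath + "/" + indexUUID;
            String prefix = Integer.toHexString(input.hashCode()); // stand-in hash, illustrative only
            return prefix + "/" + basePath + "/" + indexUUID;
        }

        public static void main(String[] args) {
            // prints something like "1b2e44a7/base/cluster-state/uuid1/index-routing/idxUUID"
            System.out.println(hashedPrefixPath("base/cluster-state/uuid1/index-routing", "idxUUID"));
        }
    }
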
- */ - -package org.opensearch.gateway.remote.routingtable; - -import org.apache.lucene.codecs.CodecUtil; -import org.apache.lucene.index.CorruptIndexException; -import org.apache.lucene.index.IndexFormatTooNewException; -import org.apache.lucene.index.IndexFormatTooOldException; -import org.apache.lucene.store.InputStreamDataInput; -import org.apache.lucene.store.OutputStreamDataOutput; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.io.stream.Writeable; - -import java.io.EOFException; -import java.io.IOException; - -/** - * The stored header information for the individual index routing table - */ -public class IndexRoutingTableHeader implements Writeable { - - public static final String INDEX_ROUTING_HEADER_CODEC = "index_routing_header_codec"; - public static final int INITIAL_VERSION = 1; - public static final int CURRENT_VERSION = INITIAL_VERSION; - private final String indexName; - - public IndexRoutingTableHeader(String indexName) { - this.indexName = indexName; - } - - /** - * Reads the contents on the stream into the corresponding {@link IndexRoutingTableHeader} - * - * @param in streamInput - * @throws IOException exception thrown on failing to read from stream. - */ - public IndexRoutingTableHeader(StreamInput in) throws IOException { - try { - readHeaderVersion(in); - indexName = in.readString(); - } catch (EOFException e) { - throw new IOException("index routing header truncated", e); - } - } - - private void readHeaderVersion(final StreamInput in) throws IOException { - try { - CodecUtil.checkHeader(new InputStreamDataInput(in), INDEX_ROUTING_HEADER_CODEC, INITIAL_VERSION, CURRENT_VERSION); - } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException e) { - throw new IOException("index routing table header corrupted", e); - } - } - - /** - * Write the IndexRoutingTable to given stream. - * - * @param out stream to write - * @throws IOException exception thrown on failing to write to stream. 
- */ - public void writeTo(StreamOutput out) throws IOException { - try { - CodecUtil.writeHeader(new OutputStreamDataOutput(out), INDEX_ROUTING_HEADER_CODEC, CURRENT_VERSION); - out.writeString(indexName); - out.flush(); - } catch (IOException e) { - throw new IOException("Failed to write IndexRoutingTable header", e); - } - } - - public String getIndexName() { - return indexName; - } - -} diff --git a/server/src/main/java/org/opensearch/gateway/remote/routingtable/RemoteIndexRoutingTable.java b/server/src/main/java/org/opensearch/gateway/remote/routingtable/RemoteIndexRoutingTable.java index 17c55190da07f..40b5bafde2b13 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/routingtable/RemoteIndexRoutingTable.java +++ b/server/src/main/java/org/opensearch/gateway/remote/routingtable/RemoteIndexRoutingTable.java @@ -9,92 +9,106 @@ package org.opensearch.gateway.remote.routingtable; import org.opensearch.cluster.routing.IndexRoutingTable; -import org.opensearch.cluster.routing.IndexShardRoutingTable; -import org.opensearch.core.common.io.stream.BufferedChecksumStreamInput; -import org.opensearch.core.common.io.stream.BufferedChecksumStreamOutput; -import org.opensearch.core.common.io.stream.InputStreamStreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.common.io.Streams; +import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.BlobPathParameters; +import org.opensearch.core.compress.Compressor; import org.opensearch.core.index.Index; +import org.opensearch.gateway.remote.ClusterMetadataManifest; +import org.opensearch.index.remote.RemoteStoreUtils; +import org.opensearch.repositories.blobstore.ChecksumWritableBlobStoreFormat; -import java.io.EOFException; import java.io.IOException; import java.io.InputStream; +import java.util.List; + +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER; /** * Remote store object for IndexRoutingTable */ -public class RemoteIndexRoutingTable implements Writeable { +public class RemoteIndexRoutingTable extends AbstractRemoteWritableBlobEntity { - private final IndexRoutingTable indexRoutingTable; + public static final String INDEX_ROUTING_TABLE = "index-routing"; + public static final String INDEX_ROUTING_METADATA_PREFIX = "indexRouting--"; + public static final String INDEX_ROUTING_FILE = "index_routing"; + private IndexRoutingTable indexRoutingTable; + private final Index index; + private long term; + private long version; + private BlobPathParameters blobPathParameters; + public static final ChecksumWritableBlobStoreFormat INDEX_ROUTING_TABLE_FORMAT = + new ChecksumWritableBlobStoreFormat<>("index-routing-table", IndexRoutingTable::readFrom); - public RemoteIndexRoutingTable(IndexRoutingTable indexRoutingTable) { + public RemoteIndexRoutingTable( + IndexRoutingTable indexRoutingTable, + String clusterUUID, + Compressor compressor, + long term, + long version + ) { + super(clusterUUID, compressor); + this.index = indexRoutingTable.getIndex(); this.indexRoutingTable = indexRoutingTable; + this.term = term; + this.version = version; } /** * Reads data from inputStream and creates RemoteIndexRoutingTable object with the {@link IndexRoutingTable} - * @param inputStream input stream with index routing data - * @param index index for the current routing data - * @throws IOException exception thrown on failing to read from stream. 
+ * @param blobName name of the blob, which contains the index routing data + * @param clusterUUID UUID of the cluster + * @param compressor Compressor object */ - public RemoteIndexRoutingTable(InputStream inputStream, Index index) throws IOException { - try { - try (BufferedChecksumStreamInput in = new BufferedChecksumStreamInput(new InputStreamStreamInput(inputStream), "assertion")) { - // Read the Table Header first and confirm the index - IndexRoutingTableHeader indexRoutingTableHeader = new IndexRoutingTableHeader(in); - assert indexRoutingTableHeader.getIndexName().equals(index.getName()); + public RemoteIndexRoutingTable(String blobName, String clusterUUID, Compressor compressor) { + super(clusterUUID, compressor); + this.index = null; + this.term = -1; + this.version = -1; + this.blobName = blobName; + } - int numberOfShardRouting = in.readVInt(); - IndexRoutingTable.Builder indicesRoutingTable = IndexRoutingTable.builder(index); - for (int idx = 0; idx < numberOfShardRouting; idx++) { - IndexShardRoutingTable indexShardRoutingTable = IndexShardRoutingTable.Builder.readFrom(in); - indicesRoutingTable.addIndexShard(indexShardRoutingTable); - } - verifyCheckSum(in); - indexRoutingTable = indicesRoutingTable.build(); - } - } catch (EOFException e) { - throw new IOException("Indices Routing table is corrupted", e); + @Override + public BlobPathParameters getBlobPathParameters() { + if (blobPathParameters == null) { + blobPathParameters = new BlobPathParameters(List.of(indexRoutingTable.getIndex().getUUID()), INDEX_ROUTING_FILE); } + return blobPathParameters; } - public IndexRoutingTable getIndexRoutingTable() { - return indexRoutingTable; + @Override + public String getType() { + return INDEX_ROUTING_TABLE; } - /** - * Writes {@link IndexRoutingTable} to the given stream - * @param streamOutput output stream to write - * @throws IOException exception thrown on failing to write to stream. 
- */ @Override - public void writeTo(StreamOutput streamOutput) throws IOException { - try { - BufferedChecksumStreamOutput out = new BufferedChecksumStreamOutput(streamOutput); - IndexRoutingTableHeader indexRoutingTableHeader = new IndexRoutingTableHeader(indexRoutingTable.getIndex().getName()); - indexRoutingTableHeader.writeTo(out); - out.writeVInt(indexRoutingTable.shards().size()); - for (IndexShardRoutingTable next : indexRoutingTable) { - IndexShardRoutingTable.Builder.writeTo(next, out); - } - out.writeLong(out.getChecksum()); - out.flush(); - } catch (IOException e) { - throw new IOException("Failed to write IndexRoutingTable to stream", e); + public String generateBlobFileName() { + if (blobFileName == null) { + blobFileName = String.join( + DELIMITER, + getBlobPathParameters().getFilePrefix(), + RemoteStoreUtils.invertLong(term), + RemoteStoreUtils.invertLong(version), + RemoteStoreUtils.invertLong(System.currentTimeMillis()) + ); } + return blobFileName; } - private void verifyCheckSum(BufferedChecksumStreamInput in) throws IOException { - long expectedChecksum = in.getChecksum(); - long readChecksum = in.readLong(); - if (readChecksum != expectedChecksum) { - throw new IOException( - "checksum verification failed - expected: 0x" - + Long.toHexString(expectedChecksum) - + ", got: 0x" - + Long.toHexString(readChecksum) - ); - } + @Override + public ClusterMetadataManifest.UploadedMetadata getUploadedMetadata() { + assert blobName != null; + assert index != null; + return new ClusterMetadataManifest.UploadedIndexMetadata(index.getName(), index.getUUID(), blobName, INDEX_ROUTING_METADATA_PREFIX); + } + + @Override + public InputStream serialize() throws IOException { + return INDEX_ROUTING_TABLE_FORMAT.serialize(indexRoutingTable, generateBlobFileName(), getCompressor()).streamInput(); + } + + @Override + public IndexRoutingTable deserialize(InputStream in) throws IOException { + return INDEX_ROUTING_TABLE_FORMAT.deserialize(blobName, Streams.readFully(in)); } } diff --git a/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesWriter.java b/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesWriter.java index 75bbf78dbdad2..3753b20a8bea3 100644 --- a/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesWriter.java +++ b/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesWriter.java @@ -14,6 +14,7 @@ import org.apache.lucene.index.MergeState; import org.apache.lucene.index.SegmentWriteState; import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.index.compositeindex.datacube.startree.builder.StarTreesBuilder; import org.opensearch.index.mapper.CompositeMappedFieldType; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.StarTreeMapper; @@ -98,7 +99,9 @@ private void createCompositeIndicesIfPossible(DocValuesProducer valuesProducer, if (compositeFieldSet.isEmpty()) { for (CompositeMappedFieldType mappedType : compositeMappedFieldTypes) { if (mappedType instanceof StarTreeMapper.StarTreeFieldType) { - // TODO : Call StarTree builder + try (StarTreesBuilder starTreesBuilder = new StarTreesBuilder(fieldProducerMap, state, mapperService)) { + starTreesBuilder.build(); + } } } } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeDocument.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeDocument.java new file mode 100644 index 
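
generateBlobFileName above joins the file prefix with the inverted term, version and timestamp. The inversion makes lexicographically sorted blob listings return the newest files first; a sketch of the trick (the delimiter and padding width here are illustrative):

    final class InvertLongSketch {
        static String invertLong(long value) {
            String inverted = String.valueOf(Long.MAX_VALUE - value);
            return "0".repeat(19 - inverted.length()) + inverted; // pad to fixed width so string order == numeric order
        }

        public static void main(String[] args) {
            String older = String.join("__", "index_routing", invertLong(5), invertLong(100));
            String newer = String.join("__", "index_routing", invertLong(5), invertLong(101));
            System.out.println(newer.compareTo(older) < 0); // true: the newer version sorts first
        }
    }
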
0000000000000..0ce2b3a5cdac5 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeDocument.java @@ -0,0 +1,34 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.compositeindex.datacube.startree; + +import org.opensearch.common.annotation.ExperimentalApi; + +import java.util.Arrays; + +/** + * Star tree document + * + * @opensearch.experimental + */ +@ExperimentalApi +public class StarTreeDocument { + public final Long[] dimensions; + public final Object[] metrics; + + public StarTreeDocument(Long[] dimensions, Object[] metrics) { + this.dimensions = dimensions; + this.metrics = metrics; + } + + @Override + public String toString() { + return Arrays.toString(dimensions) + " | " + Arrays.toString(metrics); + } +} diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/CountValueAggregator.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/CountValueAggregator.java new file mode 100644 index 0000000000000..d72f4a292dc0a --- /dev/null +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/CountValueAggregator.java @@ -0,0 +1,66 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.index.compositeindex.datacube.startree.aggregators; + +import org.opensearch.index.compositeindex.datacube.MetricStat; +import org.opensearch.index.compositeindex.datacube.startree.aggregators.numerictype.StarTreeNumericType; + +/** + * Count value aggregator for star tree + * + * @opensearch.experimental + */ +public class CountValueAggregator implements ValueAggregator { + public static final StarTreeNumericType VALUE_AGGREGATOR_TYPE = StarTreeNumericType.LONG; + public static final long DEFAULT_INITIAL_VALUE = 1L; + + @Override + public MetricStat getAggregationType() { + return MetricStat.COUNT; + } + + @Override + public StarTreeNumericType getAggregatedValueType() { + return VALUE_AGGREGATOR_TYPE; + } + + @Override + public Long getInitialAggregatedValueForSegmentDocValue(Long segmentDocValue, StarTreeNumericType starTreeNumericType) { + return DEFAULT_INITIAL_VALUE; + } + + @Override + public Long mergeAggregatedValueAndSegmentValue(Long value, Long segmentDocValue, StarTreeNumericType starTreeNumericType) { + return value + 1; + } + + @Override + public Long mergeAggregatedValues(Long value, Long aggregatedValue) { + return value + aggregatedValue; + } + + @Override + public Long getInitialAggregatedValue(Long value) { + return value; + } + + @Override + public int getMaxAggregatedValueByteSize() { + return Long.BYTES; + } + + @Override + public Long toLongValue(Long value) { + return value; + } + + @Override + public Long toStarTreeNumericTypeValue(Long value, StarTreeNumericType type) { + return value; + } +} diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/MetricAggregatorInfo.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/MetricAggregatorInfo.java new file mode 100644 index 0000000000000..46f1b1ac11063 --- /dev/null +++ 
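
A tiny usage sketch of the count-aggregator contract above: the first segment value seeds the count at 1, each further segment value adds 1, and two pre-aggregated branches merge by addition (types simplified; no star-tree machinery involved):

    final class CountAggregatorSketch {
        public static void main(String[] args) {
            long count = 1L;                    // getInitialAggregatedValueForSegmentDocValue(...)
            count = count + 1;                  // mergeAggregatedValueAndSegmentValue(count, docValue, ...)
            long otherBranch = 3L;              // a pre-aggregated count from another star-tree branch
            long merged = count + otherBranch;  // mergeAggregatedValues(count, otherBranch)
            System.out.println(merged);         // 5
        }
    }
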
b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/MetricAggregatorInfo.java @@ -0,0 +1,130 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.index.compositeindex.datacube.startree.aggregators; + +import org.opensearch.index.compositeindex.datacube.MetricStat; +import org.opensearch.index.compositeindex.datacube.startree.aggregators.numerictype.StarTreeNumericType; +import org.opensearch.index.compositeindex.datacube.startree.utils.SequentialDocValuesIterator; +import org.opensearch.index.fielddata.IndexNumericFieldData; + +import java.util.Comparator; +import java.util.Objects; + +/** + * Builds aggregation function and doc values field pair to support various aggregations + * + * @opensearch.experimental + */ +public class MetricAggregatorInfo implements Comparable { + + public static final String DELIMITER = "_"; + private final String metric; + private final String starFieldName; + private final MetricStat metricStat; + private final String field; + private final ValueAggregator valueAggregators; + private final StarTreeNumericType starTreeNumericType; + private final SequentialDocValuesIterator metricStatReader; + + /** + * Constructor for MetricAggregatorInfo + */ + public MetricAggregatorInfo( + MetricStat metricStat, + String field, + String starFieldName, + IndexNumericFieldData.NumericType numericType, + SequentialDocValuesIterator metricStatReader + ) { + this.metricStat = metricStat; + this.valueAggregators = ValueAggregatorFactory.getValueAggregator(metricStat); + this.starTreeNumericType = StarTreeNumericType.fromNumericType(numericType); + this.metricStatReader = metricStatReader; + this.field = field; + this.starFieldName = starFieldName; + this.metric = toFieldName(); + } + + /** + * @return metric type + */ + public MetricStat getMetricStat() { + return metricStat; + } + + /** + * @return field Name + */ + public String getField() { + return field; + } + + /** + * @return the metric stat name + */ + public String getMetric() { + return metric; + } + + /** + * @return aggregator for the field value + */ + public ValueAggregator getValueAggregators() { + return valueAggregators; + } + + /** + * @return star tree aggregated value type + */ + public StarTreeNumericType getAggregatedValueType() { + return starTreeNumericType; + } + + /** + * @return metric value reader iterator + */ + public SequentialDocValuesIterator getMetricStatReader() { + return metricStatReader; + } + + /** + * @return field name with metric type and field + */ + public String toFieldName() { + return starFieldName + DELIMITER + field + DELIMITER + metricStat.getTypeName(); + } + + @Override + public int hashCode() { + return Objects.hashCode(toFieldName()); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj instanceof MetricAggregatorInfo) { + MetricAggregatorInfo anotherPair = (MetricAggregatorInfo) obj; + return metricStat == anotherPair.metricStat && field.equals(anotherPair.field); + } + return false; + } + + @Override + public String toString() { + return toFieldName(); + } + + @Override + public int compareTo(MetricAggregatorInfo other) { + return Comparator.comparing((MetricAggregatorInfo o) -> o.field) + .thenComparing((MetricAggregatorInfo o) -> o.metricStat) + .compare(this, other); + } +} diff --git 
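
toFieldName() above determines the composite doc-values field a metric is materialized under: star field, source field and stat joined with "_". Assuming MetricStat.SUM reports "sum" as its type name, the composed name looks like this:

    final class MetricFieldNameSketch {
        public static void main(String[] args) {
            String starFieldName = "startree1";
            String field = "price";
            String statTypeName = "sum"; // assumed value of MetricStat.SUM.getTypeName()
            System.out.println(String.join("_", starFieldName, field, statTypeName)); // startree1_price_sum
        }
    }
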
a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/SumValueAggregator.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/SumValueAggregator.java new file mode 100644 index 0000000000000..543b0f7f42374 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/SumValueAggregator.java @@ -0,0 +1,97 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.index.compositeindex.datacube.startree.aggregators; + +import org.apache.lucene.util.NumericUtils; +import org.opensearch.index.compositeindex.datacube.MetricStat; +import org.opensearch.index.compositeindex.datacube.startree.aggregators.numerictype.StarTreeNumericType; +import org.opensearch.search.aggregations.metrics.CompensatedSum; + +/** + * Sum value aggregator for star tree + * + * @opensearch.experimental + */ +public class SumValueAggregator implements ValueAggregator { + + public static final StarTreeNumericType VALUE_AGGREGATOR_TYPE = StarTreeNumericType.DOUBLE; + private double sum = 0; + private double compensation = 0; + private CompensatedSum kahanSummation = new CompensatedSum(0, 0); + + @Override + public MetricStat getAggregationType() { + return MetricStat.SUM; + } + + @Override + public StarTreeNumericType getAggregatedValueType() { + return VALUE_AGGREGATOR_TYPE; + } + + @Override + public Double getInitialAggregatedValueForSegmentDocValue(Long segmentDocValue, StarTreeNumericType starTreeNumericType) { + kahanSummation.reset(0, 0); + kahanSummation.add(starTreeNumericType.getDoubleValue(segmentDocValue)); + compensation = kahanSummation.delta(); + sum = kahanSummation.value(); + return kahanSummation.value(); + } + + @Override + public Double mergeAggregatedValueAndSegmentValue(Double value, Long segmentDocValue, StarTreeNumericType starTreeNumericType) { + assert kahanSummation.value() == value; + kahanSummation.reset(sum, compensation); + kahanSummation.add(starTreeNumericType.getDoubleValue(segmentDocValue)); + compensation = kahanSummation.delta(); + sum = kahanSummation.value(); + return kahanSummation.value(); + } + + @Override + public Double mergeAggregatedValues(Double value, Double aggregatedValue) { + assert kahanSummation.value() == aggregatedValue; + kahanSummation.reset(sum, compensation); + kahanSummation.add(value); + compensation = kahanSummation.delta(); + sum = kahanSummation.value(); + return kahanSummation.value(); + } + + @Override + public Double getInitialAggregatedValue(Double value) { + kahanSummation.reset(0, 0); + kahanSummation.add(value); + compensation = kahanSummation.delta(); + sum = kahanSummation.value(); + return kahanSummation.value(); + } + + @Override + public int getMaxAggregatedValueByteSize() { + return Double.BYTES; + } + + @Override + public Long toLongValue(Double value) { + try { + return NumericUtils.doubleToSortableLong(value); + } catch (Exception e) { + throw new IllegalStateException("Cannot convert " + value + " to sortable long", e); + } + } + + @Override + public Double toStarTreeNumericTypeValue(Long value, StarTreeNumericType type) { + try { + return type.getDoubleValue(value); + } catch (Exception e) { + throw new IllegalStateException("Cannot convert " + value + " to sortable aggregation type", e); + } + } +} diff --git 
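
SumValueAggregator above keeps a running CompensatedSum rather than a plain double, i.e. Kahan summation. A self-contained sketch of why the compensation term matters when many small values are folded into a large running sum:

    final class KahanSketch {
        private double sum = 0;
        private double compensation = 0;

        void add(double value) {
            double corrected = value - compensation;
            double newSum = sum + corrected;
            compensation = (newSum - sum) - corrected; // recaptures the low-order bits plain addition drops
            sum = newSum;
        }

        public static void main(String[] args) {
            KahanSketch kahan = new KahanSketch();
            kahan.add(1e16);
            for (int i = 0; i < 10; i++) {
                kahan.add(1.0); // each 1.0 would vanish entirely under plain double summation
            }
            System.out.println(kahan.sum); // 1.000000000000001E16 (plain summation prints 1.0E16)
        }
    }
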
diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/ValueAggregator.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/ValueAggregator.java new file mode 100644 index 0000000000000..3dd1f85845c17 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/ValueAggregator.java @@ -0,0 +1,64 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.index.compositeindex.datacube.startree.aggregators; + +import org.opensearch.index.compositeindex.datacube.MetricStat; +import org.opensearch.index.compositeindex.datacube.startree.aggregators.numerictype.StarTreeNumericType; + +/** + * A value aggregator that pre-aggregates on the input values for a specific type of aggregation. + * + * @opensearch.experimental + */ +public interface ValueAggregator<A> { + + /** + * Returns the type of the aggregation. + */ + MetricStat getAggregationType(); + + /** + * Returns the data type of the aggregated value. + */ + StarTreeNumericType getAggregatedValueType(); + + /** + * Returns the initial aggregated value. + */ + A getInitialAggregatedValueForSegmentDocValue(Long segmentDocValue, StarTreeNumericType starTreeNumericType); + + /** + * Applies a segment doc value to the current aggregated value. + */ + A mergeAggregatedValueAndSegmentValue(A value, Long segmentDocValue, StarTreeNumericType starTreeNumericType); + + /** + * Applies an aggregated value to the current aggregated value. + */ + A mergeAggregatedValues(A value, A aggregatedValue); + + /** + * Clones an aggregated value. + */ + A getInitialAggregatedValue(A value); + + /** + * Returns the maximum size in bytes of the aggregated values seen so far. + */ + int getMaxAggregatedValueByteSize(); + + /** + * Converts an aggregated value into a Long type. + */ + Long toLongValue(A value); + + /** + * Converts an aggregated value from a Long type. + */ + A toStarTreeNumericTypeValue(Long rawValue, StarTreeNumericType type); +}
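Note the Long-based contract: aggregated values cross the doc-values boundary as order-preserving "sortable" longs, which toLongValue and toStarTreeNumericTypeValue convert. A small sketch of that round trip with Lucene's NumericUtils (the same calls SumValueAggregator delegates to above):

    import org.apache.lucene.util.NumericUtils;

    public class SortableLongDemo {
        public static void main(String[] args) {
            double sum = 42.5;
            long encoded = NumericUtils.doubleToSortableLong(sum);      // double -> sortable long
            double decoded = NumericUtils.sortableLongToDouble(encoded); // and back
            System.out.println(decoded == sum); // true: the encoding is lossless
        }
    }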
diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/ValueAggregatorFactory.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/ValueAggregatorFactory.java new file mode 100644 index 0000000000000..4ee0b0b5b13f8 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/ValueAggregatorFactory.java @@ -0,0 +1,56 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.index.compositeindex.datacube.startree.aggregators; + +import org.opensearch.index.compositeindex.datacube.MetricStat; +import org.opensearch.index.compositeindex.datacube.startree.aggregators.numerictype.StarTreeNumericType; + +/** + * Value aggregator factory for a given aggregation type + * + * @opensearch.experimental + */ +public class ValueAggregatorFactory { + private ValueAggregatorFactory() {} + + /** + * Returns a new instance of value aggregator for the given aggregation type. + * + * @param aggregationType Aggregation type + * @return Value aggregator + */ + public static ValueAggregator getValueAggregator(MetricStat aggregationType) { + switch (aggregationType) { + // other metric types (min, max, avg) will be supported in the future + case SUM: + return new SumValueAggregator(); + case COUNT: + return new CountValueAggregator(); + default: + throw new IllegalStateException("Unsupported aggregation type: " + aggregationType); + } + } + + /** + * Returns the data type of the aggregated value for the given aggregation type. + * + * @param aggregationType Aggregation type + * @return Data type of the aggregated value + */ + public static StarTreeNumericType getAggregatedValueType(MetricStat aggregationType) { + switch (aggregationType) { + // other metric types (min, max, avg) will be supported in the future + case SUM: + return SumValueAggregator.VALUE_AGGREGATOR_TYPE; + case COUNT: + return CountValueAggregator.VALUE_AGGREGATOR_TYPE; + default: + throw new IllegalStateException("Unsupported aggregation type: " + aggregationType); + } + } +}
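A hedged sketch of the call flow a consumer of this factory follows: obtain the aggregator for a stat, seed it from the first encoded segment value, then fold further values in (assumes the sortable-long encoding shown earlier):

    import org.apache.lucene.util.NumericUtils;
    import org.opensearch.index.compositeindex.datacube.MetricStat;
    import org.opensearch.index.compositeindex.datacube.startree.aggregators.ValueAggregator;
    import org.opensearch.index.compositeindex.datacube.startree.aggregators.ValueAggregatorFactory;
    import org.opensearch.index.compositeindex.datacube.startree.aggregators.numerictype.StarTreeNumericType;

    public class AggregatorFlowDemo {
        public static void main(String[] args) {
            ValueAggregator agg = ValueAggregatorFactory.getValueAggregator(MetricStat.SUM);
            // seed the aggregate from the first segment doc value (sortable-long encoded)
            Double running = (Double) agg.getInitialAggregatedValueForSegmentDocValue(
                NumericUtils.doubleToSortableLong(10.0),
                StarTreeNumericType.DOUBLE
            );
            // fold another segment value into the running aggregate
            running = (Double) agg.mergeAggregatedValueAndSegmentValue(
                running,
                NumericUtils.doubleToSortableLong(2.5),
                StarTreeNumericType.DOUBLE
            );
            System.out.println(running); // 12.5
        }
    }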
diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/numerictype/StarTreeNumericType.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/numerictype/StarTreeNumericType.java new file mode 100644 index 0000000000000..57fe573a6a93c --- /dev/null +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/numerictype/StarTreeNumericType.java @@ -0,0 +1,66 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.compositeindex.datacube.startree.aggregators.numerictype; + +import org.opensearch.index.fielddata.IndexNumericFieldData; + +import java.util.function.Function; + +/** + * Enum to map Star Tree Numeric Types to Lucene's Numeric Type + * + * @opensearch.experimental + */ +public enum StarTreeNumericType { + + // TODO: Handle scaled floats + HALF_FLOAT(IndexNumericFieldData.NumericType.HALF_FLOAT, StarTreeNumericTypeConverters::halfFloatPointToDouble), + FLOAT(IndexNumericFieldData.NumericType.FLOAT, StarTreeNumericTypeConverters::floatPointToDouble), + LONG(IndexNumericFieldData.NumericType.LONG, StarTreeNumericTypeConverters::longToDouble), + DOUBLE(IndexNumericFieldData.NumericType.DOUBLE, StarTreeNumericTypeConverters::sortableLongtoDouble), + INT(IndexNumericFieldData.NumericType.INT, StarTreeNumericTypeConverters::intToDouble), + SHORT(IndexNumericFieldData.NumericType.SHORT, StarTreeNumericTypeConverters::shortToDouble), + BYTE(IndexNumericFieldData.NumericType.BYTE, StarTreeNumericTypeConverters::bytesToDouble), + UNSIGNED_LONG(IndexNumericFieldData.NumericType.UNSIGNED_LONG, StarTreeNumericTypeConverters::unsignedlongToDouble); + + final IndexNumericFieldData.NumericType numericType; + final Function<Long, Double> converter; + + StarTreeNumericType(IndexNumericFieldData.NumericType numericType, Function<Long, Double> converter) { + this.numericType = numericType; + this.converter = converter; + } + + public double getDoubleValue(long rawValue) { + return this.converter.apply(rawValue); + } + + public static StarTreeNumericType fromNumericType(IndexNumericFieldData.NumericType numericType) { + switch (numericType) { + case HALF_FLOAT: + return StarTreeNumericType.HALF_FLOAT; + case FLOAT: + return StarTreeNumericType.FLOAT; + case LONG: + return StarTreeNumericType.LONG; + case DOUBLE: + return StarTreeNumericType.DOUBLE; + case INT: + return StarTreeNumericType.INT; + case SHORT: + return StarTreeNumericType.SHORT; + case UNSIGNED_LONG: + return StarTreeNumericType.UNSIGNED_LONG; + case BYTE: + return StarTreeNumericType.BYTE; + default: + throw new UnsupportedOperationException("Unknown numeric type [" + numericType + "]"); + } + } +} diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/numerictype/StarTreeNumericTypeConverters.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/numerictype/StarTreeNumericTypeConverters.java new file mode 100644 index 0000000000000..eb7647c4f9851 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/numerictype/StarTreeNumericTypeConverters.java @@ -0,0 +1,58 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.index.compositeindex.datacube.startree.aggregators.numerictype; + +import org.apache.lucene.sandbox.document.HalfFloatPoint; +import org.apache.lucene.util.NumericUtils; +import org.opensearch.common.Numbers; +import org.opensearch.common.annotation.ExperimentalApi; + +/** + * Numeric converters used during aggregations of metric values + * + * @opensearch.experimental + */ +@ExperimentalApi +public class StarTreeNumericTypeConverters { + + public static double halfFloatPointToDouble(Long value) { + return HalfFloatPoint.sortableShortToHalfFloat((short) value.longValue()); + } + + public static double floatPointToDouble(Long value) { + return NumericUtils.sortableIntToFloat((int) value.longValue()); + } + + public static double longToDouble(Long value) { + return (double) value; + } + + public static double intToDouble(Long value) { + return (double) value; + } + + public static double shortToDouble(Long value) { + return (double) value; + } + + public static Double sortableLongtoDouble(Long value) { + return NumericUtils.sortableLongToDouble(value); + } + + public static double unsignedlongToDouble(Long value) { + return Numbers.unsignedLongToDouble(value); + } + + public static double bytesToDouble(Long value) { + byte[] bytes = new byte[8]; + NumericUtils.longToSortableBytes(value, bytes, 0); + return NumericUtils.sortableLongToDouble(NumericUtils.sortableBytesToLong(bytes, 0)); + } + +} diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/numerictype/package-info.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/numerictype/package-info.java new file mode 100644 index 0000000000000..fe5c2a7ceb254 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/numerictype/package-info.java @@ -0,0 +1,14 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Numeric Types for Composite Index Star Tree + * + * @opensearch.experimental + */ +package org.opensearch.index.compositeindex.datacube.startree.aggregators.numerictype; diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/package-info.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/package-info.java new file mode 100644 index 0000000000000..bddd6a46fbbe8 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/package-info.java @@ -0,0 +1,14 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** + * Aggregators for Composite Index Star Tree + * + * @opensearch.experimental + */ +package org.opensearch.index.compositeindex.datacube.startree.aggregators; diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilder.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilder.java new file mode 100644 index 0000000000000..0a363bfad8fe1 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilder.java @@ -0,0 +1,668 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.index.compositeindex.datacube.startree.builder; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.codecs.DocValuesProducer; +import org.apache.lucene.index.DocValuesType; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.SegmentWriteState; +import org.opensearch.index.compositeindex.datacube.Dimension; +import org.opensearch.index.compositeindex.datacube.Metric; +import org.opensearch.index.compositeindex.datacube.MetricStat; +import org.opensearch.index.compositeindex.datacube.startree.StarTreeDocument; +import org.opensearch.index.compositeindex.datacube.startree.StarTreeField; +import org.opensearch.index.compositeindex.datacube.startree.StarTreeFieldConfiguration; +import org.opensearch.index.compositeindex.datacube.startree.aggregators.MetricAggregatorInfo; +import org.opensearch.index.compositeindex.datacube.startree.aggregators.ValueAggregator; +import org.opensearch.index.compositeindex.datacube.startree.aggregators.numerictype.StarTreeNumericType; +import org.opensearch.index.compositeindex.datacube.startree.utils.SequentialDocValuesIterator; +import org.opensearch.index.compositeindex.datacube.startree.utils.TreeNode; +import org.opensearch.index.fielddata.IndexNumericFieldData; +import org.opensearch.index.mapper.Mapper; +import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.mapper.NumberFieldMapper; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.opensearch.index.compositeindex.datacube.startree.utils.TreeNode.ALL; + +/** + * Builder for star tree. 
Defines the algorithm to construct star-tree + * See {@link StarTreesBuilder} for information around the construction of star-trees based on star-tree fields + * + * @opensearch.experimental + */ +public abstract class BaseStarTreeBuilder implements StarTreeBuilder { + + private static final Logger logger = LogManager.getLogger(BaseStarTreeBuilder.class); + + /** + * Default value for star node + */ + public static final int STAR_IN_DOC_VALUES_INDEX = -1; + + protected final Set skipStarNodeCreationForDimensions; + + protected final List metricAggregatorInfos; + protected final int numMetrics; + protected final int numDimensions; + protected int numStarTreeDocs; + protected int totalSegmentDocs; + protected int numStarTreeNodes; + protected final int maxLeafDocuments; + + protected final TreeNode rootNode = getNewNode(); + + protected SequentialDocValuesIterator[] dimensionReaders; + + // We do not close these producers as they are empty doc value producers (where close() is unsupported) + protected Map fieldProducerMap; + + private final StarTreeDocValuesIteratorAdapter starTreeDocValuesIteratorAdapter; + private final StarTreeField starTreeField; + + /** + * Reads all the configuration related to dimensions and metrics, builds a star-tree based on the different construction parameters. + * + * @param starTreeField holds the configuration for the star tree + * @param fieldProducerMap helps return the doc values iterator for each type based on field name + * @param state stores the segment write state + * @param mapperService helps to find the original type of the field + */ + protected BaseStarTreeBuilder( + StarTreeField starTreeField, + Map fieldProducerMap, + SegmentWriteState state, + MapperService mapperService + ) throws IOException { + + logger.debug("Building in base star tree builder"); + + this.starTreeField = starTreeField; + StarTreeFieldConfiguration starTreeFieldSpec = starTreeField.getStarTreeConfig(); + this.fieldProducerMap = fieldProducerMap; + this.starTreeDocValuesIteratorAdapter = new StarTreeDocValuesIteratorAdapter(); + + List dimensionsSplitOrder = starTreeField.getDimensionsOrder(); + this.numDimensions = dimensionsSplitOrder.size(); + + this.skipStarNodeCreationForDimensions = new HashSet<>(); + this.totalSegmentDocs = state.segmentInfo.maxDoc(); + this.dimensionReaders = new SequentialDocValuesIterator[numDimensions]; + Set skipStarNodeCreationForDimensions = starTreeFieldSpec.getSkipStarNodeCreationInDims(); + + for (int i = 0; i < numDimensions; i++) { + String dimension = dimensionsSplitOrder.get(i).getField(); + if (skipStarNodeCreationForDimensions.contains(dimensionsSplitOrder.get(i).getField())) { + this.skipStarNodeCreationForDimensions.add(i); + } + FieldInfo dimensionFieldInfos = state.fieldInfos.fieldInfo(dimension); + DocValuesType dimensionDocValuesType = dimensionFieldInfos.getDocValuesType(); + dimensionReaders[i] = starTreeDocValuesIteratorAdapter.getDocValuesIterator( + dimensionDocValuesType, + dimensionFieldInfos, + fieldProducerMap.get(dimensionFieldInfos.name) + ); + } + + this.metricAggregatorInfos = generateMetricAggregatorInfos(mapperService, state); + this.numMetrics = metricAggregatorInfos.size(); + this.maxLeafDocuments = starTreeFieldSpec.maxLeafDocs(); + } + + /** + * Generates the configuration required to perform aggregation for all the metrics on a field + * + * @return list of MetricAggregatorInfo + */ + public List generateMetricAggregatorInfos(MapperService mapperService, SegmentWriteState state) + throws IOException { + List 
metricAggregatorInfos = new ArrayList<>(); + for (Metric metric : this.starTreeField.getMetrics()) { + for (MetricStat metricStat : metric.getMetrics()) { + IndexNumericFieldData.NumericType numericType; + SequentialDocValuesIterator metricStatReader; + Mapper fieldMapper = mapperService.documentMapper().mappers().getMapper(metric.getField()); + if (fieldMapper instanceof NumberFieldMapper) { + numericType = ((NumberFieldMapper) fieldMapper).fieldType().numericType(); + } else { + logger.error("unsupported mapper type"); + throw new IllegalStateException("unsupported mapper type"); + } + + FieldInfo metricFieldInfos = state.fieldInfos.fieldInfo(metric.getField()); + DocValuesType metricDocValuesType = metricFieldInfos.getDocValuesType(); + if (metricStat != MetricStat.COUNT) { + metricStatReader = starTreeDocValuesIteratorAdapter.getDocValuesIterator( + metricDocValuesType, + metricFieldInfos, + fieldProducerMap.get(metricFieldInfos.name) + ); + } else { + metricStatReader = new SequentialDocValuesIterator(); + } + + MetricAggregatorInfo metricAggregatorInfo = new MetricAggregatorInfo( + metricStat, + metric.getField(), + starTreeField.getName(), + numericType, + metricStatReader + ); + metricAggregatorInfos.add(metricAggregatorInfo); + } + } + return metricAggregatorInfos; + } + + /** + * Adds a document to the star-tree. + * + * @param starTreeDocument star tree document to be added + * @throws IOException if an I/O error occurs while adding the document + */ + public abstract void appendStarTreeDocument(StarTreeDocument starTreeDocument) throws IOException; + + /** + * Returns the document of the given document id in the star-tree. + * + * @param docId document id + * @return star tree document + * @throws IOException if an I/O error occurs while fetching the star-tree document + */ + public abstract StarTreeDocument getStarTreeDocument(int docId) throws IOException; + + /** + * Retrieves the list of star-tree documents in the star-tree. + * + * @return Star tree documents + */ + public abstract List getStarTreeDocuments(); + + /** + * Returns the value of the dimension for the given dimension id and document in the star-tree. + * + * @param docId document id + * @param dimensionId dimension id + * @return dimension value + */ + public abstract Long getDimensionValue(int docId, int dimensionId) throws IOException; + + /** + * Sorts and aggregates the star-tree document in the segment, and returns a star-tree document iterator for all the + * aggregated star-tree document. + * + * @return Iterator for the aggregated star-tree document + */ + public abstract Iterator sortAndAggregateStarTreeDocuments() throws IOException; + + /** + * Generates aggregated star-tree documents for star-node. 
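+ * For example, for documents [10, 100 | sum=5] and [10, 200 | sum=10] within the range of a star-node on the second dimension, the iterator yields the single document [10, *, sum=15]: the dimension is replaced by the star value and the metrics are re-aggregated over the remaining dimensions.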
+ * + * @param startDocId start document id (inclusive) in the star-tree + * @param endDocId end document id (exclusive) in the star-tree + * @param dimensionId dimension id of the star-node + * @return Iterator for the aggregated star-tree documents + */ + public abstract Iterator generateStarTreeDocumentsForStarNode(int startDocId, int endDocId, int dimensionId) + throws IOException; + + /** + * Returns the star-tree document from the segment + * + * @throws IOException when we are unable to build a star tree document from the segment + */ + protected StarTreeDocument getSegmentStarTreeDocument(int currentDocId) throws IOException { + Long[] dimensions = getStarTreeDimensionsFromSegment(currentDocId); + Object[] metrics = getStarTreeMetricsFromSegment(currentDocId); + return new StarTreeDocument(dimensions, metrics); + } + + /** + * Returns the dimension values for the next document from the segment + * + * @return dimension values for each of the star-tree dimension + * @throws IOException when we are unable to iterate to the next doc for the given dimension readers + */ + private Long[] getStarTreeDimensionsFromSegment(int currentDocId) throws IOException { + Long[] dimensions = new Long[numDimensions]; + for (int i = 0; i < numDimensions; i++) { + try { + dimensions[i] = getValuesFromSegment(dimensionReaders[i], currentDocId); + } catch (Exception e) { + logger.error("unable to read the dimension values from the segment", e); + throw new IllegalStateException("unable to read the dimension values from the segment", e); + } + + } + return dimensions; + } + + /** + * Returns the next value from the iterator of respective field + * + * @param iterator respective field iterator + * @param currentDocId current document id + * @return the next value for the field + * @throws IOException when we are unable to iterate to the next doc for the given iterator + */ + private Long getValuesFromSegment(SequentialDocValuesIterator iterator, int currentDocId) throws IOException { + try { + starTreeDocValuesIteratorAdapter.nextDoc(iterator, currentDocId); + } catch (IOException e) { + logger.error("unable to iterate to next doc", e); + throw new RuntimeException("unable to iterate to next doc", e); + } + return starTreeDocValuesIteratorAdapter.getNextValue(iterator, currentDocId); + } + + /** + * Returns the metric values for the next document from the segment + * + * @return metric values for each of the star-tree metric + * @throws IOException when we are unable to iterate to the next doc for the given metric readers + */ + private Object[] getStarTreeMetricsFromSegment(int currentDocId) throws IOException { + Object[] metrics = new Object[numMetrics]; + for (int i = 0; i < numMetrics; i++) { + SequentialDocValuesIterator metricStatReader = metricAggregatorInfos.get(i).getMetricStatReader(); + if (metricStatReader != null) { + try { + metrics[i] = getValuesFromSegment(metricStatReader, currentDocId); + } catch (Exception e) { + logger.error("unable to read the metric values from the segment", e); + throw new IllegalStateException("unable to read the metric values from the segment", e); + } + } else { + throw new IllegalStateException("metric readers are empty"); + } + } + return metrics; + } + + /** + * Merges a star-tree document from the segment into an aggregated star-tree document. + * A new aggregated star-tree document is created if the aggregated segment document is null. 
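+ * For example, with a single sum metric, merging the segment document [10, 20 | 5] into the aggregated document [10, 20 | 30] produces [10, 20 | 35]; the dimensions are assumed to match, and only the metrics are folded together.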
+ * + * @param aggregatedSegmentDocument aggregated star-tree document + * @param segmentDocument segment star-tree document + * @return merged star-tree document + */ + @SuppressWarnings({ "unchecked", "rawtypes" }) + protected StarTreeDocument reduceSegmentStarTreeDocuments( + StarTreeDocument aggregatedSegmentDocument, + StarTreeDocument segmentDocument + ) { + if (aggregatedSegmentDocument == null) { + Long[] dimensions = Arrays.copyOf(segmentDocument.dimensions, numDimensions); + Object[] metrics = new Object[numMetrics]; + for (int i = 0; i < numMetrics; i++) { + try { + ValueAggregator metricValueAggregator = metricAggregatorInfos.get(i).getValueAggregators(); + StarTreeNumericType starTreeNumericType = metricAggregatorInfos.get(i).getAggregatedValueType(); + metrics[i] = metricValueAggregator.getInitialAggregatedValueForSegmentDocValue( + getLong(segmentDocument.metrics[i]), + starTreeNumericType + ); + } catch (Exception e) { + logger.error("Cannot parse initial segment doc value", e); + throw new IllegalStateException("Cannot parse initial segment doc value [" + segmentDocument.metrics[i] + "]"); + } + } + return new StarTreeDocument(dimensions, metrics); + } else { + for (int i = 0; i < numMetrics; i++) { + try { + ValueAggregator metricValueAggregator = metricAggregatorInfos.get(i).getValueAggregators(); + StarTreeNumericType starTreeNumericType = metricAggregatorInfos.get(i).getAggregatedValueType(); + aggregatedSegmentDocument.metrics[i] = metricValueAggregator.mergeAggregatedValueAndSegmentValue( + aggregatedSegmentDocument.metrics[i], + getLong(segmentDocument.metrics[i]), + starTreeNumericType + ); + } catch (Exception e) { + logger.error("Cannot apply segment doc value for aggregation", e); + throw new IllegalStateException("Cannot apply segment doc value for aggregation [" + segmentDocument.metrics[i] + "]"); + } + } + return aggregatedSegmentDocument; + } + } + + /** + * Safely converts the metric value of object type to long. + * + * @param metric value of the metric + * @return converted metric value to long + */ + private static long getLong(Object metric) { + + Long metricValue = null; + try { + if (metric instanceof Long) { + metricValue = (long) metric; + } else if (metric != null) { + metricValue = Long.valueOf(String.valueOf(metric)); + } + } catch (Exception e) { + throw new IllegalStateException("unable to cast segment metric", e); + } + + if (metricValue == null) { + throw new IllegalStateException("unable to cast segment metric"); + } + return metricValue; + } + + /** + * Merges a star-tree document into an aggregated star-tree document. + * A new aggregated star-tree document is created if the aggregated document is null. 
+ * + * @param aggregatedDocument aggregated star-tree document + * @param starTreeDocument segment star-tree document + * @return merged star-tree document + */ + @SuppressWarnings("unchecked") + public StarTreeDocument reduceStarTreeDocuments(StarTreeDocument aggregatedDocument, StarTreeDocument starTreeDocument) { + // aggregate the documents + if (aggregatedDocument == null) { + Long[] dimensions = Arrays.copyOf(starTreeDocument.dimensions, numDimensions); + Object[] metrics = new Object[numMetrics]; + for (int i = 0; i < numMetrics; i++) { + try { + metrics[i] = metricAggregatorInfos.get(i).getValueAggregators().getInitialAggregatedValue(starTreeDocument.metrics[i]); + } catch (Exception e) { + logger.error("Cannot get value for aggregation", e); + throw new IllegalStateException("Cannot get value for aggregation[" + starTreeDocument.metrics[i] + "]"); + } + } + return new StarTreeDocument(dimensions, metrics); + } else { + for (int i = 0; i < numMetrics; i++) { + try { + aggregatedDocument.metrics[i] = metricAggregatorInfos.get(i) + .getValueAggregators() + .mergeAggregatedValues(starTreeDocument.metrics[i], aggregatedDocument.metrics[i]); + } catch (Exception e) { + logger.error("Cannot apply value to aggregated document for aggregation", e); + throw new IllegalStateException( + "Cannot apply value to aggregated document for aggregation [" + starTreeDocument.metrics[i] + "]" + ); + } + } + return aggregatedDocument; + } + } + + /** + * Builds the star tree using total segment documents + * + * @throws IOException when we are unable to build star-tree + */ + public void build() throws IOException { + long startTime = System.currentTimeMillis(); + logger.debug("Star-tree build is a go with star tree field {}", starTreeField.getName()); + + if (totalSegmentDocs == 0) { + logger.debug("No documents found in the segment"); + return; + } + + Iterator starTreeDocumentIterator = sortAndAggregateStarTreeDocuments(); + logger.debug("Sorting and aggregating star-tree in ms : {}", (System.currentTimeMillis() - startTime)); + build(starTreeDocumentIterator); + logger.debug("Finished Building star-tree in ms : {}", (System.currentTimeMillis() - startTime)); + } + + /** + * Builds the star tree using Star-Tree Document + * + * @param starTreeDocumentIterator contains the sorted and aggregated documents + * @throws IOException when we are unable to build star-tree + */ + void build(Iterator starTreeDocumentIterator) throws IOException { + int numSegmentStarTreeDocument = totalSegmentDocs; + + while (starTreeDocumentIterator.hasNext()) { + appendToStarTree(starTreeDocumentIterator.next()); + } + int numStarTreeDocument = numStarTreeDocs; + logger.debug("Generated star tree docs : [{}] from segment docs : [{}]", numStarTreeDocument, numSegmentStarTreeDocument); + + if (numStarTreeDocs == 0) { + // TODO: Uncomment when segment codec and file formats is ready + // StarTreeBuilderUtils.serializeTree(indexOutput, rootNode, dimensionsSplitOrder, numNodes); + return; + } + + constructStarTree(rootNode, 0, numStarTreeDocs); + int numStarTreeDocumentUnderStarNode = numStarTreeDocs - numStarTreeDocument; + logger.debug( + "Finished constructing star-tree, got [ {} ] tree nodes and [ {} ] starTreeDocument under star-node", + numStarTreeNodes, + numStarTreeDocumentUnderStarNode + ); + + createAggregatedDocs(rootNode); + int numAggregatedStarTreeDocument = numStarTreeDocs - numStarTreeDocument - numStarTreeDocumentUnderStarNode; + logger.debug("Finished creating aggregated documents : {}", 
numAggregatedStarTreeDocument); + + // TODO: When StarTree Codec is ready + // Create doc values indices in disk + // Serialize and save in disk + // Write star tree metadata for off heap implementation + + } + + /** + * Adds a document to star-tree + * + * @param starTreeDocument star-tree document + * @throws IOException throws an exception if we are unable to add the doc + */ + private void appendToStarTree(StarTreeDocument starTreeDocument) throws IOException { + appendStarTreeDocument(starTreeDocument); + numStarTreeDocs++; + } + + /** + * Returns a new star-tree node + * + * @return return new star-tree node + */ + private TreeNode getNewNode() { + numStarTreeNodes++; + return new TreeNode(); + } + + /** + * Implements the algorithm to construct a star-tree + * + * @param node star-tree node + * @param startDocId start document id + * @param endDocId end document id + * @throws IOException throws an exception if we are unable to construct the tree + */ + private void constructStarTree(TreeNode node, int startDocId, int endDocId) throws IOException { + + int childDimensionId = node.dimensionId + 1; + if (childDimensionId == numDimensions) { + return; + } + + // Construct all non-star children nodes + node.childDimensionId = childDimensionId; + Map children = constructNonStarNodes(startDocId, endDocId, childDimensionId); + node.children = children; + + // Construct star-node if required + if (!skipStarNodeCreationForDimensions.contains(childDimensionId) && children.size() > 1) { + children.put((long) ALL, constructStarNode(startDocId, endDocId, childDimensionId)); + } + + // Further split on child nodes if required + for (TreeNode child : children.values()) { + if (child.endDocId - child.startDocId > maxLeafDocuments) { + constructStarTree(child, child.startDocId, child.endDocId); + } + } + } + + /** + * Constructs non star tree nodes + * + * @param startDocId start document id (inclusive) + * @param endDocId end document id (exclusive) + * @param dimensionId id of the dimension in the star tree + * @return root node with non-star nodes constructed + * @throws IOException throws an exception if we are unable to construct non-star nodes + */ + private Map constructNonStarNodes(int startDocId, int endDocId, int dimensionId) throws IOException { + Map nodes = new HashMap<>(); + int nodeStartDocId = startDocId; + Long nodeDimensionValue = getDimensionValue(startDocId, dimensionId); + for (int i = startDocId + 1; i < endDocId; i++) { + Long dimensionValue = getDimensionValue(i, dimensionId); + if (!dimensionValue.equals(nodeDimensionValue)) { + TreeNode child = getNewNode(); + child.dimensionId = dimensionId; + child.dimensionValue = nodeDimensionValue; + child.startDocId = nodeStartDocId; + child.endDocId = i; + nodes.put(nodeDimensionValue, child); + + nodeStartDocId = i; + nodeDimensionValue = dimensionValue; + } + } + TreeNode lastNode = getNewNode(); + lastNode.dimensionId = dimensionId; + lastNode.dimensionValue = nodeDimensionValue; + lastNode.startDocId = nodeStartDocId; + lastNode.endDocId = endDocId; + nodes.put(nodeDimensionValue, lastNode); + return nodes; + } + + /** + * Constructs star tree nodes + * + * @param startDocId start document id (inclusive) + * @param endDocId end document id (exclusive) + * @param dimensionId id of the dimension in the star tree + * @return root node with star nodes constructed + * @throws IOException throws an exception if we are unable to construct non-star nodes + */ + private TreeNode constructStarNode(int startDocId, int endDocId, int 
dimensionId) throws IOException { + TreeNode starNode = getNewNode(); + starNode.dimensionId = dimensionId; + starNode.dimensionValue = ALL; + starNode.isStarNode = true; + starNode.startDocId = numStarTreeDocs; + Iterator starTreeDocumentIterator = generateStarTreeDocumentsForStarNode(startDocId, endDocId, dimensionId); + while (starTreeDocumentIterator.hasNext()) { + appendToStarTree(starTreeDocumentIterator.next()); + } + starNode.endDocId = numStarTreeDocs; + return starNode; + } + + /** + * Returns aggregated star-tree document + * + * @param node star-tree node + * @return aggregated star-tree documents + * @throws IOException throws an exception upon failing to create new aggregated docs based on star tree + */ + private StarTreeDocument createAggregatedDocs(TreeNode node) throws IOException { + StarTreeDocument aggregatedStarTreeDocument = null; + if (node.children == null) { + + // For leaf node + if (node.startDocId == node.endDocId - 1) { + // If it has only one document, use it as the aggregated document + aggregatedStarTreeDocument = getStarTreeDocument(node.startDocId); + node.aggregatedDocId = node.startDocId; + } else { + // If it has multiple documents, aggregate all of them + for (int i = node.startDocId; i < node.endDocId; i++) { + aggregatedStarTreeDocument = reduceStarTreeDocuments(aggregatedStarTreeDocument, getStarTreeDocument(i)); + } + if (null == aggregatedStarTreeDocument) { + throw new IllegalStateException("aggregated star-tree document is null after reducing the documents"); + } + for (int i = node.dimensionId + 1; i < numDimensions; i++) { + aggregatedStarTreeDocument.dimensions[i] = Long.valueOf(STAR_IN_DOC_VALUES_INDEX); + } + node.aggregatedDocId = numStarTreeDocs; + appendToStarTree(aggregatedStarTreeDocument); + } + } else { + // For non-leaf node + if (node.children.containsKey((long) ALL)) { + // If it has star child, use the star child aggregated document directly + for (TreeNode child : node.children.values()) { + if (child.isStarNode) { + aggregatedStarTreeDocument = createAggregatedDocs(child); + node.aggregatedDocId = child.aggregatedDocId; + } else { + createAggregatedDocs(child); + } + } + } else { + // If no star child exists, aggregate all aggregated documents from non-star children + if (node.children.values().size() == 1) { + for (TreeNode child : node.children.values()) { + aggregatedStarTreeDocument = reduceStarTreeDocuments(aggregatedStarTreeDocument, createAggregatedDocs(child)); + node.aggregatedDocId = child.aggregatedDocId; + } + } else { + for (TreeNode child : node.children.values()) { + aggregatedStarTreeDocument = reduceStarTreeDocuments(aggregatedStarTreeDocument, createAggregatedDocs(child)); + } + if (null == aggregatedStarTreeDocument) { + throw new IllegalStateException("aggregated star-tree document is null after reducing the documents"); + } + for (int i = node.dimensionId + 1; i < numDimensions; i++) { + aggregatedStarTreeDocument.dimensions[i] = Long.valueOf(STAR_IN_DOC_VALUES_INDEX); + } + node.aggregatedDocId = numStarTreeDocs; + appendToStarTree(aggregatedStarTreeDocument); + } + } + } + return aggregatedStarTreeDocument; + } + + /** + * Handles the dimension of date time field type + * + * @param fieldName name of the field + * @param val value of the field + * @return returns the converted dimension of the field to a particular granularity + */ + private long handleDateDimension(final String fieldName, final long val) { + // TODO: handle timestamp granularity + return val; + } + + public void close() throws 
IOException { + + } + +} diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/OnHeapStarTreeBuilder.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/OnHeapStarTreeBuilder.java new file mode 100644 index 0000000000000..caeb24838da62 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/OnHeapStarTreeBuilder.java @@ -0,0 +1,213 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.index.compositeindex.datacube.startree.builder; + +import org.apache.lucene.codecs.DocValuesProducer; +import org.apache.lucene.index.SegmentWriteState; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.index.compositeindex.datacube.startree.StarTreeDocument; +import org.opensearch.index.compositeindex.datacube.startree.StarTreeField; +import org.opensearch.index.mapper.MapperService; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/** + * On heap single tree builder + * + * @opensearch.experimental + */ +@ExperimentalApi +public class OnHeapStarTreeBuilder extends BaseStarTreeBuilder { + + private final List starTreeDocuments = new ArrayList<>(); + + /** + * Constructor for OnHeapStarTreeBuilder + * + * @param starTreeField star-tree field + * @param fieldProducerMap helps with document values producer for a particular field + * @param segmentWriteState segment write state + * @param mapperService helps with the numeric type of field + * @throws IOException throws an exception we are unable to construct an onheap star-tree + */ + public OnHeapStarTreeBuilder( + StarTreeField starTreeField, + Map fieldProducerMap, + SegmentWriteState segmentWriteState, + MapperService mapperService + ) throws IOException { + super(starTreeField, fieldProducerMap, segmentWriteState, mapperService); + } + + @Override + public void appendStarTreeDocument(StarTreeDocument starTreeDocument) throws IOException { + starTreeDocuments.add(starTreeDocument); + } + + @Override + public StarTreeDocument getStarTreeDocument(int docId) throws IOException { + return starTreeDocuments.get(docId); + } + + @Override + public List getStarTreeDocuments() { + return starTreeDocuments; + } + + @Override + public Long getDimensionValue(int docId, int dimensionId) throws IOException { + return starTreeDocuments.get(docId).dimensions[dimensionId]; + } + + @Override + public Iterator sortAndAggregateStarTreeDocuments() throws IOException { + int numDocs = totalSegmentDocs; + StarTreeDocument[] starTreeDocuments = new StarTreeDocument[numDocs]; + for (int currentDocId = 0; currentDocId < numDocs; currentDocId++) { + starTreeDocuments[currentDocId] = getSegmentStarTreeDocument(currentDocId); + } + + return sortAndAggregateStarTreeDocuments(starTreeDocuments); + } + + /** + * Sort, aggregates and merges the star-tree documents + * + * @param starTreeDocuments star-tree documents + * @return iterator for star-tree documents + */ + Iterator sortAndAggregateStarTreeDocuments(StarTreeDocument[] starTreeDocuments) { + + // sort all the documents + sortStarTreeDocumentsFromDimensionId(starTreeDocuments, 0); + + // merge the documents + return mergeStarTreeDocuments(starTreeDocuments); 
+ } + + /** + * Merges the star-tree documents + * + * @param starTreeDocuments star-tree documents + * @return iterator to aggregate star-tree documents + */ + private Iterator mergeStarTreeDocuments(StarTreeDocument[] starTreeDocuments) { + return new Iterator<>() { + boolean hasNext = true; + StarTreeDocument currentStarTreeDocument = starTreeDocuments[0]; + // starting from 1 since we have already fetched the 0th document + int docId = 1; + + @Override + public boolean hasNext() { + return hasNext; + } + + @Override + public StarTreeDocument next() { + // aggregate as we move on to the next doc + StarTreeDocument next = reduceSegmentStarTreeDocuments(null, currentStarTreeDocument); + while (docId < starTreeDocuments.length) { + StarTreeDocument starTreeDocument = starTreeDocuments[docId]; + docId++; + if (Arrays.equals(starTreeDocument.dimensions, next.dimensions) == false) { + currentStarTreeDocument = starTreeDocument; + return next; + } else { + next = reduceSegmentStarTreeDocuments(next, starTreeDocument); + } + } + hasNext = false; + return next; + } + }; + } + + /** + * Generates a star-tree for a given star-node + * + * @param startDocId Start document id in the star-tree + * @param endDocId End document id (exclusive) in the star-tree + * @param dimensionId Dimension id of the star-node + * @return iterator for star-tree documents of star-node + * @throws IOException throws when unable to generate star-tree for star-node + */ + @Override + public Iterator generateStarTreeDocumentsForStarNode(int startDocId, int endDocId, int dimensionId) + throws IOException { + int numDocs = endDocId - startDocId; + StarTreeDocument[] starTreeDocuments = new StarTreeDocument[numDocs]; + for (int i = 0; i < numDocs; i++) { + starTreeDocuments[i] = getStarTreeDocument(startDocId + i); + } + + // sort star tree documents from given dimension id (as previous dimension ids have already been processed) + sortStarTreeDocumentsFromDimensionId(starTreeDocuments, dimensionId + 1); + + return new Iterator() { + boolean hasNext = true; + StarTreeDocument currentStarTreeDocument = starTreeDocuments[0]; + int docId = 1; + + private boolean hasSameDimensions(StarTreeDocument starTreeDocument1, StarTreeDocument starTreeDocument2) { + for (int i = dimensionId + 1; i < numDimensions; i++) { + if (!Objects.equals(starTreeDocument1.dimensions[i], starTreeDocument2.dimensions[i])) { + return false; + } + } + return true; + } + + @Override + public boolean hasNext() { + return hasNext; + } + + @Override + public StarTreeDocument next() { + StarTreeDocument next = reduceStarTreeDocuments(null, currentStarTreeDocument); + next.dimensions[dimensionId] = Long.valueOf(STAR_IN_DOC_VALUES_INDEX); + while (docId < numDocs) { + StarTreeDocument starTreeDocument = starTreeDocuments[docId]; + docId++; + if (!hasSameDimensions(starTreeDocument, currentStarTreeDocument)) { + currentStarTreeDocument = starTreeDocument; + return next; + } else { + next = reduceStarTreeDocuments(next, starTreeDocument); + } + } + hasNext = false; + return next; + } + }; + } + + /** + * Sorts the star-tree documents from the given dimension id + * + * @param starTreeDocuments star-tree documents + * @param dimensionId id of the dimension + */ + private void sortStarTreeDocumentsFromDimensionId(StarTreeDocument[] starTreeDocuments, int dimensionId) { + Arrays.sort(starTreeDocuments, (o1, o2) -> { + for (int i = dimensionId; i < numDimensions; i++) { + if (!Objects.equals(o1.dimensions[i], o2.dimensions[i])) { + return Long.compare(o1.dimensions[i], 
o2.dimensions[i]); + } + } + return 0; + }); + } +} diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilder.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilder.java new file mode 100644 index 0000000000000..20af1b3bc7935 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilder.java @@ -0,0 +1,29 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.compositeindex.datacube.startree.builder; + +import org.opensearch.common.annotation.ExperimentalApi; + +import java.io.Closeable; +import java.io.IOException; + +/** + * A star-tree builder that builds a single star-tree. + * + * @opensearch.experimental + */ +@ExperimentalApi +public interface StarTreeBuilder extends Closeable { + + /** + * Builds the star tree based on the star-tree field + * @throws IOException when we are unable to build the star-tree + */ + void build() throws IOException; +} diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeDocValuesIteratorAdapter.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeDocValuesIteratorAdapter.java new file mode 100644 index 0000000000000..cb0350bb110b0 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeDocValuesIteratorAdapter.java @@ -0,0 +1,82 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.compositeindex.datacube.startree.builder; + +import org.apache.lucene.codecs.DocValuesProducer; +import org.apache.lucene.index.DocValuesType; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.search.DocIdSetIterator; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.index.compositeindex.datacube.startree.utils.SequentialDocValuesIterator; + +import java.io.IOException; + +/** + * A factory class to return the respective doc values iterator based on the doc values type.
+ * + * @opensearch.experimental + */ +@ExperimentalApi +public class StarTreeDocValuesIteratorAdapter { + + /** + * Creates an iterator for the given doc values type and field using the doc values producer + */ + public SequentialDocValuesIterator getDocValuesIterator(DocValuesType type, FieldInfo field, DocValuesProducer producer) + throws IOException { + switch (type) { + case SORTED_NUMERIC: + return new SequentialDocValuesIterator(producer.getSortedNumeric(field)); + default: + throw new IllegalArgumentException("Unsupported DocValuesType: " + type); + } + } + + /** + * Returns the next value for the given iterator + */ + public Long getNextValue(SequentialDocValuesIterator sequentialDocValuesIterator, int currentDocId) throws IOException { + if (sequentialDocValuesIterator.getDocIdSetIterator() instanceof SortedNumericDocValues) { + SortedNumericDocValues sortedNumericDocValues = (SortedNumericDocValues) sequentialDocValuesIterator.getDocIdSetIterator(); + if (sequentialDocValuesIterator.getDocId() < 0 || sequentialDocValuesIterator.getDocId() == DocIdSetIterator.NO_MORE_DOCS) { + throw new IllegalStateException("invalid doc id to fetch the next value"); + } + + if (sequentialDocValuesIterator.getDocValue() == null) { + sequentialDocValuesIterator.setDocValue(sortedNumericDocValues.nextValue()); + return sequentialDocValuesIterator.getDocValue(); + } + + if (sequentialDocValuesIterator.getDocId() == currentDocId) { + Long nextValue = sequentialDocValuesIterator.getDocValue(); + sequentialDocValuesIterator.setDocValue(null); + return nextValue; + } else { + return null; + } + } else { + throw new IllegalStateException("Unsupported Iterator: " + sequentialDocValuesIterator.getDocIdSetIterator().toString()); + } + } + + /** + * Moves to the next doc in the iterator + * Returns the doc id for the next document from the given iterator + */ + public int nextDoc(SequentialDocValuesIterator iterator, int currentDocId) throws IOException { + if (iterator.getDocValue() != null) { + return iterator.getDocId(); + } + iterator.setDocId(iterator.getDocIdSetIterator().nextDoc()); + iterator.setDocValue(this.getNextValue(iterator, currentDocId)); + return iterator.getDocId(); + } + +} diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreesBuilder.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreesBuilder.java new file mode 100644 index 0000000000000..eaf9ae1dcdaa1 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreesBuilder.java @@ -0,0 +1,114 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.compositeindex.datacube.startree.builder; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.codecs.DocValuesProducer; +import org.apache.lucene.index.SegmentWriteState; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.index.compositeindex.datacube.startree.StarTreeField; +import org.opensearch.index.mapper.CompositeMappedFieldType; +import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.mapper.StarTreeMapper; + +import java.io.Closeable; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +/** + * Builder to construct star-trees based on multiple star-tree fields. + * + * @opensearch.experimental + */ +@ExperimentalApi +public class StarTreesBuilder implements Closeable { + + private static final Logger logger = LogManager.getLogger(StarTreesBuilder.class); + + private final List<StarTreeField> starTreeFields; + private final SegmentWriteState state; + private final Map<String, DocValuesProducer> fieldProducerMap; + private final MapperService mapperService; + + public StarTreesBuilder( + Map<String, DocValuesProducer> fieldProducerMap, + SegmentWriteState segmentWriteState, + MapperService mapperService + ) { + List<StarTreeField> starTreeFields = new ArrayList<>(); + for (CompositeMappedFieldType compositeMappedFieldType : mapperService.getCompositeFieldTypes()) { + if (compositeMappedFieldType instanceof StarTreeMapper.StarTreeFieldType) { + StarTreeMapper.StarTreeFieldType starTreeFieldType = (StarTreeMapper.StarTreeFieldType) compositeMappedFieldType; + starTreeFields.add( + new StarTreeField( + starTreeFieldType.name(), + starTreeFieldType.getDimensions(), + starTreeFieldType.getMetrics(), + starTreeFieldType.getStarTreeConfig() + ) + ); + } + } + + this.starTreeFields = starTreeFields; + this.fieldProducerMap = fieldProducerMap; + this.state = segmentWriteState; + this.mapperService = mapperService; + }
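+ + // Illustrative example: a mapping that defines a single star-tree field over dimensions [status, port] + // with a sum(size) metric yields one StarTreeField above; build() below materializes it through the ON_HEAP builder.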
+ + /** + * Builds the star-trees. + */ + public void build() throws IOException { + if (starTreeFields.isEmpty()) { + logger.debug("no star-tree fields found, returning from star-tree builder"); + return; + } + long startTime = System.currentTimeMillis(); + int numStarTrees = starTreeFields.size(); + logger.debug("Started building {} star-trees based on the star-tree fields", numStarTrees); + + // Build all star-trees + for (StarTreeField starTreeField : starTreeFields) { + try (StarTreeBuilder starTreeBuilder = getStarTreeBuilder(starTreeField, fieldProducerMap, state, mapperService)) { + starTreeBuilder.build(); + } + } + logger.debug("Took {} ms to build {} star-trees based on the star-tree fields", System.currentTimeMillis() - startTime, numStarTrees); + } + + @Override + public void close() throws IOException { + + } + + StarTreeBuilder getStarTreeBuilder( + StarTreeField starTreeField, + Map<String, DocValuesProducer> fieldProducerMap, + SegmentWriteState state, + MapperService mapperService + ) throws IOException { + switch (starTreeField.getStarTreeConfig().getBuildMode()) { + case ON_HEAP: + return new OnHeapStarTreeBuilder(starTreeField, fieldProducerMap, state, mapperService); + default: + throw new IllegalArgumentException( + String.format( + Locale.ROOT, + "No star tree implementation is available for [%s] build mode", + starTreeField.getStarTreeConfig().getBuildMode() + ) + ); + } + } +} diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/package-info.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/package-info.java new file mode 100644 index 0000000000000..9c97b076371a3 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/package-info.java @@ -0,0 +1,14 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Builders for Composite Index Star Tree + * + * @opensearch.experimental + */ +package org.opensearch.index.compositeindex.datacube.startree.builder; diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/package-info.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/package-info.java index 4f4e670478e2f..6d6cb420f4a9e 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/package-info.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/package-info.java @@ -7,5 +7,7 @@ */ /** * Core classes for handling star tree index. + * + * @opensearch.experimental */ package org.opensearch.index.compositeindex.datacube.startree; diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/SequentialDocValuesIterator.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/SequentialDocValuesIterator.java new file mode 100644 index 0000000000000..cf5f3e94c1ca6 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/SequentialDocValuesIterator.java @@ -0,0 +1,137 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.index.compositeindex.datacube.startree.utils; + +import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.search.DocIdSetIterator; +import org.opensearch.common.annotation.ExperimentalApi; + +import java.io.IOException; + +/** + * Coordinates the reading of documents across multiple DocIdSetIterators. + * It encapsulates a single DocIdSetIterator and maintains the latest document ID and its associated value. + * + * @opensearch.experimental + */ +@ExperimentalApi +public class SequentialDocValuesIterator { + + /** + * The doc id set iterator associated for each field. + */ + private final DocIdSetIterator docIdSetIterator; + + /** + * The value associated with the latest document. + */ + private Long docValue; + + /** + * The id of the latest document. + */ + private int docId; + + /** + * Constructs a new SequentialDocValuesIterator instance with the given DocIdSetIterator. + * + * @param docIdSetIterator the DocIdSetIterator to be associated with this instance + */ + public SequentialDocValuesIterator(DocIdSetIterator docIdSetIterator) { + this.docIdSetIterator = docIdSetIterator; + } + + /** + * Constructs a new SequentialDocValuesIterator instance with the given SortedNumericDocValues. + * + */ + public SequentialDocValuesIterator() { + this.docIdSetIterator = new SortedNumericDocValues() { + @Override + public long nextValue() throws IOException { + return 0; + } + + @Override + public int docValueCount() { + return 0; + } + + @Override + public boolean advanceExact(int i) throws IOException { + return false; + } + + @Override + public int docID() { + return 0; + } + + @Override + public int nextDoc() throws IOException { + return 0; + } + + @Override + public int advance(int i) throws IOException { + return 0; + } + + @Override + public long cost() { + return 0; + } + }; + } + + /** + * Returns the value associated with the latest document. + * + * @return the value associated with the latest document + */ + public Long getDocValue() { + return docValue; + } + + /** + * Sets the value associated with the latest document. + * + * @param docValue the value to be associated with the latest document + */ + public void setDocValue(Long docValue) { + this.docValue = docValue; + } + + /** + * Returns the id of the latest document. + * + * @return the id of the latest document + */ + public int getDocId() { + return docId; + } + + /** + * Sets the id of the latest document. + * + * @param docId the ID of the latest document + */ + public void setDocId(int docId) { + this.docId = docId; + } + + /** + * Returns the DocIdSetIterator associated with this instance. + * + * @return the DocIdSetIterator associated with this instance + */ + public DocIdSetIterator getDocIdSetIterator() { + return docIdSetIterator; + } +} diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/TreeNode.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/TreeNode.java new file mode 100644 index 0000000000000..5cf737c61ab2d --- /dev/null +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/TreeNode.java @@ -0,0 +1,65 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ +package org.opensearch.index.compositeindex.datacube.startree.utils; + +import org.opensearch.common.annotation.ExperimentalApi; + +import java.util.Map; + +/** + * Represents a node in a tree data structure, specifically designed for a star-tree implementation. + * A star-tree node will represent both star and non-star nodes. + * + * @opensearch.experimental + */ +@ExperimentalApi +public class TreeNode { + + public static final int ALL = -1; + + /** + * The dimension id for the dimension (field) associated with this star-tree node. + */ + public int dimensionId = ALL; + + /** + * The starting document id (inclusive) associated with this star-tree node. + */ + public int startDocId = ALL; + + /** + * The ending document id (exclusive) associated with this star-tree node. + */ + public int endDocId = ALL; + + /** + * The aggregated document id associated with this star-tree node. + */ + public int aggregatedDocId = ALL; + + /** + * The child dimension identifier associated with this star-tree node. + */ + public int childDimensionId = ALL; + + /** + * The value of the dimension associated with this star-tree node. + */ + public long dimensionValue = ALL; + + /** + * A flag indicating whether this node is a star node (a node that represents an aggregation of all dimensions). + */ + public boolean isStarNode = false; + + /** + * A map containing the child nodes of this star-tree node, keyed by their dimension value. + */ + public Map children; +} diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/package-info.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/package-info.java new file mode 100644 index 0000000000000..c7e8b04d42178 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/package-info.java @@ -0,0 +1,14 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
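To make the field conventions concrete, here is a hedged construction sketch for TreeNode. It assumes the children map is keyed by dimension value as a Long (the generic type parameters are elided in the rendered diff), with TreeNode.ALL (-1) doubling as the key and sentinel for the star child:

```java
import java.util.HashMap;
import java.util.Map;

public final class TreeNodeExample {
    static TreeNode smallTree() {
        TreeNode root = new TreeNode();
        root.dimensionId = 0;
        root.startDocId = 0;   // inclusive
        root.endDocId = 10;    // exclusive

        // Star child: aggregates across all values of the child dimension.
        TreeNode starChild = new TreeNode();
        starChild.dimensionId = 1;
        starChild.isStarNode = true;

        // Non-star child: pinned to one concrete dimension value.
        TreeNode valueChild = new TreeNode();
        valueChild.dimensionId = 1;
        valueChild.dimensionValue = 443L;

        Map<Long, TreeNode> children = new HashMap<>();
        children.put((long) TreeNode.ALL, starChild);
        children.put(443L, valueChild);
        root.children = children;
        return root;
    }
}
```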
+ */ + +/** + * Utility to support Composite Index Star Tree + * + * @opensearch.experimental + */ +package org.opensearch.index.compositeindex.datacube.startree.utils; diff --git a/server/src/main/java/org/opensearch/index/mapper/ConstantKeywordFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/ConstantKeywordFieldMapper.java index f4730c70362d1..2edd817f61f61 100644 --- a/server/src/main/java/org/opensearch/index/mapper/ConstantKeywordFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/ConstantKeywordFieldMapper.java @@ -68,11 +68,11 @@ private static ConstantKeywordFieldMapper toType(FieldMapper in) { */ public static class Builder extends ParametrizedFieldMapper.Builder { - private final Parameter value; + private final Parameter value = Parameter.stringParam(valuePropertyName, false, m -> toType(m).value, null); public Builder(String name, String value) { super(name); - this.value = Parameter.stringParam(valuePropertyName, false, m -> toType(m).value, value); + this.value.setValue(value); } @Override diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java index 9a9de6c819424..a5e0c10f72301 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java @@ -11,17 +11,23 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.Version; +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.RoutingTable; import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.Settings; import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; +import org.opensearch.node.remotestore.RemoteStoreNodeService; import java.nio.ByteBuffer; import java.util.Arrays; import java.util.Base64; +import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Locale; @@ -29,7 +35,9 @@ import java.util.Objects; import java.util.Optional; import java.util.function.Function; +import java.util.stream.Collectors; +import static org.opensearch.index.remote.RemoteMigrationIndexMetadataUpdater.indexHasRemoteStoreSettings; import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING; import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING; import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_TRANSLOG_METADATA; @@ -214,13 +222,13 @@ public static Map determineRemoteStoreCustomMetadataDuringMigrat // does not support custom metadata. 
// https://github.com/opensearch-project/OpenSearch/issues/13745 boolean blobStoreMetadataEnabled = false; - boolean translogMetadata = Version.CURRENT.compareTo(minNodeVersion) <= 0 + boolean translogMetadata = Version.V_2_15_0.compareTo(minNodeVersion) <= 0 && CLUSTER_REMOTE_STORE_TRANSLOG_METADATA.get(clusterSettings) && blobStoreMetadataEnabled; remoteCustomData.put(IndexMetadata.TRANSLOG_METADATA_KEY, Boolean.toString(translogMetadata)); - RemoteStoreEnums.PathType pathType = Version.CURRENT.compareTo(minNodeVersion) <= 0 + RemoteStoreEnums.PathType pathType = Version.V_2_15_0.compareTo(minNodeVersion) <= 0 ? CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.get(clusterSettings) : RemoteStoreEnums.PathType.FIXED; RemoteStoreEnums.PathHashAlgorithm pathHashAlgorithm = pathType == RemoteStoreEnums.PathType.FIXED @@ -250,4 +258,119 @@ public static Map getRemoteStoreRepoName(DiscoveryNodes discover .findFirst(); return remoteNode.map(RemoteStoreNodeAttribute::getDataRepoNames).orElseGet(HashMap::new); } + + /** + * Invoked after a cluster settings update. + * Checks if the applied cluster settings have switched the cluster to STRICT mode. + * If so, checks and applies appropriate index settings depending on the current set + * of node types in the cluster. + * This has been intentionally done after the cluster settings update + * flow. That way we are not interfering with the usual settings update + * and the cluster state mutation that comes along with it. + * + * @param isCompatibilityModeChanging flag passed from cluster settings update call to denote if a compatibility mode change has been done + * @param request request payload passed from cluster settings update + * @param currentState cluster state generated after the changed cluster settings were applied + * @param logger Logger reference + * @return Mutated cluster state with remote store index settings applied; no-op if the cluster is not switching to `STRICT` compatibility mode + */ + public static ClusterState checkAndFinalizeRemoteStoreMigration( + boolean isCompatibilityModeChanging, + ClusterUpdateSettingsRequest request, + ClusterState currentState, + Logger logger + ) { + if (isCompatibilityModeChanging && isSwitchToStrictCompatibilityMode(request)) { + return finalizeMigration(currentState, logger); + } + return currentState; + } + + /** + * Finalizes the docrep to remote-store migration process by applying remote store based index settings + * on indices that are missing them. No-op if all indices already have the settings applied through + * IndexMetadataUpdater. + * + * @param incomingState mutated cluster state after cluster settings were applied + * @return new cluster state with index settings updated + */ + public static ClusterState finalizeMigration(ClusterState incomingState, Logger logger) { + Map discoveryNodeMap = incomingState.nodes().getNodes(); + if (discoveryNodeMap.isEmpty() == false) { + // At this point, we have already validated that all nodes in the cluster are of uniform type. + // Either all of them are remote store enabled, or all of them are docrep enabled + boolean remoteStoreEnabledNodePresent = discoveryNodeMap.values().stream().findFirst().get().isRemoteStoreNode(); + if (remoteStoreEnabledNodePresent == true) { + List indicesWithoutRemoteStoreSettings = getIndicesWithoutRemoteStoreSettings(incomingState, logger); + if (indicesWithoutRemoteStoreSettings.isEmpty() == true) { + logger.info("All indices in the cluster have remote store based index settings"); + } else { + Metadata mutatedMetadata = applyRemoteStoreSettings(incomingState, indicesWithoutRemoteStoreSettings, logger); + return ClusterState.builder(incomingState).metadata(mutatedMetadata).build(); + } + } else { + logger.debug("No nodes in the cluster are remote store enabled. Skipping."); + } + } + return incomingState; + } + + /** + * Filters out indices which do not have remote store based + * index settings applied even after all shard copies have + * migrated to remote store enabled nodes + */ + private static List getIndicesWithoutRemoteStoreSettings(ClusterState clusterState, Logger logger) { + Collection allIndicesMetadata = clusterState.metadata().indices().values(); + if (allIndicesMetadata.isEmpty() == false) { + List indicesWithoutRemoteSettings = allIndicesMetadata.stream() + .filter(idxMd -> indexHasRemoteStoreSettings(idxMd.getSettings()) == false) + .collect(Collectors.toList()); + logger.debug( + "Attempting to switch to strict mode. Count of indices without remote store settings {}", + indicesWithoutRemoteSettings.size() + ); + return indicesWithoutRemoteSettings; + } + return Collections.emptyList(); + } + + /** + * Applies remote store index settings through {@link RemoteMigrationIndexMetadataUpdater} + */ + private static Metadata applyRemoteStoreSettings( + ClusterState clusterState, + List indicesWithoutRemoteStoreSettings, + Logger logger + ) { + Metadata.Builder metadataBuilder = Metadata.builder(clusterState.getMetadata()); + RoutingTable currentRoutingTable = clusterState.getRoutingTable(); + DiscoveryNodes currentDiscoveryNodes = clusterState.getNodes(); + Settings currentClusterSettings = clusterState.metadata().settings(); + for (IndexMetadata indexMetadata : indicesWithoutRemoteStoreSettings) { + IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder(indexMetadata); + RemoteMigrationIndexMetadataUpdater indexMetadataUpdater = new RemoteMigrationIndexMetadataUpdater( + currentDiscoveryNodes, + currentRoutingTable, + indexMetadata, + currentClusterSettings, + logger + ); + indexMetadataUpdater.maybeAddRemoteIndexSettings(indexMetadataBuilder, indexMetadata.getIndex().getName()); + metadataBuilder.put(indexMetadataBuilder); + } + return metadataBuilder.build(); + } + + /** + * Checks if the incoming cluster settings payload is attempting to switch + * the cluster to `STRICT` compatibility mode. + * Visible only for tests + */ + public static boolean isSwitchToStrictCompatibilityMode(ClusterUpdateSettingsRequest request) { + Settings incomingSettings = Settings.builder().put(request.persistentSettings()).put(request.transientSettings()).build(); + return RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING.get( + incomingSettings + ) == RemoteStoreNodeService.CompatibilityMode.STRICT; + } } diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java index 02290b6a5e566..c4908f8c5fc4b 100644 ---
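One subtlety worth calling out in isSwitchToStrictCompatibilityMode above: transient settings are layered over persistent ones, so a transient value wins when both carry the compatibility mode. A minimal, self-contained sketch of that merge behavior (the setting key literal here is illustrative; the real check goes through REMOTE_STORE_COMPATIBILITY_MODE_SETTING):

```java
import org.opensearch.common.settings.Settings;

public final class SettingsMergeSketch {
    public static void main(String[] args) {
        Settings persistent = Settings.builder().put("remote_store.compatibility_mode", "mixed").build();
        Settings transientSettings = Settings.builder().put("remote_store.compatibility_mode", "strict").build();

        // Same order as the production code: persistent first, transient layered on top.
        Settings merged = Settings.builder().put(persistent).put(transientSettings).build();
        System.out.println(merged.get("remote_store.compatibility_mode")); // prints "strict"
    }
}
```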
a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java @@ -109,6 +109,7 @@ import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.remote.RemoteStorePathStrategy; +import org.opensearch.index.remote.RemoteStorePathStrategy.PathInput; import org.opensearch.index.snapshots.IndexShardRestoreFailedException; import org.opensearch.index.snapshots.IndexShardSnapshotStatus; import org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot; @@ -157,6 +158,7 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Objects; import java.util.Optional; import java.util.Set; import java.util.concurrent.BlockingQueue; @@ -174,6 +176,8 @@ import java.util.stream.LongStream; import java.util.stream.Stream; +import static org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm.FNV_1A_COMPOSITE_1; +import static org.opensearch.index.remote.RemoteStoreEnums.PathType.HASHED_PREFIX; import static org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo.canonicalName; import static org.opensearch.repositories.blobstore.ChecksumBlobStoreFormat.SNAPSHOT_ONLY_FORMAT_PARAMS; @@ -302,6 +306,16 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp Setting.Property.NodeScope ); + /** + * Setting to enable prefix mode verification. In this mode, a hashed string is prepended at the prefix of the base + * path during repository verification. + */ + public static final Setting PREFIX_MODE_VERIFICATION_SETTING = Setting.boolSetting( + "prefix_mode_verification", + false, + Setting.Property.NodeScope + ); + protected volatile boolean supportURLRepo; private volatile int maxShardBlobDeleteBatch; @@ -369,6 +383,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp private final boolean isSystemRepository; + private final boolean prefixModeVerification; + private final Object lock = new Object(); private final SetOnce blobContainer = new SetOnce<>(); @@ -426,6 +442,7 @@ protected BlobStoreRepository( readRepositoryMetadata(repositoryMetadata); isSystemRepository = SYSTEM_REPOSITORY_SETTING.get(metadata.settings()); + prefixModeVerification = PREFIX_MODE_VERIFICATION_SETTING.get(metadata.settings()); this.namedXContentRegistry = namedXContentRegistry; this.threadPool = clusterService.getClusterApplierService().threadPool(); this.clusterService = clusterService; @@ -767,6 +784,10 @@ protected BlobStore getBlobStore() { return blobStore.get(); } + boolean getPrefixModeVerification() { + return prefixModeVerification; + } + /** * maintains single lazy instance of {@link BlobContainer} */ @@ -1918,7 +1939,7 @@ public String startVerification() { } else { String seed = UUIDs.randomBase64UUID(); byte[] testBytes = Strings.toUTF8Bytes(seed); - BlobContainer testContainer = blobStore().blobContainer(basePath().add(testBlobPrefix(seed))); + BlobContainer testContainer = testContainer(seed); BytesArray bytes = new BytesArray(testBytes); if (isSystemRepository == false) { try (InputStream stream = bytes.streamInput()) { @@ -1936,12 +1957,26 @@ public String startVerification() { } } + /** + * Returns the blobContainer depending on the seed and {@code prefixModeVerification}. 
+ */ + private BlobContainer testContainer(String seed) { + BlobPath testBlobPath; + if (prefixModeVerification == true) { + PathInput pathInput = PathInput.builder().basePath(basePath()).indexUUID(seed).build(); + testBlobPath = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); + } else { + testBlobPath = basePath(); + } + assert Objects.nonNull(testBlobPath); + return blobStore().blobContainer(testBlobPath.add(testBlobPrefix(seed))); + } + @Override public void endVerification(String seed) { if (isReadOnly() == false) { try { - final String testPrefix = testBlobPrefix(seed); - blobStore().blobContainer(basePath().add(testPrefix)).delete(); + testContainer(seed).delete(); } catch (Exception exp) { throw new RepositoryVerificationException(metadata.name(), "cannot delete test data at " + basePath(), exp); } @@ -3266,7 +3301,7 @@ public void verify(String seed, DiscoveryNode localNode) { ); } } else { - BlobContainer testBlobContainer = blobStore().blobContainer(basePath().add(testBlobPrefix(seed))); + BlobContainer testBlobContainer = testContainer(seed); try { BytesArray bytes = new BytesArray(seed); try (InputStream stream = bytes.streamInput()) { diff --git a/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java b/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java index 35c5c5e605b4d..37e884502b613 100644 --- a/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java +++ b/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java @@ -47,7 +47,6 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsException; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.core.action.ActionListener; import org.opensearch.core.action.ActionResponse; @@ -85,8 +84,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; -import static org.opensearch.common.util.FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL; -import static org.opensearch.index.remote.RemoteMigrationIndexMetadataUpdaterTests.createIndexMetadataWithDocrepSettings; import static org.opensearch.index.remote.RemoteMigrationIndexMetadataUpdaterTests.createIndexMetadataWithRemoteStoreSettings; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; @@ -718,9 +715,6 @@ protected void masterOperation(Task task, Request request, ClusterState state, A } public void testDontAllowSwitchingToStrictCompatibilityModeForMixedCluster() { - Settings nodeSettings = Settings.builder().put(REMOTE_STORE_MIGRATION_EXPERIMENTAL, "true").build(); - FeatureFlags.initializeFeatureFlags(nodeSettings); - // request to change cluster compatibility mode to STRICT Settings currentCompatibilityModeSettings = Settings.builder() .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), RemoteStoreNodeService.CompatibilityMode.MIXED) @@ -809,84 +803,7 @@ public void testDontAllowSwitchingToStrictCompatibilityModeForMixedCluster() { transportClusterUpdateSettingsAction.validateCompatibilityModeSettingRequest(request, sameTypeClusterState); } - public void 
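The net effect of testContainer(): with prefix_mode_verification enabled, the verification blob no longer lives directly under the base path but under a hashed prefix derived from the seed. A hedged sketch of the path selection, where "tests-" + seed stands in for the private testBlobPrefix helper (an assumption, since that helper is not shown in this diff):

```java
import org.opensearch.common.blobstore.BlobPath;
import org.opensearch.index.remote.RemoteStorePathStrategy.PathInput;

import static org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm.FNV_1A_COMPOSITE_1;
import static org.opensearch.index.remote.RemoteStoreEnums.PathType.HASHED_PREFIX;

public final class VerificationPathSketch {
    // Returns the container path used for repository verification.
    static BlobPath verificationPath(BlobPath basePath, String seed, boolean prefixMode) {
        BlobPath root = prefixMode
            ? HASHED_PREFIX.path(PathInput.builder().basePath(basePath).indexUUID(seed).build(), FNV_1A_COMPOSITE_1)
            : basePath;
        return root.add("tests-" + seed);
    }
}
```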
testDontAllowSwitchingToStrictCompatibilityModeWithoutRemoteIndexSettings() { - Settings nodeSettings = Settings.builder().put(REMOTE_STORE_MIGRATION_EXPERIMENTAL, "true").build(); - FeatureFlags.initializeFeatureFlags(nodeSettings); - Settings currentCompatibilityModeSettings = Settings.builder() - .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), RemoteStoreNodeService.CompatibilityMode.MIXED) - .build(); - Settings intendedCompatibilityModeSettings = Settings.builder() - .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), RemoteStoreNodeService.CompatibilityMode.STRICT) - .build(); - ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest(); - request.persistentSettings(intendedCompatibilityModeSettings); - DiscoveryNode remoteNode1 = new DiscoveryNode( - UUIDs.base64UUID(), - buildNewFakeTransportAddress(), - getRemoteStoreNodeAttributes(), - DiscoveryNodeRole.BUILT_IN_ROLES, - Version.CURRENT - ); - DiscoveryNode remoteNode2 = new DiscoveryNode( - UUIDs.base64UUID(), - buildNewFakeTransportAddress(), - getRemoteStoreNodeAttributes(), - DiscoveryNodeRole.BUILT_IN_ROLES, - Version.CURRENT - ); - DiscoveryNodes discoveryNodes = DiscoveryNodes.builder() - .add(remoteNode1) - .localNodeId(remoteNode1.getId()) - .add(remoteNode2) - .localNodeId(remoteNode2.getId()) - .build(); - AllocationService allocationService = new AllocationService( - new AllocationDeciders(Collections.singleton(new MaxRetryAllocationDecider())), - new TestGatewayAllocator(), - new BalancedShardsAllocator(Settings.EMPTY), - EmptyClusterInfoService.INSTANCE, - EmptySnapshotsInfoService.INSTANCE - ); - TransportClusterUpdateSettingsAction transportClusterUpdateSettingsAction = new TransportClusterUpdateSettingsAction( - transportService, - clusterService, - threadPool, - allocationService, - new ActionFilters(Collections.emptySet()), - new IndexNameExpressionResolver(new ThreadContext(Settings.EMPTY)), - clusterService.getClusterSettings() - ); - - Metadata nonRemoteIndexMd = Metadata.builder(createIndexMetadataWithDocrepSettings("test")) - .persistentSettings(currentCompatibilityModeSettings) - .build(); - final ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) - .metadata(nonRemoteIndexMd) - .nodes(discoveryNodes) - .build(); - final SettingsException exception = expectThrows( - SettingsException.class, - () -> transportClusterUpdateSettingsAction.validateCompatibilityModeSettingRequest(request, clusterState) - ); - assertEquals( - "can not switch to STRICT compatibility mode since all indices in the cluster does not have remote store based index settings", - exception.getMessage() - ); - - Metadata remoteIndexMd = Metadata.builder(createIndexMetadataWithRemoteStoreSettings("test")) - .persistentSettings(currentCompatibilityModeSettings) - .build(); - ClusterState clusterStateWithRemoteIndices = ClusterState.builder(ClusterName.DEFAULT) - .metadata(remoteIndexMd) - .nodes(discoveryNodes) - .build(); - transportClusterUpdateSettingsAction.validateCompatibilityModeSettingRequest(request, clusterStateWithRemoteIndices); - } - public void testDontAllowSwitchingCompatibilityModeForClusterWithMultipleVersions() { - Settings nodeSettings = Settings.builder().put(REMOTE_STORE_MIGRATION_EXPERIMENTAL, "true").build(); - FeatureFlags.initializeFeatureFlags(nodeSettings); - // request to change cluster compatibility mode boolean toStrictMode = randomBoolean(); Settings currentCompatibilityModeSettings = Settings.builder() @@ -988,5 +905,4 @@ private Map getRemoteStoreNodeAttributes() { 
remoteStoreNodeAttributes.put(REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, "my-translog-repo-1"); return remoteStoreNodeAttributes; } - } diff --git a/server/src/test/java/org/opensearch/action/update/UpdateRequestTests.java b/server/src/test/java/org/opensearch/action/update/UpdateRequestTests.java index b70fda0d86240..e85dfa8cca556 100644 --- a/server/src/test/java/org/opensearch/action/update/UpdateRequestTests.java +++ b/server/src/test/java/org/opensearch/action/update/UpdateRequestTests.java @@ -247,6 +247,7 @@ public void testFromXContent() throws Exception { assertThat(params, notNullValue()); assertThat(params.size(), equalTo(1)); assertThat(params.get("param1").toString(), equalTo("value1")); + assertThat(request.upsertRequest().index(), equalTo("test")); Map upsertDoc = XContentHelper.convertToMap( request.upsertRequest().source(), true, @@ -304,6 +305,7 @@ public void testFromXContent() throws Exception { ) ); Map doc = request.doc().sourceAsMap(); + assertThat(request.doc().index(), equalTo("test")); assertThat(doc.get("field1").toString(), equalTo("value1")); assertThat(((Map) doc.get("compound")).get("field2").toString(), equalTo("value2")); } @@ -662,7 +664,7 @@ public void testToString() throws IOException { request.toString(), equalTo( "update {[test][1], doc_as_upsert[false], " - + "doc[index {[null][null], source[{\"body\":\"bar\"}]}], scripted_upsert[false], detect_noop[true]}" + + "doc[index {[test][null], source[{\"body\":\"bar\"}]}], scripted_upsert[false], detect_noop[true]}" ) ); } diff --git a/server/src/test/java/org/opensearch/cluster/applicationtemplates/ClusterStateSystemTemplateLoaderTests.java b/server/src/test/java/org/opensearch/cluster/applicationtemplates/ClusterStateSystemTemplateLoaderTests.java index 63caccc87e67a..c7cfab6d38e04 100644 --- a/server/src/test/java/org/opensearch/cluster/applicationtemplates/ClusterStateSystemTemplateLoaderTests.java +++ b/server/src/test/java/org/opensearch/cluster/applicationtemplates/ClusterStateSystemTemplateLoaderTests.java @@ -145,4 +145,9 @@ public void testLoadTemplateVersionMismatch() throws IOException { ) ); } + + @Override + protected boolean resetNodeAfterTest() { + return true; + } } diff --git a/server/src/test/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceFactoryTests.java b/server/src/test/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceFactoryTests.java index 39294ee8da41e..86f4b9502d6ab 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceFactoryTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceFactoryTests.java @@ -40,7 +40,8 @@ public void testGetServiceWhenRemoteRoutingDisabled() { repositoriesService, settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - threadPool + threadPool, + "test-cluster" ); assertTrue(service instanceof NoopRemoteRoutingTableService); } @@ -56,7 +57,8 @@ public void testGetServiceWhenRemoteRoutingEnabled() { repositoriesService, settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - threadPool + threadPool, + "test-cluster" ); assertTrue(service instanceof InternalRemoteRoutingTableService); } diff --git a/server/src/test/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceTests.java b/server/src/test/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceTests.java index 839ebe1ff8301..564c7f7aed304 100644 --- 
a/server/src/test/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceTests.java @@ -19,27 +19,25 @@ import org.opensearch.cluster.routing.IndexRoutingTable; import org.opensearch.cluster.routing.RoutingTable; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.CheckedRunnable; -import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStore; -import org.opensearch.common.blobstore.stream.write.WriteContext; import org.opensearch.common.blobstore.stream.write.WritePriority; import org.opensearch.common.compress.DeflateCompressor; -import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; +import org.opensearch.common.util.TestCapturingListener; import org.opensearch.core.action.ActionListener; -import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.compress.Compressor; +import org.opensearch.core.compress.NoneCompressor; import org.opensearch.core.index.Index; import org.opensearch.gateway.remote.ClusterMetadataManifest; -import org.opensearch.gateway.remote.RemoteStateTransferException; -import org.opensearch.gateway.remote.routingtable.RemoteIndexRoutingTable; +import org.opensearch.gateway.remote.RemoteClusterStateUtils; import org.opensearch.index.remote.RemoteStoreEnums; import org.opensearch.index.remote.RemoteStorePathStrategy; import org.opensearch.index.remote.RemoteStoreUtils; +import org.opensearch.index.translog.transfer.BlobStoreTransferService; import org.opensearch.repositories.FilterRepository; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.RepositoryMissingException; @@ -51,33 +49,37 @@ import org.junit.Before; import java.io.IOException; +import java.io.InputStream; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Locale; import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CountDownLatch; import java.util.function.Supplier; -import org.mockito.ArgumentCaptor; import org.mockito.Mockito; -import static org.opensearch.cluster.routing.remote.InternalRemoteRoutingTableService.INDEX_ROUTING_FILE_PREFIX; -import static org.opensearch.cluster.routing.remote.InternalRemoteRoutingTableService.INDEX_ROUTING_PATH_TOKEN; import static org.opensearch.common.util.FeatureFlags.REMOTE_PUBLICATION_EXPERIMENTAL; import static org.opensearch.gateway.remote.ClusterMetadataManifestTests.randomUploadedIndexMetadataList; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN; import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.PATH_DELIMITER; +import static org.opensearch.gateway.remote.routingtable.RemoteIndexRoutingTable.INDEX_ROUTING_FILE; +import static org.opensearch.gateway.remote.routingtable.RemoteIndexRoutingTable.INDEX_ROUTING_METADATA_PREFIX; +import static org.opensearch.gateway.remote.routingtable.RemoteIndexRoutingTable.INDEX_ROUTING_TABLE; +import static 
org.opensearch.gateway.remote.routingtable.RemoteIndexRoutingTable.INDEX_ROUTING_TABLE_FORMAT; +import static org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm.FNV_1A_BASE64; +import static org.opensearch.index.remote.RemoteStoreEnums.PathType.HASHED_PREFIX; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY; -import static org.mockito.ArgumentMatchers.anyLong; +import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.ArgumentMatchers.startsWith; import static org.mockito.Mockito.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -92,6 +94,8 @@ public class RemoteRoutingTableServiceTests extends OpenSearchTestCase { private BlobPath basePath; private ClusterSettings clusterSettings; private ClusterService clusterService; + private Compressor compressor; + private BlobStoreTransferService blobStoreTransferService; private final ThreadPool threadPool = new TestThreadPool(getClass().getName()); @Before @@ -105,6 +109,7 @@ public void setup() { .build(); clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); clusterService = mock(ClusterService.class); + blobStoreTransferService = mock(BlobStoreTransferService.class); when(clusterService.getClusterSettings()).thenReturn(clusterSettings); blobStoreRepository = mock(BlobStoreRepository.class); when(blobStoreRepository.getCompressor()).thenReturn(new DeflateCompressor()); @@ -112,18 +117,20 @@ public void setup() { blobContainer = mock(BlobContainer.class); when(repositoriesService.repository("routing_repository")).thenReturn(blobStoreRepository); when(blobStoreRepository.blobStore()).thenReturn(blobStore); + when(blobStore.blobContainer(any())).thenReturn(blobContainer); Settings nodeSettings = Settings.builder().put(REMOTE_PUBLICATION_EXPERIMENTAL, "true").build(); FeatureFlags.initializeFeatureFlags(nodeSettings); - + compressor = new NoneCompressor(); basePath = BlobPath.cleanPath().add("base-path"); - + when(blobStoreRepository.basePath()).thenReturn(basePath); remoteRoutingTableService = new InternalRemoteRoutingTableService( repositoriesServiceSupplier, settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - threadPool + threadPool, + "test-cluster" ); - + remoteRoutingTableService.doStart(); } @After @@ -141,7 +148,8 @@ public void testFailInitializationWhenRemoteRoutingDisabled() { repositoriesServiceSupplier, settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - threadPool + threadPool, + "test-cluster" ) ); } @@ -347,136 +355,13 @@ public void testGetIndicesRoutingMapDiffIndexDeleted() { assertEquals(indexName, diff.getDeletes().get(0)); } - public void testGetIndexRoutingAsyncAction() throws IOException { - String indexName = randomAlphaOfLength(randomIntBetween(1, 50)); - ClusterState clusterState = createClusterState(indexName); - BlobPath expectedPath = getPath(); - - LatchedActionListener listener = mock(LatchedActionListener.class); - when(blobStore.blobContainer(expectedPath)).thenReturn(blobContainer); - - remoteRoutingTableService.start(); - CheckedRunnable runnable = 
remoteRoutingTableService.getIndexRoutingAsyncAction( - clusterState, - clusterState.routingTable().getIndicesRouting().get(indexName), - listener, - basePath - ); - assertNotNull(runnable); - runnable.run(); - - String expectedFilePrefix = String.join( - DELIMITER, - INDEX_ROUTING_FILE_PREFIX, - RemoteStoreUtils.invertLong(clusterState.term()), - RemoteStoreUtils.invertLong(clusterState.version()) - ); - verify(blobContainer, times(1)).writeBlob(startsWith(expectedFilePrefix), any(StreamInput.class), anyLong(), eq(true)); - verify(listener, times(1)).onResponse(any(ClusterMetadataManifest.UploadedMetadata.class)); - } - - public void testGetIndexRoutingAsyncActionFailureInBlobRepo() throws IOException { - String indexName = randomAlphaOfLength(randomIntBetween(1, 50)); - ClusterState clusterState = createClusterState(indexName); - BlobPath expectedPath = getPath(); - - LatchedActionListener listener = mock(LatchedActionListener.class); - when(blobStore.blobContainer(expectedPath)).thenReturn(blobContainer); - doThrow(new IOException("testing failure")).when(blobContainer).writeBlob(anyString(), any(StreamInput.class), anyLong(), eq(true)); - - remoteRoutingTableService.start(); - CheckedRunnable runnable = remoteRoutingTableService.getIndexRoutingAsyncAction( - clusterState, - clusterState.routingTable().getIndicesRouting().get(indexName), - listener, - basePath - ); - assertNotNull(runnable); - runnable.run(); - String expectedFilePrefix = String.join( - DELIMITER, - INDEX_ROUTING_FILE_PREFIX, - RemoteStoreUtils.invertLong(clusterState.term()), - RemoteStoreUtils.invertLong(clusterState.version()) - ); - verify(blobContainer, times(1)).writeBlob(startsWith(expectedFilePrefix), any(StreamInput.class), anyLong(), eq(true)); - verify(listener, times(1)).onFailure(any(RemoteStateTransferException.class)); - } - - public void testGetIndexRoutingAsyncActionAsyncRepo() throws IOException { - String indexName = randomAlphaOfLength(randomIntBetween(1, 50)); - ClusterState clusterState = createClusterState(indexName); - BlobPath expectedPath = getPath(); - - LatchedActionListener listener = mock(LatchedActionListener.class); - blobContainer = mock(AsyncMultiStreamBlobContainer.class); - when(blobStore.blobContainer(expectedPath)).thenReturn(blobContainer); - ArgumentCaptor> actionListenerArgumentCaptor = ArgumentCaptor.forClass(ActionListener.class); - ArgumentCaptor writeContextArgumentCaptor = ArgumentCaptor.forClass(WriteContext.class); - ConcurrentHashMap capturedWriteContext = new ConcurrentHashMap<>(); - - doAnswer((i) -> { - actionListenerArgumentCaptor.getValue().onResponse(null); - WriteContext writeContext = writeContextArgumentCaptor.getValue(); - capturedWriteContext.put(writeContext.getFileName().split(DELIMITER)[0], writeContextArgumentCaptor.getValue()); - return null; - }).when((AsyncMultiStreamBlobContainer) blobContainer) - .asyncBlobUpload(writeContextArgumentCaptor.capture(), actionListenerArgumentCaptor.capture()); - - remoteRoutingTableService.start(); - CheckedRunnable runnable = remoteRoutingTableService.getIndexRoutingAsyncAction( - clusterState, - clusterState.routingTable().getIndicesRouting().get(indexName), - listener, - basePath - ); - assertNotNull(runnable); - runnable.run(); - - String expectedFilePrefix = String.join( - DELIMITER, - INDEX_ROUTING_FILE_PREFIX, - RemoteStoreUtils.invertLong(clusterState.term()), - RemoteStoreUtils.invertLong(clusterState.version()) - ); - assertEquals(1, actionListenerArgumentCaptor.getAllValues().size()); - assertEquals(1, 
writeContextArgumentCaptor.getAllValues().size()); - assertNotNull(capturedWriteContext.get("index_routing")); - assertEquals(capturedWriteContext.get("index_routing").getWritePriority(), WritePriority.URGENT); - assertTrue(capturedWriteContext.get("index_routing").getFileName().startsWith(expectedFilePrefix)); - } - - public void testGetIndexRoutingAsyncActionAsyncRepoFailureInRepo() throws IOException { - String indexName = randomAlphaOfLength(randomIntBetween(1, 50)); - ClusterState clusterState = createClusterState(indexName); - BlobPath expectedPath = getPath(); - - LatchedActionListener listener = mock(LatchedActionListener.class); - blobContainer = mock(AsyncMultiStreamBlobContainer.class); - when(blobStore.blobContainer(expectedPath)).thenReturn(blobContainer); - - doThrow(new IOException("Testing failure")).when((AsyncMultiStreamBlobContainer) blobContainer) - .asyncBlobUpload(any(WriteContext.class), any(ActionListener.class)); - - remoteRoutingTableService.start(); - CheckedRunnable runnable = remoteRoutingTableService.getIndexRoutingAsyncAction( - clusterState, - clusterState.routingTable().getIndicesRouting().get(indexName), - listener, - basePath - ); - assertNotNull(runnable); - runnable.run(); - verify(listener, times(1)).onFailure(any(RemoteStateTransferException.class)); - } - public void testGetAllUploadedIndicesRouting() { final ClusterMetadataManifest previousManifest = ClusterMetadataManifest.builder().build(); final ClusterMetadataManifest.UploadedIndexMetadata uploadedIndexMetadata = new ClusterMetadataManifest.UploadedIndexMetadata( "test-index", "index-uuid", "index-filename", - InternalRemoteRoutingTableService.INDEX_ROUTING_METADATA_PREFIX + INDEX_ROUTING_METADATA_PREFIX ); List allIndiceRoutingMetadata = remoteRoutingTableService @@ -491,7 +376,7 @@ public void testGetAllUploadedIndicesRoutingExistingIndexInManifest() { "test-index", "index-uuid", "index-filename", - InternalRemoteRoutingTableService.INDEX_ROUTING_METADATA_PREFIX + INDEX_ROUTING_METADATA_PREFIX ); final ClusterMetadataManifest previousManifest = ClusterMetadataManifest.builder() .indicesRouting(List.of(uploadedIndexMetadata)) @@ -509,7 +394,7 @@ public void testGetAllUploadedIndicesRoutingNewIndexFromManifest() { "test-index", "index-uuid", "index-filename", - InternalRemoteRoutingTableService.INDEX_ROUTING_METADATA_PREFIX + INDEX_ROUTING_METADATA_PREFIX ); final ClusterMetadataManifest previousManifest = ClusterMetadataManifest.builder() .indicesRouting(List.of(uploadedIndexMetadata)) @@ -518,7 +403,7 @@ public void testGetAllUploadedIndicesRoutingNewIndexFromManifest() { "test-index2", "index-uuid", "index-filename", - InternalRemoteRoutingTableService.INDEX_ROUTING_METADATA_PREFIX + INDEX_ROUTING_METADATA_PREFIX ); List allIndiceRoutingMetadata = remoteRoutingTableService @@ -534,13 +419,13 @@ public void testGetAllUploadedIndicesRoutingIndexDeleted() { "test-index", "index-uuid", "index-filename", - InternalRemoteRoutingTableService.INDEX_ROUTING_METADATA_PREFIX + INDEX_ROUTING_METADATA_PREFIX ); final ClusterMetadataManifest.UploadedIndexMetadata uploadedIndexMetadata2 = new ClusterMetadataManifest.UploadedIndexMetadata( "test-index2", "index-uuid", "index-filename", - InternalRemoteRoutingTableService.INDEX_ROUTING_METADATA_PREFIX + INDEX_ROUTING_METADATA_PREFIX ); final ClusterMetadataManifest previousManifest = ClusterMetadataManifest.builder() .indicesRouting(List.of(uploadedIndexMetadata, uploadedIndexMetadata2)) @@ -558,13 +443,13 @@ public void 
testGetAllUploadedIndicesRoutingNoChange() { "test-index", "index-uuid", "index-filename", - InternalRemoteRoutingTableService.INDEX_ROUTING_METADATA_PREFIX + INDEX_ROUTING_METADATA_PREFIX ); final ClusterMetadataManifest.UploadedIndexMetadata uploadedIndexMetadata2 = new ClusterMetadataManifest.UploadedIndexMetadata( "test-index2", "index-uuid", "index-filename", - InternalRemoteRoutingTableService.INDEX_ROUTING_METADATA_PREFIX + INDEX_ROUTING_METADATA_PREFIX ); final ClusterMetadataManifest previousManifest = ClusterMetadataManifest.builder() .indicesRouting(List.of(uploadedIndexMetadata, uploadedIndexMetadata2)) @@ -640,69 +525,83 @@ public void testIndicesRoutingDiffWhenIndexDeletedAndAdded() { ); } - public void testGetAsyncIndexMetadataReadAction() throws Exception { + public void testGetAsyncIndexRoutingReadAction() throws Exception { String indexName = randomAlphaOfLength(randomIntBetween(1, 50)); ClusterState clusterState = createClusterState(indexName); String uploadedFileName = String.format(Locale.ROOT, "index-routing/" + indexName); - Index index = new Index(indexName, "uuid-01"); - - LatchedActionListener listener = mock(LatchedActionListener.class); - when(blobStore.blobContainer(any())).thenReturn(blobContainer); - BytesStreamOutput streamOutput = new BytesStreamOutput(); - RemoteIndexRoutingTable remoteIndexRoutingTable = new RemoteIndexRoutingTable( - clusterState.routingTable().getIndicesRouting().get(indexName) + when(blobContainer.readBlob(indexName)).thenReturn( + INDEX_ROUTING_TABLE_FORMAT.serialize( + clusterState.getRoutingTable().getIndicesRouting().get(indexName), + uploadedFileName, + compressor + ).streamInput() ); - remoteIndexRoutingTable.writeTo(streamOutput); - when(blobContainer.readBlob(indexName)).thenReturn(streamOutput.bytes().streamInput()); - remoteRoutingTableService.start(); + TestCapturingListener listener = new TestCapturingListener<>(); + CountDownLatch latch = new CountDownLatch(1); - CheckedRunnable runnable = remoteRoutingTableService.getAsyncIndexRoutingReadAction(uploadedFileName, index, listener); - assertNotNull(runnable); - runnable.run(); + remoteRoutingTableService.getAsyncIndexRoutingReadAction( + "cluster-uuid", + uploadedFileName, + new LatchedActionListener<>(listener, latch) + ).run(); + latch.await(); - assertBusy(() -> verify(blobContainer, times(1)).readBlob(any())); - assertBusy(() -> verify(listener, times(1)).onResponse(any(IndexRoutingTable.class))); + assertNull(listener.getFailure()); + assertNotNull(listener.getResult()); + IndexRoutingTable indexRoutingTable = listener.getResult(); + assertEquals(clusterState.getRoutingTable().getIndicesRouting().get(indexName), indexRoutingTable); } - public void testGetAsyncIndexMetadataReadActionFailureForIncorrectIndex() throws Exception { + public void testGetAsyncIndexRoutingWriteAction() throws Exception { String indexName = randomAlphaOfLength(randomIntBetween(1, 50)); ClusterState clusterState = createClusterState(indexName); - String uploadedFileName = String.format(Locale.ROOT, "index-routing/" + indexName); - Index index = new Index("incorrect-index", "uuid-01"); - - LatchedActionListener listener = mock(LatchedActionListener.class); - when(blobStore.blobContainer(any())).thenReturn(blobContainer); - BytesStreamOutput streamOutput = new BytesStreamOutput(); - RemoteIndexRoutingTable remoteIndexRoutingTable = new RemoteIndexRoutingTable( - clusterState.routingTable().getIndicesRouting().get(indexName) + Iterable remotePath = HASHED_PREFIX.path( + 
RemoteStorePathStrategy.PathInput.builder() + .basePath( + new BlobPath().add("base-path") + .add(RemoteClusterStateUtils.encodeString(ClusterName.DEFAULT.toString())) + .add(CLUSTER_STATE_PATH_TOKEN) + .add(clusterState.metadata().clusterUUID()) + .add(INDEX_ROUTING_TABLE) + ) + .indexUUID(clusterState.getRoutingTable().indicesRouting().get(indexName).getIndex().getUUID()) + .build(), + FNV_1A_BASE64 ); - remoteIndexRoutingTable.writeTo(streamOutput); - when(blobContainer.readBlob(anyString())).thenReturn(streamOutput.bytes().streamInput()); - remoteRoutingTableService.doStart(); - - CheckedRunnable runnable = remoteRoutingTableService.getAsyncIndexRoutingReadAction(uploadedFileName, index, listener); - assertNotNull(runnable); - runnable.run(); - - assertBusy(() -> verify(blobContainer, times(1)).readBlob(any())); - assertBusy(() -> verify(listener, times(1)).onFailure(any(Exception.class))); - } - - public void testGetAsyncIndexMetadataReadActionFailureInBlobRepo() throws Exception { - String indexName = randomAlphaOfLength(randomIntBetween(1, 50)); - String uploadedFileName = String.format(Locale.ROOT, "index-routing/" + indexName); - Index index = new Index(indexName, "uuid-01"); - - LatchedActionListener listener = mock(LatchedActionListener.class); - when(blobStore.blobContainer(any())).thenReturn(blobContainer); - doThrow(new IOException("testing failure")).when(blobContainer).readBlob(indexName); - remoteRoutingTableService.doStart(); - - CheckedRunnable runnable = remoteRoutingTableService.getAsyncIndexRoutingReadAction(uploadedFileName, index, listener); - assertNotNull(runnable); - runnable.run(); - assertBusy(() -> verify(listener, times(1)).onFailure(any(RemoteStateTransferException.class))); + doAnswer(invocationOnMock -> { + invocationOnMock.getArgument(4, ActionListener.class).onResponse(null); + return null; + }).when(blobStoreTransferService) + .uploadBlob(any(InputStream.class), eq(remotePath), anyString(), eq(WritePriority.URGENT), any(ActionListener.class)); + + TestCapturingListener listener = new TestCapturingListener<>(); + CountDownLatch latch = new CountDownLatch(1); + + remoteRoutingTableService.getAsyncIndexRoutingWriteAction( + clusterState.metadata().clusterUUID(), + clusterState.term(), + clusterState.version(), + clusterState.getRoutingTable().indicesRouting().get(indexName), + new LatchedActionListener<>(listener, latch) + ).run(); + latch.await(); + assertNull(listener.getFailure()); + assertNotNull(listener.getResult()); + ClusterMetadataManifest.UploadedMetadata uploadedMetadata = listener.getResult(); + + assertEquals(INDEX_ROUTING_METADATA_PREFIX + indexName, uploadedMetadata.getComponent()); + String uploadedFileName = uploadedMetadata.getUploadedFilename(); + String[] pathTokens = uploadedFileName.split(PATH_DELIMITER); + assertEquals(8, pathTokens.length); + assertEquals(pathTokens[1], "base-path"); + String[] fileNameTokens = pathTokens[7].split(DELIMITER); + + assertEquals(4, fileNameTokens.length); + assertEquals(fileNameTokens[0], INDEX_ROUTING_FILE); + assertEquals(fileNameTokens[1], RemoteStoreUtils.invertLong(1L)); + assertEquals(fileNameTokens[2], RemoteStoreUtils.invertLong(2L)); + assertThat(RemoteStoreUtils.invertLong(fileNameTokens[3]), lessThanOrEqualTo(System.currentTimeMillis())); } public void testGetUpdatedIndexRoutingTableMetadataWhenNoChange() { @@ -758,7 +657,7 @@ private ClusterState createClusterState(String indexName) { } private BlobPath getPath() { - BlobPath indexRoutingPath = basePath.add(INDEX_ROUTING_PATH_TOKEN); + 
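The filename assertions above hinge on RemoteStoreUtils.invertLong: it encodes a long so that lexicographic order of the encoded strings reverses numeric order (the newest term/version lists first), and, as the test's last assertion implies, the String overload decodes back to the original long. A small demonstration (run with assertions enabled):

```java
import org.opensearch.index.remote.RemoteStoreUtils;

public final class InvertLongDemo {
    public static void main(String[] args) {
        String one = RemoteStoreUtils.invertLong(1L);
        String two = RemoteStoreUtils.invertLong(2L);

        // Larger values sort earlier, so a lexicographic blob listing
        // surfaces the highest term/version first.
        assert two.compareTo(one) < 0;

        // The String overload round-trips back to the original long.
        assert RemoteStoreUtils.invertLong(one) == 1L;
    }
}
```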
BlobPath indexRoutingPath = basePath.add(INDEX_ROUTING_TABLE); return RemoteStoreEnums.PathType.HASHED_PREFIX.path( RemoteStorePathStrategy.PathInput.builder().basePath(indexRoutingPath).indexUUID("uuid").build(), RemoteStoreEnums.PathHashAlgorithm.FNV_1A_BASE64 diff --git a/server/src/test/java/org/opensearch/gateway/ReplicaShardBatchAllocatorTests.java b/server/src/test/java/org/opensearch/gateway/ReplicaShardBatchAllocatorTests.java index 2e148c2bc8130..526a3990955b8 100644 --- a/server/src/test/java/org/opensearch/gateway/ReplicaShardBatchAllocatorTests.java +++ b/server/src/test/java/org/opensearch/gateway/ReplicaShardBatchAllocatorTests.java @@ -644,6 +644,25 @@ public void testDoNotCancelForBrokenNode() { assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED), empty()); } + public void testDoNotCancelForInactivePrimaryNode() { + RoutingAllocation allocation = oneInactivePrimaryOnNode1And1ReplicaRecovering(yesAllocationDeciders(), null); + testBatchAllocator.addData( + node1, + null, + "MATCH", + null, + new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION) + ).addData(node2, randomSyncId(), null, new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); + + testBatchAllocator.processExistingRecoveries( + allocation, + Collections.singletonList(new ArrayList<>(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING))) + ); + + assertThat(allocation.routingNodesChanged(), equalTo(false)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED), empty()); + } + public void testAllocateUnassignedBatchThrottlingAllocationDeciderIsHonoured() throws InterruptedException { ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); AllocationDeciders allocationDeciders = randomAllocationDeciders( @@ -872,6 +891,41 @@ private RoutingAllocation onePrimaryOnNode1And1ReplicaRecovering(AllocationDecid ); } + private RoutingAllocation oneInactivePrimaryOnNode1And1ReplicaRecovering(AllocationDeciders deciders, UnassignedInfo unassignedInfo) { + ShardRouting primaryShard = TestShardRouting.newShardRouting(shardId, node1.getId(), true, ShardRoutingState.INITIALIZING); + RoutingTable routingTable = RoutingTable.builder() + .add( + IndexRoutingTable.builder(shardId.getIndex()) + .addIndexShard( + new IndexShardRoutingTable.Builder(shardId).addShard(primaryShard) + .addShard( + TestShardRouting.newShardRouting( + shardId, + node2.getId(), + null, + false, + ShardRoutingState.INITIALIZING, + unassignedInfo + ) + ) + .build() + ) + ) + .build(); + ClusterState state = ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .routingTable(routingTable) + .nodes(DiscoveryNodes.builder().add(node1).add(node2)) + .build(); + return new RoutingAllocation( + deciders, + new RoutingNodes(state, false), + state, + ClusterInfo.EMPTY, + SnapshotShardSizeInfo.EMPTY, + System.nanoTime() + ); + } + private RoutingAllocation onePrimaryOnNode1And1ReplicaRecovering(AllocationDeciders deciders) { return onePrimaryOnNode1And1ReplicaRecovering(deciders, new UnassignedInfo(UnassignedInfo.Reason.CLUSTER_RECOVERED, null)); } diff --git a/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java b/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java index 152a6dba6c032..256161af1a3e2 100644 --- 
a/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java @@ -13,7 +13,6 @@ import org.opensearch.cluster.metadata.IndexGraveyard; import org.opensearch.cluster.metadata.RepositoriesMetadata; import org.opensearch.cluster.metadata.WeightedRoutingMetadata; -import org.opensearch.cluster.routing.remote.InternalRemoteRoutingTableService; import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; @@ -44,6 +43,7 @@ import static org.opensearch.gateway.remote.model.RemotePersistentSettingsMetadata.SETTING_METADATA; import static org.opensearch.gateway.remote.model.RemoteTemplatesMetadata.TEMPLATES_METADATA; import static org.opensearch.gateway.remote.model.RemoteTransientSettingsMetadata.TRANSIENT_SETTING_METADATA; +import static org.opensearch.gateway.remote.routingtable.RemoteIndexRoutingTable.INDEX_ROUTING_METADATA_PREFIX; public class ClusterMetadataManifestTests extends OpenSearchTestCase { @@ -545,7 +545,7 @@ public void testClusterMetadataManifestXContentV2WithoutEphemeral() throws IOExc "test-index", "test-uuid", "routing-path", - InternalRemoteRoutingTableService.INDEX_ROUTING_METADATA_PREFIX + INDEX_ROUTING_METADATA_PREFIX ); ClusterMetadataManifest originalManifest = ClusterMetadataManifest.builder() .clusterTerm(1L) diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java index 6cd9cbbf13848..ebd3488d06007 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java @@ -117,6 +117,7 @@ import static org.opensearch.gateway.remote.RemoteClusterStateTestUtils.TestClusterStateCustom1; import static org.opensearch.gateway.remote.RemoteClusterStateTestUtils.TestClusterStateCustom2; import static org.opensearch.gateway.remote.RemoteClusterStateTestUtils.TestClusterStateCustom3; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.CUSTOM_DELIMITER; import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER; import static org.opensearch.gateway.remote.RemoteClusterStateUtils.FORMAT_PARAMS; import static org.opensearch.gateway.remote.RemoteClusterStateUtils.getFormattedIndexFileName; @@ -126,7 +127,6 @@ import static org.opensearch.gateway.remote.model.RemoteClusterStateCustoms.CLUSTER_STATE_CUSTOM; import static org.opensearch.gateway.remote.model.RemoteCoordinationMetadata.COORDINATION_METADATA; import static org.opensearch.gateway.remote.model.RemoteCoordinationMetadata.COORDINATION_METADATA_FORMAT; -import static org.opensearch.gateway.remote.model.RemoteCustomMetadata.CUSTOM_DELIMITER; import static org.opensearch.gateway.remote.model.RemoteCustomMetadata.CUSTOM_METADATA; import static org.opensearch.gateway.remote.model.RemoteCustomMetadata.readFrom; import static org.opensearch.gateway.remote.model.RemoteDiscoveryNodes.DISCOVERY_NODES; @@ -144,6 +144,7 @@ import static org.opensearch.gateway.remote.model.RemoteTemplatesMetadata.TEMPLATES_METADATA_FORMAT; import static org.opensearch.gateway.remote.model.RemoteTemplatesMetadataTests.getTemplatesMetadata; import static 
org.opensearch.gateway.remote.model.RemoteTransientSettingsMetadata.TRANSIENT_SETTING_METADATA; +import static org.opensearch.gateway.remote.routingtable.RemoteIndexRoutingTable.INDEX_ROUTING_METADATA_PREFIX; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; @@ -2603,7 +2604,7 @@ public void testWriteFullMetadataSuccessWithRoutingTable() throws IOException { "test-index", "index-uuid", "routing-filename", - InternalRemoteRoutingTableService.INDEX_ROUTING_METADATA_PREFIX + INDEX_ROUTING_METADATA_PREFIX ); final ClusterMetadataManifest expectedManifest = ClusterMetadataManifest.builder() .indices(List.of(uploadedIndexMetadata)) @@ -2654,7 +2655,7 @@ public void testWriteFullMetadataInParallelSuccessWithRoutingTable() throws IOEx "test-index", "index-uuid", "routing-filename", - InternalRemoteRoutingTableService.INDEX_ROUTING_METADATA_PREFIX + INDEX_ROUTING_METADATA_PREFIX ); final ClusterMetadataManifest expectedManifest = ClusterMetadataManifest.builder() @@ -2710,7 +2711,7 @@ public void testWriteIncrementalMetadataSuccessWithRoutingTable() throws IOExcep "test-index", "index-uuid", "routing-filename", - InternalRemoteRoutingTableService.INDEX_ROUTING_METADATA_PREFIX + INDEX_ROUTING_METADATA_PREFIX ); final ClusterMetadataManifest expectedManifest = ClusterMetadataManifest.builder() .indices(List.of(uploadedIndexMetadata)) diff --git a/server/src/test/java/org/opensearch/gateway/remote/model/RemoteRoutingTableBlobStoreTests.java b/server/src/test/java/org/opensearch/gateway/remote/model/RemoteRoutingTableBlobStoreTests.java new file mode 100644 index 0000000000000..ea0f3264cfe4f --- /dev/null +++ b/server/src/test/java/org/opensearch/gateway/remote/model/RemoteRoutingTableBlobStoreTests.java @@ -0,0 +1,133 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.gateway.remote.model; + +import org.opensearch.Version; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.routing.IndexRoutingTable; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.compress.DeflateCompressor; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.index.Index; +import org.opensearch.gateway.remote.RemoteClusterStateUtils; +import org.opensearch.gateway.remote.routingtable.RemoteIndexRoutingTable; +import org.opensearch.index.remote.RemoteStoreEnums; +import org.opensearch.index.remote.RemoteStorePathStrategy; +import org.opensearch.index.translog.transfer.BlobStoreTransferService; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN; +import static org.opensearch.gateway.remote.routingtable.RemoteIndexRoutingTable.INDEX_ROUTING_TABLE; +import static org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm.FNV_1A_BASE64; +import static org.opensearch.index.remote.RemoteStoreEnums.PathType.HASHED_PREFIX; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class RemoteRoutingTableBlobStoreTests extends OpenSearchTestCase { + + private RemoteRoutingTableBlobStore remoteIndexRoutingTableStore; + ClusterSettings clusterSettings; + ThreadPool threadPool; + + @Before + public void setup() { + clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + BlobStoreTransferService blobStoreTransferService = mock(BlobStoreTransferService.class); + BlobStoreRepository blobStoreRepository = mock(BlobStoreRepository.class); + BlobPath blobPath = new BlobPath().add("base-path"); + when(blobStoreRepository.basePath()).thenReturn(blobPath); + + threadPool = new TestThreadPool(getClass().getName()); + this.remoteIndexRoutingTableStore = new RemoteRoutingTableBlobStore<>( + blobStoreTransferService, + blobStoreRepository, + "test-cluster", + threadPool, + ThreadPool.Names.REMOTE_STATE_READ, + clusterSettings + ); + } + + @After + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdown(); + + } + + public void testRemoteRoutingTablePathTypeSetting() { + // Assert the default is HASHED_PREFIX + assertEquals(HASHED_PREFIX.toString(), remoteIndexRoutingTableStore.getPathTypeSetting().toString()); + + Settings newSettings = Settings.builder() + .put("cluster.remote_store.routing_table.path_type", RemoteStoreEnums.PathType.FIXED.toString()) + .build(); + clusterSettings.applySettings(newSettings); + assertEquals(RemoteStoreEnums.PathType.FIXED.toString(), remoteIndexRoutingTableStore.getPathTypeSetting().toString()); + } + + public void testRemoteRoutingTableHashAlgoSetting() { + // Assert the default is FNV_1A_BASE64 + assertEquals(FNV_1A_BASE64.toString(), remoteIndexRoutingTableStore.getPathHashAlgoSetting().toString()); + + Settings newSettings = Settings.builder() + .put("cluster.remote_store.routing_table.path_hash_algo", RemoteStoreEnums.PathHashAlgorithm.FNV_1A_COMPOSITE_1.toString()) + .build(); + clusterSettings.applySettings(newSettings); + assertEquals( + 
RemoteStoreEnums.PathHashAlgorithm.FNV_1A_COMPOSITE_1.toString(), + remoteIndexRoutingTableStore.getPathHashAlgoSetting().toString() + ); + } + + public void testGetBlobPathForUpload() { + + Index index = new Index("test-idx", "index-uuid"); + Settings idxSettings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()) + .build(); + + IndexMetadata indexMetadata = new IndexMetadata.Builder(index.getName()).settings(idxSettings) + .numberOfShards(1) + .numberOfReplicas(0) + .build(); + + IndexRoutingTable indexRoutingTable = new IndexRoutingTable.Builder(index).initializeAsNew(indexMetadata).build(); + + RemoteIndexRoutingTable remoteObjectForUpload = new RemoteIndexRoutingTable( + indexRoutingTable, + "cluster-uuid", + new DeflateCompressor(), + 2L, + 3L + ); + BlobPath blobPath = remoteIndexRoutingTableStore.getBlobPathForUpload(remoteObjectForUpload); + BlobPath expectedPath = HASHED_PREFIX.path( + RemoteStorePathStrategy.PathInput.builder() + .basePath( + new BlobPath().add("base-path") + .add(RemoteClusterStateUtils.encodeString("test-cluster")) + .add(CLUSTER_STATE_PATH_TOKEN) + .add("cluster-uuid") + .add(INDEX_ROUTING_TABLE) + ) + .indexUUID(index.getUUID()) + .build(), + FNV_1A_BASE64 + ); + assertEquals(expectedPath, blobPath); + } +} diff --git a/server/src/test/java/org/opensearch/gateway/remote/routingtable/IndexRoutingTableHeaderTests.java b/server/src/test/java/org/opensearch/gateway/remote/routingtable/IndexRoutingTableHeaderTests.java deleted file mode 100644 index a3f0ac36a40f1..0000000000000 --- a/server/src/test/java/org/opensearch/gateway/remote/routingtable/IndexRoutingTableHeaderTests.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.gateway.remote.routingtable; - -import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.core.common.io.stream.BytesStreamInput; -import org.opensearch.test.OpenSearchTestCase; - -import java.io.IOException; - -public class IndexRoutingTableHeaderTests extends OpenSearchTestCase { - - public void testIndexRoutingTableHeader() throws IOException { - String indexName = randomAlphaOfLength(randomIntBetween(1, 50)); - IndexRoutingTableHeader header = new IndexRoutingTableHeader(indexName); - try (BytesStreamOutput out = new BytesStreamOutput()) { - header.writeTo(out); - - BytesStreamInput in = new BytesStreamInput(out.bytes().toBytesRef().bytes); - IndexRoutingTableHeader headerRead = new IndexRoutingTableHeader(in); - assertEquals(indexName, headerRead.getIndexName()); - - } - } - -} diff --git a/server/src/test/java/org/opensearch/gateway/remote/routingtable/RemoteIndexRoutingTableTests.java b/server/src/test/java/org/opensearch/gateway/remote/routingtable/RemoteIndexRoutingTableTests.java index 72066d8afb45b..29d4ffa978851 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/routingtable/RemoteIndexRoutingTableTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/routingtable/RemoteIndexRoutingTableTests.java @@ -13,16 +13,136 @@ import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.routing.IndexRoutingTable; import org.opensearch.cluster.routing.RoutingTable; -import org.opensearch.cluster.routing.ShardRoutingState; -import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.compress.DeflateCompressor; +import org.opensearch.common.remote.BlobPathParameters; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.compress.Compressor; +import org.opensearch.core.compress.NoneCompressor; +import org.opensearch.gateway.remote.ClusterMetadataManifest; +import org.opensearch.gateway.remote.RemoteClusterStateUtils; +import org.opensearch.index.remote.RemoteStoreUtils; +import org.opensearch.index.translog.transfer.BlobStoreTransferService; +import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; import java.io.IOException; -import java.util.concurrent.atomic.AtomicInteger; +import java.io.InputStream; +import java.util.List; + +import static org.opensearch.gateway.remote.routingtable.RemoteIndexRoutingTable.INDEX_ROUTING_FILE; +import static org.opensearch.gateway.remote.routingtable.RemoteIndexRoutingTable.INDEX_ROUTING_METADATA_PREFIX; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.hamcrest.Matchers.nullValue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class RemoteIndexRoutingTableTests extends OpenSearchTestCase { - public void testRoutingTableInput() { + private static final String TEST_BLOB_NAME = "/test-path/test-blob-name"; + private static final String TEST_BLOB_PATH = "test-path"; + private static final String TEST_BLOB_FILE_NAME = "test-blob-name"; + private static final String 
INDEX_ROUTING_TABLE_TYPE = "test-index-routing-table"; + private static final long STATE_VERSION = 3L; + private static final long STATE_TERM = 2L; + private String clusterUUID; + private BlobStoreTransferService blobStoreTransferService; + private BlobStoreRepository blobStoreRepository; + private String clusterName; + private ClusterSettings clusterSettings; + private Compressor compressor; + private NamedWriteableRegistry namedWriteableRegistry; + private final ThreadPool threadPool = new TestThreadPool(getClass().getName()); + + @Before + public void setup() { + clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + this.clusterUUID = "test-cluster-uuid"; + this.blobStoreTransferService = mock(BlobStoreTransferService.class); + this.blobStoreRepository = mock(BlobStoreRepository.class); + BlobPath blobPath = new BlobPath().add("/path"); + when(blobStoreRepository.basePath()).thenReturn(blobPath); + when(blobStoreRepository.getCompressor()).thenReturn(new DeflateCompressor()); + compressor = new NoneCompressor(); + namedWriteableRegistry = writableRegistry(); + this.clusterName = "test-cluster-name"; + } + + @After + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdown(); + } + + public void testClusterUUID() { + String indexName = randomAlphaOfLength(randomIntBetween(1, 50)); + int numberOfShards = randomIntBetween(1, 10); + int numberOfReplicas = randomIntBetween(1, 10); + Metadata metadata = Metadata.builder() + .put( + IndexMetadata.builder(indexName) + .settings(settings(Version.CURRENT)) + .numberOfShards(numberOfShards) + .numberOfReplicas(numberOfReplicas) + ) + .build(); + + RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metadata.index(indexName)).build(); + + initialRoutingTable.getIndicesRouting().values().forEach(indexRoutingTable -> { + RemoteIndexRoutingTable remoteObjectForUpload = new RemoteIndexRoutingTable( + indexRoutingTable, + clusterUUID, + compressor, + STATE_TERM, + STATE_VERSION + ); + assertEquals(remoteObjectForUpload.clusterUUID(), clusterUUID); + + RemoteIndexRoutingTable remoteObjectForDownload = new RemoteIndexRoutingTable(TEST_BLOB_NAME, clusterUUID, compressor); + assertEquals(remoteObjectForDownload.clusterUUID(), clusterUUID); + }); + } + + public void testFullBlobName() { + String indexName = randomAlphaOfLength(randomIntBetween(1, 50)); + int numberOfShards = randomIntBetween(1, 10); + int numberOfReplicas = randomIntBetween(1, 10); + Metadata metadata = Metadata.builder() + .put( + IndexMetadata.builder(indexName) + .settings(settings(Version.CURRENT)) + .numberOfShards(numberOfShards) + .numberOfReplicas(numberOfReplicas) + ) + .build(); + + RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metadata.index(indexName)).build(); + + initialRoutingTable.getIndicesRouting().values().forEach(indexRoutingTable -> { + RemoteIndexRoutingTable remoteObjectForUpload = new RemoteIndexRoutingTable( + indexRoutingTable, + clusterUUID, + compressor, + STATE_TERM, + STATE_VERSION + ); + assertThat(remoteObjectForUpload.getFullBlobName(), nullValue()); + + RemoteIndexRoutingTable remoteObjectForDownload = new RemoteIndexRoutingTable(TEST_BLOB_NAME, clusterUUID, compressor); + assertThat(remoteObjectForDownload.getFullBlobName(), is(TEST_BLOB_NAME)); + }); + } + + public void testBlobFileName() { String indexName = randomAlphaOfLength(randomIntBetween(1, 50)); int numberOfShards = randomIntBetween(1, 10); int numberOfReplicas = randomIntBetween(1, 10); @@ 
-37,51 +157,164 @@ public void testRoutingTableInput() { RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metadata.index(indexName)).build(); - initialRoutingTable.getIndicesRouting().values().forEach(indexShardRoutingTables -> { - RemoteIndexRoutingTable indexRouting = new RemoteIndexRoutingTable(indexShardRoutingTables); - try (BytesStreamOutput streamOutput = new BytesStreamOutput();) { - indexRouting.writeTo(streamOutput); - RemoteIndexRoutingTable remoteIndexRoutingTable = new RemoteIndexRoutingTable( - streamOutput.bytes().streamInput(), - metadata.index(indexName).getIndex() - ); - IndexRoutingTable indexRoutingTable = remoteIndexRoutingTable.getIndexRoutingTable(); - assertEquals(numberOfShards, indexRoutingTable.getShards().size()); - assertEquals(metadata.index(indexName).getIndex(), indexRoutingTable.getIndex()); - assertEquals( - numberOfShards * (1 + numberOfReplicas), - indexRoutingTable.shardsWithState(ShardRoutingState.UNASSIGNED).size() - ); + initialRoutingTable.getIndicesRouting().values().forEach(indexRoutingTable -> { + RemoteIndexRoutingTable remoteObjectForUpload = new RemoteIndexRoutingTable( + indexRoutingTable, + clusterUUID, + compressor, + STATE_TERM, + STATE_VERSION + ); + assertThat(remoteObjectForUpload.getBlobFileName(), nullValue()); + + RemoteIndexRoutingTable remoteObjectForDownload = new RemoteIndexRoutingTable(TEST_BLOB_NAME, clusterUUID, compressor); + assertThat(remoteObjectForDownload.getBlobFileName(), is(TEST_BLOB_FILE_NAME)); + }); + } + + public void testBlobPathTokens() { + String uploadedFile = "user/local/opensearch/routingTable"; + RemoteIndexRoutingTable remoteObjectForDownload = new RemoteIndexRoutingTable(uploadedFile, clusterUUID, compressor); + assertThat(remoteObjectForDownload.getBlobPathTokens(), is(new String[] { "user", "local", "opensearch", "routingTable" })); + } + + public void testBlobPathParameters() { + String indexName = randomAlphaOfLength(randomIntBetween(1, 50)); + int numberOfShards = randomIntBetween(1, 10); + int numberOfReplicas = randomIntBetween(1, 10); + Metadata metadata = Metadata.builder() + .put( + IndexMetadata.builder(indexName) + .settings(settings(Version.CURRENT)) + .numberOfShards(numberOfShards) + .numberOfReplicas(numberOfReplicas) + ) + .build(); + + RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metadata.index(indexName)).build(); + + initialRoutingTable.getIndicesRouting().values().forEach(indexRoutingTable -> { + RemoteIndexRoutingTable remoteObjectForUpload = new RemoteIndexRoutingTable( + indexRoutingTable, + clusterUUID, + compressor, + STATE_TERM, + STATE_VERSION + ); + assertThat(remoteObjectForUpload.getBlobFileName(), nullValue()); + + BlobPathParameters params = remoteObjectForUpload.getBlobPathParameters(); + assertThat(params.getPathTokens(), is(List.of(indexRoutingTable.getIndex().getUUID()))); + String expectedPrefix = INDEX_ROUTING_FILE; + assertThat(params.getFilePrefix(), is(expectedPrefix)); + }); + } + + public void testGenerateBlobFileName() { + String indexName = randomAlphaOfLength(randomIntBetween(1, 50)); + int numberOfShards = randomIntBetween(1, 10); + int numberOfReplicas = randomIntBetween(1, 10); + Metadata metadata = Metadata.builder() + .put( + IndexMetadata.builder(indexName) + .settings(settings(Version.CURRENT)) + .numberOfShards(numberOfShards) + .numberOfReplicas(numberOfReplicas) + ) + .build(); + + RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metadata.index(indexName)).build(); + + 
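+            // The forEach below decodes the generated blob file name token by token. From the
+            // assertions that follow, the expected layout is
+            // <INDEX_ROUTING_FILE>__<invertLong(term)>__<invertLong(version)>__<invertLong(timestamp)>,
+            // joined by RemoteClusterStateUtils.DELIMITER. (Assumption: inverted longs are used so
+            // that newer files sort first lexicographically; this PR does not state the rationale.)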
initialRoutingTable.getIndicesRouting().values().forEach(indexRoutingTable -> { + RemoteIndexRoutingTable remoteObjectForUpload = new RemoteIndexRoutingTable( + indexRoutingTable, + clusterUUID, + compressor, + STATE_TERM, + STATE_VERSION + ); + + String blobFileName = remoteObjectForUpload.generateBlobFileName(); + String[] nameTokens = blobFileName.split(RemoteClusterStateUtils.DELIMITER); + assertEquals(nameTokens[0], INDEX_ROUTING_FILE); + assertEquals(nameTokens[1], RemoteStoreUtils.invertLong(STATE_TERM)); + assertEquals(nameTokens[2], RemoteStoreUtils.invertLong(STATE_VERSION)); + assertThat(RemoteStoreUtils.invertLong(nameTokens[3]), lessThanOrEqualTo(System.currentTimeMillis())); + }); + } + + public void testGetUploadedMetadata() throws IOException { + String indexName = randomAlphaOfLength(randomIntBetween(1, 50)); + int numberOfShards = randomIntBetween(1, 10); + int numberOfReplicas = randomIntBetween(1, 10); + Metadata metadata = Metadata.builder() + .put( + IndexMetadata.builder(indexName) + .settings(settings(Version.CURRENT)) + .numberOfShards(numberOfShards) + .numberOfReplicas(numberOfReplicas) + ) + .build(); + + RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metadata.index(indexName)).build(); + + initialRoutingTable.getIndicesRouting().values().forEach(indexRoutingTable -> { + RemoteIndexRoutingTable remoteObjectForUpload = new RemoteIndexRoutingTable( + indexRoutingTable, + clusterUUID, + compressor, + STATE_TERM, + STATE_VERSION + ); + + assertThrows(AssertionError.class, remoteObjectForUpload::getUploadedMetadata); + + try (InputStream inputStream = remoteObjectForUpload.serialize()) { + remoteObjectForUpload.setFullBlobName(new BlobPath().add(TEST_BLOB_PATH)); + ClusterMetadataManifest.UploadedMetadata uploadedMetadata = remoteObjectForUpload.getUploadedMetadata(); + String expectedPrefix = INDEX_ROUTING_METADATA_PREFIX + indexRoutingTable.getIndex().getName(); + assertThat(uploadedMetadata.getComponent(), is(expectedPrefix)); + assertThat(uploadedMetadata.getUploadedFilename(), is(remoteObjectForUpload.getFullBlobName())); } catch (IOException e) { throw new RuntimeException(e); } }); } - public void testRoutingTableInputStreamWithInvalidIndex() { + public void testSerDe() throws IOException { + String indexName = randomAlphaOfLength(randomIntBetween(1, 50)); + int numberOfShards = randomIntBetween(1, 10); + int numberOfReplicas = randomIntBetween(1, 10); Metadata metadata = Metadata.builder() - .put(IndexMetadata.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1)) - .put(IndexMetadata.builder("invalid-index").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1)) + .put( + IndexMetadata.builder(indexName) + .settings(settings(Version.CURRENT)) + .numberOfShards(numberOfShards) + .numberOfReplicas(numberOfReplicas) + ) .build(); - RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metadata.index("test")).build(); - AtomicInteger assertionError = new AtomicInteger(); - initialRoutingTable.getIndicesRouting().values().forEach(indexShardRoutingTables -> { - RemoteIndexRoutingTable indexRouting = new RemoteIndexRoutingTable(indexShardRoutingTables); - try (BytesStreamOutput streamOutput = new BytesStreamOutput()) { - indexRouting.writeTo(streamOutput); - RemoteIndexRoutingTable remoteIndexRoutingTable = new RemoteIndexRoutingTable( - streamOutput.bytes().streamInput(), - metadata.index("invalid-index").getIndex() - ); - } catch (AssertionError e) { - 
assertionError.getAndIncrement(); + RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metadata.index(indexName)).build(); + + initialRoutingTable.getIndicesRouting().values().forEach(indexRoutingTable -> { + RemoteIndexRoutingTable remoteObjectForUpload = new RemoteIndexRoutingTable( + indexRoutingTable, + clusterUUID, + compressor, + STATE_TERM, + STATE_VERSION + ); + + assertThrows(AssertionError.class, remoteObjectForUpload::getUploadedMetadata); + + try (InputStream inputStream = remoteObjectForUpload.serialize()) { + remoteObjectForUpload.setFullBlobName(BlobPath.cleanPath()); + assertThat(inputStream.available(), greaterThan(0)); + IndexRoutingTable readIndexRoutingTable = remoteObjectForUpload.deserialize(inputStream); + assertEquals(readIndexRoutingTable, indexRoutingTable); } catch (IOException e) { throw new RuntimeException(e); } }); - - assertEquals(1, assertionError.get()); } - } diff --git a/server/src/test/java/org/opensearch/index/codec/composite/datacube/startree/StarTreeDocValuesFormatTests.java b/server/src/test/java/org/opensearch/index/codec/composite/datacube/startree/StarTreeDocValuesFormatTests.java index 6c6d26656e4de..31df9a49bebfb 100644 --- a/server/src/test/java/org/opensearch/index/codec/composite/datacube/startree/StarTreeDocValuesFormatTests.java +++ b/server/src/test/java/org/opensearch/index/codec/composite/datacube/startree/StarTreeDocValuesFormatTests.java @@ -12,12 +12,7 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.lucene99.Lucene99Codec; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.SortedNumericDocValuesField; -import org.apache.lucene.index.IndexWriterConfig; -import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.BaseDocValuesFormatTestCase; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.tests.util.LuceneTestCase; import org.opensearch.common.Rounding; import org.opensearch.index.codec.composite.Composite99Codec; @@ -31,7 +26,6 @@ import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.StarTreeMapper; -import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -77,34 +71,4 @@ private static StarTreeField getStarTreeField(List d1Cale return new StarTreeField("starTree", dims, metrics, config); } - - public void testStarTreeDocValues() throws IOException { - Directory directory = newDirectory(); - IndexWriterConfig conf = newIndexWriterConfig(null); - conf.setMergePolicy(newLogMergePolicy()); - RandomIndexWriter iw = new RandomIndexWriter(random(), directory, conf); - Document doc = new Document(); - doc.add(new SortedNumericDocValuesField("sndv", 1)); - doc.add(new SortedNumericDocValuesField("dv", 1)); - doc.add(new SortedNumericDocValuesField("field", 1)); - iw.addDocument(doc); - doc.add(new SortedNumericDocValuesField("sndv", 1)); - doc.add(new SortedNumericDocValuesField("dv", 1)); - doc.add(new SortedNumericDocValuesField("field", 1)); - iw.addDocument(doc); - iw.forceMerge(1); - doc.add(new SortedNumericDocValuesField("sndv", 2)); - doc.add(new SortedNumericDocValuesField("dv", 2)); - doc.add(new SortedNumericDocValuesField("field", 2)); - iw.addDocument(doc); - doc.add(new SortedNumericDocValuesField("sndv", 2)); - doc.add(new SortedNumericDocValuesField("dv", 2)); - doc.add(new SortedNumericDocValuesField("field", 2)); - iw.addDocument(doc); - iw.forceMerge(1); - iw.close(); - - // 
TODO : validate star tree structures that got created - directory.close(); - } } diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/CountValueAggregatorTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/CountValueAggregatorTests.java new file mode 100644 index 0000000000000..e30e203406a6c --- /dev/null +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/CountValueAggregatorTests.java @@ -0,0 +1,53 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.compositeindex.datacube.startree.aggregators; + +import org.opensearch.index.compositeindex.datacube.MetricStat; +import org.opensearch.index.compositeindex.datacube.startree.aggregators.numerictype.StarTreeNumericType; +import org.opensearch.test.OpenSearchTestCase; + +public class CountValueAggregatorTests extends OpenSearchTestCase { + private final CountValueAggregator aggregator = new CountValueAggregator(); + + public void testGetAggregationType() { + assertEquals(MetricStat.COUNT.getTypeName(), aggregator.getAggregationType().getTypeName()); + } + + public void testGetAggregatedValueType() { + assertEquals(CountValueAggregator.VALUE_AGGREGATOR_TYPE, aggregator.getAggregatedValueType()); + } + + public void testGetInitialAggregatedValueForSegmentDocValue() { + assertEquals(1L, aggregator.getInitialAggregatedValueForSegmentDocValue(randomLong(), StarTreeNumericType.LONG), 0.0); + } + + public void testMergeAggregatedValueAndSegmentValue() { + assertEquals(3L, aggregator.mergeAggregatedValueAndSegmentValue(2L, 3L, StarTreeNumericType.LONG), 0.0); + } + + public void testMergeAggregatedValues() { + assertEquals(5L, aggregator.mergeAggregatedValues(2L, 3L), 0.0); + } + + public void testGetInitialAggregatedValue() { + assertEquals(3L, aggregator.getInitialAggregatedValue(3L), 0.0); + } + + public void testGetMaxAggregatedValueByteSize() { + assertEquals(Long.BYTES, aggregator.getMaxAggregatedValueByteSize()); + } + + public void testToLongValue() { + assertEquals(3L, aggregator.toLongValue(3L), 0.0); + } + + public void testToStarTreeNumericTypeValue() { + assertEquals(3L, aggregator.toStarTreeNumericTypeValue(3L, StarTreeNumericType.LONG), 0.0); + } +} diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/MetricAggregatorInfoTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/MetricAggregatorInfoTests.java new file mode 100644 index 0000000000000..d08f637a3f0a9 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/MetricAggregatorInfoTests.java @@ -0,0 +1,123 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.compositeindex.datacube.startree.aggregators; + +import org.opensearch.index.compositeindex.datacube.MetricStat; +import org.opensearch.index.fielddata.IndexNumericFieldData; +import org.opensearch.test.OpenSearchTestCase; + +public class MetricAggregatorInfoTests extends OpenSearchTestCase { + + public void testConstructor() { + MetricAggregatorInfo pair = new MetricAggregatorInfo( + MetricStat.SUM, + "column1", + "star_tree_field", + IndexNumericFieldData.NumericType.DOUBLE, + null + ); + assertEquals(MetricStat.SUM, pair.getMetricStat()); + assertEquals("column1", pair.getField()); + } + + public void testCountStarConstructor() { + MetricAggregatorInfo pair = new MetricAggregatorInfo( + MetricStat.COUNT, + "anything", + "star_tree_field", + IndexNumericFieldData.NumericType.DOUBLE, + null + ); + assertEquals(MetricStat.COUNT, pair.getMetricStat()); + assertEquals("anything", pair.getField()); + } + + public void testToFieldName() { + MetricAggregatorInfo pair = new MetricAggregatorInfo( + MetricStat.SUM, + "column2", + "star_tree_field", + IndexNumericFieldData.NumericType.DOUBLE, + null + ); + assertEquals("star_tree_field_column2_sum", pair.toFieldName()); + } + + public void testEquals() { + MetricAggregatorInfo pair1 = new MetricAggregatorInfo( + MetricStat.SUM, + "column1", + "star_tree_field", + IndexNumericFieldData.NumericType.DOUBLE, + null + ); + MetricAggregatorInfo pair2 = new MetricAggregatorInfo( + MetricStat.SUM, + "column1", + "star_tree_field", + IndexNumericFieldData.NumericType.DOUBLE, + null + ); + assertEquals(pair1, pair2); + assertNotEquals( + pair1, + new MetricAggregatorInfo(MetricStat.COUNT, "column1", "star_tree_field", IndexNumericFieldData.NumericType.DOUBLE, null) + ); + assertNotEquals( + pair1, + new MetricAggregatorInfo(MetricStat.SUM, "column2", "star_tree_field", IndexNumericFieldData.NumericType.DOUBLE, null) + ); + } + + public void testHashCode() { + MetricAggregatorInfo pair1 = new MetricAggregatorInfo( + MetricStat.SUM, + "column1", + "star_tree_field", + IndexNumericFieldData.NumericType.DOUBLE, + null + ); + MetricAggregatorInfo pair2 = new MetricAggregatorInfo( + MetricStat.SUM, + "column1", + "star_tree_field", + IndexNumericFieldData.NumericType.DOUBLE, + null + ); + assertEquals(pair1.hashCode(), pair2.hashCode()); + } + + public void testCompareTo() { + MetricAggregatorInfo pair1 = new MetricAggregatorInfo( + MetricStat.SUM, + "column1", + "star_tree_field", + IndexNumericFieldData.NumericType.DOUBLE, + null + ); + MetricAggregatorInfo pair2 = new MetricAggregatorInfo( + MetricStat.SUM, + "column2", + "star_tree_field", + IndexNumericFieldData.NumericType.DOUBLE, + null + ); + MetricAggregatorInfo pair3 = new MetricAggregatorInfo( + MetricStat.COUNT, + "column1", + "star_tree_field", + IndexNumericFieldData.NumericType.DOUBLE, + null + ); + assertTrue(pair1.compareTo(pair2) < 0); + assertTrue(pair2.compareTo(pair1) > 0); + assertTrue(pair1.compareTo(pair3) > 0); + assertTrue(pair3.compareTo(pair1) < 0); + } +} diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/SumValueAggregatorTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/SumValueAggregatorTests.java new file mode 100644 index 0000000000000..3fb627e7cd434 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/SumValueAggregatorTests.java @@ -0,0 +1,72 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * 
+ * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.compositeindex.datacube.startree.aggregators; + +import org.apache.lucene.util.NumericUtils; +import org.opensearch.index.compositeindex.datacube.MetricStat; +import org.opensearch.index.compositeindex.datacube.startree.aggregators.numerictype.StarTreeNumericType; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; + +public class SumValueAggregatorTests extends OpenSearchTestCase { + + private SumValueAggregator aggregator; + + @Before + public void setup() { + aggregator = new SumValueAggregator(); + } + + public void testGetAggregationType() { + assertEquals(MetricStat.SUM.getTypeName(), aggregator.getAggregationType().getTypeName()); + } + + public void testGetAggregatedValueType() { + assertEquals(SumValueAggregator.VALUE_AGGREGATOR_TYPE, aggregator.getAggregatedValueType()); + } + + public void testGetInitialAggregatedValueForSegmentDocValue() { + assertEquals(1.0, aggregator.getInitialAggregatedValueForSegmentDocValue(1L, StarTreeNumericType.LONG), 0.0); + assertThrows( + NullPointerException.class, + () -> aggregator.getInitialAggregatedValueForSegmentDocValue(null, StarTreeNumericType.DOUBLE) + ); + } + + public void testMergeAggregatedValueAndSegmentValue() { + aggregator.getInitialAggregatedValue(2.0); + assertEquals(5.0, aggregator.mergeAggregatedValueAndSegmentValue(2.0, 3L, StarTreeNumericType.LONG), 0.0); + } + + public void testMergeAggregatedValueAndSegmentValue_nullSegmentDocValue() { + aggregator.getInitialAggregatedValue(2.0); + assertThrows(NullPointerException.class, () -> aggregator.mergeAggregatedValueAndSegmentValue(2.0, null, StarTreeNumericType.LONG)); + } + + public void testMergeAggregatedValues() { + aggregator.getInitialAggregatedValue(3.0); + assertEquals(5.0, aggregator.mergeAggregatedValues(2.0, 3.0), 0.0); + } + + public void testGetInitialAggregatedValue() { + assertEquals(3.14, aggregator.getInitialAggregatedValue(3.14), 0.0); + } + + public void testGetMaxAggregatedValueByteSize() { + assertEquals(Double.BYTES, aggregator.getMaxAggregatedValueByteSize()); + } + + public void testToLongValue() { + assertEquals(NumericUtils.doubleToSortableLong(3.14), aggregator.toLongValue(3.14), 0.0); + } + + public void testToStarTreeNumericTypeValue() { + assertEquals(NumericUtils.sortableLongToDouble(3L), aggregator.toStarTreeNumericTypeValue(3L, StarTreeNumericType.DOUBLE), 0.0); + } +} diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/ValueAggregatorFactoryTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/ValueAggregatorFactoryTests.java new file mode 100644 index 0000000000000..ce61ab839cc61 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/ValueAggregatorFactoryTests.java @@ -0,0 +1,27 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.compositeindex.datacube.startree.aggregators; + +import org.opensearch.index.compositeindex.datacube.MetricStat; +import org.opensearch.index.compositeindex.datacube.startree.aggregators.numerictype.StarTreeNumericType; +import org.opensearch.test.OpenSearchTestCase; + +public class ValueAggregatorFactoryTests extends OpenSearchTestCase { + + public void testGetValueAggregatorForSumType() { + ValueAggregator aggregator = ValueAggregatorFactory.getValueAggregator(MetricStat.SUM); + assertNotNull(aggregator); + assertEquals(SumValueAggregator.class, aggregator.getClass()); + } + + public void testGetAggregatedValueTypeForSumType() { + StarTreeNumericType starTreeNumericType = ValueAggregatorFactory.getAggregatedValueType(MetricStat.SUM); + assertEquals(SumValueAggregator.VALUE_AGGREGATOR_TYPE, starTreeNumericType); + } +} diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilderTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilderTests.java new file mode 100644 index 0000000000000..b78130e72aba1 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilderTests.java @@ -0,0 +1,216 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.compositeindex.datacube.startree.builder; + +import org.apache.lucene.codecs.DocValuesProducer; +import org.apache.lucene.codecs.lucene99.Lucene99Codec; +import org.apache.lucene.index.DocValuesType; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.FieldInfos; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.SegmentInfo; +import org.apache.lucene.index.SegmentWriteState; +import org.apache.lucene.index.VectorEncoding; +import org.apache.lucene.index.VectorSimilarityFunction; +import org.apache.lucene.store.Directory; +import org.apache.lucene.util.InfoStream; +import org.apache.lucene.util.Version; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.compositeindex.datacube.Dimension; +import org.opensearch.index.compositeindex.datacube.Metric; +import org.opensearch.index.compositeindex.datacube.MetricStat; +import org.opensearch.index.compositeindex.datacube.NumericDimension; +import org.opensearch.index.compositeindex.datacube.startree.StarTreeDocument; +import org.opensearch.index.compositeindex.datacube.startree.StarTreeField; +import org.opensearch.index.compositeindex.datacube.startree.StarTreeFieldConfiguration; +import org.opensearch.index.compositeindex.datacube.startree.aggregators.MetricAggregatorInfo; +import org.opensearch.index.fielddata.IndexNumericFieldData; +import org.opensearch.index.mapper.ContentPath; +import org.opensearch.index.mapper.DocumentMapper; +import org.opensearch.index.mapper.Mapper; +import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.mapper.MappingLookup; +import org.opensearch.index.mapper.NumberFieldMapper; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.BeforeClass; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import 
java.util.UUID; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class BaseStarTreeBuilderTests extends OpenSearchTestCase { + + private static BaseStarTreeBuilder builder; + private static MapperService mapperService; + private static List dimensionsOrder; + private static List fields = List.of( + "field1", + "field2", + "field3", + "field4", + "field5", + "field6", + "field7", + "field8", + "field9", + "field10" + ); + private static List metrics; + private static Directory directory; + private static FieldInfo[] fieldsInfo; + private static SegmentWriteState state; + private static StarTreeField starTreeField; + + @BeforeClass + public static void setup() throws IOException { + + dimensionsOrder = List.of( + new NumericDimension("field1"), + new NumericDimension("field3"), + new NumericDimension("field5"), + new NumericDimension("field8") + ); + metrics = List.of(new Metric("field2", List.of(MetricStat.SUM)), new Metric("field4", List.of(MetricStat.SUM))); + + starTreeField = new StarTreeField( + "test", + dimensionsOrder, + metrics, + new StarTreeFieldConfiguration(1, Set.of("field8"), StarTreeFieldConfiguration.StarTreeBuildMode.ON_HEAP) + ); + + DocValuesProducer docValuesProducer = mock(DocValuesProducer.class); + directory = newFSDirectory(createTempDir()); + SegmentInfo segmentInfo = new SegmentInfo( + directory, + Version.LATEST, + Version.LUCENE_9_11_0, + "test_segment", + 5, + false, + false, + new Lucene99Codec(), + new HashMap<>(), + UUID.randomUUID().toString().substring(0, 16).getBytes(StandardCharsets.UTF_8), + new HashMap<>(), + null + ); + + fieldsInfo = new FieldInfo[fields.size()]; + Map fieldProducerMap = new HashMap<>(); + for (int i = 0; i < fieldsInfo.length; i++) { + fieldsInfo[i] = new FieldInfo( + fields.get(i), + i, + false, + false, + true, + IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS, + DocValuesType.SORTED_NUMERIC, + -1, + Collections.emptyMap(), + 0, + 0, + 0, + 0, + VectorEncoding.FLOAT32, + VectorSimilarityFunction.EUCLIDEAN, + false, + false + ); + fieldProducerMap.put(fields.get(i), docValuesProducer); + } + FieldInfos fieldInfos = new FieldInfos(fieldsInfo); + state = new SegmentWriteState(InfoStream.getDefault(), segmentInfo.dir, segmentInfo, fieldInfos, null, newIOContext(random())); + + mapperService = mock(MapperService.class); + DocumentMapper documentMapper = mock(DocumentMapper.class); + when(mapperService.documentMapper()).thenReturn(documentMapper); + Settings settings = Settings.builder().put(settings(org.opensearch.Version.CURRENT).build()).build(); + NumberFieldMapper numberFieldMapper1 = new NumberFieldMapper.Builder("field2", NumberFieldMapper.NumberType.DOUBLE, false, true) + .build(new Mapper.BuilderContext(settings, new ContentPath())); + NumberFieldMapper numberFieldMapper2 = new NumberFieldMapper.Builder("field4", NumberFieldMapper.NumberType.DOUBLE, false, true) + .build(new Mapper.BuilderContext(settings, new ContentPath())); + MappingLookup fieldMappers = new MappingLookup( + Set.of(numberFieldMapper1, numberFieldMapper2), + Collections.emptyList(), + Collections.emptyList(), + 0, + null + ); + when(documentMapper.mappers()).thenReturn(fieldMappers); + + builder = new BaseStarTreeBuilder(starTreeField, fieldProducerMap, state, mapperService) { + @Override + public void appendStarTreeDocument(StarTreeDocument starTreeDocument) throws IOException {} + + @Override + public StarTreeDocument getStarTreeDocument(int docId) throws IOException { + return null; + } + + @Override + 
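+            // The remaining overrides in this anonymous BaseStarTreeBuilder are deliberate no-op
+            // stubs; only generateMetricAggregatorInfos and reduceStarTreeDocuments are exercised
+            // by the tests in this class.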
public List<StarTreeDocument> getStarTreeDocuments() {
+                return List.of();
+            }
+
+            @Override
+            public Long getDimensionValue(int docId, int dimensionId) throws IOException {
+                return 0L;
+            }
+
+            @Override
+            public Iterator<StarTreeDocument> sortAndAggregateStarTreeDocuments() throws IOException {
+                return null;
+            }
+
+            @Override
+            public Iterator<StarTreeDocument> generateStarTreeDocumentsForStarNode(int startDocId, int endDocId, int dimensionId)
+                throws IOException {
+                return null;
+            }
+        };
+    }
+
+    public void test_generateMetricAggregatorInfos() throws IOException {
+        List<MetricAggregatorInfo> metricAggregatorInfos = builder.generateMetricAggregatorInfos(mapperService, state);
+        List<MetricAggregatorInfo> expectedMetricAggregatorInfos = List.of(
+            new MetricAggregatorInfo(MetricStat.SUM, "field2", starTreeField.getName(), IndexNumericFieldData.NumericType.DOUBLE, null),
+            new MetricAggregatorInfo(MetricStat.SUM, "field4", starTreeField.getName(), IndexNumericFieldData.NumericType.DOUBLE, null)
+        );
+        assertEquals(metricAggregatorInfos, expectedMetricAggregatorInfos);
+    }
+
+    public void test_reduceStarTreeDocuments() {
+        StarTreeDocument starTreeDocument1 = new StarTreeDocument(new Long[] { 1L, 3L, 5L, 8L }, new Double[] { 4.0, 8.0 });
+        StarTreeDocument starTreeDocument2 = new StarTreeDocument(new Long[] { 1L, 3L, 5L, 8L }, new Double[] { 10.0, 6.0 });
+
+        StarTreeDocument expectedMergedStarTreeDocument = new StarTreeDocument(new Long[] { 1L, 3L, 5L, 8L }, new Double[] { 14.0, 14.0 });
+        StarTreeDocument mergedStarTreeDocument = builder.reduceStarTreeDocuments(null, starTreeDocument1);
+        StarTreeDocument resultStarTreeDocument = builder.reduceStarTreeDocuments(mergedStarTreeDocument, starTreeDocument2);
+
+        assertEquals(resultStarTreeDocument.metrics[0], expectedMergedStarTreeDocument.metrics[0]);
+        assertEquals(resultStarTreeDocument.metrics[1], expectedMergedStarTreeDocument.metrics[1]);
+    }
+
+    @Override
+    public void tearDown() throws Exception {
+        super.tearDown();
+        directory.close();
+    }
+}
diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/OnHeapStarTreeBuilderTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/OnHeapStarTreeBuilderTests.java
new file mode 100644
index 0000000000000..4e107e78d27be
--- /dev/null
+++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/OnHeapStarTreeBuilderTests.java
@@ -0,0 +1,706 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +package org.opensearch.index.compositeindex.datacube.startree.builder; + +import org.apache.lucene.codecs.DocValuesProducer; +import org.apache.lucene.codecs.lucene99.Lucene99Codec; +import org.apache.lucene.index.DocValuesType; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.FieldInfos; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.SegmentInfo; +import org.apache.lucene.index.SegmentWriteState; +import org.apache.lucene.index.VectorEncoding; +import org.apache.lucene.index.VectorSimilarityFunction; +import org.apache.lucene.sandbox.document.HalfFloatPoint; +import org.apache.lucene.store.Directory; +import org.apache.lucene.util.InfoStream; +import org.apache.lucene.util.NumericUtils; +import org.apache.lucene.util.Version; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.compositeindex.datacube.Dimension; +import org.opensearch.index.compositeindex.datacube.Metric; +import org.opensearch.index.compositeindex.datacube.MetricStat; +import org.opensearch.index.compositeindex.datacube.NumericDimension; +import org.opensearch.index.compositeindex.datacube.startree.StarTreeDocument; +import org.opensearch.index.compositeindex.datacube.startree.StarTreeField; +import org.opensearch.index.compositeindex.datacube.startree.StarTreeFieldConfiguration; +import org.opensearch.index.mapper.ContentPath; +import org.opensearch.index.mapper.DocumentMapper; +import org.opensearch.index.mapper.Mapper; +import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.mapper.MappingLookup; +import org.opensearch.index.mapper.NumberFieldMapper; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class OnHeapStarTreeBuilderTests extends OpenSearchTestCase { + + private OnHeapStarTreeBuilder builder; + private MapperService mapperService; + private List dimensionsOrder; + private List fields = List.of(); + private List metrics; + private Directory directory; + private FieldInfo[] fieldsInfo; + private StarTreeField compositeField; + private Map fieldProducerMap; + private SegmentWriteState writeState; + + @Before + public void setup() throws IOException { + fields = List.of("field1", "field2", "field3", "field4", "field5", "field6", "field7", "field8", "field9", "field10"); + + dimensionsOrder = List.of( + new NumericDimension("field1"), + new NumericDimension("field3"), + new NumericDimension("field5"), + new NumericDimension("field8") + ); + metrics = List.of( + new Metric("field2", List.of(MetricStat.SUM)), + new Metric("field4", List.of(MetricStat.SUM)), + new Metric("field6", List.of(MetricStat.COUNT)) + ); + + DocValuesProducer docValuesProducer = mock(DocValuesProducer.class); + + compositeField = new StarTreeField( + "test", + dimensionsOrder, + metrics, + new StarTreeFieldConfiguration(1, Set.of("field8"), StarTreeFieldConfiguration.StarTreeBuildMode.ON_HEAP) + ); + directory = newFSDirectory(createTempDir()); + SegmentInfo segmentInfo = new SegmentInfo( + directory, + Version.LATEST, + Version.LUCENE_9_11_0, + "test_segment", + 5, + false, + false, + new Lucene99Codec(), + new HashMap<>(), + 
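+                // Next argument is the segment id; SegmentInfo requires exactly 16 bytes
+                // (assumption: Lucene's StringHelper.ID_LENGTH), hence substring(0, 16) of the UUID.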
UUID.randomUUID().toString().substring(0, 16).getBytes(StandardCharsets.UTF_8), + new HashMap<>(), + null + ); + + fieldsInfo = new FieldInfo[fields.size()]; + fieldProducerMap = new HashMap<>(); + for (int i = 0; i < fieldsInfo.length; i++) { + fieldsInfo[i] = new FieldInfo( + fields.get(i), + i, + false, + false, + true, + IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS, + DocValuesType.SORTED_NUMERIC, + -1, + Collections.emptyMap(), + 0, + 0, + 0, + 0, + VectorEncoding.FLOAT32, + VectorSimilarityFunction.EUCLIDEAN, + false, + false + ); + fieldProducerMap.put(fields.get(i), docValuesProducer); + } + FieldInfos fieldInfos = new FieldInfos(fieldsInfo); + writeState = new SegmentWriteState(InfoStream.getDefault(), segmentInfo.dir, segmentInfo, fieldInfos, null, newIOContext(random())); + + mapperService = mock(MapperService.class); + DocumentMapper documentMapper = mock(DocumentMapper.class); + when(mapperService.documentMapper()).thenReturn(documentMapper); + Settings settings = Settings.builder().put(settings(org.opensearch.Version.CURRENT).build()).build(); + NumberFieldMapper numberFieldMapper1 = new NumberFieldMapper.Builder("field2", NumberFieldMapper.NumberType.DOUBLE, false, true) + .build(new Mapper.BuilderContext(settings, new ContentPath())); + NumberFieldMapper numberFieldMapper2 = new NumberFieldMapper.Builder("field4", NumberFieldMapper.NumberType.DOUBLE, false, true) + .build(new Mapper.BuilderContext(settings, new ContentPath())); + NumberFieldMapper numberFieldMapper3 = new NumberFieldMapper.Builder("field6", NumberFieldMapper.NumberType.DOUBLE, false, true) + .build(new Mapper.BuilderContext(settings, new ContentPath())); + MappingLookup fieldMappers = new MappingLookup( + Set.of(numberFieldMapper1, numberFieldMapper2, numberFieldMapper3), + Collections.emptyList(), + Collections.emptyList(), + 0, + null + ); + when(documentMapper.mappers()).thenReturn(fieldMappers); + builder = new OnHeapStarTreeBuilder(compositeField, fieldProducerMap, writeState, mapperService); + } + + public void test_sortAndAggregateStarTreeDocuments() throws IOException { + + int noOfStarTreeDocuments = 5; + StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; + + starTreeDocuments[0] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 12.0, 10.0, randomDouble() }); + starTreeDocuments[1] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 10.0, 6.0, randomDouble() }); + starTreeDocuments[2] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 14.0, 12.0, randomDouble() }); + starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 9.0, 4.0, randomDouble() }); + starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 11.0, 16.0, randomDouble() }); + + List inorderStarTreeDocuments = List.of( + new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Object[] { 21.0, 14.0, 2L }), + new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 35.0, 34.0, 3L }) + ); + Iterator expectedStarTreeDocumentIterator = inorderStarTreeDocuments.iterator(); + + StarTreeDocument[] segmentStarTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; + for (int i = 0; i < noOfStarTreeDocuments; i++) { + long metric1 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[0]); + long metric2 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[1]); + long metric3 = NumericUtils.doubleToSortableLong((Double) 
starTreeDocuments[i].metrics[2]); + segmentStarTreeDocuments[i] = new StarTreeDocument(starTreeDocuments[i].dimensions, new Long[] { metric1, metric2, metric3 }); + } + + Iterator segmentStarTreeDocumentIterator = builder.sortAndAggregateStarTreeDocuments(segmentStarTreeDocuments); + int numOfAggregatedDocuments = 0; + while (segmentStarTreeDocumentIterator.hasNext() && expectedStarTreeDocumentIterator.hasNext()) { + StarTreeDocument resultStarTreeDocument = segmentStarTreeDocumentIterator.next(); + StarTreeDocument expectedStarTreeDocument = expectedStarTreeDocumentIterator.next(); + + assertEquals(expectedStarTreeDocument.dimensions[0], resultStarTreeDocument.dimensions[0]); + assertEquals(expectedStarTreeDocument.dimensions[1], resultStarTreeDocument.dimensions[1]); + assertEquals(expectedStarTreeDocument.dimensions[2], resultStarTreeDocument.dimensions[2]); + assertEquals(expectedStarTreeDocument.dimensions[3], resultStarTreeDocument.dimensions[3]); + assertEquals(expectedStarTreeDocument.metrics[0], resultStarTreeDocument.metrics[0]); + assertEquals(expectedStarTreeDocument.metrics[1], resultStarTreeDocument.metrics[1]); + assertEquals(expectedStarTreeDocument.metrics[2], resultStarTreeDocument.metrics[2]); + + numOfAggregatedDocuments++; + } + + assertEquals(inorderStarTreeDocuments.size(), numOfAggregatedDocuments); + + } + + public void test_sortAndAggregateStarTreeDocuments_nullMetric() throws IOException { + + int noOfStarTreeDocuments = 5; + StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; + + starTreeDocuments[0] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 12.0, 10.0, randomDouble() }); + starTreeDocuments[1] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 10.0, 6.0, randomDouble() }); + starTreeDocuments[2] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 14.0, 12.0, randomDouble() }); + starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 9.0, 4.0, randomDouble() }); + starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 11.0, null, randomDouble() }); + StarTreeDocument expectedStarTreeDocument = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 21.0, 14.0, 2.0 }); + + StarTreeDocument[] segmentStarTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; + for (int i = 0; i < noOfStarTreeDocuments; i++) { + Long metric1 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[0]); + Long metric2 = starTreeDocuments[i].metrics[1] != null + ? 
NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[1]) + : null; + Long metric3 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[2]); + segmentStarTreeDocuments[i] = new StarTreeDocument(starTreeDocuments[i].dimensions, new Object[] { metric1, metric2, metric3 }); + } + + Iterator segmentStarTreeDocumentIterator = builder.sortAndAggregateStarTreeDocuments(segmentStarTreeDocuments); + + StarTreeDocument resultStarTreeDocument = segmentStarTreeDocumentIterator.next(); + assertEquals(expectedStarTreeDocument.dimensions[0], resultStarTreeDocument.dimensions[0]); + assertEquals(expectedStarTreeDocument.dimensions[1], resultStarTreeDocument.dimensions[1]); + assertEquals(expectedStarTreeDocument.dimensions[2], resultStarTreeDocument.dimensions[2]); + assertEquals(expectedStarTreeDocument.dimensions[3], resultStarTreeDocument.dimensions[3]); + assertEquals(expectedStarTreeDocument.metrics[0], resultStarTreeDocument.metrics[0]); + assertEquals(expectedStarTreeDocument.metrics[1], resultStarTreeDocument.metrics[1]); + + assertThrows( + "Null metric should have resulted in IllegalStateException", + IllegalStateException.class, + segmentStarTreeDocumentIterator::next + ); + + } + + public void test_sortAndAggregateStarTreeDocument_longMaxAndLongMinDimensions() throws IOException { + + int noOfStarTreeDocuments = 5; + StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; + + starTreeDocuments[0] = new StarTreeDocument(new Long[] { Long.MIN_VALUE, 4L, 3L, 4L }, new Double[] { 12.0, 10.0, randomDouble() }); + starTreeDocuments[1] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, Long.MAX_VALUE }, new Double[] { 10.0, 6.0, randomDouble() }); + starTreeDocuments[2] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, Long.MAX_VALUE }, new Double[] { 14.0, 12.0, randomDouble() }); + starTreeDocuments[3] = new StarTreeDocument(new Long[] { Long.MIN_VALUE, 4L, 3L, 4L }, new Double[] { 9.0, 4.0, randomDouble() }); + starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, Long.MAX_VALUE }, new Double[] { 11.0, 16.0, randomDouble() }); + + List inorderStarTreeDocuments = List.of( + new StarTreeDocument(new Long[] { Long.MIN_VALUE, 4L, 3L, 4L }, new Object[] { 21.0, 14.0, 2L }), + new StarTreeDocument(new Long[] { 3L, 4L, 2L, Long.MAX_VALUE }, new Object[] { 35.0, 34.0, 3L }) + ); + Iterator expectedStarTreeDocumentIterator = inorderStarTreeDocuments.iterator(); + + StarTreeDocument[] segmentStarTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; + for (int i = 0; i < noOfStarTreeDocuments; i++) { + long metric1 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[0]); + long metric2 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[1]); + long metric3 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[2]); + segmentStarTreeDocuments[i] = new StarTreeDocument(starTreeDocuments[i].dimensions, new Long[] { metric1, metric2, metric3 }); + } + + Iterator segmentStarTreeDocumentIterator = builder.sortAndAggregateStarTreeDocuments(segmentStarTreeDocuments); + int numOfAggregatedDocuments = 0; + while (segmentStarTreeDocumentIterator.hasNext() && expectedStarTreeDocumentIterator.hasNext()) { + StarTreeDocument resultStarTreeDocument = segmentStarTreeDocumentIterator.next(); + StarTreeDocument expectedStarTreeDocument = expectedStarTreeDocumentIterator.next(); + + assertEquals(expectedStarTreeDocument.dimensions[0], resultStarTreeDocument.dimensions[0]); + 
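+                // The remaining dimensions and metrics are compared element-wise below; the
+                // Long.MIN_VALUE and Long.MAX_VALUE dimension values must survive sorting and
+                // aggregation unchanged.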
assertEquals(expectedStarTreeDocument.dimensions[1], resultStarTreeDocument.dimensions[1]); + assertEquals(expectedStarTreeDocument.dimensions[2], resultStarTreeDocument.dimensions[2]); + assertEquals(expectedStarTreeDocument.dimensions[3], resultStarTreeDocument.dimensions[3]); + assertEquals(expectedStarTreeDocument.metrics[0], resultStarTreeDocument.metrics[0]); + assertEquals(expectedStarTreeDocument.metrics[1], resultStarTreeDocument.metrics[1]); + assertEquals(expectedStarTreeDocument.metrics[2], resultStarTreeDocument.metrics[2]); + + numOfAggregatedDocuments++; + } + + assertEquals(inorderStarTreeDocuments.size(), numOfAggregatedDocuments); + + } + + public void test_sortAndAggregateStarTreeDocument_DoubleMaxAndDoubleMinMetrics() throws IOException { + + int noOfStarTreeDocuments = 5; + StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; + + starTreeDocuments[0] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { Double.MAX_VALUE, 10.0, randomDouble() }); + starTreeDocuments[1] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 10.0, 6.0, randomDouble() }); + starTreeDocuments[2] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 14.0, Double.MIN_VALUE, randomDouble() }); + starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 9.0, 4.0, randomDouble() }); + starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 11.0, 16.0, randomDouble() }); + + List inorderStarTreeDocuments = List.of( + new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Object[] { Double.MAX_VALUE + 9, 14.0, 2L }), + new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 35.0, Double.MIN_VALUE + 22, 3L }) + ); + Iterator expectedStarTreeDocumentIterator = inorderStarTreeDocuments.iterator(); + + StarTreeDocument[] segmentStarTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; + for (int i = 0; i < noOfStarTreeDocuments; i++) { + long metric1 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[0]); + long metric2 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[1]); + long metric3 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[2]); + segmentStarTreeDocuments[i] = new StarTreeDocument(starTreeDocuments[i].dimensions, new Long[] { metric1, metric2, metric3 }); + } + + Iterator segmentStarTreeDocumentIterator = builder.sortAndAggregateStarTreeDocuments(segmentStarTreeDocuments); + int numOfAggregatedDocuments = 0; + while (segmentStarTreeDocumentIterator.hasNext() && expectedStarTreeDocumentIterator.hasNext()) { + StarTreeDocument resultStarTreeDocument = segmentStarTreeDocumentIterator.next(); + StarTreeDocument expectedStarTreeDocument = expectedStarTreeDocumentIterator.next(); + + assertEquals(expectedStarTreeDocument.dimensions[0], resultStarTreeDocument.dimensions[0]); + assertEquals(expectedStarTreeDocument.dimensions[1], resultStarTreeDocument.dimensions[1]); + assertEquals(expectedStarTreeDocument.dimensions[2], resultStarTreeDocument.dimensions[2]); + assertEquals(expectedStarTreeDocument.dimensions[3], resultStarTreeDocument.dimensions[3]); + assertEquals(expectedStarTreeDocument.metrics[0], resultStarTreeDocument.metrics[0]); + assertEquals(expectedStarTreeDocument.metrics[1], resultStarTreeDocument.metrics[1]); + assertEquals(expectedStarTreeDocument.metrics[2], resultStarTreeDocument.metrics[2]); + + numOfAggregatedDocuments++; + } + + 
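+            // Note on the expected documents above: in Java double arithmetic
+            // Double.MAX_VALUE + 9 evaluates to Double.MAX_VALUE and 6.0 + Double.MIN_VALUE + 16.0
+            // to 22.0 (Double.MIN_VALUE is the smallest positive double, not the most negative),
+            // so the extreme inputs are aggregated without any special-case handling.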
assertEquals(inorderStarTreeDocuments.size(), numOfAggregatedDocuments); + + } + + public void test_build_halfFloatMetrics() throws IOException { + + mapperService = mock(MapperService.class); + DocumentMapper documentMapper = mock(DocumentMapper.class); + when(mapperService.documentMapper()).thenReturn(documentMapper); + Settings settings = Settings.builder().put(settings(org.opensearch.Version.CURRENT).build()).build(); + NumberFieldMapper numberFieldMapper1 = new NumberFieldMapper.Builder("field2", NumberFieldMapper.NumberType.HALF_FLOAT, false, true) + .build(new Mapper.BuilderContext(settings, new ContentPath())); + NumberFieldMapper numberFieldMapper2 = new NumberFieldMapper.Builder("field4", NumberFieldMapper.NumberType.HALF_FLOAT, false, true) + .build(new Mapper.BuilderContext(settings, new ContentPath())); + NumberFieldMapper numberFieldMapper3 = new NumberFieldMapper.Builder("field6", NumberFieldMapper.NumberType.HALF_FLOAT, false, true) + .build(new Mapper.BuilderContext(settings, new ContentPath())); + MappingLookup fieldMappers = new MappingLookup( + Set.of(numberFieldMapper1, numberFieldMapper2, numberFieldMapper3), + Collections.emptyList(), + Collections.emptyList(), + 0, + null + ); + when(documentMapper.mappers()).thenReturn(fieldMappers); + builder = new OnHeapStarTreeBuilder(compositeField, fieldProducerMap, writeState, mapperService); + + int noOfStarTreeDocuments = 5; + StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; + + starTreeDocuments[0] = new StarTreeDocument( + new Long[] { 2L, 4L, 3L, 4L }, + new HalfFloatPoint[] { new HalfFloatPoint("hf1", 12), new HalfFloatPoint("hf6", 10), new HalfFloatPoint("field6", 10) } + ); + starTreeDocuments[1] = new StarTreeDocument( + new Long[] { 3L, 4L, 2L, 1L }, + new HalfFloatPoint[] { new HalfFloatPoint("hf2", 10), new HalfFloatPoint("hf7", 6), new HalfFloatPoint("field6", 10) } + ); + starTreeDocuments[2] = new StarTreeDocument( + new Long[] { 3L, 4L, 2L, 1L }, + new HalfFloatPoint[] { new HalfFloatPoint("hf3", 14), new HalfFloatPoint("hf8", 12), new HalfFloatPoint("field6", 10) } + ); + starTreeDocuments[3] = new StarTreeDocument( + new Long[] { 2L, 4L, 3L, 4L }, + new HalfFloatPoint[] { new HalfFloatPoint("hf4", 9), new HalfFloatPoint("hf9", 4), new HalfFloatPoint("field6", 10) } + ); + starTreeDocuments[4] = new StarTreeDocument( + new Long[] { 3L, 4L, 2L, 1L }, + new HalfFloatPoint[] { new HalfFloatPoint("hf5", 11), new HalfFloatPoint("hf10", 16), new HalfFloatPoint("field6", 10) } + ); + + StarTreeDocument[] segmentStarTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; + for (int i = 0; i < noOfStarTreeDocuments; i++) { + long metric1 = HalfFloatPoint.halfFloatToSortableShort( + ((HalfFloatPoint) starTreeDocuments[i].metrics[0]).numericValue().floatValue() + ); + long metric2 = HalfFloatPoint.halfFloatToSortableShort( + ((HalfFloatPoint) starTreeDocuments[i].metrics[1]).numericValue().floatValue() + ); + long metric3 = HalfFloatPoint.halfFloatToSortableShort( + ((HalfFloatPoint) starTreeDocuments[i].metrics[2]).numericValue().floatValue() + ); + segmentStarTreeDocuments[i] = new StarTreeDocument(starTreeDocuments[i].dimensions, new Long[] { metric1, metric2, metric3 }); + } + + Iterator segmentStarTreeDocumentIterator = builder.sortAndAggregateStarTreeDocuments(segmentStarTreeDocuments); + builder.build(segmentStarTreeDocumentIterator); + + List resultStarTreeDocuments = builder.getStarTreeDocuments(); + assertEquals(7, resultStarTreeDocuments.size()); + + Iterator 
expectedStarTreeDocumentIterator = getExpectedStarTreeDocumentIterator(); + assertStarTreeDocuments(resultStarTreeDocuments, expectedStarTreeDocumentIterator); + } + + public void test_build_floatMetrics() throws IOException { + + mapperService = mock(MapperService.class); + DocumentMapper documentMapper = mock(DocumentMapper.class); + when(mapperService.documentMapper()).thenReturn(documentMapper); + Settings settings = Settings.builder().put(settings(org.opensearch.Version.CURRENT).build()).build(); + NumberFieldMapper numberFieldMapper1 = new NumberFieldMapper.Builder("field2", NumberFieldMapper.NumberType.FLOAT, false, true) + .build(new Mapper.BuilderContext(settings, new ContentPath())); + NumberFieldMapper numberFieldMapper2 = new NumberFieldMapper.Builder("field4", NumberFieldMapper.NumberType.FLOAT, false, true) + .build(new Mapper.BuilderContext(settings, new ContentPath())); + NumberFieldMapper numberFieldMapper3 = new NumberFieldMapper.Builder("field6", NumberFieldMapper.NumberType.FLOAT, false, true) + .build(new Mapper.BuilderContext(settings, new ContentPath())); + MappingLookup fieldMappers = new MappingLookup( + Set.of(numberFieldMapper1, numberFieldMapper2, numberFieldMapper3), + Collections.emptyList(), + Collections.emptyList(), + 0, + null + ); + when(documentMapper.mappers()).thenReturn(fieldMappers); + builder = new OnHeapStarTreeBuilder(compositeField, fieldProducerMap, writeState, mapperService); + + int noOfStarTreeDocuments = 5; + StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; + + starTreeDocuments[0] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Float[] { 12.0F, 10.0F, randomFloat() }); + starTreeDocuments[1] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Float[] { 10.0F, 6.0F, randomFloat() }); + starTreeDocuments[2] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Float[] { 14.0F, 12.0F, randomFloat() }); + starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Float[] { 9.0F, 4.0F, randomFloat() }); + starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Float[] { 11.0F, 16.0F, randomFloat() }); + + StarTreeDocument[] segmentStarTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; + for (int i = 0; i < noOfStarTreeDocuments; i++) { + long metric1 = NumericUtils.floatToSortableInt((Float) starTreeDocuments[i].metrics[0]); + long metric2 = NumericUtils.floatToSortableInt((Float) starTreeDocuments[i].metrics[1]); + long metric3 = NumericUtils.floatToSortableInt((Float) starTreeDocuments[i].metrics[2]); + segmentStarTreeDocuments[i] = new StarTreeDocument(starTreeDocuments[i].dimensions, new Long[] { metric1, metric2, metric3 }); + } + + Iterator<StarTreeDocument> segmentStarTreeDocumentIterator = builder.sortAndAggregateStarTreeDocuments(segmentStarTreeDocuments); + builder.build(segmentStarTreeDocumentIterator); + + List<StarTreeDocument> resultStarTreeDocuments = builder.getStarTreeDocuments(); + assertEquals(7, resultStarTreeDocuments.size()); + + Iterator<StarTreeDocument> expectedStarTreeDocumentIterator = getExpectedStarTreeDocumentIterator(); + assertStarTreeDocuments(resultStarTreeDocuments, expectedStarTreeDocumentIterator); + } + + public void test_build_longMetrics() throws IOException { + + mapperService = mock(MapperService.class); + DocumentMapper documentMapper = mock(DocumentMapper.class); + when(mapperService.documentMapper()).thenReturn(documentMapper); + Settings settings = Settings.builder().put(settings(org.opensearch.Version.CURRENT).build()).build(); + 
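+ // the metric fields are registered as LONG here, so the raw long values flow through unchanged (no NumericUtils sortable-bits conversion, unlike the double/float variants of this test)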
NumberFieldMapper numberFieldMapper1 = new NumberFieldMapper.Builder("field2", NumberFieldMapper.NumberType.LONG, false, true) + .build(new Mapper.BuilderContext(settings, new ContentPath())); + NumberFieldMapper numberFieldMapper2 = new NumberFieldMapper.Builder("field4", NumberFieldMapper.NumberType.LONG, false, true) + .build(new Mapper.BuilderContext(settings, new ContentPath())); + NumberFieldMapper numberFieldMapper3 = new NumberFieldMapper.Builder("field6", NumberFieldMapper.NumberType.LONG, false, true) + .build(new Mapper.BuilderContext(settings, new ContentPath())); + MappingLookup fieldMappers = new MappingLookup( + Set.of(numberFieldMapper1, numberFieldMapper2, numberFieldMapper3), + Collections.emptyList(), + Collections.emptyList(), + 0, + null + ); + when(documentMapper.mappers()).thenReturn(fieldMappers); + builder = new OnHeapStarTreeBuilder(compositeField, fieldProducerMap, writeState, mapperService); + + int noOfStarTreeDocuments = 5; + StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; + + starTreeDocuments[0] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Long[] { 12L, 10L, randomLong() }); + starTreeDocuments[1] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Long[] { 10L, 6L, randomLong() }); + starTreeDocuments[2] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Long[] { 14L, 12L, randomLong() }); + starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Long[] { 9L, 4L, randomLong() }); + starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Long[] { 11L, 16L, randomLong() }); + + StarTreeDocument[] segmentStarTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; + for (int i = 0; i < noOfStarTreeDocuments; i++) { + long metric1 = (Long) starTreeDocuments[i].metrics[0]; + long metric2 = (Long) starTreeDocuments[i].metrics[1]; + long metric3 = (Long) starTreeDocuments[i].metrics[2]; + segmentStarTreeDocuments[i] = new StarTreeDocument(starTreeDocuments[i].dimensions, new Long[] { metric1, metric2, metric3 }); + } + + Iterator<StarTreeDocument> segmentStarTreeDocumentIterator = builder.sortAndAggregateStarTreeDocuments(segmentStarTreeDocuments); + builder.build(segmentStarTreeDocumentIterator); + + List<StarTreeDocument> resultStarTreeDocuments = builder.getStarTreeDocuments(); + assertEquals(7, resultStarTreeDocuments.size()); + + Iterator<StarTreeDocument> expectedStarTreeDocumentIterator = getExpectedStarTreeDocumentIterator(); + assertStarTreeDocuments(resultStarTreeDocuments, expectedStarTreeDocumentIterator); + } + + private static Iterator<StarTreeDocument> getExpectedStarTreeDocumentIterator() { + List<StarTreeDocument> expectedStarTreeDocuments = List.of( + new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Object[] { 21.0, 14.0, 2L }), + new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 35.0, 34.0, 3L }), + new StarTreeDocument(new Long[] { -1L, 4L, 2L, 1L }, new Object[] { 35.0, 34.0, 3L }), + new StarTreeDocument(new Long[] { -1L, 4L, 3L, 4L }, new Object[] { 21.0, 14.0, 2L }), + new StarTreeDocument(new Long[] { -1L, 4L, -1L, 1L }, new Object[] { 35.0, 34.0, 3L }), + new StarTreeDocument(new Long[] { -1L, 4L, -1L, 4L }, new Object[] { 21.0, 14.0, 2L }), + new StarTreeDocument(new Long[] { -1L, 4L, -1L, -1L }, new Object[] { 56.0, 48.0, 5L }) + ); + return expectedStarTreeDocuments.iterator(); + } + + public void test_build() throws IOException { + + int noOfStarTreeDocuments = 5; + StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; + + starTreeDocuments[0] = new 
StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 12.0, 10.0, randomDouble() }); + starTreeDocuments[1] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 10.0, 6.0, randomDouble() }); + starTreeDocuments[2] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 14.0, 12.0, randomDouble() }); + starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 9.0, 4.0, randomDouble() }); + starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 11.0, 16.0, randomDouble() }); + + StarTreeDocument[] segmentStarTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; + for (int i = 0; i < noOfStarTreeDocuments; i++) { + long metric1 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[0]); + long metric2 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[1]); + long metric3 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[2]); + segmentStarTreeDocuments[i] = new StarTreeDocument(starTreeDocuments[i].dimensions, new Long[] { metric1, metric2, metric3 }); + } + + Iterator<StarTreeDocument> segmentStarTreeDocumentIterator = builder.sortAndAggregateStarTreeDocuments(segmentStarTreeDocuments); + builder.build(segmentStarTreeDocumentIterator); + + List<StarTreeDocument> resultStarTreeDocuments = builder.getStarTreeDocuments(); + assertEquals(7, resultStarTreeDocuments.size()); + + Iterator<StarTreeDocument> expectedStarTreeDocumentIterator = getExpectedStarTreeDocumentIterator(); + assertStarTreeDocuments(resultStarTreeDocuments, expectedStarTreeDocumentIterator); + } + + private void assertStarTreeDocuments( + List<StarTreeDocument> resultStarTreeDocuments, + Iterator<StarTreeDocument> expectedStarTreeDocumentIterator + ) { + Iterator<StarTreeDocument> resultStarTreeDocumentIterator = resultStarTreeDocuments.iterator(); + while (resultStarTreeDocumentIterator.hasNext() && expectedStarTreeDocumentIterator.hasNext()) { + StarTreeDocument resultStarTreeDocument = resultStarTreeDocumentIterator.next(); + StarTreeDocument expectedStarTreeDocument = expectedStarTreeDocumentIterator.next(); + + assertEquals(expectedStarTreeDocument.dimensions[0], resultStarTreeDocument.dimensions[0]); + assertEquals(expectedStarTreeDocument.dimensions[1], resultStarTreeDocument.dimensions[1]); + assertEquals(expectedStarTreeDocument.dimensions[2], resultStarTreeDocument.dimensions[2]); + assertEquals(expectedStarTreeDocument.dimensions[3], resultStarTreeDocument.dimensions[3]); + assertEquals(expectedStarTreeDocument.metrics[0], resultStarTreeDocument.metrics[0]); + assertEquals(expectedStarTreeDocument.metrics[1], resultStarTreeDocument.metrics[1]); + assertEquals(expectedStarTreeDocument.metrics[2], resultStarTreeDocument.metrics[2]); + } + } + + public void test_build_starTreeDataset() throws IOException { + + fields = List.of("fieldC", "fieldB", "fieldL", "fieldI"); + + dimensionsOrder = List.of(new NumericDimension("fieldC"), new NumericDimension("fieldB"), new NumericDimension("fieldL")); + metrics = List.of(new Metric("fieldI", List.of(MetricStat.SUM))); + + DocValuesProducer docValuesProducer = mock(DocValuesProducer.class); + + compositeField = new StarTreeField( + "test", + dimensionsOrder, + metrics, + new StarTreeFieldConfiguration(1, Set.of(), StarTreeFieldConfiguration.StarTreeBuildMode.ON_HEAP) + ); + SegmentInfo segmentInfo = new SegmentInfo( + directory, + Version.LATEST, + Version.LUCENE_9_11_0, + "test_segment", + 7, + false, + false, + new Lucene99Codec(), + new HashMap<>(), + UUID.randomUUID().toString().substring(0, 
16).getBytes(StandardCharsets.UTF_8), + new HashMap<>(), + null + ); + + fieldsInfo = new FieldInfo[fields.size()]; + fieldProducerMap = new HashMap<>(); + for (int i = 0; i < fieldsInfo.length; i++) { + fieldsInfo[i] = new FieldInfo( + fields.get(i), + i, + false, + false, + true, + IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS, + DocValuesType.SORTED_NUMERIC, + -1, + Collections.emptyMap(), + 0, + 0, + 0, + 0, + VectorEncoding.FLOAT32, + VectorSimilarityFunction.EUCLIDEAN, + false, + false + ); + fieldProducerMap.put(fields.get(i), docValuesProducer); + } + FieldInfos fieldInfos = new FieldInfos(fieldsInfo); + writeState = new SegmentWriteState(InfoStream.getDefault(), segmentInfo.dir, segmentInfo, fieldInfos, null, newIOContext(random())); + + mapperService = mock(MapperService.class); + DocumentMapper documentMapper = mock(DocumentMapper.class); + when(mapperService.documentMapper()).thenReturn(documentMapper); + Settings settings = Settings.builder().put(settings(org.opensearch.Version.CURRENT).build()).build(); + NumberFieldMapper numberFieldMapper1 = new NumberFieldMapper.Builder("fieldI", NumberFieldMapper.NumberType.DOUBLE, false, true) + .build(new Mapper.BuilderContext(settings, new ContentPath())); + MappingLookup fieldMappers = new MappingLookup( + Set.of(numberFieldMapper1), + Collections.emptyList(), + Collections.emptyList(), + 0, + null + ); + when(documentMapper.mappers()).thenReturn(fieldMappers); + builder = new OnHeapStarTreeBuilder(compositeField, fieldProducerMap, writeState, mapperService); + + int noOfStarTreeDocuments = 7; + StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; + starTreeDocuments[0] = new StarTreeDocument(new Long[] { 1L, 11L, 21L }, new Double[] { 400.0 }); + starTreeDocuments[1] = new StarTreeDocument(new Long[] { 1L, 12L, 22L }, new Double[] { 200.0 }); + starTreeDocuments[2] = new StarTreeDocument(new Long[] { 2L, 13L, 23L }, new Double[] { 300.0 }); + starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 13L, 21L }, new Double[] { 100.0 }); + starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 11L, 21L }, new Double[] { 600.0 }); + starTreeDocuments[5] = new StarTreeDocument(new Long[] { 3L, 12L, 23L }, new Double[] { 200.0 }); + starTreeDocuments[6] = new StarTreeDocument(new Long[] { 3L, 12L, 21L }, new Double[] { 400.0 }); + + StarTreeDocument[] segmentStarTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; + for (int i = 0; i < noOfStarTreeDocuments; i++) { + long metric1 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[0]); + segmentStarTreeDocuments[i] = new StarTreeDocument(starTreeDocuments[i].dimensions, new Long[] { metric1 }); + } + + Iterator<StarTreeDocument> segmentStarTreeDocumentIterator = builder.sortAndAggregateStarTreeDocuments(segmentStarTreeDocuments); + builder.build(segmentStarTreeDocumentIterator); + + List<StarTreeDocument> resultStarTreeDocuments = builder.getStarTreeDocuments(); + List<StarTreeDocument> expectedStarTreeDocuments = List.of( + new StarTreeDocument(new Long[] { 1L, 11L, 21L }, new Object[] { 400.0 }), + new StarTreeDocument(new Long[] { 1L, 12L, 22L }, new Object[] { 200.0 }), + new StarTreeDocument(new Long[] { 2L, 13L, 21L }, new Object[] { 100.0 }), + new StarTreeDocument(new Long[] { 2L, 13L, 23L }, new Object[] { 300.0 }), + new StarTreeDocument(new Long[] { 3L, 11L, 21L }, new Object[] { 600.0 }), + new StarTreeDocument(new Long[] { 3L, 12L, 21L }, new Object[] { 400.0 }), + new StarTreeDocument(new Long[] { 3L, 12L, 23L }, new Object[] { 200.0 }), + new 
StarTreeDocument(new Long[] { -1L, 11L, 21L }, new Object[] { 1000.0 }), + new StarTreeDocument(new Long[] { -1L, 12L, 21L }, new Object[] { 400.0 }), + new StarTreeDocument(new Long[] { -1L, 12L, 22L }, new Object[] { 200.0 }), + new StarTreeDocument(new Long[] { -1L, 12L, 23L }, new Object[] { 200.0 }), + new StarTreeDocument(new Long[] { -1L, 13L, 21L }, new Object[] { 100.0 }), + new StarTreeDocument(new Long[] { -1L, 13L, 23L }, new Object[] { 300.0 }), + new StarTreeDocument(new Long[] { -1L, -1L, 21L }, new Object[] { 1500.0 }), + new StarTreeDocument(new Long[] { -1L, -1L, 22L }, new Object[] { 200.0 }), + new StarTreeDocument(new Long[] { -1L, -1L, 23L }, new Object[] { 500.0 }), + new StarTreeDocument(new Long[] { -1L, -1L, -1L }, new Object[] { 2200.0 }), + new StarTreeDocument(new Long[] { -1L, 12L, -1L }, new Object[] { 800.0 }), + new StarTreeDocument(new Long[] { -1L, 13L, -1L }, new Object[] { 400.0 }), + new StarTreeDocument(new Long[] { 1L, -1L, 21L }, new Object[] { 400.0 }), + new StarTreeDocument(new Long[] { 1L, -1L, 22L }, new Object[] { 200.0 }), + new StarTreeDocument(new Long[] { 1L, -1L, -1L }, new Object[] { 600.0 }), + new StarTreeDocument(new Long[] { 2L, 13L, -1L }, new Object[] { 400.0 }), + new StarTreeDocument(new Long[] { 3L, -1L, 21L }, new Object[] { 1000.0 }), + new StarTreeDocument(new Long[] { 3L, -1L, 23L }, new Object[] { 200.0 }), + new StarTreeDocument(new Long[] { 3L, -1L, -1L }, new Object[] { 1200.0 }), + new StarTreeDocument(new Long[] { 3L, 12L, -1L }, new Object[] { 600.0 }) + ); + + Iterator<StarTreeDocument> expectedStarTreeDocumentIterator = expectedStarTreeDocuments.iterator(); + Iterator<StarTreeDocument> resultStarTreeDocumentIterator = resultStarTreeDocuments.iterator(); + while (resultStarTreeDocumentIterator.hasNext() && expectedStarTreeDocumentIterator.hasNext()) { + StarTreeDocument resultStarTreeDocument = resultStarTreeDocumentIterator.next(); + StarTreeDocument expectedStarTreeDocument = expectedStarTreeDocumentIterator.next(); + + assertEquals(expectedStarTreeDocument.dimensions[0], resultStarTreeDocument.dimensions[0]); + assertEquals(expectedStarTreeDocument.dimensions[1], resultStarTreeDocument.dimensions[1]); + assertEquals(expectedStarTreeDocument.dimensions[2], resultStarTreeDocument.dimensions[2]); + assertEquals(expectedStarTreeDocument.metrics[0], resultStarTreeDocument.metrics[0]); + } + + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + directory.close(); + } +} diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeDocValuesIteratorAdapterTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeDocValuesIteratorAdapterTests.java new file mode 100644 index 0000000000000..9c2621401faa4 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeDocValuesIteratorAdapterTests.java @@ -0,0 +1,139 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.compositeindex.datacube.startree.builder; + +import org.apache.lucene.codecs.DocValuesProducer; +import org.apache.lucene.index.DocValuesType; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.index.VectorEncoding; +import org.apache.lucene.index.VectorSimilarityFunction; +import org.apache.lucene.search.DocIdSetIterator; +import org.opensearch.index.compositeindex.datacube.startree.utils.SequentialDocValuesIterator; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.Collections; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class StarTreeDocValuesIteratorAdapterTests extends OpenSearchTestCase { + + private StarTreeDocValuesIteratorAdapter adapter; + + @Override + public void setUp() throws Exception { + super.setUp(); + adapter = new StarTreeDocValuesIteratorAdapter(); + } + + public void testGetDocValuesIterator() throws IOException { + DocValuesProducer mockProducer = mock(DocValuesProducer.class); + SortedNumericDocValues mockSortedNumericDocValues = mock(SortedNumericDocValues.class); + + when(mockProducer.getSortedNumeric(any())).thenReturn(mockSortedNumericDocValues); + + // pass null rather than a stray any(): Mockito matchers are only valid while stubbing or verifying, and the stub above matches any FieldInfo + SequentialDocValuesIterator iterator = adapter.getDocValuesIterator(DocValuesType.SORTED_NUMERIC, null, mockProducer); + + assertNotNull(iterator); + assertEquals(mockSortedNumericDocValues, iterator.getDocIdSetIterator()); + } + + public void testGetDocValuesIteratorWithUnsupportedType() { + DocValuesProducer mockProducer = mock(DocValuesProducer.class); + FieldInfo fieldInfo = new FieldInfo( + "random_field", + 0, + false, + false, + true, + IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS, + DocValuesType.SORTED_NUMERIC, + -1, + Collections.emptyMap(), + 0, + 0, + 0, + 0, + VectorEncoding.FLOAT32, + VectorSimilarityFunction.EUCLIDEAN, + false, + false + ); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> { + adapter.getDocValuesIterator(DocValuesType.BINARY, fieldInfo, mockProducer); + }); + + assertEquals("Unsupported DocValuesType: BINARY", exception.getMessage()); + } + + public void testGetNextValue() throws IOException { + SortedNumericDocValues mockSortedNumericDocValues = mock(SortedNumericDocValues.class); + SequentialDocValuesIterator iterator = new SequentialDocValuesIterator(mockSortedNumericDocValues); + iterator.setDocId(1); + when(mockSortedNumericDocValues.nextValue()).thenReturn(42L); + + Long nextValue = adapter.getNextValue(iterator, 1); + + assertEquals(Long.valueOf(42L), nextValue); + assertEquals(Long.valueOf(42L), iterator.getDocValue()); + } + + public void testGetNextValueWithInvalidDocId() { + SortedNumericDocValues mockSortedNumericDocValues = mock(SortedNumericDocValues.class); + SequentialDocValuesIterator iterator = new SequentialDocValuesIterator(mockSortedNumericDocValues); + iterator.setDocId(DocIdSetIterator.NO_MORE_DOCS); + + IllegalStateException exception = expectThrows(IllegalStateException.class, () -> { adapter.getNextValue(iterator, 1); }); + + assertEquals("invalid doc id to fetch the next value", exception.getMessage()); + } + + public void testGetNextValueWithUnsupportedIterator() { + DocIdSetIterator mockIterator = mock(DocIdSetIterator.class); + SequentialDocValuesIterator iterator = new 
SequentialDocValuesIterator(mockIterator); + + IllegalStateException exception = expectThrows(IllegalStateException.class, () -> { adapter.getNextValue(iterator, 1); }); + + assertEquals("Unsupported Iterator: " + mockIterator.toString(), exception.getMessage()); + } + + public void testNextDoc() throws IOException { + SortedNumericDocValues mockSortedNumericDocValues = mock(SortedNumericDocValues.class); + SequentialDocValuesIterator iterator = new SequentialDocValuesIterator(mockSortedNumericDocValues); + when(mockSortedNumericDocValues.nextDoc()).thenReturn(2, 3, DocIdSetIterator.NO_MORE_DOCS); + when(mockSortedNumericDocValues.nextValue()).thenReturn(42L, 32L); + + int nextDocId = adapter.nextDoc(iterator, 1); + assertEquals(2, nextDocId); + assertEquals(Long.valueOf(42L), adapter.getNextValue(iterator, nextDocId)); + + nextDocId = adapter.nextDoc(iterator, 2); + assertEquals(3, nextDocId); + } + + public void testNextDoc_noMoreDocs() throws IOException { + SortedNumericDocValues mockSortedNumericDocValues = mock(SortedNumericDocValues.class); + SequentialDocValuesIterator iterator = new SequentialDocValuesIterator(mockSortedNumericDocValues); + when(mockSortedNumericDocValues.nextDoc()).thenReturn(2, DocIdSetIterator.NO_MORE_DOCS); + when(mockSortedNumericDocValues.nextValue()).thenReturn(42L, 32L); + + int nextDocId = adapter.nextDoc(iterator, 1); + assertEquals(2, nextDocId); + assertEquals(Long.valueOf(42L), adapter.getNextValue(iterator, nextDocId)); + + assertThrows(IllegalStateException.class, () -> adapter.nextDoc(iterator, 2)); + + } +} diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeValuesIteratorFactoryTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeValuesIteratorFactoryTests.java new file mode 100644 index 0000000000000..1aba67533d52e --- /dev/null +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeValuesIteratorFactoryTests.java @@ -0,0 +1,131 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.compositeindex.datacube.startree.builder; + +import org.apache.lucene.codecs.DocValuesProducer; +import org.apache.lucene.index.DocValuesType; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.index.VectorEncoding; +import org.apache.lucene.index.VectorSimilarityFunction; +import org.apache.lucene.search.DocIdSetIterator; +import org.opensearch.index.compositeindex.datacube.startree.utils.SequentialDocValuesIterator; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.BeforeClass; + +import java.io.IOException; +import java.util.Collections; + +import org.mockito.Mockito; + +import static org.mockito.Mockito.when; + +public class StarTreeValuesIteratorFactoryTests extends OpenSearchTestCase { + + private static StarTreeDocValuesIteratorAdapter starTreeDocValuesIteratorAdapter; + private static FieldInfo mockFieldInfo; + + @BeforeClass + public static void setup() { + starTreeDocValuesIteratorAdapter = new StarTreeDocValuesIteratorAdapter(); + mockFieldInfo = new FieldInfo( + "field", + 1, + false, + false, + true, + IndexOptions.NONE, + DocValuesType.NONE, + -1, + Collections.emptyMap(), + 0, + 0, + 0, + 0, + VectorEncoding.FLOAT32, + VectorSimilarityFunction.EUCLIDEAN, + false, + false + ); + } + + public void testCreateIterator_SortedNumeric() throws IOException { + DocValuesProducer producer = Mockito.mock(DocValuesProducer.class); + SortedNumericDocValues iterator = Mockito.mock(SortedNumericDocValues.class); + when(producer.getSortedNumeric(mockFieldInfo)).thenReturn(iterator); + SequentialDocValuesIterator result = starTreeDocValuesIteratorAdapter.getDocValuesIterator( + DocValuesType.SORTED_NUMERIC, + mockFieldInfo, + producer + ); + assertEquals(iterator.getClass(), result.getDocIdSetIterator().getClass()); + } + + public void testCreateIterator_UnsupportedType() { + DocValuesProducer producer = Mockito.mock(DocValuesProducer.class); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> { + starTreeDocValuesIteratorAdapter.getDocValuesIterator(DocValuesType.BINARY, mockFieldInfo, producer); + }); + assertEquals("Unsupported DocValuesType: BINARY", exception.getMessage()); + } + + public void testGetNextValue_SortedNumeric() throws IOException { + SortedNumericDocValues iterator = Mockito.mock(SortedNumericDocValues.class); + when(iterator.nextDoc()).thenReturn(0); + when(iterator.nextValue()).thenReturn(123L); + SequentialDocValuesIterator sequentialDocValuesIterator = new SequentialDocValuesIterator(iterator); + sequentialDocValuesIterator.getDocIdSetIterator().nextDoc(); + long result = starTreeDocValuesIteratorAdapter.getNextValue(sequentialDocValuesIterator, 0); + assertEquals(123L, result); + } + + public void testGetNextValue_UnsupportedIterator() { + DocIdSetIterator iterator = Mockito.mock(DocIdSetIterator.class); + SequentialDocValuesIterator sequentialDocValuesIterator = new SequentialDocValuesIterator(iterator); + + IllegalStateException exception = expectThrows(IllegalStateException.class, () -> { + starTreeDocValuesIteratorAdapter.getNextValue(sequentialDocValuesIterator, 0); + }); + assertEquals("Unsupported Iterator: " + iterator.toString(), exception.getMessage()); + } + + public void testNextDoc() throws IOException { + SortedNumericDocValues iterator = Mockito.mock(SortedNumericDocValues.class); + SequentialDocValuesIterator sequentialDocValuesIterator = new 
SequentialDocValuesIterator(iterator); + when(iterator.nextDoc()).thenReturn(5); + + int result = starTreeDocValuesIteratorAdapter.nextDoc(sequentialDocValuesIterator, 5); + assertEquals(5, result); + } + + public void test_multipleCoordinatedDocumentReader() throws IOException { + SortedNumericDocValues iterator1 = Mockito.mock(SortedNumericDocValues.class); + SortedNumericDocValues iterator2 = Mockito.mock(SortedNumericDocValues.class); + + SequentialDocValuesIterator sequentialDocValuesIterator1 = new SequentialDocValuesIterator(iterator1); + SequentialDocValuesIterator sequentialDocValuesIterator2 = new SequentialDocValuesIterator(iterator2); + + when(iterator1.nextDoc()).thenReturn(0); + when(iterator2.nextDoc()).thenReturn(1); + + when(iterator1.nextValue()).thenReturn(9L); + when(iterator2.nextValue()).thenReturn(9L); + + starTreeDocValuesIteratorAdapter.nextDoc(sequentialDocValuesIterator1, 0); + starTreeDocValuesIteratorAdapter.nextDoc(sequentialDocValuesIterator2, 0); + assertEquals(0, sequentialDocValuesIterator1.getDocId()); + assertEquals(9L, (long) sequentialDocValuesIterator1.getDocValue()); + assertNotEquals(0, sequentialDocValuesIterator2.getDocId()); + assertEquals(1, sequentialDocValuesIterator2.getDocId()); + assertEquals(9L, (long) sequentialDocValuesIterator2.getDocValue()); + + } + +} diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreesBuilderTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreesBuilderTests.java new file mode 100644 index 0000000000000..518c6729c2e1a --- /dev/null +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreesBuilderTests.java @@ -0,0 +1,132 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.compositeindex.datacube.startree.builder; + +import org.apache.lucene.codecs.DocValuesProducer; +import org.apache.lucene.codecs.lucene99.Lucene99Codec; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.FieldInfos; +import org.apache.lucene.index.SegmentInfo; +import org.apache.lucene.index.SegmentWriteState; +import org.apache.lucene.store.Directory; +import org.apache.lucene.util.InfoStream; +import org.apache.lucene.util.Version; +import org.opensearch.index.compositeindex.datacube.startree.StarTreeField; +import org.opensearch.index.compositeindex.datacube.startree.StarTreeFieldConfiguration; +import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.mapper.StarTreeMapper; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.UUID; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verifyNoInteractions; +import static org.mockito.Mockito.when; + +public class StarTreesBuilderTests extends OpenSearchTestCase { + + private MapperService mapperService; + private SegmentWriteState segmentWriteState; + private DocValuesProducer docValuesProducer; + private StarTreeMapper.StarTreeFieldType starTreeFieldType; + private StarTreeField starTreeField; + private Map<String, DocValuesProducer> fieldProducerMap; + private Directory directory; + + @Override + public void setUp() throws Exception { + super.setUp(); + mapperService = mock(MapperService.class); + directory = newFSDirectory(createTempDir()); + SegmentInfo segmentInfo = new SegmentInfo( + directory, + Version.LATEST, + Version.LUCENE_9_11_0, + "test_segment", + 5, + false, + false, + new Lucene99Codec(), + new HashMap<>(), + UUID.randomUUID().toString().substring(0, 16).getBytes(StandardCharsets.UTF_8), + new HashMap<>(), + null + ); + FieldInfos fieldInfos = new FieldInfos(new FieldInfo[0]); + segmentWriteState = new SegmentWriteState( + InfoStream.getDefault(), + segmentInfo.dir, + segmentInfo, + fieldInfos, + null, + newIOContext(random()) + ); + docValuesProducer = mock(DocValuesProducer.class); + StarTreeFieldConfiguration starTreeFieldConfiguration = new StarTreeFieldConfiguration( + 1, + new HashSet<>(), + StarTreeFieldConfiguration.StarTreeBuildMode.ON_HEAP + ); + starTreeField = new StarTreeField("star_tree", new ArrayList<>(), new ArrayList<>(), starTreeFieldConfiguration); + starTreeFieldType = new StarTreeMapper.StarTreeFieldType("star_tree", starTreeField); + fieldProducerMap = new HashMap<>(); + fieldProducerMap.put("field1", docValuesProducer); + } + + public void test_buildWithNoStarTreeFields() throws IOException { + when(mapperService.getCompositeFieldTypes()).thenReturn(new HashSet<>()); + + StarTreesBuilder starTreesBuilder = new StarTreesBuilder(fieldProducerMap, segmentWriteState, mapperService); + starTreesBuilder.build(); + + verifyNoInteractions(docValuesProducer); + } + + public void test_getStarTreeBuilder() throws IOException { + when(mapperService.getCompositeFieldTypes()).thenReturn(Set.of(starTreeFieldType)); + StarTreesBuilder starTreesBuilder = new StarTreesBuilder(fieldProducerMap, segmentWriteState, mapperService); + StarTreeBuilder starTreeBuilder = starTreesBuilder.getStarTreeBuilder(starTreeField, fieldProducerMap, segmentWriteState, mapperService); + assertTrue(starTreeBuilder instanceof OnHeapStarTreeBuilder); + } + + 
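+ // requesting a builder for the OFF_HEAP build mode is expected to fail fast with IllegalArgumentException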
public void test_getStarTreeBuilder_illegalArgument() { + when(mapperService.getCompositeFieldTypes()).thenReturn(Set.of(starTreeFieldType)); + StarTreeFieldConfiguration starTreeFieldConfiguration = new StarTreeFieldConfiguration(1, new HashSet<>(), StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP); + StarTreeField starTreeField = new StarTreeField("star_tree", new ArrayList<>(), new ArrayList<>(), starTreeFieldConfiguration); + StarTreesBuilder starTreesBuilder = new StarTreesBuilder(fieldProducerMap, segmentWriteState, mapperService); + assertThrows(IllegalArgumentException.class, () -> starTreesBuilder.getStarTreeBuilder(starTreeField, fieldProducerMap, segmentWriteState, mapperService)); + } + + public void test_closeWithNoStarTreeFields() throws IOException { + StarTreeFieldConfiguration starTreeFieldConfiguration = new StarTreeFieldConfiguration( + 1, + new HashSet<>(), + StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP + ); + StarTreeField starTreeField = new StarTreeField("star_tree", new ArrayList<>(), new ArrayList<>(), starTreeFieldConfiguration); + starTreeFieldType = new StarTreeMapper.StarTreeFieldType("star_tree", starTreeField); + when(mapperService.getCompositeFieldTypes()).thenReturn(Set.of(starTreeFieldType)); + StarTreesBuilder starTreesBuilder = new StarTreesBuilder(fieldProducerMap, segmentWriteState, mapperService); + starTreesBuilder.close(); + + verifyNoInteractions(docValuesProducer); + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + directory.close(); + } +} diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/utils/SequentialDocValuesIteratorTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/utils/SequentialDocValuesIteratorTests.java new file mode 100644 index 0000000000000..76b612e3677f7 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/utils/SequentialDocValuesIteratorTests.java @@ -0,0 +1,46 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.compositeindex.datacube.startree.utils; + +import org.apache.lucene.index.SortedNumericDocValues; +import org.opensearch.index.fielddata.AbstractNumericDocValues; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; + +public class SequentialDocValuesIteratorTests extends OpenSearchTestCase { + + public void test_sequentialDocValuesIterator() { + SequentialDocValuesIterator sequentialDocValuesIterator = new SequentialDocValuesIterator(new AbstractNumericDocValues() { + @Override + public long longValue() throws IOException { + return 0; + } + + @Override + public boolean advanceExact(int i) throws IOException { + return false; + } + + @Override + public int docID() { + return 0; + } + }); + + assertTrue(sequentialDocValuesIterator.getDocIdSetIterator() instanceof AbstractNumericDocValues); + assertEquals(0, sequentialDocValuesIterator.getDocId()); + } + + public void test_sequentialDocValuesIterator_default() { + SequentialDocValuesIterator sequentialDocValuesIterator = new SequentialDocValuesIterator(); + assertTrue(sequentialDocValuesIterator.getDocIdSetIterator() instanceof SortedNumericDocValues); + } + +} diff --git a/server/src/test/java/org/opensearch/index/mapper/ConstantKeywordFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/ConstantKeywordFieldMapperTests.java index 65dd3b6447663..e9d0b6d826ade 100644 --- a/server/src/test/java/org/opensearch/index/mapper/ConstantKeywordFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/ConstantKeywordFieldMapperTests.java @@ -105,6 +105,14 @@ public void testMissingDefaultIndexMapper() throws Exception { assertThat(e.getMessage(), containsString("Field [field] is missing required parameter [value]")); } + public void testBuilderToXContent() throws IOException { + ConstantKeywordFieldMapper.Builder builder = new ConstantKeywordFieldMapper.Builder("name", "value1"); + XContentBuilder xContentBuilder = JsonXContent.contentBuilder().startObject(); + builder.toXContent(xContentBuilder, false); + xContentBuilder.endObject(); + assertEquals("{\"value\":\"value1\"}", xContentBuilder.toString()); + } + private final SourceToParse source(CheckedConsumer<XContentBuilder, IOException> build) throws IOException { XContentBuilder builder = JsonXContent.contentBuilder().startObject(); build.accept(builder); diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java index 15915ee431972..ec48032df4a15 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java @@ -9,15 +9,29 @@ package org.opensearch.index.remote; import org.opensearch.Version; +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.IndexRoutingTable; +import org.opensearch.cluster.routing.IndexShardRoutingTable; +import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.routing.ShardRoutingState; +import org.opensearch.cluster.routing.TestShardRouting; +import 
org.opensearch.common.UUIDs; import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.support.PlainBlobMetadata; import org.opensearch.common.settings.Settings; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.shard.IndexShardTestUtils; import org.opensearch.index.store.RemoteSegmentStoreDirectory; import org.opensearch.index.translog.transfer.TranslogTransferMetadata; +import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; import org.opensearch.test.OpenSearchTestCase; @@ -28,11 +42,15 @@ import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.UUID; import java.util.stream.Collectors; import static org.opensearch.cluster.metadata.IndexMetadata.REMOTE_STORE_CUSTOM_KEY; +import static org.opensearch.index.remote.RemoteMigrationIndexMetadataUpdaterTests.createIndexMetadataWithDocrepSettings; import static org.opensearch.index.remote.RemoteStoreUtils.URL_BASE64_CHARSET; import static org.opensearch.index.remote.RemoteStoreUtils.determineTranslogMetadataEnabled; +import static org.opensearch.index.remote.RemoteStoreUtils.finalizeMigration; +import static org.opensearch.index.remote.RemoteStoreUtils.isSwitchToStrictCompatibilityMode; import static org.opensearch.index.remote.RemoteStoreUtils.longToCompositeBase64AndBinaryEncoding; import static org.opensearch.index.remote.RemoteStoreUtils.longToUrlBase64; import static org.opensearch.index.remote.RemoteStoreUtils.urlBase64ToLong; @@ -42,6 +60,9 @@ import static org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX; import static org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils.SEPARATOR; import static org.opensearch.index.translog.transfer.TranslogTransferMetadata.METADATA_SEPARATOR; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; public class RemoteStoreUtilsTests extends OpenSearchTestCase { @@ -398,4 +419,122 @@ private static Map<String, String> getCustomDataMap(int option) { ); } + public void testFinalizeMigrationWithAllRemoteNodes() { + String migratedIndex = "migrated-index"; + Settings mockSettings = Settings.builder().put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "strict").build(); + DiscoveryNode remoteNode1 = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + getRemoteStoreNodeAttributes(), + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + DiscoveryNode remoteNode2 = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + getRemoteStoreNodeAttributes(), + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + DiscoveryNodes discoveryNodes = DiscoveryNodes.builder() + .add(remoteNode1) + .localNodeId(remoteNode1.getId()) + .add(remoteNode2) + .localNodeId(remoteNode2.getId()) + .build(); + Metadata docrepIdxMetadata = createIndexMetadataWithDocrepSettings(migratedIndex); + assertDocrepSettingsApplied(docrepIdxMetadata.index(migratedIndex)); + Metadata remoteIndexMd = Metadata.builder(docrepIdxMetadata).persistentSettings(mockSettings).build(); + ClusterState clusterStateWithDocrepIndexSettings = 
ClusterState.builder(ClusterName.DEFAULT) + .metadata(remoteIndexMd) + .nodes(discoveryNodes) + .routingTable(createRoutingTableAllShardsStarted(migratedIndex, 1, 1, remoteNode1, remoteNode2)) + .build(); + Metadata mutatedMetadata = finalizeMigration(clusterStateWithDocrepIndexSettings, logger).metadata(); + assertTrue(mutatedMetadata.index(migratedIndex).getVersion() > docrepIdxMetadata.index(migratedIndex).getVersion()); + assertRemoteSettingsApplied(mutatedMetadata.index(migratedIndex)); + } + + public void testFinalizeMigrationWithAllDocrepNodes() { + String docrepIndex = "docrep-index"; + Settings mockSettings = Settings.builder().put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "strict").build(); + DiscoveryNode docrepNode1 = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT); + DiscoveryNode docrepNode2 = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT); + DiscoveryNodes discoveryNodes = DiscoveryNodes.builder() + .add(docrepNode1) + .localNodeId(docrepNode1.getId()) + .add(docrepNode2) + .localNodeId(docrepNode2.getId()) + .build(); + Metadata docrepIdxMetadata = createIndexMetadataWithDocrepSettings(docrepIndex); + assertDocrepSettingsApplied(docrepIdxMetadata.index(docrepIndex)); + Metadata remoteIndexMd = Metadata.builder(docrepIdxMetadata).persistentSettings(mockSettings).build(); + ClusterState clusterStateWithDocrepIndexSettings = ClusterState.builder(ClusterName.DEFAULT) + .metadata(remoteIndexMd) + .nodes(discoveryNodes) + .routingTable(createRoutingTableAllShardsStarted(docrepIndex, 1, 1, docrepNode1, docrepNode2)) + .build(); + Metadata mutatedMetadata = finalizeMigration(clusterStateWithDocrepIndexSettings, logger).metadata(); + assertEquals(docrepIdxMetadata.index(docrepIndex).getVersion(), mutatedMetadata.index(docrepIndex).getVersion()); + assertDocrepSettingsApplied(mutatedMetadata.index(docrepIndex)); + } + + public void testIsSwitchToStrictCompatibilityMode() { + Settings mockSettings = Settings.builder().put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "strict").build(); + ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest(); + request.persistentSettings(mockSettings); + assertTrue(isSwitchToStrictCompatibilityMode(request)); + + mockSettings = Settings.builder().put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed").build(); + request.persistentSettings(mockSettings); + assertFalse(isSwitchToStrictCompatibilityMode(request)); + } + + private void assertRemoteSettingsApplied(IndexMetadata indexMetadata) { + assertTrue(IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING.get(indexMetadata.getSettings())); + assertTrue(IndexMetadata.INDEX_REMOTE_TRANSLOG_REPOSITORY_SETTING.exists(indexMetadata.getSettings())); + assertTrue(IndexMetadata.INDEX_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING.exists(indexMetadata.getSettings())); + assertEquals(ReplicationType.SEGMENT, IndexMetadata.INDEX_REPLICATION_TYPE_SETTING.get(indexMetadata.getSettings())); + } + + private void assertDocrepSettingsApplied(IndexMetadata indexMetadata) { + assertFalse(IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING.get(indexMetadata.getSettings())); + assertFalse(IndexMetadata.INDEX_REMOTE_TRANSLOG_REPOSITORY_SETTING.exists(indexMetadata.getSettings())); + assertFalse(IndexMetadata.INDEX_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING.exists(indexMetadata.getSettings())); + assertEquals(ReplicationType.DOCUMENT, IndexMetadata.INDEX_REPLICATION_TYPE_SETTING.get(indexMetadata.getSettings())); + } + + 
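+ // helper that builds a routing table with a STARTED primary per shard on primaryHostingNode and STARTED replicas on replicaHostingNode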
private RoutingTable createRoutingTableAllShardsStarted( + String indexName, + int numberOfShards, + int numberOfReplicas, + DiscoveryNode primaryHostingNode, + DiscoveryNode replicaHostingNode + ) { + RoutingTable.Builder builder = RoutingTable.builder(); + Index index = new Index(indexName, UUID.randomUUID().toString()); + + IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(index); + for (int i = 0; i < numberOfShards; i++) { + ShardId shardId = new ShardId(index, i); + IndexShardRoutingTable.Builder indexShardRoutingTable = new IndexShardRoutingTable.Builder(shardId); + indexShardRoutingTable.addShard( + TestShardRouting.newShardRouting(shardId, primaryHostingNode.getId(), true, ShardRoutingState.STARTED) + ); + for (int j = 0; j < numberOfReplicas; j++) { + indexShardRoutingTable.addShard( + TestShardRouting.newShardRouting(shardId, replicaHostingNode.getId(), false, ShardRoutingState.STARTED) + ); + } + indexRoutingTableBuilder.addIndexShard(indexShardRoutingTable.build()); + } + return builder.add(indexRoutingTableBuilder.build()).build(); + } + + private Map<String, String> getRemoteStoreNodeAttributes() { + Map<String, String> remoteStoreNodeAttributes = new HashMap<>(); + remoteStoreNodeAttributes.put(REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, "my-segment-repo-1"); + remoteStoreNodeAttributes.put(REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, "my-translog-repo-1"); + return remoteStoreNodeAttributes; + } } diff --git a/server/src/test/java/org/opensearch/ingest/IngestServiceTests.java b/server/src/test/java/org/opensearch/ingest/IngestServiceTests.java index 684297c11c140..e61fbb6e1dbff 100644 --- a/server/src/test/java/org/opensearch/ingest/IngestServiceTests.java +++ b/server/src/test/java/org/opensearch/ingest/IngestServiceTests.java @@ -1605,6 +1605,13 @@ public void testResolveRequiredOrDefaultPipelineDefaultPipeline() { assertThat(result, is(true)); assertThat(indexRequest.isPipelineResolved(), is(true)); assertThat(indexRequest.getPipeline(), equalTo("default-pipeline")); + + // index name matches with ITMD for bulk upsert + UpdateRequest updateRequest = new UpdateRequest("idx", "id1").upsert(emptyMap()).script(mockScript("1")); + result = IngestService.resolvePipelines(updateRequest, TransportBulkAction.getIndexWriteRequest(updateRequest), metadata); + assertThat(result, is(true)); + assertThat(updateRequest.upsertRequest().isPipelineResolved(), is(true)); + assertThat(updateRequest.upsertRequest().getPipeline(), equalTo("default-pipeline")); } public void testResolveFinalPipeline() { @@ -1642,6 +1649,13 @@ public void testResolveFinalPipeline() { assertThat(indexRequest.isPipelineResolved(), is(true)); assertThat(indexRequest.getPipeline(), equalTo("_none")); assertThat(indexRequest.getFinalPipeline(), equalTo("final-pipeline")); + + // index name matches with ITMD for bulk upsert + UpdateRequest updateRequest = new UpdateRequest("idx", "id1").upsert(emptyMap()).script(mockScript("1")); + result = IngestService.resolvePipelines(updateRequest, TransportBulkAction.getIndexWriteRequest(updateRequest), metadata); + assertThat(result, is(true)); + assertThat(updateRequest.upsertRequest().isPipelineResolved(), is(true)); + assertThat(updateRequest.upsertRequest().getFinalPipeline(), equalTo("final-pipeline")); } public void testResolveRequestOrDefaultPipelineAndFinalPipeline() { diff --git a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java 
b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java index 2445cad01574c..bd47507da4863 100644 --- a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java +++ b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java @@ -255,6 +255,28 @@ public void testBadChunksize() throws Exception { ); } + public void testPrefixModeVerification() throws Exception { + final Client client = client(); + final Path location = OpenSearchIntegTestCase.randomRepoPath(node().settings()); + final String repositoryName = "test-repo"; + AcknowledgedResponse putRepositoryResponse = client.admin() + .cluster() + .preparePutRepository(repositoryName) + .setType(REPO_TYPE) + .setSettings( + Settings.builder() + .put(node().settings()) + .put("location", location) + .put(BlobStoreRepository.PREFIX_MODE_VERIFICATION_SETTING.getKey(), true) + ) + .get(); + assertTrue(putRepositoryResponse.isAcknowledged()); + + final RepositoriesService repositoriesService = getInstanceFromNode(RepositoriesService.class); + final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(repositoryName); + assertTrue(repository.getPrefixModeVerification()); + } + public void testFsRepositoryCompressDeprecatedIgnored() { final Path location = OpenSearchIntegTestCase.randomRepoPath(node().settings()); final Settings settings = Settings.builder().put(node().settings()).put("location", location).build(); diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index 7a50502e418e2..9853cef482254 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -152,6 +152,7 @@ import org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.plugins.NetworkPlugin; import org.opensearch.plugins.Plugin; +import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.repositories.fs.ReloadableFsRepository; import org.opensearch.script.MockScriptService; import org.opensearch.search.MockSearchService; @@ -386,6 +387,8 @@ public abstract class OpenSearchIntegTestCase extends OpenSearchTestCase { protected static final String REMOTE_BACKED_STORAGE_REPOSITORY_NAME = "test-remote-store-repo"; + private static Boolean prefixModeVerificationEnable; + private Path remoteStoreRepositoryPath; private ReplicationType randomReplicationType; @@ -394,6 +397,7 @@ public abstract class OpenSearchIntegTestCase extends OpenSearchTestCase { @BeforeClass public static void beforeClass() throws Exception { + prefixModeVerificationEnable = randomBoolean(); testClusterRule.beforeClass(); } @@ -2645,16 +2649,21 @@ private static Settings buildRemoteStoreNodeAttributes( segmentRepoName ); + String prefixModeVerificationSuffix = BlobStoreRepository.PREFIX_MODE_VERIFICATION_SETTING.getKey(); + Settings.Builder settings = Settings.builder() .put("node.attr." + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, segmentRepoName) .put(segmentRepoTypeAttributeKey, segmentRepoType) .put(segmentRepoSettingsAttributeKeyPrefix + "location", segmentRepoPath) + .put(segmentRepoSettingsAttributeKeyPrefix + prefixModeVerificationSuffix, prefixModeVerificationEnable) .put("node.attr." 
+ REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, translogRepoName) .put(translogRepoTypeAttributeKey, translogRepoType) .put(translogRepoSettingsAttributeKeyPrefix + "location", translogRepoPath) + .put(translogRepoSettingsAttributeKeyPrefix + prefixModeVerificationSuffix, prefixModeVerificationEnable) .put("node.attr." + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, segmentRepoName) .put(stateRepoTypeAttributeKey, segmentRepoType) - .put(stateRepoSettingsAttributeKeyPrefix + "location", segmentRepoPath); + .put(stateRepoSettingsAttributeKeyPrefix + "location", segmentRepoPath) + .put(stateRepoSettingsAttributeKeyPrefix + prefixModeVerificationSuffix, prefixModeVerificationEnable); if (withRateLimiterAttributes) { settings.put(segmentRepoSettingsAttributeKeyPrefix + "compress", randomBoolean())