diff --git a/.ci/bwcVersions b/.ci/bwcVersions index e82101896818e..b6acb886dc327 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -53,3 +53,4 @@ BWC_VERSION: - "2.3.0" - "2.3.1" - "2.4.0" + - "2.5.0" diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 4537cadf71074..f54aa394cf83f 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -1,3 +1,10 @@ + + ### Description [Describe what this change achieves] diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index e47d8d88c0243..c1c2505a62245 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -22,7 +22,8 @@ jobs: installation_id: 22958780 - name: Backport - uses: VachaShah/backport@v1.1.4 + uses: VachaShah/backport@v2.1.0 with: github_token: ${{ steps.github_app_token.outputs.token }} - branch_name: backport/backport-${{ github.event.number }} + head_template: backport/backport-<%= number %>-to-<%= base %> + files_to_skip: 'CHANGELOG.md' diff --git a/.github/workflows/gradle-check.yml b/.github/workflows/gradle-check.yml index 892d04936b743..0c2e62a7dd1ab 100644 --- a/.github/workflows/gradle-check.yml +++ b/.github/workflows/gradle-check.yml @@ -7,10 +7,6 @@ on: - 'dependabot/**' pull_request_target: types: [opened, synchronize, reopened] - workflow_run: - workflows: ["Gradle Precommit"] - types: - - completed jobs: gradle-check: @@ -91,3 +87,5 @@ jobs: * **RESULT:** ${{ env.result }} :x: * **URL:** ${{ env.workflow_url }} * **CommitID:** ${{ env.pr_from_sha }} + Please examine the workflow log, locate, and copy-paste the failure below, then iterate to green. + Is the failure [a flaky test](https://github.com/opensearch-project/OpenSearch/blob/main/DEVELOPER_GUIDE.md#flaky-tests) unrelated to your change? 
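For context on the backport.yml change above, here is a minimal sketch of how the upgraded step reads once assembled — the `github_app_token` step id and trigger context are assumed from the surrounding workflow, and the example branch name is illustrative only:

```yaml
- name: Backport
  uses: VachaShah/backport@v2.1.0
  with:
    # Token minted by the preceding GitHub App token step (id assumed from the hunk above).
    github_token: ${{ steps.github_app_token.outputs.token }}
    # v2 templates the head branch from the PR number and target base,
    # e.g. backport/backport-4851-to-2.x for PR 4851 labeled "backport 2.x".
    head_template: backport/backport-<%= number %>-to-<%= base %>
    # Skipping CHANGELOG.md avoids the recurring merge conflicts that
    # changelog entries cause on backport PRs (see #4977 in the CHANGELOG below).
    files_to_skip: 'CHANGELOG.md'
```

Templating the base branch into the head branch name should keep the generated branches unique when one PR is labeled for multiple backport targets.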
diff --git a/.gitignore b/.gitignore index 8ea328ce2f1e9..a0dabfb8798f9 100644 --- a/.gitignore +++ b/.gitignore @@ -11,6 +11,7 @@ out/ !.idea/inspectionProfiles/Project_Default.xml !.idea/runConfigurations/Debug_OpenSearch.xml !.idea/vcs.xml +!.idea/icon.svg # These files are generated in the main tree by IntelliJ benchmarks/src/main/generated/* diff --git a/.idea/icon.svg b/.idea/icon.svg new file mode 100644 index 0000000000000..4f8132d18a2e0 --- /dev/null +++ b/.idea/icon.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/CHANGELOG.md b/CHANGELOG.md index 0fa7f36226e23..bff334ad50edd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,9 +3,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ## [Unreleased] - ### Added - - Add support for s390x architecture ([#4001](https://github.com/opensearch-project/OpenSearch/pull/4001)) - Github workflow for changelog verification ([#4085](https://github.com/opensearch-project/OpenSearch/pull/4085)) - Point in time rest layer changes for create and delete PIT API ([#4064](https://github.com/opensearch-project/OpenSearch/pull/4064)) @@ -29,15 +27,40 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Added release notes for 1.3.6 ([#4681](https://github.com/opensearch-project/OpenSearch/pull/4681)) - Added precommit support for MacOS ([#4682](https://github.com/opensearch-project/OpenSearch/pull/4682)) - Recommission API changes for service layer ([#4320](https://github.com/opensearch-project/OpenSearch/pull/4320)) +- Update GeoGrid base class access modifier to support extensibility ([#4572](https://github.com/opensearch-project/OpenSearch/pull/4572)) +- Add a new node role 'search' which is dedicated to providing search capability ([#4689](https://github.com/opensearch-project/OpenSearch/pull/4689)) +- Introduce experimental searchable snapshot API ([#4680](https://github.com/opensearch-project/OpenSearch/pull/4680)) +- Recommissioning of zone. REST layer support.
([#4624](https://github.com/opensearch-project/OpenSearch/pull/4624)) +- Make searchable snapshot indexes read-only but allow deletion ([#4764](https://github.com/opensearch-project/OpenSearch/pull/4764)) +- Added in-flight cancellation of SearchShardTask based on resource consumption ([#4565](https://github.com/opensearch-project/OpenSearch/pull/4565)) +- Apply reproducible builds configuration for OpenSearch plugins through gradle plugin ([#4746](https://github.com/opensearch-project/OpenSearch/pull/4746)) +- Add icon for IntelliJ IDEA toolbox ([#4882](https://github.com/opensearch-project/OpenSearch/pull/4882)) +- Add groupId value propagation tests for ZIP publication task ([#4772](https://github.com/opensearch-project/OpenSearch/pull/4772)) +- Add support for GeoJson Point type in GeoPoint field ([#4597](https://github.com/opensearch-project/OpenSearch/pull/4597)) +- Add dev guide for dealing with flaky tests ([#4868](https://github.com/opensearch-project/OpenSearch/pull/4868)) +- Update pull request template ([#4851](https://github.com/opensearch-project/OpenSearch/pull/4851)) +- Added missing no-jdk distributions ([#4722](https://github.com/opensearch-project/OpenSearch/pull/4722)) +- Add dev help in gradle check CI failures ([#4872](https://github.com/opensearch-project/OpenSearch/pull/4872)) +- Copy `build.sh` over from opensearch-build ([#4887](https://github.com/opensearch-project/OpenSearch/pull/4887)) +- Add project health badges to the README.md ([#4843](https://github.com/opensearch-project/OpenSearch/pull/4843)) +- Added changes for graceful node decommission ([#4586](https://github.com/opensearch-project/OpenSearch/pull/4586)) +- Build no-jdk distributions as part of release build ([#4902](https://github.com/opensearch-project/OpenSearch/pull/4902)) +- Added resource usage trackers for in-flight cancellation of SearchShardTask ([#4805](https://github.com/opensearch-project/OpenSearch/pull/4805)) +- Renamed flaky tests ([#4912](https://github.com/opensearch-project/OpenSearch/pull/4912)) +- Update previous release bwc version to 2.5.0 ([#5003](https://github.com/opensearch-project/OpenSearch/pull/5003)) +- Use getParameterCount instead of getParameterTypes ([#4821](https://github.com/opensearch-project/OpenSearch/pull/4821)) +- Remote shard balancer support for searchable snapshots ([#4870](https://github.com/opensearch-project/OpenSearch/pull/4870)) ### Dependencies - Bumps `log4j-core` from 2.18.0 to 2.19.0 - Bumps `reactor-netty-http` from 1.0.18 to 1.0.23 - Bumps `jettison` from 1.5.0 to 1.5.1 - Bumps `azure-storage-common` from 12.18.0 to 12.18.1 - Bumps `forbiddenapis` from 3.3 to 3.4 - - -### Dependencies +- Bumps `gson` from 2.9.0 to 2.9.1 +- Bumps `protobuf-java` from 3.21.2 to 3.21.7 +- Bumps `azure-core` from 1.31.0 to 1.33.0 +- Bumps `avro` from 1.11.0 to 1.11.1 +- Bumps `woodstox-core` from 6.3.0 to 6.3.1 - Bumps `xmlbeans` from 5.1.0 to 5.1.1 ([#4354](https://github.com/opensearch-project/OpenSearch/pull/4354)) - Bumps `azure-core-http-netty` from 1.12.0 to 1.12.4 ([#4160](https://github.com/opensearch-project/OpenSearch/pull/4160)) - Bumps `azure-core` from 1.27.0 to 1.31.0 ([#4160](https://github.com/opensearch-project/OpenSearch/pull/4160)) @@ -49,10 +72,21 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Bumps `reactor-core` from 3.4.18 to 3.4.23 ([#4548](https://github.com/opensearch-project/OpenSearch/pull/4548)) - Bumps `jempbox` from 1.8.16 to 1.8.17 ([#4550](https://github.com/opensearch-project/OpenSearch/pull/4550)) - Bumps
`hadoop-hdfs` from 3.3.3 to 3.3.4 ([#4644](https://github.com/opensearch-project/OpenSearch/pull/4644)) -- Bumps `jna` from 5.11.0 to 5.12.1 ([#4656](https://github.com/opensearch-project/OpenSearch/pull/4656)) +- Update Jackson Databind to 2.13.4.2 (addressing CVE-2022-42003) ([#4779](https://github.com/opensearch-project/OpenSearch/pull/4779)) +- Bumps `tika` from 2.4.0 to 2.5.0 ([#4791](https://github.com/opensearch-project/OpenSearch/pull/4791)) +- Exclude jettison version brought in with hadoop-minicluster. ([#4787](https://github.com/opensearch-project/OpenSearch/pull/4787)) +- Bump protobuf-java to 3.21.7 in repository-gcs and repository-hdfs ([#4790](https://github.com/opensearch-project/OpenSearch/pull/4790)) +- Bump reactor-netty-http to 1.0.24 in repository-azure ([#4880](https://github.com/opensearch-project/OpenSearch/pull/4880)) +- Bumps `protobuf-java` from 3.21.7 to 3.21.8 ([#4886](https://github.com/opensearch-project/OpenSearch/pull/4886)) +- Upgrade netty to 4.1.84.Final ([#4893](https://github.com/opensearch-project/OpenSearch/pull/4893)) +- Dependency updates: asm 9.3 -> 9.4, bytebuddy 1.12.12 -> 1.12.18 ([#4889](https://github.com/opensearch-project/OpenSearch/pull/4889)) +- Update Apache Lucene to 9.4.1 ([#4922](https://github.com/opensearch-project/OpenSearch/pull/4922)) +- Bump `woodstox-core` to 6.4.0 ([#4947](https://github.com/opensearch-project/OpenSearch/pull/4947)) +- Update Apache Lucene to 9.5.0-snapshot-a4ef70f ([#4979](https://github.com/opensearch-project/OpenSearch/pull/4979)) +- Upgrade jetty-http, kotlin-stdlib and snakeyaml ([#4963](https://github.com/opensearch-project/OpenSearch/pull/4963)) +- OpenJDK Update (October 2022 Patch releases) ([#4997](https://github.com/opensearch-project/OpenSearch/pull/4997)) ### Changed - - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308)) - Use RemoteSegmentStoreDirectory instead of RemoteDirectory ([#4240](https://github.com/opensearch-project/OpenSearch/pull/4240)) - Plugin ZIP publication groupId value is configurable ([#4156](https://github.com/opensearch-project/OpenSearch/pull/4156)) @@ -69,20 +103,35 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Further simplification of the ZIP publication implementation ([#4360](https://github.com/opensearch-project/OpenSearch/pull/4360)) - Relax visibility of the HTTP_CHANNEL_KEY and HTTP_SERVER_CHANNEL_KEY to make it possible for the plugins to access associated Netty4HttpChannel / Netty4HttpServerChannel instance ([#4638](https://github.com/opensearch-project/OpenSearch/pull/4638)) - Load the deprecated master role in a dedicated method instead of in setAdditionalRoles() ([#4582](https://github.com/opensearch-project/OpenSearch/pull/4582)) -- Include Windows OS in Bootstrap initializeNatives() check for definitelyRunningAsRoot() ([#4656](https://github.com/opensearch-project/OpenSearch/pull/4656)) - Add APIs (GET/PUT) to decommission awareness attribute ([#4261](https://github.com/opensearch-project/OpenSearch/pull/4261)) - Improve Gradle pre-commit checks to pre-empt Jenkins build ([#4660](https://github.com/opensearch-project/OpenSearch/pull/4660)) - Update to Apache Lucene 9.4.0 ([#4661](https://github.com/opensearch-project/OpenSearch/pull/4661)) +- Controlling discovery for decommissioned nodes ([#4590](https://github.com/opensearch-project/OpenSearch/pull/4590)) - Backport Apache Lucene version change for 2.4.0 
([#4677](https://github.com/opensearch-project/OpenSearch/pull/4677)) - +- Use ReplicationFailedException instead of OpenSearchException in ReplicationTarget ([#4725](https://github.com/opensearch-project/OpenSearch/pull/4725)) +- Fix weighted routing metadata deserialization error on process restart ([#4691](https://github.com/opensearch-project/OpenSearch/pull/4691)) +- Refactor Base Action class javadocs to OpenSearch.API ([#4732](https://github.com/opensearch-project/OpenSearch/pull/4732)) +- Migrate client transports to Apache HttpClient / Core 5.x ([#4459](https://github.com/opensearch-project/OpenSearch/pull/4459)) +- Refactored BalancedAllocator.Balancer to LocalShardsBalancer ([#4761](https://github.com/opensearch-project/OpenSearch/pull/4761)) +- Fail weight update when decommission ongoing and fail decommission when attribute not weighed away ([#4839](https://github.com/opensearch-project/OpenSearch/pull/4839)) +- Skip SymbolicLinkPreservingTarIT when running on Windows ([#5023](https://github.com/opensearch-project/OpenSearch/pull/5023)) ### Deprecated - ### Removed - Remove deprecated code to add node name into log pattern of log4j property file ([#4568](https://github.com/opensearch-project/OpenSearch/pull/4568)) - Unused object and import within TransportClusterAllocationExplainAction ([#4639](https://github.com/opensearch-project/OpenSearch/pull/4639)) +- Remove LegacyESVersion.V_7_0_* and V_7_1_* Constants ([#2768](https://github.com/opensearch-project/OpenSearch/pull/2768)) +- Remove LegacyESVersion.V_7_2_ and V_7_3_ Constants ([#4702](https://github.com/opensearch-project/OpenSearch/pull/4702)) +- Always auto release the flood stage block ([#4703](https://github.com/opensearch-project/OpenSearch/pull/4703)) +- Remove LegacyESVersion.V_7_4_ and V_7_5_ Constants ([#4704](https://github.com/opensearch-project/OpenSearch/pull/4704)) +- Remove Legacy Version support from Snapshot/Restore Service ([#4728](https://github.com/opensearch-project/OpenSearch/pull/4728)) +- Remove deprecated serialization logic from pipeline aggs ([#4847](https://github.com/opensearch-project/OpenSearch/pull/4847)) +- Remove unused private methods ([#4926](https://github.com/opensearch-project/OpenSearch/pull/4926)) +- Revert PR 4656 to unblock Windows CI ([#4949](https://github.com/opensearch-project/OpenSearch/pull/4949)) +- Remove LegacyESVersion.V_7_8_ and V_7_9_ Constants ([#4855](https://github.com/opensearch-project/OpenSearch/pull/4855)) +- Remove LegacyESVersion.V_7_6_ and V_7_7_ Constants ([#4837](https://github.com/opensearch-project/OpenSearch/pull/4837)) +- Remove LegacyESVersion.V_7_10_ Constants ([#5018](https://github.com/opensearch-project/OpenSearch/pull/5018)) ### Fixed - - `opensearch-service.bat start` and `opensearch-service.bat manager` failing to run ([#4289](https://github.com/opensearch-project/OpenSearch/pull/4289)) - PR reference to checkout code for changelog verifier ([#4296](https://github.com/opensearch-project/OpenSearch/pull/4296)) - `opensearch.bat` and `opensearch-service.bat install` failing to run, missing logs directory ([#4305](https://github.com/opensearch-project/OpenSearch/pull/4305)) @@ -122,15 +171,31 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Fix flaky DecommissionControllerTests.testTimesOut ([4683](https://github.com/opensearch-project/OpenSearch/pull/4683)) - Fix new race condition in DecommissionControllerTests ([4688](https://github.com/opensearch-project/OpenSearch/pull/4688)) - Fix SearchStats
(de)serialization (caused by https://github.com/opensearch-project/OpenSearch/pull/4616) ([#4697](https://github.com/opensearch-project/OpenSearch/pull/4697)) +- Fixing Gradle warnings associated with publishPluginZipPublicationToXxx tasks ([#4696](https://github.com/opensearch-project/OpenSearch/pull/4696)) +- [BUG]: Remove redundant field from GetDecommissionStateResponse ([#4751](https://github.com/opensearch-project/OpenSearch/pull/4751)) +- Fixed randomly failing test ([#4774](https://github.com/opensearch-project/OpenSearch/pull/4774)) +- Update version check after backport ([#4786](https://github.com/opensearch-project/OpenSearch/pull/4786)) +- Fix decommission status update to non-leader nodes ([#4800](https://github.com/opensearch-project/OpenSearch/pull/4800)) +- Fix recovery path for searchable snapshots ([#4813](https://github.com/opensearch-project/OpenSearch/pull/4813)) +- Fix bug in AwarenessAttributeDecommissionIT ([#4822](https://github.com/opensearch-project/OpenSearch/pull/4822)) +- Fix 'org.apache.hc.core5.http.ParseException: Invalid protocol version' under JDK 16+ ([#4827](https://github.com/opensearch-project/OpenSearch/pull/4827)) +- [BUG]: flaky test index/80_geo_point/Single point test ([#4860](https://github.com/opensearch-project/OpenSearch/pull/4860)) +- Fix bug in SlicedInputStream with zero length ([#4863](https://github.com/opensearch-project/OpenSearch/pull/4863)) +- Fix a bug on handling an invalid array value for point type field ([#4900](https://github.com/opensearch-project/OpenSearch/pull/4900)) +- [Segment Replication] Fix bug of replica shard's translog not purging on index flush when segment replication is enabled ([#4928](https://github.com/opensearch-project/OpenSearch/pull/4928)) +- [BUG]: Allow decommission to support delay timeout ([#4930](https://github.com/opensearch-project/OpenSearch/pull/4930)) +- Fix failing test: VerifyVersionConstantsIT ([#4946](https://github.com/opensearch-project/OpenSearch/pull/4946)) +- Fixed compression support for h2c protocol ([#4944](https://github.com/opensearch-project/OpenSearch/pull/4944)) +- Fix for failing checkExtraction, checkLicense and checkNotice tasks for Windows gradle check ([#4941](https://github.com/opensearch-project/OpenSearch/pull/4941)) +- Backport failures for merge conflicts on CHANGELOG.md file ([#4977](https://github.com/opensearch-project/OpenSearch/pull/4977)) +- Remove gradle-check dependency on precommit ([#5027](https://github.com/opensearch-project/OpenSearch/pull/5027)) ### Security - - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341)) ## [2.x] - ### Added - - Github workflow for changelog verification ([#4085](https://github.com/opensearch-project/OpenSearch/pull/4085)) - Label configuration for dependabot PRs ([#4348](https://github.com/opensearch-project/OpenSearch/pull/4348)) - Added RestLayer Changes for PIT stats ([#4217](https://github.com/opensearch-project/OpenSearch/pull/4217)) - Addition of Doc values on the GeoShape Field - Addition of GeoShape ValueSource level code interfaces for accessing the DocValues. - Addition of Missing Value feature in the GeoShape Aggregations.
- +- Install and configure Log4j JUL Adapter for Lucene 9.4 ([#4754](https://github.com/opensearch-project/OpenSearch/pull/4754)) +- Added feature to ignore indexes starting with dot during shard limit validation ([#4695](https://github.com/opensearch-project/OpenSearch/pull/4695)) ### Changed - ### Deprecated - ### Removed - ### Fixed - - PR reference to checkout code for changelog verifier ([#4296](https://github.com/opensearch-project/OpenSearch/pull/4296)) - Commit workflow for dependabot changelog helper ([#4331](https://github.com/opensearch-project/OpenSearch/pull/4331)) +- Better plural stemmer than minimal_english ([#4738](https://github.com/opensearch-project/OpenSearch/pull/4738)) +- Disable merge on refresh in DiskThresholdDeciderIT ([#4828](https://github.com/opensearch-project/OpenSearch/pull/4828)) ### Security - [Unreleased]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...HEAD [2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x diff --git a/DEVELOPER_GUIDE.md b/DEVELOPER_GUIDE.md index 8c2a6b4889122..313aecd62f5f9 100644 --- a/DEVELOPER_GUIDE.md +++ b/DEVELOPER_GUIDE.md @@ -38,21 +38,22 @@ - [Gradle Plugins](#gradle-plugins) - [Distribution Download Plugin](#distribution-download-plugin) - [Creating fat-JAR of a Module](#creating-fat-jar-of-a-module) - - [Misc](#misc) - - [git-secrets](#git-secrets) - - [Installation](#installation) - - [Configuration](#configuration) - [Components](#components) - [Build libraries & interfaces](#build-libraries--interfaces) - [Clients & Libraries](#clients--libraries) - [Plugins](#plugins-1) - - [Indexing & search](#indexing--search) + - [Indexing & Search](#indexing--search) - [Aggregations](#aggregations) - [Distributed Framework](#distributed-framework) - - [Submitting Changes](#submitting-changes) - - [Backports](#backports) - - [LineLint](#linelint) -- [Lucene Snapshots](#lucene-snapshots) + - [Misc](#misc) + - [Git Secrets](#git-secrets) + - [Installation](#installation) + - [Configuration](#configuration) + - [Submitting Changes](#submitting-changes) + - [Backports](#backports) + - [LineLint](#linelint) + - [Lucene Snapshots](#lucene-snapshots) + - [Flaky Tests](#flaky-tests) # Developer Guide @@ -414,37 +415,12 @@ Refer the installed JAR as any other maven artifact, e.g. ``` -## Misc - -### git-secrets - -Security is our top priority. Avoid checking in credentials. - -#### Installation -Install [awslabs/git-secrets](https://github.com/awslabs/git-secrets) by running the following commands. -``` -git clone https://github.com/awslabs/git-secrets.git -cd git-secrets -make install -``` - -#### Configuration -You can configure git secrets per repository, you need to change the directory to the root of the repository and run the following command. -``` -git secrets --install -✓ Installed commit-msg hook to .git/hooks/commit-msg -✓ Installed pre-commit hook to .git/hooks/pre-commit -✓ Installed prepare-commit-msg hook to .git/hooks/prepare-commit-msg -``` -Then, you need to apply patterns for git-secrets, you can install the AWS standard patterns by running the following command. -``` -git secrets --register-aws -``` - ## Components + As you work in the OpenSearch repo you may notice issues getting labeled with component labels. It's a housekeeping task to help group together similar pieces of work.
You can pretty much ignore it, but if you're curious, here's what the different labels mean: ### Build libraries & interfaces + Tasks to make sure the build tasks are useful and packaging and distribution are easy. Includes: @@ -458,6 +434,7 @@ Includes: ### Clients & Libraries + APIs and communication mechanisms for external connections to OpenSearch. This includes the “library” directory in OpenSearch (a set of common functions). Includes: @@ -467,6 +444,7 @@ Includes: - CLI ### Plugins + Anything touching the plugin infrastructure within core OpenSearch. Includes: @@ -476,7 +454,8 @@ Includes: - Plugin interfaces -### Indexing & search +### Indexing & Search + The critical path of indexing and search, including: measuring index and search performance, improving the performance of indexing and search, and keeping OpenSearch APIs in sync with upstream Lucene changes (e.g. new field types, changing doc values and codecs). Includes: @@ -487,6 +466,7 @@ Includes: - DocValues ### Aggregations + Making sure OpenSearch can be used as a compute engine. Includes: @@ -495,6 +475,7 @@ Includes: - Framework ### Distributed Framework + Work to make sure that OpenSearch can scale in a distributed manner. Includes: @@ -506,15 +487,43 @@ Includes: - Shard Strategies - Circuit Breakers -## Submitting Changes +## Misc + +### Git Secrets + +Security is our top priority. Avoid checking in credentials. + +#### Installation +Install [awslabs/git-secrets](https://github.com/awslabs/git-secrets) by running the following commands. +``` +git clone https://github.com/awslabs/git-secrets.git +cd git-secrets +make install +``` + +#### Configuration +You can configure git-secrets per repository: change directory to the root of the repository and run the following command. +``` +git secrets --install +✓ Installed commit-msg hook to .git/hooks/commit-msg +✓ Installed pre-commit hook to .git/hooks/pre-commit +✓ Installed prepare-commit-msg hook to .git/hooks/prepare-commit-msg +``` +Then, apply patterns for git-secrets; you can install the AWS standard patterns by running the following command. +``` +git secrets --register-aws +``` + +### Submitting Changes See [CONTRIBUTING](CONTRIBUTING.md). -## Backports +### Backports The Github workflow in [`backport.yml`](.github/workflows/backport.yml) creates backport PRs automatically when the original PR with an appropriate label `backport ` is merged to main with the backport workflow run successfully on the PR. For example, if a PR on main needs to be backported to `1.x` branch, add a label `backport 1.x` to the PR and make sure the backport workflow runs on the PR along with other checks. Once this PR is merged to main, the workflow will create a backport PR to the `1.x` branch. -## LineLint +### LineLint + A linter in [`code-hygiene.yml`](.github/workflows/code-hygiene.yml) that validates simple newline and whitespace rules in all sorts of files. It can: - Recursively check a directory tree for files that do not end in a newline - Automatically fix these files by adding a newline or trimming extra newlines. @@ -529,7 +538,20 @@ Pass a list of files or directories to limit your search. linelint README.md LICENSE -# Lucene Snapshots +### Lucene Snapshots + The Github workflow in [lucene-snapshots.yml](.github/workflows/lucene-snapshots.yml) is a GitHub workflow executable by maintainers to build a top-down snapshot build of Lucene.
-These snapshots are available to test compatibility with upcoming changes to Lucene by updating the version at [version.properties](buildsrc/version.properties) with the `version-snapshot-sha` version. -Example: `lucene = 10.0.0-snapshot-2e941fc`. +These snapshots are available to test compatibility with upcoming changes to Lucene by updating the version at [version.properties](buildSrc/version.properties) with the `version-snapshot-sha` version. Example: `lucene = 10.0.0-snapshot-2e941fc`. + +### Flaky Tests + +OpenSearch has a very large test suite with long-running, often-failing (flaky) integration tests. Such individual tests are labelled as [Flaky Random Test Failure](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aopen+is%3Aissue+label%3A%22flaky-test%22). Your help is wanted fixing these! + +If you encounter a build/test failure in CI that is unrelated to the change in your pull request, it may be a known flaky test, or a new test failure. + +1. Follow failed CI links, and locate the failing test(s). +2. Copy-paste the failure into a comment of your PR. +3. Search through [issues](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aopen+is%3Aissue+label%3A%22flaky-test%22) using the name of the failed test to check whether this is a known flaky test. +4. If an existing issue is found, paste a link to the known issue in a comment to your PR. +5. If no existing issue is found, open one. +6. Retry CI via the GitHub UI or by pushing an update to your PR. diff --git a/README.md b/README.md index a7abedeefde8e..45d0a624ae0aa 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,13 @@ [![Chat](https://img.shields.io/badge/chat-on%20forums-blue)](https://forum.opensearch.org/c/opensearch/) [![Documentation](https://img.shields.io/badge/documentation-reference-blue)](https://opensearch.org/docs/latest/opensearch/index/) -[![codecov](https://codecov.io/gh/opensearch-project/OpenSearch/branch/main/graph/badge.svg)](https://codecov.io/gh/opensearch-project/OpenSearch) +[![Code Coverage](https://codecov.io/gh/opensearch-project/OpenSearch/branch/main/graph/badge.svg)](https://codecov.io/gh/opensearch-project/OpenSearch) +[![Untriaged Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch/untriaged?labelColor=red)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"untriaged") +[![Security Vulnerabilities](https://img.shields.io/github/issues/opensearch-project/OpenSearch/security%20vulnerability?labelColor=red)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"security%20vulnerability") +[![Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch)](https://github.com/opensearch-project/OpenSearch/issues) +[![Open Pull Requests](https://img.shields.io/github/issues-pr/opensearch-project/OpenSearch)](https://github.com/opensearch-project/OpenSearch/pulls) +[![2.4 Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch/v2.4.0)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"v2.4.0") +[![3.0 Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch/v3.0.0)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"v3.0.0") [![GHA gradle check](https://github.com/opensearch-project/OpenSearch/actions/workflows/gradle-check.yml/badge.svg)](https://github.com/opensearch-project/OpenSearch/actions/workflows/gradle-check.yml) [![GHA validate pull
request](https://github.com/opensearch-project/OpenSearch/actions/workflows/wrapper.yml/badge.svg)](https://github.com/opensearch-project/OpenSearch/actions/workflows/wrapper.yml) [![GHA precommit](https://github.com/opensearch-project/OpenSearch/actions/workflows/precommit.yml/badge.svg)](https://github.com/opensearch-project/OpenSearch/actions/workflows/precommit.yml) diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 0b23631816fe9..498edaf057a34 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -110,12 +110,13 @@ dependencies { api 'com.netflix.nebula:gradle-info-plugin:11.3.3' api 'org.apache.rat:apache-rat:0.13' api 'commons-io:commons-io:2.7' - api "net.java.dev.jna:jna:5.12.1" + api "net.java.dev.jna:jna:5.11.0" api 'gradle.plugin.com.github.johnrengelman:shadow:7.1.2' api 'org.jdom:jdom2:2.0.6.1' - api 'org.jetbrains.kotlin:kotlin-stdlib-jdk8:1.7.10' + api "org.jetbrains.kotlin:kotlin-stdlib-jdk8:${props.getProperty('kotlin')}" api 'de.thetaphi:forbiddenapis:3.3' api 'com.avast.gradle:gradle-docker-compose-plugin:0.15.2' + api "org.yaml:snakeyaml:${props.getProperty('snakeyaml')}" api 'org.apache.maven:maven-model:3.6.2' api 'com.networknt:json-schema-validator:1.0.69' api "com.fasterxml.jackson.core:jackson-databind:${props.getProperty('jackson_databind')}" diff --git a/buildSrc/src/integTest/java/org/opensearch/gradle/tar/SymbolicLinkPreservingTarIT.java b/buildSrc/src/integTest/java/org/opensearch/gradle/tar/SymbolicLinkPreservingTarIT.java index b70574c507f70..61aa55b9c6b53 100644 --- a/buildSrc/src/integTest/java/org/opensearch/gradle/tar/SymbolicLinkPreservingTarIT.java +++ b/buildSrc/src/integTest/java/org/opensearch/gradle/tar/SymbolicLinkPreservingTarIT.java @@ -35,6 +35,7 @@ import org.apache.commons.compress.archivers.tar.TarArchiveInputStream; import org.apache.commons.compress.compressors.bzip2.BZip2CompressorInputStream; import org.apache.commons.compress.compressors.gzip.GzipCompressorInputStream; +import org.apache.tools.ant.taskdefs.condition.Os; import org.opensearch.gradle.test.GradleIntegrationTestCase; import org.gradle.api.GradleException; import org.gradle.testkit.runner.GradleRunner; @@ -52,6 +53,7 @@ import static org.hamcrest.CoreMatchers.anyOf; import static org.hamcrest.CoreMatchers.equalTo; +import static org.junit.Assume.assumeFalse; public class SymbolicLinkPreservingTarIT extends GradleIntegrationTestCase { @@ -60,6 +62,7 @@ public class SymbolicLinkPreservingTarIT extends GradleIntegrationTestCase { @Before public void before() throws IOException { + assumeFalse("Skip tar tests on windows.", Os.isFamily(Os.FAMILY_WINDOWS)); final Path realFolder = temporaryFolder.getRoot().toPath().resolve("real-folder"); Files.createDirectory(realFolder); Files.createFile(realFolder.resolve("file")); diff --git a/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/PluginBuildPlugin.groovy index 31677965ab0d3..b7c78991a0da3 100644 --- a/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/PluginBuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/PluginBuildPlugin.groovy @@ -29,13 +29,13 @@ package org.opensearch.gradle.plugin import com.github.jengelman.gradle.plugins.shadow.ShadowPlugin +import org.gradle.api.tasks.bundling.AbstractArchiveTask import org.opensearch.gradle.BuildPlugin import org.opensearch.gradle.NoticeTask import org.opensearch.gradle.Version import org.opensearch.gradle.VersionProperties import 
org.opensearch.gradle.dependencies.CompileOnlyResolvePlugin import org.opensearch.gradle.info.BuildParams -import org.opensearch.gradle.plugin.PluginPropertiesExtension import org.opensearch.gradle.test.RestTestBasePlugin import org.opensearch.gradle.testclusters.RunTask import org.opensearch.gradle.util.Util @@ -134,6 +134,12 @@ class PluginBuildPlugin implements Plugin { } project.configurations.getByName('default') .extendsFrom(project.configurations.getByName('runtimeClasspath')) + project.tasks.withType(AbstractArchiveTask.class).configureEach { task -> + // ignore file timestamps + // be consistent in archive file order + task.preserveFileTimestamps = false + task.reproducibleFileOrder = true + } // allow running ES with this plugin in the foreground of a build project.tasks.register('run', RunTask) { dependsOn(project.tasks.bundlePlugin) diff --git a/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java index 2bdef8e4cd244..be12fdd99c1df 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java @@ -92,7 +92,7 @@ public String call() throws Exception { return String.format( "%s/distributions/%s-%s.pom", project.getBuildDir(), - getArchivesBaseName(project), + pomTask.getName().toLowerCase().contains("zip") ? project.getName() : getArchivesBaseName(project), project.getVersion() ); } @@ -130,7 +130,6 @@ public String call() throws Exception { publication.getPom().withXml(PublishPlugin::addScmInfo); if (!publication.getName().toLowerCase().contains("zip")) { - // have to defer this until archivesBaseName is set project.afterEvaluate(p -> publication.setArtifactId(getArchivesBaseName(project))); @@ -139,6 +138,8 @@ public String call() throws Exception { publication.artifact(project.getTasks().getByName("sourcesJar")); publication.artifact(project.getTasks().getByName("javadocJar")); } + } else { + project.afterEvaluate(p -> publication.setArtifactId(project.getName())); } generatePomTask.configure( diff --git a/buildSrc/src/main/java/org/opensearch/gradle/info/GlobalBuildInfoPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/info/GlobalBuildInfoPlugin.java index 166d8e3269d70..62e743d513193 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/info/GlobalBuildInfoPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/info/GlobalBuildInfoPlugin.java @@ -51,7 +51,6 @@ import java.io.File; import java.io.FileInputStream; import java.io.IOException; -import java.io.UncheckedIOException; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; @@ -220,14 +219,6 @@ private List getAvailableJavaVersions(JavaVersion minimumCompilerVersi return javaVersions; } - private static boolean isCurrentJavaHome(File javaHome) { - try { - return Files.isSameFile(javaHome.toPath(), Jvm.current().getJavaHome().toPath()); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - } - private static String getTestSeed() { String testSeedProperty = System.getProperty("tests.seed"); final String testSeed; diff --git a/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionArchiveCheckPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionArchiveCheckPlugin.java index 2a162e5f12d7b..96a2928b6e71e 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionArchiveCheckPlugin.java +++ 
b/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionArchiveCheckPlugin.java @@ -73,12 +73,14 @@ public void apply(Project project) { .create("distributionArchiveCheck", DistributionArchiveCheckExtension.class); File archiveExtractionDir = calculateArchiveExtractionDir(project); - // sanity checks if archives can be extracted TaskProvider checkExtraction = registerCheckExtractionTask(project, buildDistTask, archiveExtractionDir); + checkExtraction.configure(InternalDistributionArchiveSetupPlugin.configure(buildTaskName)); TaskProvider checkLicense = registerCheckLicenseTask(project, checkExtraction); + checkLicense.configure(InternalDistributionArchiveSetupPlugin.configure(buildTaskName)); TaskProvider checkNotice = registerCheckNoticeTask(project, checkExtraction); + checkNotice.configure(InternalDistributionArchiveSetupPlugin.configure(buildTaskName)); TaskProvider checkTask = project.getTasks().named("check"); checkTask.configure(task -> { task.dependsOn(checkExtraction); @@ -118,7 +120,7 @@ public void execute(Task task) { } private TaskProvider registerCheckLicenseTask(Project project, TaskProvider checkExtraction) { - TaskProvider checkLicense = project.getTasks().register("checkLicense", task -> { + return project.getTasks().register("checkLicense", task -> { task.dependsOn(checkExtraction); task.doLast(new Action() { @Override @@ -138,7 +140,6 @@ public void execute(Task task) { } }); }); - return checkLicense; } private TaskProvider registerCheckExtractionTask(Project project, TaskProvider buildDistTask, File archiveExtractionDir) { diff --git a/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionArchiveSetupPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionArchiveSetupPlugin.java index 8adfbff424278..0944f3960467b 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionArchiveSetupPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionArchiveSetupPlugin.java @@ -87,7 +87,7 @@ public void apply(Project project) { configureTarDefaults(project); } - private Action configure(String name) { + static Action configure(String name) { return (Task task) -> task.onlyIf(s -> { if (OperatingSystem.current().isWindows()) { // On Windows, include only Windows distributions and integTestZip diff --git a/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java b/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java index 6dc7d660922b2..6b581fcaa7774 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java @@ -13,12 +13,14 @@ import org.gradle.api.publish.maven.MavenPublication; import java.nio.file.Path; +import java.util.Set; +import java.util.stream.Collectors; + import org.gradle.api.Task; import org.gradle.api.publish.maven.plugins.MavenPublishPlugin; public class Publish implements Plugin { - // public final static String PLUGIN_ZIP_PUBLISH_POM_TASK = "generatePomFileForPluginZipPublication"; public final static String PUBLICATION_NAME = "pluginZip"; public final static String STAGING_REPO = "zipStaging"; public final static String LOCAL_STAGING_REPO_PATH = "/build/local-staging-repo"; @@ -67,10 +69,15 @@ public void apply(Project project) { if (validatePluginZipPom != null) { validatePluginZipPom.dependsOn("generatePomFileForNebulaPublication"); } - Task publishPluginZipPublicationToZipStagingRepository = 
project.getTasks() - .findByName("publishPluginZipPublicationToZipStagingRepository"); - if (publishPluginZipPublicationToZipStagingRepository != null) { - publishPluginZipPublicationToZipStagingRepository.dependsOn("generatePomFileForNebulaPublication"); + + // There are a number of tasks prefixed by 'publishPluginZipPublicationTo', e.g.: + // publishPluginZipPublicationToZipStagingRepository, publishPluginZipPublicationToMavenLocal + final Set publishPluginZipPublicationToTasks = project.getTasks() + .stream() + .filter(t -> t.getName().startsWith("publishPluginZipPublicationTo")) + .collect(Collectors.toSet()); + if (!publishPluginZipPublicationToTasks.isEmpty()) { + publishPluginZipPublicationToTasks.forEach(t -> t.dependsOn("generatePomFileForNebulaPublication")); } } else { project.getLogger() diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java index b14e93ecfd22d..e7c907dfdf000 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java @@ -75,9 +75,9 @@ import java.util.stream.Stream; public class DistroTestPlugin implements Plugin { - private static final String SYSTEM_JDK_VERSION = "11.0.16+8"; + private static final String SYSTEM_JDK_VERSION = "11.0.17+8"; private static final String SYSTEM_JDK_VENDOR = "adoptium"; - private static final String GRADLE_JDK_VERSION = "17.0.4+8"; + private static final String GRADLE_JDK_VERSION = "17.0.5+8"; private static final String GRADLE_JDK_VENDOR = "adoptium"; // all distributions used by distro tests. this is temporary until tests are per distribution diff --git a/buildSrc/src/main/resources/forbidden/http-signatures.txt b/buildSrc/src/main/resources/forbidden/http-signatures.txt index dcf20bbb09387..bfd81b3521a40 100644 --- a/buildSrc/src/main/resources/forbidden/http-signatures.txt +++ b/buildSrc/src/main/resources/forbidden/http-signatures.txt @@ -15,31 +15,14 @@ # language governing permissions and limitations under the License.
@defaultMessage Explicitly specify the ContentType of HTTP entities when creating -org.apache.http.entity.StringEntity#<init>(java.lang.String) -org.apache.http.entity.StringEntity#<init>(java.lang.String,java.lang.String) -org.apache.http.entity.StringEntity#<init>(java.lang.String,java.nio.charset.Charset) -org.apache.http.entity.ByteArrayEntity#<init>(byte[]) -org.apache.http.entity.ByteArrayEntity#<init>(byte[],int,int) -org.apache.http.entity.FileEntity#<init>(java.io.File) -org.apache.http.entity.InputStreamEntity#<init>(java.io.InputStream) -org.apache.http.entity.InputStreamEntity#<init>(java.io.InputStream,long) -org.apache.http.nio.entity.NByteArrayEntity#<init>(byte[]) -org.apache.http.nio.entity.NByteArrayEntity#<init>(byte[],int,int) -org.apache.http.nio.entity.NFileEntity#<init>(java.io.File) -org.apache.http.nio.entity.NStringEntity#<init>(java.lang.String) -org.apache.http.nio.entity.NStringEntity#<init>(java.lang.String,java.lang.String) +org.apache.hc.core5.http.io.entity.StringEntity#<init>(java.lang.String) +org.apache.hc.core5.http.io.entity.StringEntity#<init>(java.lang.String,java.nio.charset.Charset) @defaultMessage Use non-deprecated constructors -org.apache.http.nio.entity.NFileEntity#<init>(java.io.File,java.lang.String) -org.apache.http.nio.entity.NFileEntity#<init>(java.io.File,java.lang.String,boolean) -org.apache.http.entity.FileEntity#<init>(java.io.File,java.lang.String) -org.apache.http.entity.StringEntity#<init>(java.lang.String,java.lang.String,java.lang.String) +org.apache.hc.core5.http.io.entity.FileEntity#<init>(java.io.File,org.apache.hc.core5.http.ContentType) @defaultMessage BasicEntity is easy to mess up and forget to set content type -org.apache.http.entity.BasicHttpEntity#<init>() - -@defaultMessage EntityTemplate is easy to mess up and forget to set content type -org.apache.http.entity.EntityTemplate#<init>(org.apache.http.entity.ContentProducer) +org.apache.hc.core5.http.io.entity.BasicHttpEntity#<init>(java.io.InputStream,org.apache.hc.core5.http.ContentType) @defaultMessage SerializableEntity uses java serialization and makes it easy to forget to set content type -org.apache.http.entity.SerializableEntity#<init>(java.io.Serializable) +org.apache.hc.core5.http.io.entity.SerializableEntity#<init>(java.io.Serializable,org.apache.hc.core5.http.ContentType) diff --git a/buildSrc/src/test/java/org/opensearch/gradle/plugin/PluginBuildPluginTests.java b/buildSrc/src/test/java/org/opensearch/gradle/plugin/PluginBuildPluginTests.java index 9ed0e3e494992..8772a9fbd65ee 100644 --- a/buildSrc/src/test/java/org/opensearch/gradle/plugin/PluginBuildPluginTests.java +++ b/buildSrc/src/test/java/org/opensearch/gradle/plugin/PluginBuildPluginTests.java @@ -31,6 +31,7 @@ package org.opensearch.gradle.plugin; +import org.gradle.api.tasks.bundling.AbstractArchiveTask; import org.opensearch.gradle.BwcVersions; import org.opensearch.gradle.test.GradleUnitTestCase; import org.gradle.api.Project; @@ -64,6 +65,10 @@ public void testApply() { assertNotNull("plugin extensions has the right type", project.getExtensions().findByType(PluginPropertiesExtension.class)); assertNull("plugin should not create the integTest task", project.getTasks().findByName("integTest")); + project.getTasks().withType(AbstractArchiveTask.class).forEach(t -> { + assertFalse(String.format("task '%s' should not preserve timestamps", t.getName()), t.isPreserveFileTimestamps()); + assertTrue(String.format("task '%s' should have reproducible file order", t.getName()), t.isReproducibleFileOrder()); + }); } @Ignore("https://github.com/elastic/elasticsearch/issues/47123") diff --git
a/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java b/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java index 2ca0e507acb44..148a836f32b41 100644 --- a/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java +++ b/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java @@ -271,6 +271,76 @@ public void useDefaultValues() throws IOException, URISyntaxException, XmlPullPa assertEquals(model.getUrl(), "https://github.com/doe/sample-plugin"); } + /** + * If the `group` is defined in Gradle's allprojects section then it does not have to be defined in publications. + */ + @Test + public void allProjectsGroup() throws IOException, URISyntaxException, XmlPullParserException { + GradleRunner runner = prepareGradleRunnerFromTemplate("allProjectsGroup.gradle", "build", ZIP_PUBLISH_TASK); + BuildResult result = runner.build(); + + /** Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */ + assertEquals(SUCCESS, result.task(":" + "build").getOutcome()); + assertEquals(SUCCESS, result.task(":" + ZIP_PUBLISH_TASK).getOutcome()); + + // Parse the maven file and validate default values + MavenXpp3Reader reader = new MavenXpp3Reader(); + Model model = reader.read( + new FileReader( + new File( + projectDir.getRoot(), + String.join( + File.separator, + "build", + "local-staging-repo", + "org", + "opensearch", + PROJECT_NAME, + "2.0.0.0", + PROJECT_NAME + "-2.0.0.0.pom" + ) + ) + ) + ); + assertEquals(model.getVersion(), "2.0.0.0"); + assertEquals(model.getGroupId(), "org.opensearch"); + } + + /** + * The groupId value can be defined on several levels. This tests that the most internal level outweighs other levels. + */ + @Test + public void groupPriorityLevel() throws IOException, URISyntaxException, XmlPullParserException { + GradleRunner runner = prepareGradleRunnerFromTemplate("groupPriorityLevel.gradle", "build", ZIP_PUBLISH_TASK); + BuildResult result = runner.build(); + + /** Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */ + assertEquals(SUCCESS, result.task(":" + "build").getOutcome()); + assertEquals(SUCCESS, result.task(":" + ZIP_PUBLISH_TASK).getOutcome()); + + // Parse the maven file and validate default values + MavenXpp3Reader reader = new MavenXpp3Reader(); + Model model = reader.read( + new FileReader( + new File( + projectDir.getRoot(), + String.join( + File.separator, + "build", + "local-staging-repo", + "level", + "3", + PROJECT_NAME, + "2.0.0.0", + PROJECT_NAME + "-2.0.0.0.pom" + ) + ) + ) + ); + assertEquals(model.getVersion(), "2.0.0.0"); + assertEquals(model.getGroupId(), "level.3"); + } + /** * In this case the Publication entity is completely missing but still the POM file is generated using the default * values including the groupId and version values obtained from the Gradle project object.
diff --git a/buildSrc/src/test/java/org/opensearch/gradle/precommit/ForbiddenPatternsTaskTests.java b/buildSrc/src/test/java/org/opensearch/gradle/precommit/ForbiddenPatternsTaskTests.java index ea4db8954bca4..6ce2e70f68381 100644 --- a/buildSrc/src/test/java/org/opensearch/gradle/precommit/ForbiddenPatternsTaskTests.java +++ b/buildSrc/src/test/java/org/opensearch/gradle/precommit/ForbiddenPatternsTaskTests.java @@ -104,10 +104,6 @@ private ForbiddenPatternsTask createTask(Project project) { return project.getTasks().create("forbiddenPatterns", ForbiddenPatternsTask.class); } - private ForbiddenPatternsTask createTask(Project project, String taskName) { - return project.getTasks().create(taskName, ForbiddenPatternsTask.class); - } - private void writeSourceFile(Project project, String name, String... lines) throws IOException { File file = new File(project.getProjectDir(), name); file.getParentFile().mkdirs(); diff --git a/buildSrc/src/test/resources/pluginzip/allProjectsGroup.gradle b/buildSrc/src/test/resources/pluginzip/allProjectsGroup.gradle new file mode 100644 index 0000000000000..80638107c86e1 --- /dev/null +++ b/buildSrc/src/test/resources/pluginzip/allProjectsGroup.gradle @@ -0,0 +1,28 @@ +plugins { + id 'java-gradle-plugin' + id 'opensearch.pluginzip' +} + +version='2.0.0.0' + +// A bundlePlugin task mockup +tasks.register('bundlePlugin', Zip.class) { + archiveFileName = "sample-plugin-${version}.zip" + destinationDirectory = layout.buildDirectory.dir('distributions') + from layout.projectDirectory.file('sample-plugin-source.txt') +} + +allprojects { + group = 'org.opensearch' +} + +publishing { + publications { + pluginZip(MavenPublication) { publication -> + pom { + name = "sample-plugin" + description = "pluginDescription" + } + } + } +} diff --git a/buildSrc/src/test/resources/pluginzip/groupPriorityLevel.gradle b/buildSrc/src/test/resources/pluginzip/groupPriorityLevel.gradle new file mode 100644 index 0000000000000..4da02c9f191d8 --- /dev/null +++ b/buildSrc/src/test/resources/pluginzip/groupPriorityLevel.gradle @@ -0,0 +1,30 @@ +plugins { + id 'java-gradle-plugin' + id 'opensearch.pluginzip' +} + +version='2.0.0.0' + +// A bundlePlugin task mockup +tasks.register('bundlePlugin', Zip.class) { + archiveFileName = "sample-plugin-${version}.zip" + destinationDirectory = layout.buildDirectory.dir('distributions') + from layout.projectDirectory.file('sample-plugin-source.txt') +} + +allprojects { + group = 'level.1' +} + +publishing { + publications { + pluginZip(MavenPublication) { publication -> + groupId = "level.2" + pom { + name = "sample-plugin" + description = "pluginDescription" + groupId = "level.3" + } + } + } +} diff --git a/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/JUnit3MethodProvider.java b/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/JUnit3MethodProvider.java index 0c01b6d519d62..163a903d31832 100644 --- a/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/JUnit3MethodProvider.java +++ b/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/JUnit3MethodProvider.java @@ -59,7 +59,7 @@ public Collection getTestMethods(Class suiteClass, ClassModel classMo if (m.getName().startsWith("test") && Modifier.isPublic(m.getModifiers()) && !Modifier.isStatic(m.getModifiers()) - && m.getParameterTypes().length == 0) { + && m.getParameterCount() == 0) { result.add(m); } } diff --git a/buildSrc/version.properties b/buildSrc/version.properties index bf72245c63918..a42faa4a62080 100644 --- a/buildSrc/version.properties +++ 
b/buildSrc/version.properties @@ -1,8 +1,8 @@ opensearch = 3.0.0 -lucene = 9.4.0 +lucene = 9.5.0-snapshot-a4ef70f bundled_jdk_vendor = adoptium -bundled_jdk = 17.0.4+8 +bundled_jdk = 17.0.5+8 @@ -10,21 +10,27 @@ bundled_jdk = 17.0.4+8 spatial4j = 0.7 jts = 1.15.0 jackson = 2.13.4 -jackson_databind = 2.13.4 +jackson_databind = 2.13.4.2 snakeyaml = 1.32 icu4j = 70.1 supercsv = 2.4.0 +# Update to 2.17.2+ is breaking OpenSearchJsonLayout (see https://issues.apache.org/jira/browse/LOG4J2-3562) log4j = 2.17.1 slf4j = 1.7.36 -asm = 9.3 +asm = 9.4 +jettison = 1.5.1 +woodstox = 6.4.0 +kotlin = 1.7.10 # when updating the JNA version, also update the version in buildSrc/build.gradle -jna = 5.12.1 +jna = 5.5.0 -netty = 4.1.79.Final +netty = 4.1.84.Final joda = 2.10.13 # client dependencies +httpclient5 = 5.1.3 +httpcore5 = 5.1.4 httpclient = 4.5.13 httpcore = 4.4.15 httpasyncclient = 4.1.5 @@ -42,9 +48,10 @@ bouncycastle=1.70 randomizedrunner = 2.7.1 junit = 4.13.2 hamcrest = 2.1 +# Update to 4.8.0 is using reflection without SecurityManager checks (fails with java.security.AccessControlException) mockito = 4.7.0 objenesis = 3.2 -bytebuddy = 1.12.12 +bytebuddy = 1.12.18 # benchmark dependencies jmh = 1.35 diff --git a/client/benchmark/src/main/java/org/opensearch/client/benchmark/rest/RestClientBenchmark.java b/client/benchmark/src/main/java/org/opensearch/client/benchmark/rest/RestClientBenchmark.java index d2d7163b8dee2..e8dcff814603d 100644 --- a/client/benchmark/src/main/java/org/opensearch/client/benchmark/rest/RestClientBenchmark.java +++ b/client/benchmark/src/main/java/org/opensearch/client/benchmark/rest/RestClientBenchmark.java @@ -31,10 +31,10 @@ package org.opensearch.client.benchmark.rest; -import org.apache.http.HttpHeaders; -import org.apache.http.HttpHost; -import org.apache.http.HttpStatus; -import org.apache.http.message.BasicHeader; +import org.apache.hc.core5.http.HttpHeaders; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.HttpStatus; +import org.apache.hc.core5.http.message.BasicHeader; import org.opensearch.OpenSearchException; import org.opensearch.client.Request; import org.opensearch.client.Response; diff --git a/client/rest-high-level/build.gradle b/client/rest-high-level/build.gradle index 07147ce81b72e..7fa2855d85487 100644 --- a/client/rest-high-level/build.gradle +++ b/client/rest-high-level/build.gradle @@ -104,3 +104,9 @@ testClusters.all { extraConfigFile nodeTrustStore.name, nodeTrustStore extraConfigFile pkiTrustCert.name, pkiTrustCert } + +thirdPartyAudit.ignoreMissingClasses( + 'org.conscrypt.Conscrypt', + 'org.slf4j.Logger', + 'org.slf4j.LoggerFactory' +) diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/ClusterRequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/ClusterRequestConverters.java index 37a1ab8812845..4ff8e75b521b6 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/ClusterRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/ClusterRequestConverters.java @@ -32,10 +32,10 @@ package org.opensearch.client; -import org.apache.http.client.methods.HttpDelete; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpHead; -import org.apache.http.client.methods.HttpPut; +import org.apache.hc.client5.http.classic.methods.HttpDelete; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpHead; +import 
org.apache.hc.client5.http.classic.methods.HttpPut; import org.opensearch.action.admin.cluster.health.ClusterHealthRequest; import org.opensearch.action.admin.cluster.settings.ClusterGetSettingsRequest; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/IndicesRequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/IndicesRequestConverters.java index 3a5384f23b90e..ca9154340a660 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/IndicesRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/IndicesRequestConverters.java @@ -32,11 +32,11 @@ package org.opensearch.client; -import org.apache.http.client.methods.HttpDelete; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpHead; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; +import org.apache.hc.client5.http.classic.methods.HttpDelete; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpHead; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; import org.opensearch.action.admin.indices.alias.IndicesAliasesRequest; import org.opensearch.action.admin.indices.alias.get.GetAliasesRequest; import org.opensearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/IngestRequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/IngestRequestConverters.java index 2504dec3af36e..4c044413642ac 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/IngestRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/IngestRequestConverters.java @@ -32,10 +32,10 @@ package org.opensearch.client; -import org.apache.http.client.methods.HttpDelete; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; +import org.apache.hc.client5.http.classic.methods.HttpDelete; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; import org.opensearch.action.ingest.DeletePipelineRequest; import org.opensearch.action.ingest.GetPipelineRequest; import org.opensearch.action.ingest.PutPipelineRequest; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java index 91c339cc92c1b..88e3a3a904830 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java @@ -32,14 +32,14 @@ package org.opensearch.client; -import org.apache.http.HttpEntity; -import org.apache.http.client.methods.HttpDelete; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpHead; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; -import org.apache.http.entity.ContentType; -import org.apache.http.nio.entity.NByteArrayEntity; +import org.apache.hc.client5.http.classic.methods.HttpDelete; +import 
org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpHead; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.io.entity.ByteArrayEntity; import org.apache.lucene.util.BytesRef; import org.opensearch.action.DocWriteRequest; import org.opensearch.action.admin.cluster.health.ClusterHealthRequest; @@ -269,7 +269,7 @@ static Request bulk(BulkRequest bulkRequest) throws IOException { } } request.addParameters(parameters.asMap()); - request.setEntity(new NByteArrayEntity(content.toByteArray(), 0, content.size(), requestContentType)); + request.setEntity(new ByteArrayEntity(content.toByteArray(), 0, content.size(), requestContentType)); return request; } @@ -358,7 +358,7 @@ static Request index(IndexRequest indexRequest) { BytesRef source = indexRequest.source().toBytesRef(); ContentType contentType = createContentType(indexRequest.getContentType()); request.addParameters(parameters.asMap()); - request.setEntity(new NByteArrayEntity(source.bytes, source.offset, source.length, contentType)); + request.setEntity(new ByteArrayEntity(source.bytes, source.offset, source.length, contentType)); return request; } @@ -514,7 +514,7 @@ static Request multiSearch(MultiSearchRequest multiSearchRequest) throws IOExcep XContent xContent = REQUEST_BODY_CONTENT_TYPE.xContent(); byte[] source = MultiSearchRequest.writeMultiLineFormat(multiSearchRequest, xContent); request.addParameters(params.asMap()); - request.setEntity(new NByteArrayEntity(source, createContentType(xContent.type()))); + request.setEntity(new ByteArrayEntity(source, createContentType(xContent.type()))); return request; } @@ -549,7 +549,7 @@ static Request multiSearchTemplate(MultiSearchTemplateRequest multiSearchTemplat XContent xContent = REQUEST_BODY_CONTENT_TYPE.xContent(); byte[] source = MultiSearchTemplateRequest.writeMultiLineFormat(multiSearchTemplateRequest, xContent); - request.setEntity(new NByteArrayEntity(source, createContentType(xContent.type()))); + request.setEntity(new ByteArrayEntity(source, createContentType(xContent.type()))); return request; } @@ -817,7 +817,7 @@ static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType) static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType, ToXContent.Params toXContentParams) throws IOException { BytesRef source = XContentHelper.toXContent(toXContent, xContentType, toXContentParams, false).toBytesRef(); - return new NByteArrayEntity(source.bytes, source.offset, source.length, createContentType(xContentType)); + return new ByteArrayEntity(source.bytes, source.offset, source.length, createContentType(xContentType)); } static String endpoint(String index, String id) { diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java index 0a5880b778942..27f13fc3c00c4 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java @@ -32,7 +32,7 @@ package org.opensearch.client; -import org.apache.http.HttpEntity; +import org.apache.hc.core5.http.HttpEntity; import org.opensearch.OpenSearchException; import org.opensearch.OpenSearchStatusException; import 
org.opensearch.action.ActionListener; @@ -2220,9 +2220,9 @@ protected final Resp parseEntity(final HttpEntity entity, final CheckedFu if (entity.getContentType() == null) { throw new IllegalStateException("OpenSearch didn't return the [Content-Type] header, unable to parse response body"); } - XContentType xContentType = XContentType.fromMediaType(entity.getContentType().getValue()); + XContentType xContentType = XContentType.fromMediaType(entity.getContentType()); if (xContentType == null) { - throw new IllegalStateException("Unsupported Content-Type: " + entity.getContentType().getValue()); + throw new IllegalStateException("Unsupported Content-Type: " + entity.getContentType()); } try (XContentParser parser = xContentType.xContent().createParser(registry, DEPRECATION_HANDLER, entity.getContent())) { return entityParser.apply(parser); diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/SnapshotRequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/SnapshotRequestConverters.java index 3d44820966608..263d7db82ba08 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/SnapshotRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/SnapshotRequestConverters.java @@ -32,10 +32,10 @@ package org.opensearch.client; -import org.apache.http.client.methods.HttpDelete; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; +import org.apache.hc.client5.http.classic.methods.HttpDelete; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; import org.opensearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequest; import org.opensearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/TasksRequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/TasksRequestConverters.java index ff89950f37cb9..78a74ca04ff9b 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/TasksRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/TasksRequestConverters.java @@ -32,8 +32,8 @@ package org.opensearch.client; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpPost; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.opensearch.client.RequestConverters.EndpointBuilder; import org.opensearch.client.tasks.CancelTasksRequest; diff --git a/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt b/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt index 68dc509e5ff27..0d7749b39fb91 100644 --- a/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt +++ b/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt @@ -15,10 +15,9 @@ # language governing permissions and limitations under the License. 
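A recurring pattern in the client changes above: in Apache HttpCore 5, HttpEntity#getContentType() returns the header value directly as a String rather than a Header object, which is why the .getValue() calls disappear throughout. A minimal sketch of the new lookup, using an illustrative JSON entity rather than code from this change:

```java
import org.apache.hc.core5.http.ContentType;
import org.apache.hc.core5.http.HttpEntity;
import org.apache.hc.core5.http.io.entity.StringEntity;

public class ContentTypeLookup {
    public static void main(String[] args) {
        HttpEntity entity = new StringEntity("{\"field\":\"value\"}", ContentType.APPLICATION_JSON);
        // HttpCore 5: the content type is already a String, e.g.
        // "application/json; charset=UTF-8"; there is no Header#getValue() hop.
        String mediaType = entity.getContentType();
        if (mediaType == null) {
            // Mirrors the parseEntity guard above: a missing Content-Type is an error.
            throw new IllegalStateException("response has no [Content-Type] header");
        }
        System.out.println(mediaType);
    }
}
```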
@defaultMessage Use Request#createContentType(XContentType) to be sure to pass the right MIME type -org.apache.http.entity.ContentType#create(java.lang.String) -org.apache.http.entity.ContentType#create(java.lang.String,java.lang.String) -org.apache.http.entity.ContentType#create(java.lang.String,java.nio.charset.Charset) -org.apache.http.entity.ContentType#create(java.lang.String,org.apache.http.NameValuePair[]) +org.apache.hc.core5.http.ContentType#create(java.lang.String) +org.apache.hc.core5.http.ContentType#create(java.lang.String,java.lang.String) +org.apache.hc.core5.http.ContentType#create(java.lang.String,java.nio.charset.Charset) @defaultMessage ES's logging infrastructure uses log4j2 which we don't want to force on high level rest client users org.opensearch.common.logging.DeprecationLogger @@ -30,7 +29,3 @@ org.opensearch.common.logging.PrefixLogger @defaultMessage We can't rely on log4j2 being on the classpath so don't log deprecations! org.opensearch.common.xcontent.LoggingDeprecationHandler - -@defaultMessage Use Nonblocking org.apache.http.nio.entity.NByteArrayEntity -org.apache.http.entity.ByteArrayEntity -org.apache.http.entity.StringEntity diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/ClusterClientIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/ClusterClientIT.java index 71b869fb59e7b..82d2cbe9149ca 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/ClusterClientIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/ClusterClientIT.java @@ -32,7 +32,8 @@ package org.opensearch.client; -import org.apache.http.util.EntityUtils; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.opensearch.OpenSearchException; import org.opensearch.OpenSearchStatusException; import org.opensearch.action.admin.cluster.health.ClusterHealthRequest; @@ -220,7 +221,7 @@ public void testClusterHealthGreen() throws IOException { assertThat(response.getStatus(), equalTo(ClusterHealthStatus.GREEN)); } - public void testClusterHealthYellowClusterLevel() throws IOException { + public void testClusterHealthYellowClusterLevel() throws IOException, ParseException { createIndex("index", Settings.EMPTY); createIndex("index2", Settings.EMPTY); ClusterHealthRequest request = new ClusterHealthRequest(); @@ -231,7 +232,7 @@ public void testClusterHealthYellowClusterLevel() throws IOException { assertThat(response.getIndices().size(), equalTo(0)); } - public void testClusterHealthYellowIndicesLevel() throws IOException { + public void testClusterHealthYellowIndicesLevel() throws IOException, ParseException { String firstIndex = "index"; String secondIndex = "index2"; // including another index that we do not assert on, to ensure that we are not diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/ClusterRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/ClusterRequestConvertersTests.java index 27adc18fd37b8..f201599632969 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/ClusterRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/ClusterRequestConvertersTests.java @@ -32,8 +32,6 @@ package org.opensearch.client; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpPut; import org.opensearch.action.admin.cluster.health.ClusterHealthRequest; import 
org.opensearch.action.admin.cluster.settings.ClusterGetSettingsRequest; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; @@ -44,6 +42,8 @@ import org.opensearch.common.Priority; import org.opensearch.common.util.CollectionUtils; import org.opensearch.test.OpenSearchTestCase; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpPut; import org.hamcrest.CoreMatchers; import org.junit.Assert; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/CustomRestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/CustomRestHighLevelClientTests.java index 1d94f190c611c..972c96999945f 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/CustomRestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/CustomRestHighLevelClientTests.java @@ -32,15 +32,14 @@ package org.opensearch.client; -import org.apache.http.Header; -import org.apache.http.HttpHost; -import org.apache.http.ProtocolVersion; -import org.apache.http.RequestLine; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.entity.ContentType; -import org.apache.http.message.BasicRequestLine; -import org.apache.http.message.BasicStatusLine; -import org.apache.http.nio.entity.NByteArrayEntity; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.ProtocolVersion; +import org.apache.hc.core5.http.io.entity.ByteArrayEntity; +import org.apache.hc.core5.http.message.RequestLine; +import org.apache.hc.core5.http.message.StatusLine; import org.apache.lucene.util.BytesRef; import org.opensearch.Build; import org.opensearch.Version; @@ -172,13 +171,13 @@ private Response mockPerformRequest(Request request) throws IOException { when(mockResponse.getHost()).thenReturn(new HttpHost("localhost", 9200)); ProtocolVersion protocol = new ProtocolVersion("HTTP", 1, 1); - when(mockResponse.getStatusLine()).thenReturn(new BasicStatusLine(protocol, 200, "OK")); + when(mockResponse.getStatusLine()).thenReturn(new StatusLine(protocol, 200, "OK")); MainResponse response = new MainResponse(httpHeader.getValue(), Version.CURRENT, ClusterName.DEFAULT, "_na", Build.CURRENT); BytesRef bytesRef = XContentHelper.toXContent(response, XContentType.JSON, false).toBytesRef(); - when(mockResponse.getEntity()).thenReturn(new NByteArrayEntity(bytesRef.bytes, ContentType.APPLICATION_JSON)); + when(mockResponse.getEntity()).thenReturn(new ByteArrayEntity(bytesRef.bytes, ContentType.APPLICATION_JSON)); - RequestLine requestLine = new BasicRequestLine(HttpGet.METHOD_NAME, ENDPOINT, protocol); + RequestLine requestLine = new RequestLine(HttpGet.METHOD_NAME, ENDPOINT, protocol); when(mockResponse.getRequestLine()).thenReturn(requestLine); return mockResponse; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/HighLevelRestClientCompressionIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/HighLevelRestClientCompressionIT.java index c0c03ed1d0e7c..6985353806a01 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/HighLevelRestClientCompressionIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/HighLevelRestClientCompressionIT.java @@ -31,13 +31,22 @@ package org.opensearch.client; -import org.apache.http.HttpHeaders; -import 
org.apache.http.client.methods.HttpPost;
-import org.apache.http.client.methods.HttpPut;
+import org.apache.hc.client5.http.classic.methods.HttpGet;
+import org.apache.hc.client5.http.classic.methods.HttpPost;
+import org.apache.hc.client5.http.classic.methods.HttpPut;
+import org.apache.hc.client5.http.entity.GzipCompressingEntity;
+import org.apache.hc.client5.http.impl.classic.CloseableHttpClient;
+import org.apache.hc.client5.http.impl.classic.CloseableHttpResponse;
+import org.apache.hc.client5.http.impl.classic.HttpClients;
+import org.apache.hc.core5.http.ContentType;
+import org.apache.hc.core5.http.HttpHeaders;
+import org.apache.hc.core5.http.io.entity.StringEntity;
 import org.opensearch.action.search.SearchRequest;
 import org.opensearch.action.search.SearchResponse;
 import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
 import static org.hamcrest.Matchers.equalTo;
@@ -62,4 +71,32 @@ public void testCompressesResponseIfRequested() throws IOException {
         assertEquals(SAMPLE_DOCUMENT, searchResponse.getHits().getHits()[0].getSourceAsString());
     }
+
+    /**
+     * The default CloseableHttpAsyncClient does not support compression out of the box, and that limitation carries
+     * over to RestClient and RestHighLevelClient. To check that compression works on both sides, this test crafts the
+     * request with CloseableHttpClient instead, which supports compression by default.
+     */
+    public void testCompressesRequest() throws IOException, URISyntaxException {
+        try (CloseableHttpClient client = HttpClients.custom().build()) {
+            final Node node = client().getNodes().iterator().next();
+            final URI baseUri = new URI(node.getHost().toURI());
+
+            final HttpPut index = new HttpPut(baseUri.resolve("/company/_doc/1"));
+            index.setEntity(new GzipCompressingEntity(new StringEntity(SAMPLE_DOCUMENT, ContentType.APPLICATION_JSON)));
+            try (CloseableHttpResponse response = client.execute(index)) {
+                assertThat(response.getCode(), equalTo(201));
+            }
+
+            final HttpGet refresh = new HttpGet(baseUri.resolve("/_refresh"));
+            try (CloseableHttpResponse response = client.execute(refresh)) {
+                assertThat(response.getCode(), equalTo(200));
+            }
+
+            final HttpPost search = new HttpPost(baseUri.resolve("/_search"));
+            search.setEntity(new GzipCompressingEntity(new StringEntity("{}", ContentType.APPLICATION_JSON)));
+            try (CloseableHttpResponse response = client.execute(search)) {
+                assertThat(response.getCode(), equalTo(200));
+            }
+        }
+    }
 }
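Outside the test harness, the request-compression pattern exercised by the new testCompressesRequest above looks roughly like the sketch below with the classic HttpClient 5 API; the endpoint and document body are placeholders, not values from this change, and error handling is elided:

```java
import org.apache.hc.client5.http.classic.methods.HttpPut;
import org.apache.hc.client5.http.entity.GzipCompressingEntity;
import org.apache.hc.client5.http.impl.classic.CloseableHttpClient;
import org.apache.hc.client5.http.impl.classic.CloseableHttpResponse;
import org.apache.hc.client5.http.impl.classic.HttpClients;
import org.apache.hc.core5.http.ContentType;
import org.apache.hc.core5.http.io.entity.StringEntity;

public class GzipIndexRequest {
    public static void main(String[] args) throws Exception {
        try (CloseableHttpClient client = HttpClients.createDefault()) {
            // Placeholder endpoint; any reachable OpenSearch node would do.
            HttpPut put = new HttpPut("http://localhost:9200/company/_doc/1");
            // GzipCompressingEntity gzips the body and sets Content-Encoding: gzip.
            put.setEntity(new GzipCompressingEntity(new StringEntity("{\"name\":\"acme\"}", ContentType.APPLICATION_JSON)));
            try (CloseableHttpResponse response = client.execute(put)) {
                System.out.println(response.getCode()); // 201 on first create
            }
        }
    }
}
```

The async client underneath RestClient offers no such wrapper out of the box, which is exactly the gap the test works around.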
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java
index f9c8851f8839e..750b0c15e9c14 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java
@@ -32,9 +32,9 @@ package org.opensearch.client;
-import org.apache.http.client.methods.HttpGet;
-import org.apache.http.client.methods.HttpPost;
-import org.apache.http.client.methods.HttpPut;
+import org.apache.hc.client5.http.classic.methods.HttpGet;
+import org.apache.hc.client5.http.classic.methods.HttpPost;
+import org.apache.hc.client5.http.classic.methods.HttpPut;
 import org.opensearch.OpenSearchException;
 import org.opensearch.OpenSearchStatusException;
 import org.opensearch.action.admin.indices.alias.Alias;
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java
index fdb5f2843b44d..512cc058a64a7 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java
@@ -32,11 +32,11 @@ package org.opensearch.client;
-import org.apache.http.client.methods.HttpDelete;
-import org.apache.http.client.methods.HttpGet;
-import org.apache.http.client.methods.HttpHead;
-import org.apache.http.client.methods.HttpPost;
-import org.apache.http.client.methods.HttpPut;
+import org.apache.hc.client5.http.classic.methods.HttpDelete;
+import org.apache.hc.client5.http.classic.methods.HttpGet;
+import org.apache.hc.client5.http.classic.methods.HttpHead;
+import org.apache.hc.client5.http.classic.methods.HttpPost;
+import org.apache.hc.client5.http.classic.methods.HttpPut;
 import org.apache.lucene.tests.util.LuceneTestCase;
 import org.opensearch.action.ActionRequestValidationException;
 import org.opensearch.action.admin.indices.alias.Alias;
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/IngestRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/IngestRequestConvertersTests.java
index 200069ade1ea2..8aae33307279b 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/IngestRequestConvertersTests.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/IngestRequestConvertersTests.java
@@ -32,10 +32,6 @@ package org.opensearch.client;
-import org.apache.http.client.methods.HttpDelete;
-import org.apache.http.client.methods.HttpGet;
-import org.apache.http.client.methods.HttpPost;
-import org.apache.http.client.methods.HttpPut;
 import org.opensearch.action.ingest.DeletePipelineRequest;
 import org.opensearch.action.ingest.GetPipelineRequest;
 import org.opensearch.action.ingest.PutPipelineRequest;
@@ -44,6 +40,10 @@
 import org.opensearch.common.bytes.BytesArray;
 import org.opensearch.common.xcontent.XContentType;
 import org.opensearch.test.OpenSearchTestCase;
+import org.apache.hc.client5.http.classic.methods.HttpDelete;
+import org.apache.hc.client5.http.classic.methods.HttpGet;
+import org.apache.hc.client5.http.classic.methods.HttpPost;
+import org.apache.hc.client5.http.classic.methods.HttpPut;
 import org.junit.Assert;
 import java.io.IOException;
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/MockRestHighLevelTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/MockRestHighLevelTests.java
index bd57c5c9e53f6..e1179c0f24cb8 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/MockRestHighLevelTests.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/MockRestHighLevelTests.java
@@ -32,13 +32,12 @@ package org.opensearch.client;
-import org.apache.http.HttpHost;
-import org.apache.http.ProtocolVersion;
-import org.apache.http.RequestLine;
-import org.apache.http.client.methods.HttpGet;
-import org.apache.http.message.BasicRequestLine;
-import org.apache.http.message.BasicStatusLine;
 import org.opensearch.test.OpenSearchTestCase;
+import org.apache.hc.client5.http.classic.methods.HttpGet;
+import org.apache.hc.core5.http.HttpHost;
+import org.apache.hc.core5.http.ProtocolVersion;
+import org.apache.hc.core5.http.message.RequestLine;
+import org.apache.hc.core5.http.message.StatusLine;
 import org.junit.Before;
 import java.io.IOException;
@@ -64,9 +63,9 @@ private void
setupClient() throws IOException { when(mockResponse.getWarnings()).thenReturn(WARNINGS); ProtocolVersion protocol = new ProtocolVersion("HTTP", 1, 1); - when(mockResponse.getStatusLine()).thenReturn(new BasicStatusLine(protocol, 200, "OK")); + when(mockResponse.getStatusLine()).thenReturn(new StatusLine(protocol, 200, "OK")); - RequestLine requestLine = new BasicRequestLine(HttpGet.METHOD_NAME, "/_blah", protocol); + RequestLine requestLine = new RequestLine(HttpGet.METHOD_NAME, "/_blah", protocol); when(mockResponse.getRequestLine()).thenReturn(requestLine); WarningFailureException expectedException = new WarningFailureException(mockResponse); diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/OpenSearchRestHighLevelClientTestCase.java b/client/rest-high-level/src/test/java/org/opensearch/client/OpenSearchRestHighLevelClientTestCase.java index efac508cf6814..a8c73393f54ce 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/OpenSearchRestHighLevelClientTestCase.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/OpenSearchRestHighLevelClientTestCase.java @@ -32,7 +32,7 @@ package org.opensearch.client; -import org.apache.http.util.EntityUtils; +import org.opensearch.OpenSearchParseException; import org.opensearch.action.ActionListener; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse; @@ -61,6 +61,8 @@ import org.opensearch.search.SearchModule; import org.opensearch.tasks.TaskId; import org.opensearch.test.rest.OpenSearchRestTestCase; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.junit.AfterClass; import org.junit.Before; @@ -324,7 +326,7 @@ protected static void setupRemoteClusterConfig(String remoteClusterName) throws }); } - protected static Map toMap(Response response) throws IOException { + protected static Map toMap(Response response) throws IOException, OpenSearchParseException, ParseException { return XContentHelper.convertToMap(JsonXContent.jsonXContent, EntityUtils.toString(response.getEntity()), false); } diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/PingAndInfoIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/PingAndInfoIT.java index 09ef90cef144d..6f66a5279afa3 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/PingAndInfoIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/PingAndInfoIT.java @@ -32,7 +32,7 @@ package org.opensearch.client; -import org.apache.http.client.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpGet; import org.opensearch.client.core.MainResponse; import java.io.IOException; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java index be9b614a8720f..1f10deb400ecc 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java @@ -8,8 +8,8 @@ package org.opensearch.client; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; import org.junit.Before; import org.opensearch.OpenSearchStatusException; import org.opensearch.action.ActionListener; diff --git 
a/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java index ee5795deb165d..576fe02718ba3 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java @@ -32,14 +32,6 @@ package org.opensearch.client; -import org.apache.http.HttpEntity; -import org.apache.http.client.methods.HttpDelete; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpHead; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; -import org.apache.http.nio.entity.NByteArrayEntity; -import org.apache.http.util.EntityUtils; import org.opensearch.action.DocWriteRequest; import org.opensearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest; import org.opensearch.action.admin.cluster.storedscripts.GetStoredScriptRequest; @@ -120,6 +112,14 @@ import org.opensearch.tasks.TaskId; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.RandomObjects; +import org.apache.hc.client5.http.classic.methods.HttpDelete; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpHead; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.io.entity.ByteArrayEntity; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.hamcrest.Matchers; import java.io.IOException; @@ -733,8 +733,8 @@ public void testIndex() throws IOException { assertEquals(method, request.getMethod()); HttpEntity entity = request.getEntity(); - assertTrue(entity instanceof NByteArrayEntity); - assertEquals(indexRequest.getContentType().mediaTypeWithoutParameters(), entity.getContentType().getValue()); + assertTrue(entity instanceof ByteArrayEntity); + assertEquals(indexRequest.getContentType().mediaTypeWithoutParameters(), entity.getContentType()); try (XContentParser parser = createParser(xContentType.xContent(), entity.getContent())) { assertEquals(nbFields, parser.map().size()); } @@ -805,11 +805,11 @@ public void testUpdate() throws IOException { assertEquals(HttpPost.METHOD_NAME, request.getMethod()); HttpEntity entity = request.getEntity(); - assertTrue(entity instanceof NByteArrayEntity); + assertTrue(entity instanceof ByteArrayEntity); UpdateRequest parsedUpdateRequest = new UpdateRequest(); - XContentType entityContentType = XContentType.fromMediaType(entity.getContentType().getValue()); + XContentType entityContentType = XContentType.fromMediaType(entity.getContentType()); try (XContentParser parser = createParser(entityContentType.xContent(), entity.getContent())) { parsedUpdateRequest.fromXContent(parser); } @@ -926,7 +926,7 @@ public void testBulk() throws IOException { assertEquals("/_bulk", request.getEndpoint()); assertEquals(expectedParams, request.getParameters()); assertEquals(HttpPost.METHOD_NAME, request.getMethod()); - assertEquals(xContentType.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); + assertEquals(xContentType.mediaTypeWithoutParameters(), request.getEntity().getContentType()); byte[] content = new byte[(int) request.getEntity().getContentLength()]; try (InputStream inputStream = request.getEntity().getContent()) { 
Streams.readFully(inputStream, content); @@ -979,7 +979,7 @@ public void testBulkWithDifferentContentTypes() throws IOException { bulkRequest.add(new DeleteRequest("index", "2")); Request request = RequestConverters.bulk(bulkRequest); - assertEquals(XContentType.JSON.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); + assertEquals(XContentType.JSON.mediaTypeWithoutParameters(), request.getEntity().getContentType()); } { XContentType xContentType = randomFrom(XContentType.JSON, XContentType.SMILE); @@ -989,7 +989,7 @@ public void testBulkWithDifferentContentTypes() throws IOException { bulkRequest.add(new DeleteRequest("index", "2")); Request request = RequestConverters.bulk(bulkRequest); - assertEquals(xContentType.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); + assertEquals(xContentType.mediaTypeWithoutParameters(), request.getEntity().getContentType()); } { XContentType xContentType = randomFrom(XContentType.JSON, XContentType.SMILE); @@ -1001,7 +1001,7 @@ public void testBulkWithDifferentContentTypes() throws IOException { } Request request = RequestConverters.bulk(new BulkRequest().add(updateRequest)); - assertEquals(xContentType.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); + assertEquals(xContentType.mediaTypeWithoutParameters(), request.getEntity().getContentType()); } { BulkRequest bulkRequest = new BulkRequest(); @@ -1289,7 +1289,7 @@ public void testSearchScroll() throws IOException { assertEquals("/_search/scroll", request.getEndpoint()); assertEquals(0, request.getParameters().size()); assertToXContentBody(searchScrollRequest, request.getEntity()); - assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); + assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType()); } public void testClearScroll() throws IOException { @@ -1303,7 +1303,7 @@ public void testClearScroll() throws IOException { assertEquals("/_search/scroll", request.getEndpoint()); assertEquals(0, request.getParameters().size()); assertToXContentBody(clearScrollRequest, request.getEntity()); - assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); + assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType()); } public void testCreatePit() throws IOException { @@ -1324,7 +1324,7 @@ public void testCreatePit() throws IOException { assertEquals(endpoint.toString(), request.getEndpoint()); assertEquals(expectedParams, request.getParameters()); assertToXContentBody(createPitRequest, request.getEntity()); - assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); + assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType()); } public void testDeletePit() throws IOException { @@ -1337,7 +1337,7 @@ public void testDeletePit() throws IOException { assertEquals(HttpDelete.METHOD_NAME, request.getMethod()); assertEquals(endpoint, request.getEndpoint()); assertToXContentBody(deletePitRequest, request.getEntity()); - assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); + assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType()); } public void testDeleteAllPits() { @@ -1456,7 +1456,7 @@ public 
void testMultiSearchTemplate() throws Exception { HttpEntity actualEntity = multiRequest.getEntity(); byte[] expectedBytes = MultiSearchTemplateRequest.writeMultiLineFormat(multiSearchTemplateRequest, XContentType.JSON.xContent()); - assertEquals(XContentType.JSON.mediaTypeWithoutParameters(), actualEntity.getContentType().getValue()); + assertEquals(XContentType.JSON.mediaTypeWithoutParameters(), actualEntity.getContentType()); assertEquals(new BytesArray(expectedBytes), new BytesArray(EntityUtils.toByteArray(actualEntity))); } @@ -1763,7 +1763,7 @@ public void testDeleteScriptRequest() { static void assertToXContentBody(ToXContent expectedBody, HttpEntity actualEntity) throws IOException { BytesReference expectedBytes = XContentHelper.toXContent(expectedBody, REQUEST_BODY_CONTENT_TYPE, false); - assertEquals(XContentType.JSON.mediaTypeWithoutParameters(), actualEntity.getContentType().getValue()); + assertEquals(XContentType.JSON.mediaTypeWithoutParameters(), actualEntity.getContentType()); assertEquals(expectedBytes, new BytesArray(EntityUtils.toByteArray(actualEntity))); } diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientExtTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientExtTests.java index dbdf7eba3dca4..5743820ff0175 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientExtTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientExtTests.java @@ -32,9 +32,9 @@ package org.opensearch.client; -import org.apache.http.HttpEntity; -import org.apache.http.entity.ContentType; -import org.apache.http.nio.entity.NStringEntity; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.io.entity.StringEntity; import org.junit.Before; import org.opensearch.common.ParseField; import org.opensearch.common.xcontent.NamedXContentRegistry; @@ -64,14 +64,14 @@ public void initClient() { public void testParseEntityCustomResponseSection() throws IOException { { - HttpEntity jsonEntity = new NStringEntity("{\"custom1\":{ \"field\":\"value\"}}", ContentType.APPLICATION_JSON); + HttpEntity jsonEntity = new StringEntity("{\"custom1\":{ \"field\":\"value\"}}", ContentType.APPLICATION_JSON); BaseCustomResponseSection customSection = restHighLevelClient.parseEntity(jsonEntity, BaseCustomResponseSection::fromXContent); assertThat(customSection, instanceOf(CustomResponseSection1.class)); CustomResponseSection1 customResponseSection1 = (CustomResponseSection1) customSection; assertEquals("value", customResponseSection1.value); } { - HttpEntity jsonEntity = new NStringEntity("{\"custom2\":{ \"array\": [\"item1\", \"item2\"]}}", ContentType.APPLICATION_JSON); + HttpEntity jsonEntity = new StringEntity("{\"custom2\":{ \"array\": [\"item1\", \"item2\"]}}", ContentType.APPLICATION_JSON); BaseCustomResponseSection customSection = restHighLevelClient.parseEntity(jsonEntity, BaseCustomResponseSection::fromXContent); assertThat(customSection, instanceOf(CustomResponseSection2.class)); CustomResponseSection2 customResponseSection2 = (CustomResponseSection2) customSection; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java index 4d989ff53df35..dc89b605be689 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java +++ 
b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java @@ -33,19 +33,6 @@ package org.opensearch.client; import com.fasterxml.jackson.core.JsonParseException; -import org.apache.http.HttpEntity; -import org.apache.http.HttpHost; -import org.apache.http.HttpResponse; -import org.apache.http.ProtocolVersion; -import org.apache.http.RequestLine; -import org.apache.http.StatusLine; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.entity.ContentType; -import org.apache.http.message.BasicHttpResponse; -import org.apache.http.message.BasicRequestLine; -import org.apache.http.message.BasicStatusLine; -import org.apache.http.nio.entity.NByteArrayEntity; -import org.apache.http.nio.entity.NStringEntity; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequest; @@ -87,6 +74,17 @@ import org.opensearch.test.InternalAggregationTestCase; import org.opensearch.test.rest.yaml.restspec.ClientYamlSuiteRestApi; import org.opensearch.test.rest.yaml.restspec.ClientYamlSuiteRestSpec; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.ProtocolVersion; +import org.apache.hc.core5.http.io.entity.ByteArrayEntity; +import org.apache.hc.core5.http.io.entity.StringEntity; +import org.apache.hc.core5.http.message.BasicClassicHttpResponse; +import org.apache.hc.core5.http.message.RequestLine; +import org.apache.hc.core5.http.message.StatusLine; import org.hamcrest.Matchers; import org.junit.Before; @@ -123,7 +121,7 @@ public class RestHighLevelClientTests extends OpenSearchTestCase { private static final String SUBMIT_TASK_PREFIX = "submit_"; private static final String SUBMIT_TASK_SUFFIX = "_task"; private static final ProtocolVersion HTTP_PROTOCOL = new ProtocolVersion("http", 1, 1); - private static final RequestLine REQUEST_LINE = new BasicRequestLine(HttpGet.METHOD_NAME, "/", HTTP_PROTOCOL); + private static final RequestLine REQUEST_LINE = new RequestLine(HttpGet.METHOD_NAME, "/", HTTP_PROTOCOL); /** * These APIs do not use a Request object (because they don't have a body, or any request parameters). 
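The test changes that follow reflect another HttpCore 5 difference: BasicStatusLine and BasicRequestLine are gone, StatusLine and RequestLine are plain concrete classes, and mock responses are built from BasicClassicHttpResponse, which carries its own status code and reason phrase. A standalone sketch of that construction, with arbitrary sample status and body rather than values from this change:

```java
import org.apache.hc.core5.http.ClassicHttpResponse;
import org.apache.hc.core5.http.ContentType;
import org.apache.hc.core5.http.ProtocolVersion;
import org.apache.hc.core5.http.io.entity.StringEntity;
import org.apache.hc.core5.http.message.BasicClassicHttpResponse;
import org.apache.hc.core5.http.message.StatusLine;

public class MockResponseConstruction {
    public static void main(String[] args) {
        // HttpCore 5: the response itself carries the status code and reason phrase,
        // so no separate BasicStatusLine is needed to build one.
        ClassicHttpResponse response = new BasicClassicHttpResponse(404, "Not Found");
        response.setEntity(new StringEntity("{\"error\":\"test error message\",\"status\":404}", ContentType.APPLICATION_JSON));
        // A StatusLine can still be derived when one is needed, e.g. for log output.
        StatusLine statusLine = new StatusLine(new ProtocolVersion("HTTP", 1, 1), response.getCode(), response.getReasonPhrase());
        System.out.println(statusLine); // e.g. HTTP/1.1 404 Not Found
    }
}
```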
@@ -258,7 +256,7 @@ private void mockResponse(ToXContent toXContent) throws IOException { Response response = mock(Response.class); ContentType contentType = ContentType.parse(RequestConverters.REQUEST_BODY_CONTENT_TYPE.mediaType()); String requestBody = toXContent(toXContent, RequestConverters.REQUEST_BODY_CONTENT_TYPE, false).utf8ToString(); - when(response.getEntity()).thenReturn(new NStringEntity(requestBody, contentType)); + when(response.getEntity()).thenReturn(new StringEntity(requestBody, contentType)); when(restClient.performRequest(any(Request.class))).thenReturn(response); } @@ -308,14 +306,14 @@ public void testParseEntity() throws IOException { { IllegalStateException ise = expectThrows( IllegalStateException.class, - () -> restHighLevelClient.parseEntity(new NStringEntity("", (ContentType) null), null) + () -> restHighLevelClient.parseEntity(new StringEntity("", (ContentType) null), null) ); assertEquals("OpenSearch didn't return the [Content-Type] header, unable to parse response body", ise.getMessage()); } { - NStringEntity entity = new NStringEntity("", ContentType.APPLICATION_SVG_XML); + StringEntity entity = new StringEntity("", ContentType.APPLICATION_SVG_XML); IllegalStateException ise = expectThrows(IllegalStateException.class, () -> restHighLevelClient.parseEntity(entity, null)); - assertEquals("Unsupported Content-Type: " + entity.getContentType().getValue(), ise.getMessage()); + assertEquals("Unsupported Content-Type: " + entity.getContentType(), ise.getMessage()); } { CheckedFunction entityParser = parser -> { @@ -326,9 +324,9 @@ public void testParseEntity() throws IOException { assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); return value; }; - HttpEntity jsonEntity = new NStringEntity("{\"field\":\"value\"}", ContentType.APPLICATION_JSON); + HttpEntity jsonEntity = new StringEntity("{\"field\":\"value\"}", ContentType.APPLICATION_JSON); assertEquals("value", restHighLevelClient.parseEntity(jsonEntity, entityParser)); - HttpEntity yamlEntity = new NStringEntity("---\nfield: value\n", ContentType.create("application/yaml")); + HttpEntity yamlEntity = new StringEntity("---\nfield: value\n", ContentType.create("application/yaml")); assertEquals("value", restHighLevelClient.parseEntity(yamlEntity, entityParser)); HttpEntity smileEntity = createBinaryEntity(SmileXContent.contentBuilder(), ContentType.create("application/smile")); assertEquals("value", restHighLevelClient.parseEntity(smileEntity, entityParser)); @@ -342,13 +340,13 @@ private static HttpEntity createBinaryEntity(XContentBuilder xContentBuilder, Co builder.startObject(); builder.field("field", "value"); builder.endObject(); - return new NByteArrayEntity(BytesReference.bytes(builder).toBytesRef().bytes, contentType); + return new ByteArrayEntity(BytesReference.bytes(builder).toBytesRef().bytes, contentType); } } public void testConvertExistsResponse() { RestStatus restStatus = randomBoolean() ? 
RestStatus.OK : randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); boolean result = RestHighLevelClient.convertExistsResponse(response); assertEquals(restStatus == RestStatus.OK, result); @@ -357,7 +355,7 @@ public void testConvertExistsResponse() { public void testParseResponseException() throws IOException { { RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(response); OpenSearchException openSearchException = restHighLevelClient.parseResponseException(responseException); @@ -367,9 +365,9 @@ public void testParseResponseException() throws IOException { } { RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); httpResponse.setEntity( - new NStringEntity( + new StringEntity( "{\"error\":\"test error message\",\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON ) @@ -383,8 +381,8 @@ public void testParseResponseException() throws IOException { } { RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); - httpResponse.setEntity(new NStringEntity("{\"error\":", ContentType.APPLICATION_JSON)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); + httpResponse.setEntity(new StringEntity("{\"error\":", ContentType.APPLICATION_JSON)); Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(response); OpenSearchException openSearchException = restHighLevelClient.parseResponseException(responseException); @@ -395,8 +393,8 @@ public void testParseResponseException() throws IOException { } { RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); - httpResponse.setEntity(new NStringEntity("{\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); + httpResponse.setEntity(new StringEntity("{\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON)); Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(response); OpenSearchException openSearchException = restHighLevelClient.parseResponseException(responseException); @@ -411,7 +409,7 @@ public void testPerformRequestOnSuccess() throws IOException { MainRequest mainRequest = new MainRequest(); CheckedFunction requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/"); RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new 
BasicHttpResponse(newStatusLine(restStatus)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); when(restClient.performRequest(any(Request.class))).thenReturn(mockResponse); { @@ -437,7 +435,7 @@ public void testPerformRequestOnSuccess() throws IOException { ); assertEquals( "Unable to parse response body for Response{requestLine=GET / http/1.1, host=http://localhost:9200, " - + "response=http/1.1 " + + "response=HTTP/1.1 " + restStatus.getStatus() + " " + restStatus.name() @@ -451,7 +449,7 @@ public void testPerformRequestOnResponseExceptionWithoutEntity() throws IOExcept MainRequest mainRequest = new MainRequest(); CheckedFunction requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/"); RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(mockResponse); when(restClient.performRequest(any(Request.class))).thenThrow(responseException); @@ -474,9 +472,9 @@ public void testPerformRequestOnResponseExceptionWithEntity() throws IOException MainRequest mainRequest = new MainRequest(); CheckedFunction requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/"); RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); httpResponse.setEntity( - new NStringEntity("{\"error\":\"test error message\",\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON) + new StringEntity("{\"error\":\"test error message\",\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON) ); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(mockResponse); @@ -500,8 +498,8 @@ public void testPerformRequestOnResponseExceptionWithBrokenEntity() throws IOExc MainRequest mainRequest = new MainRequest(); CheckedFunction requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/"); RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); - httpResponse.setEntity(new NStringEntity("{\"error\":", ContentType.APPLICATION_JSON)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); + httpResponse.setEntity(new StringEntity("{\"error\":", ContentType.APPLICATION_JSON)); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(mockResponse); when(restClient.performRequest(any(Request.class))).thenThrow(responseException); @@ -525,8 +523,8 @@ public void testPerformRequestOnResponseExceptionWithBrokenEntity2() throws IOEx MainRequest mainRequest = new MainRequest(); CheckedFunction requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/"); RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new 
BasicHttpResponse(newStatusLine(restStatus)); - httpResponse.setEntity(new NStringEntity("{\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); + httpResponse.setEntity(new StringEntity("{\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON)); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(mockResponse); when(restClient.performRequest(any(Request.class))).thenThrow(responseException); @@ -549,7 +547,7 @@ public void testPerformRequestOnResponseExceptionWithBrokenEntity2() throws IOEx public void testPerformRequestOnResponseExceptionWithIgnores() throws IOException { MainRequest mainRequest = new MainRequest(); CheckedFunction requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/"); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(RestStatus.NOT_FOUND.getStatus(), RestStatus.NOT_FOUND.name()); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(mockResponse); when(restClient.performRequest(any(Request.class))).thenThrow(responseException); @@ -569,7 +567,7 @@ public void testPerformRequestOnResponseExceptionWithIgnores() throws IOExceptio public void testPerformRequestOnResponseExceptionWithIgnoresErrorNoBody() throws IOException { MainRequest mainRequest = new MainRequest(); CheckedFunction requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/"); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(RestStatus.NOT_FOUND.getStatus(), RestStatus.NOT_FOUND.name()); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(mockResponse); when(restClient.performRequest(any(Request.class))).thenThrow(responseException); @@ -591,8 +589,8 @@ public void testPerformRequestOnResponseExceptionWithIgnoresErrorNoBody() throws public void testPerformRequestOnResponseExceptionWithIgnoresErrorValidBody() throws IOException { MainRequest mainRequest = new MainRequest(); CheckedFunction requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/"); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND)); - httpResponse.setEntity(new NStringEntity("{\"error\":\"test error message\",\"status\":404}", ContentType.APPLICATION_JSON)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(RestStatus.NOT_FOUND.getStatus(), RestStatus.NOT_FOUND.name()); + httpResponse.setEntity(new StringEntity("{\"error\":\"test error message\",\"status\":404}", ContentType.APPLICATION_JSON)); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(mockResponse); when(restClient.performRequest(any(Request.class))).thenThrow(responseException); @@ -620,7 +618,7 @@ public void testWrapResponseListenerOnSuccess() { Collections.emptySet() ); RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); + 
         ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name());
         responseListener.onSuccess(new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse));
         assertNull(trackingActionListener.exception.get());
         assertEquals(restStatus.getStatus(), trackingActionListener.statusCode.get());
@@ -633,13 +631,13 @@ public void testWrapResponseListenerOnSuccess() {
             Collections.emptySet()
         );
         RestStatus restStatus = randomFrom(RestStatus.values());
-        HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
+        ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name());
         responseListener.onSuccess(new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse));
         assertThat(trackingActionListener.exception.get(), instanceOf(IOException.class));
         IOException ioe = (IOException) trackingActionListener.exception.get();
         assertEquals(
             "Unable to parse response body for Response{requestLine=GET / http/1.1, host=http://localhost:9200, "
-                + "response=http/1.1 "
+                + "response=HTTP/1.1 "
                 + restStatus.getStatus()
                 + " "
                 + restStatus.name()
@@ -670,7 +668,7 @@ public void testWrapResponseListenerOnResponseExceptionWithoutEntity() throws IO
             Collections.emptySet()
         );
         RestStatus restStatus = randomFrom(RestStatus.values());
-        HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
+        ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name());
         Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
         ResponseException responseException = new ResponseException(response);
         responseListener.onFailure(responseException);
@@ -689,9 +687,9 @@ public void testWrapResponseListenerOnResponseExceptionWithEntity() throws IOExc
             Collections.emptySet()
         );
         RestStatus restStatus = randomFrom(RestStatus.values());
-        HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
+        ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name());
         httpResponse.setEntity(
-            new NStringEntity("{\"error\":\"test error message\",\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON)
+            new StringEntity("{\"error\":\"test error message\",\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON)
         );
         Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
         ResponseException responseException = new ResponseException(response);
@@ -712,8 +710,8 @@ public void testWrapResponseListenerOnResponseExceptionWithBrokenEntity() throws
             Collections.emptySet()
         );
         RestStatus restStatus = randomFrom(RestStatus.values());
-        HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
-        httpResponse.setEntity(new NStringEntity("{\"error\":", ContentType.APPLICATION_JSON));
+        ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name());
+        httpResponse.setEntity(new StringEntity("{\"error\":", ContentType.APPLICATION_JSON));
         Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
         ResponseException responseException = new ResponseException(response);
         responseListener.onFailure(responseException);
@@ -732,8 +730,8 @@ public void testWrapResponseListenerOnResponseExceptionWithBrokenEntity() throws
             Collections.emptySet()
         );
         RestStatus restStatus = randomFrom(RestStatus.values());
-        HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
-        httpResponse.setEntity(new NStringEntity("{\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON));
+        ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name());
+        httpResponse.setEntity(new StringEntity("{\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON));
         Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
         ResponseException responseException = new ResponseException(response);
         responseListener.onFailure(responseException);
@@ -753,7 +751,7 @@ public void testWrapResponseListenerOnResponseExceptionWithIgnores() throws IOEx
             trackingActionListener,
             Collections.singleton(404)
         );
-        HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND));
+        ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(RestStatus.NOT_FOUND.getStatus(), RestStatus.NOT_FOUND.name());
         Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
         ResponseException responseException = new ResponseException(response);
         responseListener.onFailure(responseException);
@@ -771,7 +769,7 @@ public void testWrapResponseListenerOnResponseExceptionWithIgnoresErrorNoBody()
             trackingActionListener,
             Collections.singleton(404)
         );
-        HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND));
+        ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(RestStatus.NOT_FOUND.getStatus(), RestStatus.NOT_FOUND.name());
         Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
         ResponseException responseException = new ResponseException(response);
         responseListener.onFailure(responseException);
@@ -791,8 +789,8 @@ public void testWrapResponseListenerOnResponseExceptionWithIgnoresErrorValidBody
             trackingActionListener,
             Collections.singleton(404)
         );
-        HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND));
-        httpResponse.setEntity(new NStringEntity("{\"error\":\"test error message\",\"status\":404}", ContentType.APPLICATION_JSON));
+        ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(RestStatus.NOT_FOUND.getStatus(), RestStatus.NOT_FOUND.name());
+        httpResponse.setEntity(new StringEntity("{\"error\":\"test error message\",\"status\":404}", ContentType.APPLICATION_JSON));
         Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
         ResponseException responseException = new ResponseException(response);
         responseListener.onFailure(responseException);
@@ -893,7 +891,8 @@ public void testApiNamingConventions() throws Exception {
             "cluster.get_weighted_routing",
             "cluster.delete_weighted_routing",
             "cluster.put_decommission_awareness",
-            "cluster.get_decommission_awareness", };
+            "cluster.get_decommission_awareness",
+            "cluster.delete_decommission_awareness", };
         List<String> booleanReturnMethods = Arrays.asList("security.enable_user", "security.disable_user", "security.change_password");
         Set<String> deprecatedMethods = new HashSet<>();
         deprecatedMethods.add("indices.force_merge");
@@ -1006,37 +1005,34 @@ private static void assertSyncMethod(Method method, String apiName, List
         }
         assertEquals("incorrect number of exceptions for method [" + method + "]", 1, method.getExceptionTypes().length);
+        final Class<?>[] parameterTypes = method.getParameterTypes();
         // a few methods don't accept a request object as argument
         if (APIS_WITHOUT_REQUEST_OBJECT.contains(apiName)) {
-            assertEquals("incorrect number of arguments for method [" + method + "]", 1, method.getParameterTypes().length);
-            assertThat(
-                "the parameter to method [" + method + "] is the wrong type",
-                method.getParameterTypes()[0],
-                equalTo(RequestOptions.class)
-            );
+            assertEquals("incorrect number of arguments for method [" + method + "]", 1, method.getParameterCount());
+            assertThat("the parameter to method [" + method + "] is the wrong type", parameterTypes[0], equalTo(RequestOptions.class));
         } else {
-            assertEquals("incorrect number of arguments for method [" + method + "]", 2, method.getParameterTypes().length);
+            assertEquals("incorrect number of arguments for method [" + method + "]", 2, method.getParameterCount());
             // This is no longer true for all methods. Some methods can contain these 2 args backwards because of deprecation
-            if (method.getParameterTypes()[0].equals(RequestOptions.class)) {
+            if (parameterTypes[0].equals(RequestOptions.class)) {
                 assertThat(
                     "the first parameter to method [" + method + "] is the wrong type",
-                    method.getParameterTypes()[0],
+                    parameterTypes[0],
                     equalTo(RequestOptions.class)
                 );
                 assertThat(
                     "the second parameter to method [" + method + "] is the wrong type",
-                    method.getParameterTypes()[1].getSimpleName(),
+                    parameterTypes[1].getSimpleName(),
                     endsWith("Request")
                 );
             } else {
                 assertThat(
                     "the first parameter to method [" + method + "] is the wrong type",
-                    method.getParameterTypes()[0].getSimpleName(),
+                    parameterTypes[0].getSimpleName(),
                     endsWith("Request")
                 );
                 assertThat(
                     "the second parameter to method [" + method + "] is the wrong type",
-                    method.getParameterTypes()[1],
+                    parameterTypes[1],
                     equalTo(RequestOptions.class)
                 );
             }
@@ -1050,39 +1046,40 @@ private static void assertAsyncMethod(Map<String, Set<Method>> methods, Method m
         );
         assertThat("async method [" + method + "] should return Cancellable", method.getReturnType(), equalTo(Cancellable.class));
         assertEquals("async method [" + method + "] should not throw any exceptions", 0, method.getExceptionTypes().length);
+        final Class<?>[] parameterTypes = method.getParameterTypes();
         if (APIS_WITHOUT_REQUEST_OBJECT.contains(apiName.replaceAll("_async$", ""))) {
-            assertEquals(2, method.getParameterTypes().length);
-            assertThat(method.getParameterTypes()[0], equalTo(RequestOptions.class));
-            assertThat(method.getParameterTypes()[1], equalTo(ActionListener.class));
+            assertEquals(2, parameterTypes.length);
+            assertThat(parameterTypes[0], equalTo(RequestOptions.class));
+            assertThat(parameterTypes[1], equalTo(ActionListener.class));
         } else {
-            assertEquals("async method [" + method + "] has the wrong number of arguments", 3, method.getParameterTypes().length);
+            assertEquals("async method [" + method + "] has the wrong number of arguments", 3, method.getParameterCount());
             // This is no longer true for all methods. Some methods can contain these 2 args backwards because of deprecation
-            if (method.getParameterTypes()[0].equals(RequestOptions.class)) {
+            if (parameterTypes[0].equals(RequestOptions.class)) {
                 assertThat(
                     "the first parameter to async method [" + method + "] should be a request type",
-                    method.getParameterTypes()[0],
+                    parameterTypes[0],
                     equalTo(RequestOptions.class)
                 );
                 assertThat(
                     "the second parameter to async method [" + method + "] is the wrong type",
-                    method.getParameterTypes()[1].getSimpleName(),
+                    parameterTypes[1].getSimpleName(),
                     endsWith("Request")
                 );
             } else {
                 assertThat(
                     "the first parameter to async method [" + method + "] should be a request type",
-                    method.getParameterTypes()[0].getSimpleName(),
+                    parameterTypes[0].getSimpleName(),
                     endsWith("Request")
                 );
                 assertThat(
                     "the second parameter to async method [" + method + "] is the wrong type",
-                    method.getParameterTypes()[1],
+                    parameterTypes[1],
                     equalTo(RequestOptions.class)
                 );
             }
             assertThat(
                 "the third parameter to async method [" + method + "] is the wrong type",
-                method.getParameterTypes()[2],
+                parameterTypes[2],
                 equalTo(ActionListener.class)
             );
         }
@@ -1095,16 +1092,17 @@ private static void assertSubmitTaskMethod(
         ClientYamlSuiteRestSpec restSpec
     ) {
         String methodName = extractMethodName(apiName);
+        final Class<?>[] parameterTypes = method.getParameterTypes();
         assertTrue("submit task method [" + method.getName() + "] doesn't have corresponding sync method", methods.containsKey(methodName));
-        assertEquals("submit task method [" + method + "] has the wrong number of arguments", 2, method.getParameterTypes().length);
+        assertEquals("submit task method [" + method + "] has the wrong number of arguments", 2, method.getParameterCount());
         assertThat(
             "the first parameter to submit task method [" + method + "] is the wrong type",
-            method.getParameterTypes()[0].getSimpleName(),
+            parameterTypes[0].getSimpleName(),
             endsWith("Request")
         );
         assertThat(
             "the second parameter to submit task method [" + method + "] is the wrong type",
-            method.getParameterTypes()[1],
+            parameterTypes[1],
             equalTo(RequestOptions.class)
         );
@@ -1163,6 +1161,6 @@ public void onFailure(Exception e) {
     }

     private static StatusLine newStatusLine(RestStatus restStatus) {
-        return new BasicStatusLine(HTTP_PROTOCOL, restStatus.getStatus(), restStatus.name());
+        return new StatusLine(HTTP_PROTOCOL, restStatus.getStatus(), restStatus.name());
     }
 }
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java
index 8b509e5d19e92..cc6f08217d057 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java
@@ -32,8 +32,6 @@

 package org.opensearch.client;

-import org.apache.http.client.methods.HttpPost;
-import org.apache.http.client.methods.HttpPut;
 import org.opensearch.OpenSearchException;
 import org.opensearch.OpenSearchStatusException;
 import org.opensearch.action.explain.ExplainRequest;
@@ -101,6 +99,8 @@ import org.opensearch.search.suggest.Suggest;
 import org.opensearch.search.suggest.SuggestBuilder;
 import org.opensearch.search.suggest.phrase.PhraseSuggestionBuilder;
+import org.apache.hc.client5.http.classic.methods.HttpPost;
+import org.apache.hc.client5.http.classic.methods.HttpPut;
 import org.hamcrest.Matchers;
 import org.junit.Before;
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotRequestConvertersTests.java
index 10baaa2e53dd4..e86de6ba718f9 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotRequestConvertersTests.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotRequestConvertersTests.java
@@ -32,10 +32,10 @@

 package org.opensearch.client;

-import org.apache.http.client.methods.HttpDelete;
-import org.apache.http.client.methods.HttpGet;
-import org.apache.http.client.methods.HttpPost;
-import org.apache.http.client.methods.HttpPut;
+import org.apache.hc.client5.http.classic.methods.HttpDelete;
+import org.apache.hc.client5.http.classic.methods.HttpGet;
+import org.apache.hc.client5.http.classic.methods.HttpPost;
+import org.apache.hc.client5.http.classic.methods.HttpPut;
 import org.opensearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest;
 import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
 import org.opensearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/TasksRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/TasksRequestConvertersTests.java
index 64fec3c8fb810..a777bbc5d1868 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/TasksRequestConvertersTests.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/TasksRequestConvertersTests.java
@@ -32,8 +32,8 @@

 package org.opensearch.client;

-import org.apache.http.client.methods.HttpGet;
-import org.apache.http.client.methods.HttpPost;
+import org.apache.hc.client5.http.classic.methods.HttpGet;
+import org.apache.hc.client5.http.classic.methods.HttpPost;
 import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
 import org.opensearch.client.tasks.CancelTasksRequest;
 import org.opensearch.tasks.TaskId;
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/CRUDDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/CRUDDocumentationIT.java
index 959c5a827f143..c63b311feebc7 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/CRUDDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/CRUDDocumentationIT.java
@@ -32,7 +32,7 @@

 package org.opensearch.client.documentation;

-import org.apache.http.HttpHost;
+import org.apache.hc.core5.http.HttpHost;
 import org.opensearch.OpenSearchException;
 import org.opensearch.action.ActionListener;
 import org.opensearch.action.DocWriteRequest;
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/MiscellaneousDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/MiscellaneousDocumentationIT.java
index 0213441a0b6a7..3edf639da8867 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/MiscellaneousDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/MiscellaneousDocumentationIT.java
@@ -32,7 +32,7 @@

 package org.opensearch.client.documentation;

-import org.apache.http.HttpHost;
+import org.apache.hc.core5.http.HttpHost;
 import org.opensearch.client.OpenSearchRestHighLevelClientTestCase;
 import org.opensearch.client.RequestOptions;
 import org.opensearch.client.RestClient;
@@ -92,8 +92,8 @@ public void testInitializationFromClientBuilder() throws IOException {
         //tag::rest-high-level-client-init
         RestHighLevelClient client = new RestHighLevelClient(
             RestClient.builder(
-                new HttpHost("localhost", 9200, "http"),
-                new HttpHost("localhost", 9201, "http")));
+                new HttpHost("http", "localhost", 9200),
+                new HttpHost("http", "localhost", 9201)));
         //end::rest-high-level-client-init

         //tag::rest-high-level-client-close
diff --git a/client/rest/build.gradle b/client/rest/build.gradle
index 01c186ed83fc2..eacef14d17ce2 100644
--- a/client/rest/build.gradle
+++ b/client/rest/build.gradle
@@ -40,12 +40,12 @@ group = 'org.opensearch.client'
 archivesBaseName = 'opensearch-rest-client'

 dependencies {
-  api "org.apache.httpcomponents:httpclient:${versions.httpclient}"
-  api "org.apache.httpcomponents:httpcore:${versions.httpcore}"
-  api "org.apache.httpcomponents:httpasyncclient:${versions.httpasyncclient}"
-  api "org.apache.httpcomponents:httpcore-nio:${versions.httpcore}"
+  api "org.apache.httpcomponents.client5:httpclient5:${versions.httpclient5}"
+  api "org.apache.httpcomponents.core5:httpcore5:${versions.httpcore5}"
+  api "org.apache.httpcomponents.core5:httpcore5-h2:${versions.httpcore5}"
   api "commons-codec:commons-codec:${versions.commonscodec}"
   api "commons-logging:commons-logging:${versions.commonslogging}"
+  api "org.slf4j:slf4j-api:${versions.slf4j}"

   testImplementation project(":client:test")
   testImplementation "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}"
@@ -54,6 +54,10 @@ dependencies {
   testImplementation "org.mockito:mockito-core:${versions.mockito}"
   testImplementation "org.objenesis:objenesis:${versions.objenesis}"
   testImplementation "net.bytebuddy:byte-buddy:${versions.bytebuddy}"
+  testImplementation "org.apache.logging.log4j:log4j-api:${versions.log4j}"
+  testImplementation "org.apache.logging.log4j:log4j-core:${versions.log4j}"
+  testImplementation "org.apache.logging.log4j:log4j-jul:${versions.log4j}"
+  testImplementation "org.apache.logging.log4j:log4j-slf4j-impl:${versions.log4j}"
 }

 tasks.withType(CheckForbiddenApis).configureEach {
@@ -85,6 +89,10 @@ testingConventions {
 }

 thirdPartyAudit.ignoreMissingClasses(
+  'org.conscrypt.Conscrypt',
+  'org.slf4j.impl.StaticLoggerBinder',
+  'org.slf4j.impl.StaticMDCBinder',
+  'org.slf4j.impl.StaticMarkerBinder',
   //commons-logging optional dependencies
   'org.apache.avalon.framework.logger.Logger',
   'org.apache.log.Hierarchy',
diff --git a/client/rest/licenses/httpasyncclient-4.1.5.jar.sha1 b/client/rest/licenses/httpasyncclient-4.1.5.jar.sha1
deleted file mode 100644
index 366a9e31069a6..0000000000000
--- a/client/rest/licenses/httpasyncclient-4.1.5.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-cd18227f1eb8e9a263286c1d7362ceb24f6f9b32
\ No newline at end of file
diff --git a/client/rest/licenses/httpasyncclient-LICENSE.txt b/client/rest/licenses/httpasyncclient-LICENSE.txt
deleted file mode 100644
index 2c41ec88f61cf..0000000000000
--- a/client/rest/licenses/httpasyncclient-LICENSE.txt
+++ /dev/null
@@ -1,182 +0,0 @@
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - -This project contains annotations derived from JCIP-ANNOTATIONS -Copyright (c) 2005 Brian Goetz and Tim Peierls. -See http://www.jcip.net and the Creative Commons Attribution License -(http://creativecommons.org/licenses/by/2.5) - diff --git a/client/rest/licenses/httpasyncclient-NOTICE.txt b/client/rest/licenses/httpasyncclient-NOTICE.txt deleted file mode 100644 index b45be98d168a4..0000000000000 --- a/client/rest/licenses/httpasyncclient-NOTICE.txt +++ /dev/null @@ -1,5 +0,0 @@ -Apache HttpComponents AsyncClient -Copyright 2010-2016 The Apache Software Foundation - -This product includes software developed at -The Apache Software Foundation (http://www.apache.org/). 
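Reviewer note: the test churn in RestHighLevelClientTests above is one mechanical substitution, not a behavioral change. In HttpCore 5.x a response no longer wraps a `StatusLine`: `BasicClassicHttpResponse` takes the status code and reason phrase directly, and the NIO-specific `NStringEntity` is replaced by the plain `StringEntity`, which now covers both blocking and async transports. A minimal before/after sketch of the pattern (the 404 status and JSON body are illustrative placeholders, not values from this PR):

```java
import org.apache.hc.core5.http.ClassicHttpResponse;
import org.apache.hc.core5.http.ContentType;
import org.apache.hc.core5.http.io.entity.StringEntity;
import org.apache.hc.core5.http.message.BasicClassicHttpResponse;

class ResponseMigrationSketch {
    // HttpComponents 4.x built the same stub from a status line plus an NIO entity:
    //   HttpResponse r = new BasicHttpResponse(
    //       new BasicStatusLine(new ProtocolVersion("http", 1, 1), 404, "Not Found"));
    //   r.setEntity(new NStringEntity("{\"error\":\"not found\"}", ContentType.APPLICATION_JSON));
    static ClassicHttpResponse stubNotFound() {
        // 5.x: code and reason phrase go straight into the constructor.
        ClassicHttpResponse response = new BasicClassicHttpResponse(404, "Not Found");
        // 5.x: the blocking StringEntity replaces NStringEntity for JSON bodies.
        response.setEntity(new StringEntity("{\"error\":\"not found\"}", ContentType.APPLICATION_JSON));
        return response;
    }
}
```

This is also why `newStatusLine(...)` shrank to constructing `StatusLine` directly: in 5.x `StatusLine` is a concrete class, so the `BasicStatusLine` implementation type disappears.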
diff --git a/client/rest/licenses/httpclient-4.5.13.jar.sha1 b/client/rest/licenses/httpclient-4.5.13.jar.sha1 deleted file mode 100644 index 3281e21595b39..0000000000000 --- a/client/rest/licenses/httpclient-4.5.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e5f6cae5ca7ecaac1ec2827a9e2d65ae2869cada \ No newline at end of file diff --git a/client/rest/licenses/httpclient5-5.1.3.jar.sha1 b/client/rest/licenses/httpclient5-5.1.3.jar.sha1 new file mode 100644 index 0000000000000..b18cf050ac8df --- /dev/null +++ b/client/rest/licenses/httpclient5-5.1.3.jar.sha1 @@ -0,0 +1 @@ +13c984b7b881afcff3a7f0bb95878724a48a4b66 \ No newline at end of file diff --git a/client/rest/licenses/httpclient-LICENSE.txt b/client/rest/licenses/httpclient5-LICENSE.txt similarity index 100% rename from client/rest/licenses/httpclient-LICENSE.txt rename to client/rest/licenses/httpclient5-LICENSE.txt diff --git a/client/rest/licenses/httpclient-NOTICE.txt b/client/rest/licenses/httpclient5-NOTICE.txt similarity index 72% rename from client/rest/licenses/httpclient-NOTICE.txt rename to client/rest/licenses/httpclient5-NOTICE.txt index 91e5c40c4c6d3..afee7c6e6880b 100644 --- a/client/rest/licenses/httpclient-NOTICE.txt +++ b/client/rest/licenses/httpclient5-NOTICE.txt @@ -1,5 +1,5 @@ Apache HttpComponents Client -Copyright 1999-2016 The Apache Software Foundation +Copyright 1999-2022 The Apache Software Foundation This product includes software developed at The Apache Software Foundation (http://www.apache.org/). diff --git a/client/rest/licenses/httpcore-4.4.15.jar.sha1 b/client/rest/licenses/httpcore-4.4.15.jar.sha1 deleted file mode 100644 index 42a03b5d7a376..0000000000000 --- a/client/rest/licenses/httpcore-4.4.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7f2e0c573eaa7a74bac2e89b359e1f73d92a0a1d \ No newline at end of file diff --git a/client/rest/licenses/httpcore-LICENSE.txt b/client/rest/licenses/httpcore-LICENSE.txt deleted file mode 100644 index e454a52586f29..0000000000000 --- a/client/rest/licenses/httpcore-LICENSE.txt +++ /dev/null @@ -1,178 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - diff --git a/client/rest/licenses/httpcore-nio-4.4.15.jar.sha1 b/client/rest/licenses/httpcore-nio-4.4.15.jar.sha1 deleted file mode 100644 index 251b35ab6a1a5..0000000000000 --- a/client/rest/licenses/httpcore-nio-4.4.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -85d2b6825d42db909a1474f0ffbd6328429b7a32 \ No newline at end of file diff --git a/client/rest/licenses/httpcore5-5.1.4.jar.sha1 b/client/rest/licenses/httpcore5-5.1.4.jar.sha1 new file mode 100644 index 0000000000000..c8981fb933736 --- /dev/null +++ b/client/rest/licenses/httpcore5-5.1.4.jar.sha1 @@ -0,0 +1 @@ +92538a62a4aacf96c9ea8992346a453e83da85fc \ No newline at end of file diff --git a/client/rest/licenses/httpcore5-LICENSE.txt b/client/rest/licenses/httpcore5-LICENSE.txt new file mode 100644 index 0000000000000..32f01eda18fe9 --- /dev/null +++ b/client/rest/licenses/httpcore5-LICENSE.txt @@ -0,0 +1,558 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + +========================================================================= + +This project includes Public Suffix List copied from + +licensed under the terms of the Mozilla Public License, v. 2.0 + +Full license text: + +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. 
"Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. 
+ +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. 
Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/client/rest/licenses/httpcore-nio-NOTICE.txt b/client/rest/licenses/httpcore5-NOTICE.txt similarity index 56% rename from client/rest/licenses/httpcore-nio-NOTICE.txt rename to client/rest/licenses/httpcore5-NOTICE.txt index a2e17bb60009f..afee7c6e6880b 100644 --- a/client/rest/licenses/httpcore-nio-NOTICE.txt +++ b/client/rest/licenses/httpcore5-NOTICE.txt @@ -1,8 +1,6 @@ - -Apache HttpCore NIO -Copyright 2005-2016 The Apache Software Foundation +Apache HttpComponents Client +Copyright 1999-2022 The Apache Software Foundation This product includes software developed at The Apache Software Foundation (http://www.apache.org/). - diff --git a/client/rest/licenses/httpcore5-h2-5.1.4.jar.sha1 b/client/rest/licenses/httpcore5-h2-5.1.4.jar.sha1 new file mode 100644 index 0000000000000..2369ee9dfb7e1 --- /dev/null +++ b/client/rest/licenses/httpcore5-h2-5.1.4.jar.sha1 @@ -0,0 +1 @@ +04de79e0bb34d65c86e4d163ae2f45d53746b70d \ No newline at end of file diff --git a/client/rest/licenses/httpcore5-h2-LICENSE.txt b/client/rest/licenses/httpcore5-h2-LICENSE.txt new file mode 100644 index 0000000000000..32f01eda18fe9 --- /dev/null +++ b/client/rest/licenses/httpcore5-h2-LICENSE.txt @@ -0,0 +1,558 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+=========================================================================
+
+This project includes Public Suffix List copied from
+<https://publicsuffix.org/list/effective_tld_names.dat>
+licensed under the terms of the Mozilla Public License, v. 2.0
+
+Full license text:
+
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+    means each individual or legal entity that creates, contributes to
+    the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+    means the combination of the Contributions of others (if any) used
+    by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+    means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+    means Source Code Form to which the initial Contributor has attached
+    the notice in Exhibit A, the Executable Form of such Source Code
+    Form, and Modifications of such Source Code Form, in each case
+    including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+    means
+
+    (a) that the initial Contributor has attached the notice described
+        in Exhibit B to the Covered Software; or
+
+    (b) that the Covered Software was made available under the terms of
+        version 1.1 or earlier of the License, but not also under the
+        terms of a Secondary License.
+
+1.6. "Executable Form"
+    means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+    means a work that combines Covered Software with other material, in
+    a separate file or files, that is not Covered Software.
+
+1.8. "License"
+    means this document.
+
+1.9. "Licensable"
+    means having the right to grant, to the maximum extent possible,
+    whether at the time of the initial grant or subsequently, any and
+    all of the rights conveyed by this License.
+
+1.10. "Modifications"
+    means any of the following:
+
+    (a) any file in Source Code Form that results from an addition to,
+        deletion from, or modification of the contents of Covered
+        Software; or
+
+    (b) any new file in Source Code Form that contains any Covered
+        Software.
+
+1.11.
"Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. 
+ +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. 
Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/client/rest/licenses/httpcore-NOTICE.txt b/client/rest/licenses/httpcore5-h2-NOTICE.txt similarity index 55% rename from client/rest/licenses/httpcore-NOTICE.txt rename to client/rest/licenses/httpcore5-h2-NOTICE.txt index 013448d3e9561..afee7c6e6880b 100644 --- a/client/rest/licenses/httpcore-NOTICE.txt +++ b/client/rest/licenses/httpcore5-h2-NOTICE.txt @@ -1,5 +1,6 @@ -Apache HttpComponents Core -Copyright 2005-2016 The Apache Software Foundation +Apache HttpComponents Client +Copyright 1999-2022 The Apache Software Foundation This product includes software developed at The Apache Software Foundation (http://www.apache.org/). + diff --git a/client/rest/licenses/slf4j-api-1.7.36.jar.sha1 b/client/rest/licenses/slf4j-api-1.7.36.jar.sha1 new file mode 100644 index 0000000000000..77b9917528382 --- /dev/null +++ b/client/rest/licenses/slf4j-api-1.7.36.jar.sha1 @@ -0,0 +1 @@ +6c62681a2f655b49963a5983b8b0950a6120ae14 \ No newline at end of file diff --git a/client/rest/licenses/slf4j-api-LICENSE.txt b/client/rest/licenses/slf4j-api-LICENSE.txt new file mode 100644 index 0000000000000..8fda22f4d72f6 --- /dev/null +++ b/client/rest/licenses/slf4j-api-LICENSE.txt @@ -0,0 +1,21 @@ +Copyright (c) 2004-2014 QOS.ch +All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
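
The hunks that follow rework the low-level REST client from Apache HttpComponents 4.x
(org.apache.http.*) to HttpComponents 5.x (org.apache.hc.*): Cancellable now implements
org.apache.hc.core5.concurrent.Cancellable and delegates to a CancellableDependency,
NStringEntity gives way to StringEntity, and the 4.x-only helpers
(HeapBufferedAsyncResponseConsumer, HttpDeleteWithEntity, HttpGetWithEntity,
PersistentCredentialsAuthenticationStrategy) are removed or relocated. For orientation,
here is a minimal sketch of calling code against the migrated client; it is not part of
this change, and the localhost:9200 address, endpoint, and query body are hypothetical
placeholders:

    import org.apache.hc.core5.http.ContentType;
    import org.apache.hc.core5.http.HttpHost;
    import org.apache.hc.core5.http.io.entity.StringEntity;
    import org.opensearch.client.Cancellable;
    import org.opensearch.client.Request;
    import org.opensearch.client.Response;
    import org.opensearch.client.ResponseListener;
    import org.opensearch.client.RestClient;

    public class AsyncCancelSketch {
        public static void main(String[] args) throws Exception {
            // HttpHost now comes from org.apache.hc.core5.http, not org.apache.http.
            try (RestClient client = RestClient.builder(new HttpHost("http", "localhost", 9200)).build()) {
                Request request = new Request("GET", "/_search");
                // StringEntity replaces the removed NStringEntity for JSON request bodies.
                request.setEntity(new StringEntity("{\"query\":{\"match_all\":{}}}", ContentType.APPLICATION_JSON));

                Cancellable cancellable = client.performRequestAsync(request, new ResponseListener() {
                    @Override
                    public void onSuccess(Response response) {
                        // consume the response
                    }

                    @Override
                    public void onFailure(Exception exception) {
                        // handle the failure
                    }
                });

                // cancel() now returns a boolean and delegates to CancellableDependency#cancel(),
                // rather than the 4.x AbstractExecutionAwareRequest#abort().
                boolean cancelled = cancellable.cancel();
                System.out.println("cancelled: " + cancelled);
            }
        }
    }
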
diff --git a/client/rest/licenses/slf4j-api-NOTICE.txt b/client/rest/licenses/slf4j-api-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/client/rest/src/main/java/org/opensearch/client/Cancellable.java b/client/rest/src/main/java/org/opensearch/client/Cancellable.java index 4bfc0704227aa..56e31a3742f35 100644 --- a/client/rest/src/main/java/org/opensearch/client/Cancellable.java +++ b/client/rest/src/main/java/org/opensearch/client/Cancellable.java @@ -31,24 +31,26 @@ package org.opensearch.client; -import org.apache.http.client.methods.AbstractExecutionAwareRequest; -import org.apache.http.client.methods.HttpRequestBase; +import org.apache.hc.client5.http.classic.methods.HttpUriRequestBase; +import org.apache.hc.core5.concurrent.CancellableDependency; import java.util.concurrent.CancellationException; /** * Represents an operation that can be cancelled. * Returned when executing async requests through {@link RestClient#performRequestAsync(Request, ResponseListener)}, so that the request - * can be cancelled if needed. Cancelling a request will result in calling {@link AbstractExecutionAwareRequest#abort()} on the underlying + * can be cancelled if needed. Cancelling a request will result in calling {@link CancellableDependency#cancel()} on the underlying * request object, which will in turn cancel its corresponding {@link java.util.concurrent.Future}. * Note that cancelling a request does not automatically translate to aborting its execution on the server side, which needs to be * specifically implemented in each API. */ -public class Cancellable { +public class Cancellable implements org.apache.hc.core5.concurrent.Cancellable { static final Cancellable NO_OP = new Cancellable(null) { @Override - public void cancel() {} + public boolean cancel() { + throw new UnsupportedOperationException(); + } @Override void runIfNotCancelled(Runnable runnable) { @@ -56,13 +58,13 @@ void runIfNotCancelled(Runnable runnable) { } }; - static Cancellable fromRequest(HttpRequestBase httpRequest) { + static Cancellable fromRequest(CancellableDependency httpRequest) { return new Cancellable(httpRequest); } - private final HttpRequestBase httpRequest; + private final CancellableDependency httpRequest; - private Cancellable(HttpRequestBase httpRequest) { + private Cancellable(CancellableDependency httpRequest) { this.httpRequest = httpRequest; } @@ -70,15 +72,15 @@ private Cancellable(HttpRequestBase httpRequest) { * Cancels the on-going request that is associated with the current instance of {@link Cancellable}. * */ - public synchronized void cancel() { - this.httpRequest.abort(); + public synchronized boolean cancel() { + return this.httpRequest.cancel(); } /** * Executes some arbitrary code iff the on-going request has not been cancelled, otherwise throws {@link CancellationException}. * This is needed to guarantee that cancelling a request works correctly even in case {@link #cancel()} is called between different - * attempts of the same request. The low-level client reuses the same instance of the {@link AbstractExecutionAwareRequest} by calling - * {@link AbstractExecutionAwareRequest#reset()} between subsequent retries. The {@link #cancel()} method can be called at anytime, + * attempts of the same request. The low-level client reuses the same instance of the {@link CancellableDependency} by calling + * {@link HttpUriRequestBase#reset()} between subsequent retries. 
The {@link #cancel()} method can be called at any time,
     * and we need to handle the case where it gets called while there is no request being executed as one attempt may have failed and
     * the subsequent attempt has not been started yet.
     * If the request has already been cancelled we don't go ahead with the next attempt, and artificially raise the
@@ -87,7 +89,7 @@ public synchronized void cancel() {
     * when there is no future to cancel, which would make cancelling the request a no-op.
     */
    synchronized void runIfNotCancelled(Runnable runnable) {
-        if (this.httpRequest.isAborted()) {
+        if (this.httpRequest.isCancelled()) {
            throw newCancellationException();
        }
        runnable.run();
diff --git a/client/rest/src/main/java/org/opensearch/client/HasAttributeNodeSelector.java b/client/rest/src/main/java/org/opensearch/client/HasAttributeNodeSelector.java
index e6005c207ec93..0a54dbaf30364 100644
--- a/client/rest/src/main/java/org/opensearch/client/HasAttributeNodeSelector.java
+++ b/client/rest/src/main/java/org/opensearch/client/HasAttributeNodeSelector.java
@@ -57,6 +57,10 @@ public HasAttributeNodeSelector(String key, String value) {
         this.value = value;
     }
 
+    /**
+     * Select the {@link Node}s to which to send requests.
+     * @param nodes the {@link Node}s targeted for sending requests
+     */
     @Override
     public void select(Iterable<Node> nodes) {
         Iterator<Node> itr = nodes.iterator();
@@ -70,6 +74,10 @@ public void select(Iterable<Node> nodes) {
         }
     }
 
+    /**
+     * Compare two node selectors for equality
+     * @param o node selector instance to compare with
+     */
     @Override
     public boolean equals(Object o) {
         if (this == o) {
@@ -82,11 +90,17 @@ public boolean equals(Object o) {
         return Objects.equals(key, that.key) && Objects.equals(value, that.value);
     }
 
+    /**
+     * Calculate the hash code of the node selector
+     */
     @Override
     public int hashCode() {
         return Objects.hash(key, value);
     }
 
+    /**
+     * Convert this node selector to string representation
+     */
     @Override
     public String toString() {
         return key + "=" + value;
diff --git a/client/rest/src/main/java/org/opensearch/client/HeapBufferedAsyncResponseConsumer.java b/client/rest/src/main/java/org/opensearch/client/HeapBufferedAsyncResponseConsumer.java
deleted file mode 100644
index e2993e48a5a05..0000000000000
--- a/client/rest/src/main/java/org/opensearch/client/HeapBufferedAsyncResponseConsumer.java
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/*
- * Modifications Copyright OpenSearch Contributors. See
- * GitHub history for details.
- */
-
-package org.opensearch.client;
-
-import org.apache.http.ContentTooLongException;
-import org.apache.http.HttpEntity;
-import org.apache.http.HttpException;
-import org.apache.http.HttpResponse;
-import org.apache.http.entity.ContentType;
-import org.apache.http.nio.ContentDecoder;
-import org.apache.http.nio.IOControl;
-import org.apache.http.nio.entity.ContentBufferEntity;
-import org.apache.http.nio.protocol.AbstractAsyncResponseConsumer;
-import org.apache.http.nio.util.ByteBufferAllocator;
-import org.apache.http.nio.util.HeapByteBufferAllocator;
-import org.apache.http.nio.util.SimpleInputBuffer;
-import org.apache.http.protocol.HttpContext;
-
-import java.io.IOException;
-
-/**
- * Default implementation of {@link org.apache.http.nio.protocol.HttpAsyncResponseConsumer}. Buffers the whole
- * response content in heap memory, meaning that the size of the buffer is equal to the content-length of the response.
- * Limits the size of responses that can be read based on a configurable argument. Throws an exception in case the entity is longer
- * than the configured buffer limit.
- */
-public class HeapBufferedAsyncResponseConsumer extends AbstractAsyncResponseConsumer<HttpResponse> {
-
-    private final int bufferLimitBytes;
-    private volatile HttpResponse response;
-    private volatile SimpleInputBuffer buf;
-
-    /**
-     * Creates a new instance of this consumer with the provided buffer limit.
-     *
-     * @param bufferLimit the buffer limit. Must be greater than 0.
-     * @throws IllegalArgumentException if {@code bufferLimit} is less than or equal to 0.
-     */
-    public HeapBufferedAsyncResponseConsumer(int bufferLimit) {
-        if (bufferLimit <= 0) {
-            throw new IllegalArgumentException("bufferLimit must be greater than 0");
-        }
-        this.bufferLimitBytes = bufferLimit;
-    }
-
-    /**
-     * Get the limit of the buffer.
-     */
-    public int getBufferLimit() {
-        return bufferLimitBytes;
-    }
-
-    @Override
-    protected void onResponseReceived(HttpResponse response) throws HttpException, IOException {
-        this.response = response;
-    }
-
-    @Override
-    protected void onEntityEnclosed(HttpEntity entity, ContentType contentType) throws IOException {
-        long len = entity.getContentLength();
-        if (len > bufferLimitBytes) {
-            throw new ContentTooLongException(
-                "entity content is too long [" + len + "] for the configured buffer limit [" + bufferLimitBytes + "]"
-            );
-        }
-        if (len < 0) {
-            len = 4096;
-        }
-        this.buf = new SimpleInputBuffer((int) len, getByteBufferAllocator());
-        this.response.setEntity(new ContentBufferEntity(entity, this.buf));
-    }
-
-    /**
-     * Returns the instance of {@link ByteBufferAllocator} to use for content buffering.
-     * Allows to plug in any {@link ByteBufferAllocator} implementation.
- */
-    protected ByteBufferAllocator getByteBufferAllocator() {
-        return HeapByteBufferAllocator.INSTANCE;
-    }
-
-    @Override
-    protected void onContentReceived(ContentDecoder decoder, IOControl ioctrl) throws IOException {
-        this.buf.consumeContent(decoder);
-    }
-
-    @Override
-    protected HttpResponse buildResult(HttpContext context) throws Exception {
-        return response;
-    }
-
-    @Override
-    protected void releaseResources() {
-        response = null;
-    }
-}
diff --git a/client/rest/src/main/java/org/opensearch/client/HttpAsyncResponseConsumerFactory.java b/client/rest/src/main/java/org/opensearch/client/HttpAsyncResponseConsumerFactory.java
index 7a56e03a1162c..6420a615484d0 100644
--- a/client/rest/src/main/java/org/opensearch/client/HttpAsyncResponseConsumerFactory.java
+++ b/client/rest/src/main/java/org/opensearch/client/HttpAsyncResponseConsumerFactory.java
@@ -32,30 +32,31 @@
 
 package org.opensearch.client;
 
-import org.apache.http.HttpResponse;
-import org.apache.http.nio.protocol.HttpAsyncResponseConsumer;
+import org.apache.hc.core5.http.ClassicHttpResponse;
+import org.apache.hc.core5.http.nio.AsyncResponseConsumer;
+import org.opensearch.client.nio.HeapBufferedAsyncResponseConsumer;
 
 import static org.opensearch.client.HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory.DEFAULT_BUFFER_LIMIT;
 
 /**
- * Factory used to create instances of {@link HttpAsyncResponseConsumer}. Each request retry needs its own instance of the
+ * Factory used to create instances of {@link AsyncResponseConsumer}. Each request retry needs its own instance of the
  * consumer object. Users can implement this interface and pass their own instance to the specialized
  * performRequest methods that accept an {@link HttpAsyncResponseConsumerFactory} instance as argument.
  */
 public interface HttpAsyncResponseConsumerFactory {
 
     /**
-     * Creates the default type of {@link HttpAsyncResponseConsumer}, based on heap buffering with a buffer limit of 100MB.
+     * Creates the default type of {@link AsyncResponseConsumer}, based on heap buffering with a buffer limit of 100MB.
      */
     HttpAsyncResponseConsumerFactory DEFAULT = new HeapBufferedResponseConsumerFactory(DEFAULT_BUFFER_LIMIT);
 
     /**
-     * Creates the {@link HttpAsyncResponseConsumer}, called once per request attempt.
+     * Creates the {@link AsyncResponseConsumer}, called once per request attempt.
      */
-    HttpAsyncResponseConsumer<HttpResponse> createHttpAsyncResponseConsumer();
+    AsyncResponseConsumer<ClassicHttpResponse> createHttpAsyncResponseConsumer();
 
     /**
-     * Default factory used to create instances of {@link HttpAsyncResponseConsumer}.
+     * Default factory used to create instances of {@link AsyncResponseConsumer}.
      * Creates one instance of {@link HeapBufferedAsyncResponseConsumer} for each request attempt, with a configurable
      * buffer limit which defaults to 100MB.
      */
@@ -75,8 +76,11 @@ public HeapBufferedResponseConsumerFactory(int bufferLimitBytes) {
             this.bufferLimit = bufferLimitBytes;
         }
 
+        /**
+         * Creates the {@link AsyncResponseConsumer}, called once per request attempt.
+         */
         @Override
-        public HttpAsyncResponseConsumer<HttpResponse> createHttpAsyncResponseConsumer() {
+        public AsyncResponseConsumer<ClassicHttpResponse> createHttpAsyncResponseConsumer() {
             return new HeapBufferedAsyncResponseConsumer(bufferLimit);
         }
     }
diff --git a/client/rest/src/main/java/org/opensearch/client/HttpDeleteWithEntity.java b/client/rest/src/main/java/org/opensearch/client/HttpDeleteWithEntity.java
deleted file mode 100644
index 52618cd7edc75..0000000000000
--- a/client/rest/src/main/java/org/opensearch/client/HttpDeleteWithEntity.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-/*
- * Modifications Copyright OpenSearch Contributors. See
- * GitHub history for details.
- */
-
-package org.opensearch.client;
-
-import org.apache.http.client.methods.HttpDelete;
-import org.apache.http.client.methods.HttpEntityEnclosingRequestBase;
-
-import java.net.URI;
-
-/**
- * Allows to send DELETE requests providing a body (not supported out of the box)
- */
-final class HttpDeleteWithEntity extends HttpEntityEnclosingRequestBase {
-
-    static final String METHOD_NAME = HttpDelete.METHOD_NAME;
-
-    HttpDeleteWithEntity(final URI uri) {
-        setURI(uri);
-    }
-
-    @Override
-    public String getMethod() {
-        return METHOD_NAME;
-    }
-}
diff --git a/client/rest/src/main/java/org/opensearch/client/HttpGetWithEntity.java b/client/rest/src/main/java/org/opensearch/client/HttpGetWithEntity.java
deleted file mode 100644
index 8ab639433f6be..0000000000000
--- a/client/rest/src/main/java/org/opensearch/client/HttpGetWithEntity.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-/*
- * Modifications Copyright OpenSearch Contributors. See
- * GitHub history for details.
- */
-
-package org.opensearch.client;
-
-import org.apache.http.client.methods.HttpEntityEnclosingRequestBase;
-import org.apache.http.client.methods.HttpGet;
-
-import java.net.URI;
-
-/**
- * Allows to send GET requests providing a body (not supported out of the box)
- */
-final class HttpGetWithEntity extends HttpEntityEnclosingRequestBase {
-
-    static final String METHOD_NAME = HttpGet.METHOD_NAME;
-
-    HttpGetWithEntity(final URI uri) {
-        setURI(uri);
-    }
-
-    @Override
-    public String getMethod() {
-        return METHOD_NAME;
-    }
-}
diff --git a/client/rest/src/main/java/org/opensearch/client/Node.java b/client/rest/src/main/java/org/opensearch/client/Node.java
index c02ac6c68718f..8fe5dcfa00db0 100644
--- a/client/rest/src/main/java/org/opensearch/client/Node.java
+++ b/client/rest/src/main/java/org/opensearch/client/Node.java
@@ -32,7 +32,7 @@
 
 package org.opensearch.client;
 
-import org.apache.http.HttpHost;
+import org.apache.hc.core5.http.HttpHost;
 
 import java.util.List;
 import java.util.Map;
@@ -152,6 +152,9 @@ public Map<String, List<String>> getAttributes() {
         return attributes;
     }
 
+    /**
+     * Convert node to string representation
+     */
     @Override
     public String toString() {
         StringBuilder b = new StringBuilder();
@@ -174,6 +177,10 @@ public String toString() {
         return b.append(']').toString();
     }
 
+    /**
+     * Compare two nodes for equality
+     * @param obj node instance to compare with
+     */
     @Override
     public boolean equals(Object obj) {
         if (obj == null || obj.getClass() != getClass()) {
@@ -188,6 +195,9 @@ public boolean equals(Object obj) {
             && Objects.equals(attributes, other.attributes);
     }
 
+    /**
+     * Calculate the hash code of the node
+     */
     @Override
     public int hashCode() {
         return Objects.hash(host, boundHosts, name, version, roles, attributes);
@@ -239,11 +249,25 @@ public boolean isIngest() {
             return roles.contains("ingest");
         }
 
+        /**
+         * Returns whether the node is dedicated to providing search capability.
+         */
+        public boolean isSearch() {
+            return roles.contains("search");
+        }
+
+        /**
+         * Convert roles to string representation
+         */
         @Override
         public String toString() {
             return String.join(",", roles);
         }
 
+        /**
+         * Compare two roles for equality
+         * @param obj roles instance to compare with
+         */
         @Override
         public boolean equals(Object obj) {
             if (obj == null || obj.getClass() != getClass()) {
@@ -253,6 +277,9 @@ public boolean equals(Object obj) {
             return roles.equals(other.roles);
         }
 
+        /**
+         * Calculate the hash code of the roles
+         */
         @Override
         public int hashCode() {
             return roles.hashCode();
diff --git a/client/rest/src/main/java/org/opensearch/client/PersistentCredentialsAuthenticationStrategy.java b/client/rest/src/main/java/org/opensearch/client/PersistentCredentialsAuthenticationStrategy.java
deleted file mode 100644
index 8a35d6eb607ca..0000000000000
--- a/client/rest/src/main/java/org/opensearch/client/PersistentCredentialsAuthenticationStrategy.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership.
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - * - * - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.client; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.http.HttpHost; -import org.apache.http.auth.AuthScheme; -import org.apache.http.impl.client.TargetAuthenticationStrategy; -import org.apache.http.protocol.HttpContext; - -/** - * An {@link org.apache.http.client.AuthenticationStrategy} implementation that does not perform - * any special handling if authentication fails. - * The default handler in Apache HTTP client mimics standard browser behaviour of clearing authentication - * credentials if it receives a 401 response from the server. While this can be useful for browser, it is - * rarely the desired behaviour with the OpenSearch REST API. - * If the code using the REST client has configured credentials for the REST API, then we can and should - * assume that this is intentional, and those credentials represent the best possible authentication - * mechanism to the OpenSearch node. - * If we receive a 401 status, a probably cause is that the authentication mechanism in place was unable - * to perform the requisite password checks (the node has not yet recovered its state, or an external - * authentication provider was unavailable). - * If this occurs, then the desired behaviour is for the Rest client to retry with the same credentials - * (rather than trying with no credentials, or expecting the calling code to provide alternate credentials). - */ -final class PersistentCredentialsAuthenticationStrategy extends TargetAuthenticationStrategy { - - private final Log logger = LogFactory.getLog(PersistentCredentialsAuthenticationStrategy.class); - - @Override - public void authFailed(HttpHost host, AuthScheme authScheme, HttpContext context) { - if (logger.isDebugEnabled()) { - logger.debug( - "Authentication to " - + host - + " failed (scheme: " - + authScheme.getSchemeName() - + "). Preserving credentials for next request" - ); - } - // Do nothing. - // The superclass implementation of method will clear the credentials from the cache, but we don't - } -} diff --git a/client/rest/src/main/java/org/opensearch/client/PreferHasAttributeNodeSelector.java b/client/rest/src/main/java/org/opensearch/client/PreferHasAttributeNodeSelector.java index ddec1da068bf0..7cf7490692650 100644 --- a/client/rest/src/main/java/org/opensearch/client/PreferHasAttributeNodeSelector.java +++ b/client/rest/src/main/java/org/opensearch/client/PreferHasAttributeNodeSelector.java @@ -58,6 +58,10 @@ public PreferHasAttributeNodeSelector(String key, String value) { this.value = value; } + /** + * Select the {@link Node}s to which to send requests. 
+     * @param nodes the {@link Node}s targeted for sending requests
+     */
     @Override
     public void select(Iterable<Node> nodes) {
         boolean foundAtLeastOne = false;
@@ -99,6 +103,10 @@ public void select(Iterable<Node> nodes) {
         }
     }
 
+    /**
+     * Compare two node selectors for equality
+     * @param o node selector instance to compare with
+     */
     @Override
     public boolean equals(Object o) {
         if (this == o) {
@@ -111,11 +119,17 @@ public boolean equals(Object o) {
         return Objects.equals(key, that.key) && Objects.equals(value, that.value);
     }
 
+    /**
+     * Calculate the hash code of the node selector
+     */
     @Override
     public int hashCode() {
         return Objects.hash(key, value);
     }
 
+    /**
+     * Convert this node selector to string representation
+     */
     @Override
     public String toString() {
         return key + "=" + value;
diff --git a/client/rest/src/main/java/org/opensearch/client/Request.java b/client/rest/src/main/java/org/opensearch/client/Request.java
index df81ca7f717ae..441b01b0891ad 100644
--- a/client/rest/src/main/java/org/opensearch/client/Request.java
+++ b/client/rest/src/main/java/org/opensearch/client/Request.java
@@ -32,9 +32,9 @@
 
 package org.opensearch.client;
 
-import org.apache.http.HttpEntity;
-import org.apache.http.entity.ContentType;
-import org.apache.http.nio.entity.NStringEntity;
+import org.apache.hc.core5.http.ContentType;
+import org.apache.hc.core5.http.HttpEntity;
+import org.apache.hc.core5.http.io.entity.StringEntity;
 
 import java.util.HashMap;
 import java.util.Map;
@@ -133,7 +133,7 @@ public void setEntity(HttpEntity entity) {
      * @param entity JSON string to be set as the entity body of the request.
      */
     public void setJsonEntity(String entity) {
-        setEntity(entity == null ? null : new NStringEntity(entity, ContentType.APPLICATION_JSON));
+        setEntity(entity == null ? null : new StringEntity(entity, ContentType.APPLICATION_JSON));
     }
 
     /**
@@ -176,6 +176,9 @@ public RequestOptions getOptions() {
         return options;
     }
 
+    /**
+     * Convert request to string representation
+     */
     @Override
     public String toString() {
         StringBuilder b = new StringBuilder();
@@ -192,6 +195,10 @@ public String toString() {
         return b.append('}').toString();
     }
 
+    /**
+     * Compare two requests for equality
+     * @param obj request instance to compare with
+     */
     @Override
     public boolean equals(Object obj) {
         if (obj == null || (obj.getClass() != getClass())) {
@@ -209,6 +216,9 @@ public boolean equals(Object obj) {
             && options.equals(other.options);
     }
 
+    /**
+     * Calculate the hash code of the request
+     */
     @Override
     public int hashCode() {
         return Objects.hash(method, endpoint, parameters, entity, options);
diff --git a/client/rest/src/main/java/org/opensearch/client/RequestLogger.java b/client/rest/src/main/java/org/opensearch/client/RequestLogger.java
index 297885fa3131b..0f2e0e6da834d 100644
--- a/client/rest/src/main/java/org/opensearch/client/RequestLogger.java
+++ b/client/rest/src/main/java/org/opensearch/client/RequestLogger.java
@@ -34,16 +34,16 @@
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.http.Header;
-import org.apache.http.HttpEntity;
-import org.apache.http.HttpEntityEnclosingRequest;
-import org.apache.http.HttpHost;
-import org.apache.http.HttpResponse;
-import org.apache.http.RequestLine;
-import org.apache.http.client.methods.HttpUriRequest;
-import org.apache.http.entity.BufferedHttpEntity;
-import org.apache.http.entity.ContentType;
-import org.apache.http.util.EntityUtils;
+import org.apache.hc.client5.http.classic.methods.HttpUriRequest;
+import
org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.BufferedHttpEntity; +import org.apache.hc.core5.http.io.entity.EntityUtils; +import org.apache.hc.core5.http.message.StatusLine; import java.io.BufferedReader; import java.io.IOException; @@ -66,17 +66,10 @@ private RequestLogger() {} /** * Logs a request that yielded a response */ - static void logResponse(Log logger, HttpUriRequest request, HttpHost host, HttpResponse httpResponse) { + static void logResponse(Log logger, HttpUriRequest request, HttpHost host, ClassicHttpResponse httpResponse) { if (logger.isDebugEnabled()) { logger.debug( - "request [" - + request.getMethod() - + " " - + host - + getUri(request.getRequestLine()) - + "] returned [" - + httpResponse.getStatusLine() - + "]" + "request [" + request.getMethod() + " " + host + getUri(request) + "] returned [" + new StatusLine(httpResponse) + "]" ); } if (logger.isWarnEnabled()) { @@ -109,7 +102,7 @@ static void logResponse(Log logger, HttpUriRequest request, HttpHost host, HttpR */ static void logFailedRequest(Log logger, HttpUriRequest request, Node node, Exception e) { if (logger.isDebugEnabled()) { - logger.debug("request [" + request.getMethod() + " " + node.getHost() + getUri(request.getRequestLine()) + "] failed", e); + logger.debug("request [" + request.getMethod() + " " + node.getHost() + getUri(request) + "] failed", e); } if (tracer.isTraceEnabled()) { String traceRequest; @@ -127,7 +120,7 @@ static String buildWarningMessage(HttpUriRequest request, HttpHost host, Header[ StringBuilder message = new StringBuilder("request [").append(request.getMethod()) .append(" ") .append(host) - .append(getUri(request.getRequestLine())) + .append(getUri(request)) .append("] returned ") .append(warnings.length) .append(" warnings: "); @@ -144,17 +137,18 @@ static String buildWarningMessage(HttpUriRequest request, HttpHost host, Header[ * Creates curl output for given request */ static String buildTraceRequest(HttpUriRequest request, HttpHost host) throws IOException { - String requestLine = "curl -iX " + request.getMethod() + " '" + host + getUri(request.getRequestLine()) + "'"; - if (request instanceof HttpEntityEnclosingRequest) { - HttpEntityEnclosingRequest enclosingRequest = (HttpEntityEnclosingRequest) request; - if (enclosingRequest.getEntity() != null) { - requestLine += " -d '"; - HttpEntity entity = enclosingRequest.getEntity(); - if (entity.isRepeatable() == false) { - entity = new BufferedHttpEntity(enclosingRequest.getEntity()); - enclosingRequest.setEntity(entity); - } + String requestLine = "curl -iX " + request.getMethod() + " '" + host + getUri(request) + "'"; + if (request.getEntity() != null) { + requestLine += " -d '"; + HttpEntity entity = request.getEntity(); + if (entity.isRepeatable() == false) { + entity = new BufferedHttpEntity(request.getEntity()); + request.setEntity(entity); + } + try { requestLine += EntityUtils.toString(entity, StandardCharsets.UTF_8) + "'"; + } catch (final ParseException ex) { + throw new IOException(ex); } } return requestLine; @@ -163,10 +157,10 @@ static String buildTraceRequest(HttpUriRequest request, HttpHost host) throws IO /** * Creates curl output for given response */ - static String buildTraceResponse(HttpResponse httpResponse) throws IOException { + static String 
buildTraceResponse(ClassicHttpResponse httpResponse) throws IOException { StringBuilder responseLine = new StringBuilder(); - responseLine.append("# ").append(httpResponse.getStatusLine()); - for (Header header : httpResponse.getAllHeaders()) { + responseLine.append("# ").append(new StatusLine(httpResponse)); + for (Header header : httpResponse.getHeaders()) { responseLine.append("\n# ").append(header.getName()).append(": ").append(header.getValue()); } responseLine.append("\n#"); @@ -176,7 +170,7 @@ static String buildTraceResponse(HttpResponse httpResponse) throws IOException { entity = new BufferedHttpEntity(entity); } httpResponse.setEntity(entity); - ContentType contentType = ContentType.get(entity); + ContentType contentType = ContentType.parse(entity.getContentType()); Charset charset = StandardCharsets.UTF_8; if (contentType != null && contentType.getCharset() != null) { charset = contentType.getCharset(); @@ -191,10 +185,14 @@ static String buildTraceResponse(HttpResponse httpResponse) throws IOException { return responseLine.toString(); } - private static String getUri(RequestLine requestLine) { - if (requestLine.getUri().charAt(0) != '/') { - return "/" + requestLine.getUri(); + private static String getUri(HttpUriRequest request) { + final String uri = request.getRequestUri(); + if (uri == null) { + return "/"; + } else if (!uri.startsWith("/")) { + return "/" + uri; + } else { + return uri; } - return requestLine.getUri(); } } diff --git a/client/rest/src/main/java/org/opensearch/client/RequestOptions.java b/client/rest/src/main/java/org/opensearch/client/RequestOptions.java index 5390e303ff499..189d785faaf45 100644 --- a/client/rest/src/main/java/org/opensearch/client/RequestOptions.java +++ b/client/rest/src/main/java/org/opensearch/client/RequestOptions.java @@ -32,10 +32,10 @@ package org.opensearch.client; -import org.apache.http.Header; -import org.apache.http.client.config.RequestConfig; -import org.apache.http.message.BasicHeader; -import org.apache.http.nio.protocol.HttpAsyncResponseConsumer; +import org.apache.hc.client5.http.config.RequestConfig; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.message.BasicHeader; +import org.apache.hc.core5.http.nio.AsyncResponseConsumer; import org.opensearch.client.HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory; import java.util.ArrayList; @@ -86,7 +86,7 @@ public List
getHeaders() { /** * The {@link HttpAsyncResponseConsumerFactory} used to create one - * {@link HttpAsyncResponseConsumer} callback per retry. Controls how the + * {@link AsyncResponseConsumer} callback per retry. Controls how the * response body gets streamed from a non-blocking HTTP connection on the * client side. */ @@ -124,6 +124,9 @@ public RequestConfig getRequestConfig() { return requestConfig; } + /** + * Convert request options to string representation + */ @Override public String toString() { StringBuilder b = new StringBuilder(); @@ -152,6 +155,10 @@ public String toString() { return b.append('}').toString(); } + /** + * Compare two request options for equality + * @param obj request options instance to compare with + */ @Override public boolean equals(Object obj) { if (obj == null || (obj.getClass() != getClass())) { @@ -167,6 +174,9 @@ public boolean equals(Object obj) { && Objects.equals(warningsHandler, other.warningsHandler); } + /** + * Calculate the hash code of the request options + */ @Override public int hashCode() { return Objects.hash(headers, httpAsyncResponseConsumerFactory, warningsHandler); @@ -218,11 +228,11 @@ public Builder addHeader(String name, String value) { /** * Set the {@link HttpAsyncResponseConsumerFactory} used to create one - * {@link HttpAsyncResponseConsumer} callback per retry. Controls how the + * {@link AsyncResponseConsumer} callback per retry. Controls how the * response body gets streamed from a non-blocking HTTP connection on the * client side. * - * @param httpAsyncResponseConsumerFactory factory for creating {@link HttpAsyncResponseConsumer}. + * @param httpAsyncResponseConsumerFactory factory for creating {@link AsyncResponseConsumer}. * @throws NullPointerException if {@code httpAsyncResponseConsumerFactory} is null. 
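+     * <p>
+     * For example (illustrative, with {@code options} being a {@code RequestOptions.Builder}), to allow
+     * larger responses to be buffered on the heap:
+     * {@code options.setHttpAsyncResponseConsumerFactory(new HeapBufferedResponseConsumerFactory(100 * 1024 * 1024))}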
*/ public void setHttpAsyncResponseConsumerFactory(HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory) { diff --git a/client/rest/src/main/java/org/opensearch/client/Response.java b/client/rest/src/main/java/org/opensearch/client/Response.java index d380607b7df9e..c758826b776ba 100644 --- a/client/rest/src/main/java/org/opensearch/client/Response.java +++ b/client/rest/src/main/java/org/opensearch/client/Response.java @@ -32,12 +32,13 @@ package org.opensearch.client; -import org.apache.http.Header; -import org.apache.http.HttpEntity; -import org.apache.http.HttpHost; -import org.apache.http.HttpResponse; -import org.apache.http.RequestLine; -import org.apache.http.StatusLine; +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.HttpResponse; +import org.apache.hc.core5.http.message.RequestLine; +import org.apache.hc.core5.http.message.StatusLine; import java.util.ArrayList; import java.util.List; @@ -53,9 +54,9 @@ public class Response { private final RequestLine requestLine; private final HttpHost host; - private final HttpResponse response; + private final ClassicHttpResponse response; - Response(RequestLine requestLine, HttpHost host, HttpResponse response) { + Response(RequestLine requestLine, HttpHost host, ClassicHttpResponse response) { Objects.requireNonNull(requestLine, "requestLine cannot be null"); Objects.requireNonNull(host, "host cannot be null"); Objects.requireNonNull(response, "response cannot be null"); @@ -82,14 +83,14 @@ public HttpHost getHost() { * Returns the status line of the current response */ public StatusLine getStatusLine() { - return response.getStatusLine(); + return new StatusLine(response); } /** * Returns all the response headers */ public Header[] getHeaders() { - return response.getAllHeaders(); + return response.getHeaders(); } /** @@ -199,12 +200,15 @@ public boolean hasWarnings() { return warnings != null && warnings.length > 0; } - HttpResponse getHttpResponse() { + ClassicHttpResponse getHttpResponse() { return response; } + /** + * Convert response to string representation + */ @Override public String toString() { - return "Response{" + "requestLine=" + requestLine + ", host=" + host + ", response=" + response.getStatusLine() + '}'; + return "Response{" + "requestLine=" + requestLine + ", host=" + host + ", response=" + getStatusLine() + '}'; } } diff --git a/client/rest/src/main/java/org/opensearch/client/ResponseException.java b/client/rest/src/main/java/org/opensearch/client/ResponseException.java index 8104c32c422e5..ed816c7e1177e 100644 --- a/client/rest/src/main/java/org/opensearch/client/ResponseException.java +++ b/client/rest/src/main/java/org/opensearch/client/ResponseException.java @@ -32,9 +32,10 @@ package org.opensearch.client; -import org.apache.http.HttpEntity; -import org.apache.http.entity.BufferedHttpEntity; -import org.apache.http.util.EntityUtils; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.BufferedHttpEntity; +import org.apache.hc.core5.http.io.entity.EntityUtils; import java.io.IOException; import java.util.Locale; @@ -77,7 +78,11 @@ static String buildMessage(Response response) throws IOException { entity = new BufferedHttpEntity(entity); response.getHttpResponse().setEntity(entity); } - message += "\n" + EntityUtils.toString(entity); + try { + message += 
"\n" + EntityUtils.toString(entity); + } catch (final ParseException ex) { + throw new IOException(ex); + } } return message; } diff --git a/client/rest/src/main/java/org/opensearch/client/RestClient.java b/client/rest/src/main/java/org/opensearch/client/RestClient.java index 92aed2c8fb179..9d140a145b004 100644 --- a/client/rest/src/main/java/org/opensearch/client/RestClient.java +++ b/client/rest/src/main/java/org/opensearch/client/RestClient.java @@ -33,36 +33,43 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.http.ConnectionClosedException; -import org.apache.http.Header; -import org.apache.http.HttpEntity; -import org.apache.http.entity.HttpEntityWrapper; -import org.apache.http.HttpHost; -import org.apache.http.HttpRequest; -import org.apache.http.HttpResponse; -import org.apache.http.client.AuthCache; -import org.apache.http.client.ClientProtocolException; -import org.apache.http.client.entity.GzipCompressingEntity; -import org.apache.http.client.entity.GzipDecompressingEntity; -import org.apache.http.client.config.RequestConfig; -import org.apache.http.client.methods.HttpEntityEnclosingRequestBase; -import org.apache.http.client.methods.HttpHead; -import org.apache.http.client.methods.HttpOptions; -import org.apache.http.client.methods.HttpPatch; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; -import org.apache.http.client.methods.HttpRequestBase; -import org.apache.http.client.methods.HttpTrace; -import org.apache.http.client.protocol.HttpClientContext; -import org.apache.http.client.utils.URIBuilder; -import org.apache.http.concurrent.FutureCallback; -import org.apache.http.conn.ConnectTimeoutException; -import org.apache.http.impl.auth.BasicScheme; -import org.apache.http.impl.client.BasicAuthCache; -import org.apache.http.impl.nio.client.CloseableHttpAsyncClient; -import org.apache.http.nio.client.methods.HttpAsyncMethods; -import org.apache.http.nio.protocol.HttpAsyncRequestProducer; -import org.apache.http.nio.protocol.HttpAsyncResponseConsumer; +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.ConnectionClosedException; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.io.entity.HttpEntityWrapper; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.HttpRequest; +import org.apache.hc.client5.http.auth.AuthCache; +import org.apache.hc.client5.http.auth.AuthScheme; +import org.apache.hc.client5.http.auth.AuthScope; +import org.apache.hc.client5.http.auth.Credentials; +import org.apache.hc.client5.http.auth.CredentialsProvider; +import org.apache.hc.client5.http.ConnectTimeoutException; +import org.apache.hc.client5.http.ClientProtocolException; +import org.apache.hc.client5.http.entity.GzipDecompressingEntity; +import org.apache.hc.client5.http.config.RequestConfig; +import org.apache.hc.client5.http.classic.methods.HttpDelete; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpHead; +import org.apache.hc.client5.http.classic.methods.HttpOptions; +import org.apache.hc.client5.http.classic.methods.HttpPatch; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; +import org.apache.hc.client5.http.classic.methods.HttpUriRequestBase; +import org.apache.hc.client5.http.classic.methods.HttpTrace; +import 
org.apache.hc.client5.http.protocol.HttpClientContext; +import org.apache.hc.core5.http.message.RequestLine; +import org.apache.hc.core5.http.nio.AsyncRequestProducer; +import org.apache.hc.core5.http.nio.AsyncResponseConsumer; +import org.apache.hc.core5.net.URIBuilder; +import org.apache.hc.core5.reactor.IOReactorStatus; +import org.apache.hc.core5.util.Args; +import org.opensearch.client.http.HttpUriRequestProducer; +import org.apache.hc.core5.concurrent.FutureCallback; +import org.apache.hc.client5.http.impl.auth.BasicScheme; +import org.apache.hc.client5.http.impl.auth.BasicAuthCache; +import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient; import javax.net.ssl.SSLHandshakeException; import java.io.ByteArrayInputStream; @@ -70,6 +77,7 @@ import java.io.Closeable; import java.io.IOException; import java.io.InputStream; +import java.io.OutputStream; import java.net.ConnectException; import java.net.SocketTimeoutException; import java.net.URI; @@ -92,6 +100,7 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; import java.util.zip.GZIPOutputStream; @@ -218,7 +227,7 @@ public static RestClientBuilder builder(String cloudId) { } String url = decodedParts[1] + "." + domain; - return builder(new HttpHost(url, port, "https")); + return builder(new HttpHost("https", url, port)); } /** @@ -287,7 +296,7 @@ public List getNodes() { * @return client running status */ public boolean isRunning() { - return client.isRunning(); + return client.getStatus() == IOReactorStatus.ACTIVE; } /** @@ -323,7 +332,7 @@ public Response performRequest(Request request) throws IOException { private Response performRequest(final NodeTuple> nodeTuple, final InternalRequest request, Exception previousException) throws IOException { RequestContext context = request.createContextForNextAttempt(nodeTuple.nodes.next(), nodeTuple.authCache); - HttpResponse httpResponse; + ClassicHttpResponse httpResponse; try { httpResponse = client.execute(context.requestProducer, context.asyncResponseConsumer, context.context, null).get(); } catch (Exception e) { @@ -353,18 +362,18 @@ private Response performRequest(final NodeTuple> nodeTuple, final throw responseOrResponseException.responseException; } - private ResponseOrResponseException convertResponse(InternalRequest request, Node node, HttpResponse httpResponse) throws IOException { + private ResponseOrResponseException convertResponse(InternalRequest request, Node node, ClassicHttpResponse httpResponse) + throws IOException { RequestLogger.logResponse(logger, request.httpRequest, node.getHost(), httpResponse); - int statusCode = httpResponse.getStatusLine().getStatusCode(); + int statusCode = httpResponse.getCode(); Optional.ofNullable(httpResponse.getEntity()) .map(HttpEntity::getContentEncoding) - .map(Header::getValue) .filter("gzip"::equalsIgnoreCase) .map(gzipHeaderValue -> new GzipDecompressingEntity(httpResponse.getEntity())) .ifPresent(httpResponse::setEntity); - Response response = new Response(request.httpRequest.getRequestLine(), node.getHost(), httpResponse); + Response response = new Response(new RequestLine(request.httpRequest), node.getHost(), httpResponse); if (isSuccessfulResponse(statusCode) || request.ignoreErrorCodes.contains(response.getStatusLine().getStatusCode())) { onResponse(node); if 
(request.warningsHandler.warningsShouldFailRequest(response.getWarnings())) { @@ -418,47 +427,56 @@ private void performRequestAsync( ) { request.cancellable.runIfNotCancelled(() -> { final RequestContext context = request.createContextForNextAttempt(nodeTuple.nodes.next(), nodeTuple.authCache); - client.execute(context.requestProducer, context.asyncResponseConsumer, context.context, new FutureCallback() { - @Override - public void completed(HttpResponse httpResponse) { - try { - ResponseOrResponseException responseOrResponseException = convertResponse(request, context.node, httpResponse); - if (responseOrResponseException.responseException == null) { - listener.onSuccess(responseOrResponseException.response); - } else { + Future future = client.execute( + context.requestProducer, + context.asyncResponseConsumer, + context.context, + new FutureCallback() { + @Override + public void completed(ClassicHttpResponse httpResponse) { + try { + ResponseOrResponseException responseOrResponseException = convertResponse(request, context.node, httpResponse); + if (responseOrResponseException.responseException == null) { + listener.onSuccess(responseOrResponseException.response); + } else { + if (nodeTuple.nodes.hasNext()) { + listener.trackFailure(responseOrResponseException.responseException); + performRequestAsync(nodeTuple, request, listener); + } else { + listener.onDefinitiveFailure(responseOrResponseException.responseException); + } + } + } catch (Exception e) { + listener.onDefinitiveFailure(e); + } + } + + @Override + public void failed(Exception failure) { + try { + RequestLogger.logFailedRequest(logger, request.httpRequest, context.node, failure); + onFailure(context.node); if (nodeTuple.nodes.hasNext()) { - listener.trackFailure(responseOrResponseException.responseException); + listener.trackFailure(failure); performRequestAsync(nodeTuple, request, listener); } else { - listener.onDefinitiveFailure(responseOrResponseException.responseException); + listener.onDefinitiveFailure(failure); } + } catch (Exception e) { + listener.onDefinitiveFailure(e); } - } catch (Exception e) { - listener.onDefinitiveFailure(e); } - } - @Override - public void failed(Exception failure) { - try { - RequestLogger.logFailedRequest(logger, request.httpRequest, context.node, failure); - onFailure(context.node); - if (nodeTuple.nodes.hasNext()) { - listener.trackFailure(failure); - performRequestAsync(nodeTuple, request, listener); - } else { - listener.onDefinitiveFailure(failure); - } - } catch (Exception e) { - listener.onDefinitiveFailure(e); + @Override + public void cancelled() { + listener.onDefinitiveFailure(Cancellable.newCancellationException()); } } + ); - @Override - public void cancelled() { - listener.onDefinitiveFailure(Cancellable.newCancellationException()); - } - }); + if (future instanceof org.apache.hc.core5.concurrent.Cancellable) { + request.httpRequest.setDependency((org.apache.hc.core5.concurrent.Cancellable) future); + } }); } @@ -583,6 +601,9 @@ private void onFailure(Node node) { failureListener.onFailure(node); } + /** + * Close the underlying {@link CloseableHttpAsyncClient} instance + */ @Override public void close() throws IOException { client.close(); @@ -608,12 +629,12 @@ private static void addSuppressedException(Exception suppressedException, Except } } - private HttpRequestBase createHttpRequest(String method, URI uri, HttpEntity entity) { + private HttpUriRequestBase createHttpRequest(String method, URI uri, HttpEntity entity) { switch (method.toUpperCase(Locale.ROOT)) { - case 
HttpDeleteWithEntity.METHOD_NAME:
-                return addRequestBody(new HttpDeleteWithEntity(uri), entity);
-            case HttpGetWithEntity.METHOD_NAME:
-                return addRequestBody(new HttpGetWithEntity(uri), entity);
+            case HttpDelete.METHOD_NAME:
+                return addRequestBody(new HttpDelete(uri), entity);
+            case HttpGet.METHOD_NAME:
+                return addRequestBody(new HttpGet(uri), entity);
             case HttpHead.METHOD_NAME:
                 return addRequestBody(new HttpHead(uri), entity);
             case HttpOptions.METHOD_NAME:
@@ -633,22 +654,18 @@ private HttpRequestBase createHttpRequest(String method, URI uri, HttpEntity entity) {
         }
     }

-    private HttpRequestBase addRequestBody(HttpRequestBase httpRequest, HttpEntity entity) {
+    private HttpUriRequestBase addRequestBody(HttpUriRequestBase httpRequest, HttpEntity entity) {
         if (entity != null) {
-            if (httpRequest instanceof HttpEntityEnclosingRequestBase) {
-                if (compressionEnabled) {
-                    if (chunkedEnabled.isPresent()) {
-                        entity = new ContentCompressingEntity(entity, chunkedEnabled.get());
-                    } else {
-                        entity = new ContentCompressingEntity(entity);
-                    }
-                } else if (chunkedEnabled.isPresent()) {
-                    entity = new ContentHttpEntity(entity, chunkedEnabled.get());
+            if (compressionEnabled) {
+                if (chunkedEnabled.isPresent()) {
+                    entity = new ContentCompressingEntity(entity, chunkedEnabled.get());
+                } else {
+                    entity = new ContentCompressingEntity(entity);
                 }
-                ((HttpEntityEnclosingRequestBase) httpRequest).setEntity(entity);
-            } else {
-                throw new UnsupportedOperationException(httpRequest.getMethod() + " with body is not supported");
+            } else if (chunkedEnabled.isPresent()) {
+                entity = new ContentHttpEntity(entity, chunkedEnabled.get());
             }
+            httpRequest.setEntity(entity);
         }
         return httpRequest;
     }
@@ -673,7 +690,12 @@ static URI buildUri(String pathPrefix, String path, Map<String, String> params) {
             for (Map.Entry<String, String> param : params.entrySet()) {
                 uriBuilder.addParameter(param.getKey(), param.getValue());
             }
-            return uriBuilder.build();
+
+            // Apache HttpClient 5.x does not encode URIs, while Apache HttpClient 4.x did. This leads
+            // to issues with Unicode characters (e.g. document IDs may contain Unicode characters),
+            // which would otherwise be passed through garbled. Building the URI from `toASCIIString()`
+            // ensures it is created with proper encoding.
+            return new URI(uriBuilder.build().toASCIIString());
         } catch (URISyntaxException e) {
             throw new IllegalArgumentException(e.getMessage(), e);
         }
@@ -802,7 +824,7 @@ public void remove() {
     private class InternalRequest {
         private final Request request;
         private final Set<Integer> ignoreErrorCodes;
-        private final HttpRequestBase httpRequest;
+        private final HttpUriRequestBase httpRequest;
         private final Cancellable cancellable;
         private final WarningsHandler warningsHandler;
@@ -839,7 +861,7 @@ private void setHeaders(HttpRequest httpRequest, Collection<Header> requestHeaders) {
             }
         }

-        private void setRequestConfig(HttpRequestBase httpRequest, RequestConfig requestConfig) {
+        private void setRequestConfig(HttpUriRequestBase httpRequest, RequestConfig requestConfig) {
             if (requestConfig != null) {
                 httpRequest.setConfig(requestConfig);
             }
@@ -851,21 +873,81 @@ RequestContext createContextForNextAttempt(Node node, AuthCache authCache) {
         }
     }

+    /**
+     * Apache HttpClient 5 adds an "Authorization" header even if no credentials for basic authentication
+     * are provided (effectively, username and password are 'null'). To work around that, this class wraps
+     * the AuthCache of the current HttpClientContext and ensures that credentials are indeed provided for
+     * the particular HttpHost; otherwise no authentication scheme is returned, even if one is present in
+     * the cache.
+     */
+    private static class WrappingAuthCache implements AuthCache {
+        private final HttpClientContext context;
+        private final AuthCache delegate;
+        private final boolean usePersistentCredentials = true;
+
+        public WrappingAuthCache(HttpClientContext context, AuthCache delegate) {
+            this.context = context;
+            this.delegate = delegate;
+        }
+
+        @Override
+        public void put(HttpHost host, AuthScheme authScheme) {
+            delegate.put(host, authScheme);
+        }
+
+        @Override
+        public AuthScheme get(HttpHost host) {
+            AuthScheme authScheme = delegate.get(host);
+
+            if (authScheme != null) {
+                final CredentialsProvider credsProvider = context.getCredentialsProvider();
+                if (credsProvider != null) {
+                    final String schemeName = authScheme.getName();
+                    final AuthScope authScope = new AuthScope(host, null, schemeName);
+                    final Credentials creds = credsProvider.getCredentials(authScope, context);
+
+                    // See https://issues.apache.org/jira/browse/HTTPCLIENT-2203
+                    if (authScheme instanceof BasicScheme) {
+                        ((BasicScheme) authScheme).initPreemptive(creds);
+                    }
+
+                    if (creds == null) {
+                        return null;
+                    }
+                }
+            }
+
+            return authScheme;
+        }
+
+        @Override
+        public void remove(HttpHost host) {
+            if (!usePersistentCredentials) {
+                delegate.remove(host);
+            }
+        }
+
+        @Override
+        public void clear() {
+            delegate.clear();
+        }
+
+    }
+
     private static class RequestContext {
         private final Node node;
-        private final HttpAsyncRequestProducer requestProducer;
-        private final HttpAsyncResponseConsumer<HttpResponse> asyncResponseConsumer;
+        private final AsyncRequestProducer requestProducer;
+        private final AsyncResponseConsumer<ClassicHttpResponse> asyncResponseConsumer;
         private final HttpClientContext context;

         RequestContext(InternalRequest request, Node node, AuthCache authCache) {
             this.node = node;
             // we stream the request body if the entity allows for it
-            this.requestProducer = HttpAsyncMethods.create(node.getHost(), request.httpRequest);
+            this.requestProducer = HttpUriRequestProducer.create(request.httpRequest, node.getHost());
             this.asyncResponseConsumer = request.request.getOptions()
                 .getHttpAsyncResponseConsumerFactory()
                 .createHttpAsyncResponseConsumer();
             this.context = HttpClientContext.create();
-            context.setAuthCache(authCache);
+            context.setAuthCache(new WrappingAuthCache(context, authCache));
         }
     }

@@ -966,7 +1048,9 @@ private static Exception extractAndWrapCause(Exception exception) {
     /**
      * A gzip compressing entity that also implements {@code getContent()}.
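+     * The wrapper advertises {@code gzip} content encoding and compresses the wrapped content on
+     * the fly, both when streamed through {@code writeTo} and when buffered through {@code getContent}.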
*/ - public static class ContentCompressingEntity extends GzipCompressingEntity { + public static class ContentCompressingEntity extends HttpEntityWrapper { + private static final String GZIP_CODEC = "gzip"; + private Optional chunkedEnabled; /** @@ -979,6 +1063,14 @@ public ContentCompressingEntity(HttpEntity entity) { this.chunkedEnabled = Optional.empty(); } + /** + * Returns content encoding of the entity, if known. + */ + @Override + public String getContentEncoding() { + return GZIP_CODEC; + } + /** * Creates a {@link ContentCompressingEntity} instance with the provided HTTP entity. * @@ -990,11 +1082,14 @@ public ContentCompressingEntity(HttpEntity entity, boolean chunkedEnabled) { this.chunkedEnabled = Optional.of(chunkedEnabled); } + /** + * Returns a content stream of the entity. + */ @Override public InputStream getContent() throws IOException { ByteArrayInputOutputStream out = new ByteArrayInputOutputStream(1024); try (GZIPOutputStream gzipOut = new GZIPOutputStream(out)) { - wrappedEntity.writeTo(gzipOut); + super.writeTo(gzipOut); } return out.asInput(); } @@ -1030,9 +1125,24 @@ public long getContentLength() { return size; } } else { - return super.getContentLength(); + return -1; } } + + /** + * Writes the entity content out to the output stream. + * @param outStream the output stream to write entity content to + * @throws IOException if an I/O error occurs + */ + @Override + public void writeTo(final OutputStream outStream) throws IOException { + Args.notNull(outStream, "Output stream"); + final GZIPOutputStream gzip = new GZIPOutputStream(outStream); + super.writeTo(gzip); + // Only close output stream if the wrapped entity has been + // successfully written out + gzip.close(); + } } /** diff --git a/client/rest/src/main/java/org/opensearch/client/RestClientBuilder.java b/client/rest/src/main/java/org/opensearch/client/RestClientBuilder.java index 8841d371754c3..a01cf2f403099 100644 --- a/client/rest/src/main/java/org/opensearch/client/RestClientBuilder.java +++ b/client/rest/src/main/java/org/opensearch/client/RestClientBuilder.java @@ -32,15 +32,26 @@ package org.opensearch.client; -import org.apache.http.Header; -import org.apache.http.client.config.RequestConfig; -import org.apache.http.impl.client.CloseableHttpClient; -import org.apache.http.impl.client.HttpClientBuilder; -import org.apache.http.impl.nio.client.CloseableHttpAsyncClient; -import org.apache.http.impl.nio.client.HttpAsyncClientBuilder; -import org.apache.http.nio.conn.SchemeIOSessionStrategy; +import org.apache.hc.core5.function.Factory; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.nio.ssl.TlsStrategy; +import org.apache.hc.core5.reactor.ssl.TlsDetails; +import org.apache.hc.core5.util.Timeout; +import org.apache.hc.client5.http.async.HttpAsyncClient; +import org.apache.hc.client5.http.auth.CredentialsProvider; +import org.apache.hc.client5.http.config.RequestConfig; +import org.apache.hc.client5.http.impl.classic.CloseableHttpClient; +import org.apache.hc.client5.http.impl.classic.HttpClientBuilder; +import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManager; +import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManagerBuilder; +import org.apache.hc.client5.http.ssl.ClientTlsStrategyBuilder; +import org.apache.hc.client5.http.impl.DefaultAuthenticationStrategy; +import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient; +import org.apache.hc.client5.http.impl.async.HttpAsyncClientBuilder; import javax.net.ssl.SSLContext; 
+import javax.net.ssl.SSLEngine; + import java.security.AccessController; import java.security.NoSuchAlgorithmException; import java.security.PrivilegedAction; @@ -50,19 +61,19 @@ /** * Helps creating a new {@link RestClient}. Allows to set the most common http client configuration options when internally - * creating the underlying {@link org.apache.http.nio.client.HttpAsyncClient}. Also allows to provide an externally created - * {@link org.apache.http.nio.client.HttpAsyncClient} in case additional customization is needed. + * creating the underlying {@link HttpAsyncClient}. Also allows to provide an externally created + * {@link HttpAsyncClient} in case additional customization is needed. */ public final class RestClientBuilder { /** - * The default connection timout in milliseconds. + * The default connection timeout in milliseconds. */ public static final int DEFAULT_CONNECT_TIMEOUT_MILLIS = 1000; /** - * The default socket timeout in milliseconds. + * The default response timeout in milliseconds. */ - public static final int DEFAULT_SOCKET_TIMEOUT_MILLIS = 30000; + public static final int DEFAULT_RESPONSE_TIMEOUT_MILLIS = 30000; /** * The default maximum of connections per route. @@ -296,20 +307,35 @@ public RestClient build() { private CloseableHttpAsyncClient createHttpClient() { // default timeouts are all infinite RequestConfig.Builder requestConfigBuilder = RequestConfig.custom() - .setConnectTimeout(DEFAULT_CONNECT_TIMEOUT_MILLIS) - .setSocketTimeout(DEFAULT_SOCKET_TIMEOUT_MILLIS); + .setConnectTimeout(Timeout.ofMilliseconds(DEFAULT_CONNECT_TIMEOUT_MILLIS)) + .setResponseTimeout(Timeout.ofMilliseconds(DEFAULT_RESPONSE_TIMEOUT_MILLIS)); if (requestConfigCallback != null) { requestConfigBuilder = requestConfigCallback.customizeRequestConfig(requestConfigBuilder); } try { - HttpAsyncClientBuilder httpClientBuilder = HttpAsyncClientBuilder.create() - .setDefaultRequestConfig(requestConfigBuilder.build()) - // default settings for connection pooling may be too constraining + final TlsStrategy tlsStrategy = ClientTlsStrategyBuilder.create() + .setSslContext(SSLContext.getDefault()) + // See https://issues.apache.org/jira/browse/HTTPCLIENT-2219 + .setTlsDetailsFactory(new Factory() { + @Override + public TlsDetails create(final SSLEngine sslEngine) { + return new TlsDetails(sslEngine.getSession(), sslEngine.getApplicationProtocol()); + } + }) + .build(); + + final PoolingAsyncClientConnectionManager connectionManager = PoolingAsyncClientConnectionManagerBuilder.create() .setMaxConnPerRoute(DEFAULT_MAX_CONN_PER_ROUTE) .setMaxConnTotal(DEFAULT_MAX_CONN_TOTAL) - .setSSLContext(SSLContext.getDefault()) - .setTargetAuthenticationStrategy(new PersistentCredentialsAuthenticationStrategy()); + .setTlsStrategy(tlsStrategy) + .build(); + + HttpAsyncClientBuilder httpClientBuilder = HttpAsyncClientBuilder.create() + .setDefaultRequestConfig(requestConfigBuilder.build()) + .setConnectionManager(connectionManager) + .setTargetAuthenticationStrategy(DefaultAuthenticationStrategy.INSTANCE) + .disableAutomaticRetries(); if (httpClientConfigCallback != null) { httpClientBuilder = httpClientConfigCallback.customizeHttpClient(httpClientBuilder); } @@ -344,9 +370,9 @@ public interface RequestConfigCallback { public interface HttpClientConfigCallback { /** * Allows to customize the {@link CloseableHttpAsyncClient} being created and used by the {@link RestClient}. 
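+     * The builder passed to the callback already carries the defaults set up by this class, such as
+     * the request config, the pooling connection manager, and the authentication strategy, so
+     * implementations should customize it rather than build a new one from scratch.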
- * Commonly used to customize the default {@link org.apache.http.client.CredentialsProvider} for authentication
-     * or the {@link SchemeIOSessionStrategy} for communication through ssl without losing any other useful default
-     * value that the {@link RestClientBuilder} internally sets, like connection pooling.
+     * Commonly used to customize the default {@link CredentialsProvider} for authentication, or the TLS/SSL
+     * communication settings, without losing any other useful default value that the {@link RestClientBuilder}
+     * internally sets, like connection pooling.
      *
      * @param httpClientBuilder the {@link HttpClientBuilder} for customizing the client instance.
      */
diff --git a/client/rest/src/main/java/org/opensearch/client/http/HttpUriRequestProducer.java b/client/rest/src/main/java/org/opensearch/client/http/HttpUriRequestProducer.java
new file mode 100644
index 0000000000000..a65427cd0b032
--- /dev/null
+++ b/client/rest/src/main/java/org/opensearch/client/http/HttpUriRequestProducer.java
@@ -0,0 +1,63 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.client.http;
+
+import org.apache.hc.client5.http.classic.methods.HttpUriRequestBase;
+import org.apache.hc.core5.http.HttpEntity;
+import org.apache.hc.core5.http.HttpHost;
+import org.apache.hc.core5.http.nio.AsyncEntityProducer;
+import org.apache.hc.core5.http.nio.support.BasicRequestProducer;
+import org.apache.hc.core5.net.URIAuthority;
+import org.apache.hc.core5.util.Args;
+import org.opensearch.client.nio.HttpEntityAsyncEntityProducer;
+
+/**
+ * A producer of {@link HttpUriRequestBase} instances associated with a particular {@link HttpHost}
+ */
+public class HttpUriRequestProducer extends BasicRequestProducer {
+    private final HttpUriRequestBase request;
+
+    HttpUriRequestProducer(final HttpUriRequestBase request, final AsyncEntityProducer entityProducer) {
+        super(request, entityProducer);
+        this.request = request;
+    }
+
+    /**
+     * Get the produced {@link HttpUriRequestBase} instance
+     * @return produced {@link HttpUriRequestBase} instance
+     */
+    public HttpUriRequestBase getRequest() {
+        return request;
+    }
+
+    /**
+     * Create a new request producer for a {@link HttpUriRequestBase} instance and {@link HttpHost}
+     * @param request {@link HttpUriRequestBase} instance
+     * @param host {@link HttpHost} instance
+     * @return new request producer
+     */
+    public static HttpUriRequestProducer create(final HttpUriRequestBase request, final HttpHost host) {
+        Args.notNull(request, "Request");
+        Args.notNull(host, "HttpHost");
+
+        // TODO: Should we copy request here instead of modifying in place?
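+        // Rebase the request onto the target host: authority and scheme are taken from the host so
+        // that the async transport routes the produced request to the intended endpoint.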
+ request.setAuthority(new URIAuthority(host)); + request.setScheme(host.getSchemeName()); + + final HttpEntity entity = request.getEntity(); + AsyncEntityProducer entityProducer = null; + + if (entity != null) { + entityProducer = new HttpEntityAsyncEntityProducer(entity); + } + + return new HttpUriRequestProducer(request, entityProducer); + } + +} diff --git a/client/rest/src/main/java/org/opensearch/client/http/package-info.java b/client/rest/src/main/java/org/opensearch/client/http/package-info.java new file mode 100644 index 0000000000000..32e0aa2016d53 --- /dev/null +++ b/client/rest/src/main/java/org/opensearch/client/http/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * HTTP support classes for REST client. + */ +package org.opensearch.client.http; diff --git a/client/rest/src/main/java/org/opensearch/client/nio/HeapBufferedAsyncEntityConsumer.java b/client/rest/src/main/java/org/opensearch/client/nio/HeapBufferedAsyncEntityConsumer.java new file mode 100644 index 0000000000000..9bd17d1c24c7e --- /dev/null +++ b/client/rest/src/main/java/org/opensearch/client/nio/HeapBufferedAsyncEntityConsumer.java @@ -0,0 +1,139 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.client.nio; + +import org.apache.hc.core5.http.ContentTooLongException; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpException; +import org.apache.hc.core5.http.nio.AsyncEntityConsumer; +import org.apache.hc.core5.http.nio.entity.AbstractBinAsyncEntityConsumer; +import org.apache.hc.core5.util.ByteArrayBuffer; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.concurrent.atomic.AtomicReference; + +/** + * Default implementation of {@link AsyncEntityConsumer}. Buffers the whole + * response content in heap memory, meaning that the size of the buffer is equal to the content-length of the response. + * Limits the size of responses that can be read based on a configurable argument. Throws an exception in case the entity is longer + * than the configured buffer limit. + */ +public class HeapBufferedAsyncEntityConsumer extends AbstractBinAsyncEntityConsumer { + + private final int bufferLimitBytes; + private AtomicReference bufferRef = new AtomicReference<>(); + + /** + * Creates a new instance of this consumer with the provided buffer limit. + * + * @param bufferLimit the buffer limit. Must be greater than 0. + * @throws IllegalArgumentException if {@code bufferLimit} is less than or equal to 0. + */ + public HeapBufferedAsyncEntityConsumer(int bufferLimit) { + if (bufferLimit <= 0) { + throw new IllegalArgumentException("bufferLimit must be greater than 0"); + } + this.bufferLimitBytes = bufferLimit; + } + + /** + * Get the limit of the buffer. + */ + public int getBufferLimit() { + return bufferLimitBytes; + } + + /** + * Triggered to signal beginning of entity content stream. 
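+     * No buffering state needs to be initialized at stream start, hence the empty implementation.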
+     *
+     * @param contentType the entity content type
+     */
+    @Override
+    protected void streamStart(final ContentType contentType) throws HttpException, IOException {}
+
+    /**
+     * Triggered to obtain the capacity increment.
+     *
+     * @return the number of bytes this consumer is prepared to process.
+     */
+    @Override
+    protected int capacityIncrement() {
+        return Integer.MAX_VALUE;
+    }
+
+    /**
+     * Triggered to pass incoming data packet to the data consumer.
+     *
+     * @param src the data packet.
+     * @param endOfStream flag indicating whether this data packet is the last in the data stream.
+     */
+    @Override
+    protected void data(final ByteBuffer src, final boolean endOfStream) throws IOException {
+        if (src == null) {
+            return;
+        }
+
+        ByteArrayBuffer buffer = bufferRef.get();
+        if (buffer == null) {
+            buffer = new ByteArrayBuffer(bufferLimitBytes);
+            if (bufferRef.compareAndSet(null, buffer) == false) {
+                buffer = bufferRef.get();
+            }
+        }
+
+        final int len = src.remaining();
+        if (buffer.length() + len > bufferLimitBytes) {
+            throw new ContentTooLongException(
+                "entity content is too long [" + len + "] for the configured buffer limit [" + bufferLimitBytes + "]"
+            );
+        }
+
+        if (src.hasArray()) {
+            buffer.append(src.array(), src.arrayOffset() + src.position(), src.remaining());
+        } else {
+            while (src.hasRemaining()) {
+                buffer.append(src.get());
+            }
+        }
+    }
+
+    /**
+     * Triggered to generate entity representation.
+     *
+     * @return the entity content
+     */
+    @Override
+    protected byte[] generateContent() throws IOException {
+        final ByteArrayBuffer buffer = bufferRef.get();
+        return buffer == null ? new byte[0] : buffer.toByteArray();
+    }
+
+    /**
+     * Release resources being held
+     */
+    @Override
+    public void releaseResources() {
+        final ByteArrayBuffer buffer = bufferRef.getAndSet(null);
+        if (buffer != null) {
+            buffer.clear();
+        }
+    }
+}
diff --git a/client/rest/src/main/java/org/opensearch/client/nio/HeapBufferedAsyncResponseConsumer.java b/client/rest/src/main/java/org/opensearch/client/nio/HeapBufferedAsyncResponseConsumer.java
new file mode 100644
index 0000000000000..3d93478f49f99
--- /dev/null
+++ b/client/rest/src/main/java/org/opensearch/client/nio/HeapBufferedAsyncResponseConsumer.java
@@ -0,0 +1,123 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */ + +package org.opensearch.client.nio; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpException; +import org.apache.hc.core5.http.HttpHeaders; +import org.apache.hc.core5.http.HttpResponse; +import org.apache.hc.core5.http.io.entity.ByteArrayEntity; +import org.apache.hc.core5.http.message.BasicClassicHttpResponse; +import org.apache.hc.core5.http.nio.AsyncResponseConsumer; +import org.apache.hc.core5.http.nio.support.AbstractAsyncResponseConsumer; +import org.apache.hc.core5.http.protocol.HttpContext; + +import java.io.IOException; + +/** + * Default implementation of {@link AsyncResponseConsumer}. Buffers the whole + * response content in heap memory, meaning that the size of the buffer is equal to the content-length of the response. + * Limits the size of responses that can be read based on a configurable argument. Throws an exception in case the entity is longer + * than the configured buffer limit. + */ +public class HeapBufferedAsyncResponseConsumer extends AbstractAsyncResponseConsumer { + private static final Log LOGGER = LogFactory.getLog(HeapBufferedAsyncResponseConsumer.class); + private final int bufferLimit; + + /** + * Creates a new instance of this consumer with the provided buffer limit. + * + * @param bufferLimit the buffer limit. Must be greater than 0. + * @throws IllegalArgumentException if {@code bufferLimit} is less than or equal to 0. + */ + public HeapBufferedAsyncResponseConsumer(int bufferLimit) { + super(new HeapBufferedAsyncEntityConsumer(bufferLimit)); + this.bufferLimit = bufferLimit; + } + + /** + * Get the limit of the buffer. + */ + public int getBufferLimit() { + return bufferLimit; + } + + /** + * Triggered to signal receipt of an intermediate (1xx) HTTP response. + * + * @param response the intermediate (1xx) HTTP response. + * @param context the actual execution context. + */ + @Override + public void informationResponse(final HttpResponse response, final HttpContext context) throws HttpException, IOException {} + + /** + * Triggered to generate object that represents a result of response message processing. + * @param response the response message. + * @param entity the response entity. + * @param contentType the response content type. + * @return the result of response processing. 
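+     * The asynchronously received response head and the heap-buffered body bytes are recombined
+     * here into a fully materialized classic response.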
+ */ + @Override + protected ClassicHttpResponse buildResult(final HttpResponse response, final byte[] entity, final ContentType contentType) { + final ClassicHttpResponse classicResponse = new BasicClassicHttpResponse(response.getCode()); + classicResponse.setVersion(response.getVersion()); + classicResponse.setHeaders(response.getHeaders()); + classicResponse.setReasonPhrase(response.getReasonPhrase()); + if (response.getLocale() != null) { + classicResponse.setLocale(response.getLocale()); + } + + if (entity != null) { + String encoding = null; + + try { + final Header contentEncoding = response.getHeader(HttpHeaders.CONTENT_ENCODING); + if (contentEncoding != null) { + encoding = contentEncoding.getValue(); + } + } catch (final HttpException ex) { + LOGGER.debug("Unable to detect content encoding", ex); + } + + final ByteArrayEntity httpEntity = new ByteArrayEntity(entity, contentType, encoding); + classicResponse.setEntity(httpEntity); + } + + return classicResponse; + } +} diff --git a/client/rest/src/main/java/org/opensearch/client/nio/HttpEntityAsyncEntityProducer.java b/client/rest/src/main/java/org/opensearch/client/nio/HttpEntityAsyncEntityProducer.java new file mode 100644 index 0000000000000..81fe77ddcfbed --- /dev/null +++ b/client/rest/src/main/java/org/opensearch/client/nio/HttpEntityAsyncEntityProducer.java @@ -0,0 +1,182 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.client.nio; + +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.nio.AsyncEntityProducer; +import org.apache.hc.core5.http.nio.DataStreamChannel; +import org.apache.hc.core5.http.nio.ResourceHolder; +import org.apache.hc.core5.util.Args; +import org.apache.hc.core5.util.Asserts; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.Channels; +import java.nio.channels.ReadableByteChannel; +import java.util.Set; +import java.util.concurrent.atomic.AtomicReference; + +/** + * The {@link AsyncEntityProducer} implementation for {@link HttpEntity} + */ +public class HttpEntityAsyncEntityProducer implements AsyncEntityProducer { + + private final HttpEntity entity; + private final ByteBuffer byteBuffer; + private final boolean chunked; + private final AtomicReference exception; + private final AtomicReference channelRef; + private boolean eof; + + /** + * Create new async HTTP entity producer + * @param entity HTTP entity + * @param bufferSize buffer size + */ + public HttpEntityAsyncEntityProducer(final HttpEntity entity, final int bufferSize) { + this.entity = Args.notNull(entity, "Http Entity"); + this.byteBuffer = ByteBuffer.allocate(bufferSize); + this.chunked = entity.isChunked(); + this.exception = new AtomicReference<>(); + this.channelRef = new AtomicReference<>(); + } + + /** + * Create new async HTTP entity producer with default buffer size (8192 bytes) + * @param entity HTTP entity + */ + public HttpEntityAsyncEntityProducer(final HttpEntity entity) { + this(entity, 8192); + } + + /** + * Determines whether the producer can consistently produce the same content + * after invocation of {@link ResourceHolder#releaseResources()}. + */ + @Override + public boolean isRepeatable() { + return entity.isRepeatable(); + } + + /** + * Returns content type of the entity, if known. 
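+     * As with the other metadata accessors of this producer, the value is taken from the wrapped entity.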
+ */ + @Override + public String getContentType() { + return entity.getContentType(); + } + + /** + * Returns length of the entity, if known. + */ + @Override + public long getContentLength() { + return entity.getContentLength(); + } + + /** + * Returns the number of bytes immediately available for output. + * This method can be used as a hint to control output events + * of the underlying I/O session. + * + * @return the number of bytes immediately available for output + */ + @Override + public int available() { + return Integer.MAX_VALUE; + } + + /** + * Returns content encoding of the entity, if known. + */ + @Override + public String getContentEncoding() { + return entity.getContentEncoding(); + } + + /** + * Returns chunked transfer hint for this entity. + *

+     * <p>
+     * The behavior of wrapping entities is implementation dependent,
+     * but should respect the primary purpose.
+     * </p>

+ */ + @Override + public boolean isChunked() { + return chunked; + } + + /** + * Preliminary declaration of trailing headers. + */ + @Override + public Set getTrailerNames() { + return entity.getTrailerNames(); + } + + /** + * Triggered to signal the ability of the underlying data channel + * to accept more data. The data producer can choose to write data + * immediately inside the call or asynchronously at some later point. + * + * @param channel the data channel capable to accepting more data. + */ + @Override + public void produce(final DataStreamChannel channel) throws IOException { + ReadableByteChannel stream = channelRef.get(); + if (stream == null) { + stream = Channels.newChannel(entity.getContent()); + Asserts.check(channelRef.getAndSet(stream) == null, "Illegal producer state"); + } + if (!eof) { + final int bytesRead = stream.read(byteBuffer); + if (bytesRead < 0) { + eof = true; + } + } + if (byteBuffer.position() > 0) { + byteBuffer.flip(); + channel.write(byteBuffer); + byteBuffer.compact(); + } + if (eof && byteBuffer.position() == 0) { + channel.endStream(); + releaseResources(); + } + } + + /** + * Triggered to signal a failure in data generation. + * + * @param cause the cause of the failure. + */ + @Override + public void failed(final Exception cause) { + if (exception.compareAndSet(null, cause)) { + releaseResources(); + } + } + + /** + * Release resources being held + */ + @Override + public void releaseResources() { + eof = false; + final ReadableByteChannel stream = channelRef.getAndSet(null); + if (stream != null) { + try { + stream.close(); + } catch (final IOException ex) { + /* Close quietly */ + } + } + } + +} diff --git a/client/rest/src/main/java/org/opensearch/client/nio/package-info.java b/client/rest/src/main/java/org/opensearch/client/nio/package-info.java new file mode 100644 index 0000000000000..ce4961ed21f7c --- /dev/null +++ b/client/rest/src/main/java/org/opensearch/client/nio/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * NIO support classes for REST client. 
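+ * These classes bridge classic, blocking {@code HttpEntity} payloads onto the Apache HttpClient 5
+ * asynchronous transport: heap-buffered entity and response consumers plus an entity-backed request producer.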
+ */ +package org.opensearch.client.nio; diff --git a/client/rest/src/test/java/org/opensearch/client/FailureTrackingResponseListenerTests.java b/client/rest/src/test/java/org/opensearch/client/FailureTrackingResponseListenerTests.java index 0a997a586acc9..9722ec867a376 100644 --- a/client/rest/src/test/java/org/opensearch/client/FailureTrackingResponseListenerTests.java +++ b/client/rest/src/test/java/org/opensearch/client/FailureTrackingResponseListenerTests.java @@ -32,14 +32,11 @@ package org.opensearch.client; -import org.apache.http.HttpHost; -import org.apache.http.HttpResponse; -import org.apache.http.ProtocolVersion; -import org.apache.http.RequestLine; -import org.apache.http.StatusLine; -import org.apache.http.message.BasicHttpResponse; -import org.apache.http.message.BasicRequestLine; -import org.apache.http.message.BasicStatusLine; +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.ProtocolVersion; +import org.apache.hc.core5.http.message.RequestLine; +import org.apache.hc.core5.http.message.BasicClassicHttpResponse; import java.util.concurrent.atomic.AtomicReference; @@ -116,9 +113,8 @@ public void onFailure(Exception exception) { private static Response mockResponse() { ProtocolVersion protocolVersion = new ProtocolVersion("HTTP", 1, 1); - RequestLine requestLine = new BasicRequestLine("GET", "/", protocolVersion); - StatusLine statusLine = new BasicStatusLine(protocolVersion, 200, "OK"); - HttpResponse httpResponse = new BasicHttpResponse(statusLine); + RequestLine requestLine = new RequestLine("GET", "/", protocolVersion); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(200, "OK"); return new Response(requestLine, new HttpHost("localhost", 9200), httpResponse); } } diff --git a/client/rest/src/test/java/org/opensearch/client/HasAttributeNodeSelectorTests.java b/client/rest/src/test/java/org/opensearch/client/HasAttributeNodeSelectorTests.java index fd18bba6ee548..b5aca86e95d6c 100644 --- a/client/rest/src/test/java/org/opensearch/client/HasAttributeNodeSelectorTests.java +++ b/client/rest/src/test/java/org/opensearch/client/HasAttributeNodeSelectorTests.java @@ -32,7 +32,7 @@ package org.opensearch.client; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.client.Node.Roles; import java.util.ArrayList; diff --git a/client/rest/src/test/java/org/opensearch/client/HeapBufferedAsyncResponseConsumerTests.java b/client/rest/src/test/java/org/opensearch/client/HeapBufferedAsyncResponseConsumerTests.java index 22852fe4cb793..ed329d973eb78 100644 --- a/client/rest/src/test/java/org/opensearch/client/HeapBufferedAsyncResponseConsumerTests.java +++ b/client/rest/src/test/java/org/opensearch/client/HeapBufferedAsyncResponseConsumerTests.java @@ -32,34 +32,31 @@ package org.opensearch.client; -import org.apache.http.ContentTooLongException; -import org.apache.http.HttpEntity; -import org.apache.http.HttpResponse; -import org.apache.http.ProtocolVersion; -import org.apache.http.StatusLine; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; -import org.apache.http.message.BasicHttpResponse; -import org.apache.http.message.BasicStatusLine; -import org.apache.http.nio.ContentDecoder; -import org.apache.http.nio.IOControl; -import org.apache.http.nio.protocol.HttpAsyncResponseConsumer; -import org.apache.http.protocol.HttpContext; - +import org.apache.hc.core5.http.ClassicHttpResponse; +import 
org.apache.hc.core5.http.ContentTooLongException; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.EntityDetails; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.impl.BasicEntityDetails; +import org.apache.hc.core5.http.io.entity.AbstractHttpEntity; +import org.apache.hc.core5.http.message.BasicClassicHttpResponse; +import org.apache.hc.core5.http.nio.AsyncResponseConsumer; +import org.apache.hc.core5.http.protocol.HttpContext; +import org.opensearch.client.nio.HeapBufferedAsyncResponseConsumer; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Modifier; +import java.nio.charset.StandardCharsets; import java.util.concurrent.atomic.AtomicReference; import static org.hamcrest.CoreMatchers.instanceOf; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertSame; import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; public class HeapBufferedAsyncResponseConsumerTests extends RestClientTestCase { @@ -67,33 +64,6 @@ public class HeapBufferedAsyncResponseConsumerTests extends RestClientTestCase { private static final int MAX_TEST_BUFFER_SIZE = 50 * 1024 * 1024; private static final int TEST_BUFFER_LIMIT = 10 * 1024 * 1024; - public void testResponseProcessing() throws Exception { - ContentDecoder contentDecoder = mock(ContentDecoder.class); - IOControl ioControl = mock(IOControl.class); - HttpContext httpContext = mock(HttpContext.class); - - HeapBufferedAsyncResponseConsumer consumer = spy(new HeapBufferedAsyncResponseConsumer(TEST_BUFFER_LIMIT)); - - ProtocolVersion protocolVersion = new ProtocolVersion("HTTP", 1, 1); - StatusLine statusLine = new BasicStatusLine(protocolVersion, 200, "OK"); - HttpResponse httpResponse = new BasicHttpResponse(statusLine); - httpResponse.setEntity(new StringEntity("test", ContentType.TEXT_PLAIN)); - - // everything goes well - consumer.responseReceived(httpResponse); - consumer.consumeContent(contentDecoder, ioControl); - consumer.responseCompleted(httpContext); - - verify(consumer).releaseResources(); - verify(consumer).buildResult(httpContext); - assertTrue(consumer.isDone()); - assertSame(httpResponse, consumer.getResult()); - - consumer.responseCompleted(httpContext); - verify(consumer, times(1)).releaseResources(); - verify(consumer, times(1)).buildResult(httpContext); - } - public void testDefaultBufferLimit() throws Exception { HeapBufferedAsyncResponseConsumer consumer = new HeapBufferedAsyncResponseConsumer(TEST_BUFFER_LIMIT); bufferLimitTest(consumer, TEST_BUFFER_LIMIT); @@ -127,7 +97,7 @@ public void testCanConfigureHeapBufferLimitFromOutsidePackage() throws ClassNotF assertThat(object, instanceOf(HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory.class)); HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory consumerFactory = (HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory) object; - HttpAsyncResponseConsumer consumer = consumerFactory.createHttpAsyncResponseConsumer(); + AsyncResponseConsumer consumer = consumerFactory.createHttpAsyncResponseConsumer(); assertThat(consumer, instanceOf(HeapBufferedAsyncResponseConsumer.class)); 
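
A pattern that repeats throughout these test changes: HttpCore 5 drops the status-line plumbing, so responses are built from the code and reason phrase directly and read back via `getCode()`. A minimal sketch of the new shape (class name is hypothetical, not part of this PR):

```java
import org.apache.hc.core5.http.ClassicHttpResponse;
import org.apache.hc.core5.http.ContentType;
import org.apache.hc.core5.http.io.entity.StringEntity;
import org.apache.hc.core5.http.message.BasicClassicHttpResponse;

public final class ResponseDemo {
    public static void main(String[] args) {
        // 4.x required a ProtocolVersion + BasicStatusLine; 5.x takes the code and
        // reason phrase directly and removes the StatusLine accessor entirely.
        ClassicHttpResponse response = new BasicClassicHttpResponse(200, "OK");
        response.setEntity(new StringEntity("{}", ContentType.APPLICATION_JSON));
        System.out.println(response.getCode());         // 200; was getStatusLine().getStatusCode()
        System.out.println(response.getReasonPhrase()); // "OK"
    }
}
```
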
HeapBufferedAsyncResponseConsumer bufferedAsyncResponseConsumer = (HeapBufferedAsyncResponseConsumer) consumer; assertEquals(bufferLimit, bufferedAsyncResponseConsumer.getBufferLimit()); @@ -138,23 +108,40 @@ public void testHttpAsyncResponseConsumerFactoryVisibility() throws ClassNotFoun } private static void bufferLimitTest(HeapBufferedAsyncResponseConsumer consumer, int bufferLimit) throws Exception { - ProtocolVersion protocolVersion = new ProtocolVersion("HTTP", 1, 1); - StatusLine statusLine = new BasicStatusLine(protocolVersion, 200, "OK"); - consumer.onResponseReceived(new BasicHttpResponse(statusLine)); + HttpContext httpContext = mock(HttpContext.class); + + BasicClassicHttpResponse response = new BasicClassicHttpResponse(200, "OK"); + consumer.consumeResponse(response, null, httpContext, null); final AtomicReference contentLength = new AtomicReference<>(); - HttpEntity entity = new StringEntity("", ContentType.APPLICATION_JSON) { + HttpEntity entity = new AbstractHttpEntity(ContentType.APPLICATION_JSON, null, false) { @Override public long getContentLength() { return contentLength.get(); } + + @Override + public InputStream getContent() throws IOException, UnsupportedOperationException { + return new ByteArrayInputStream("".getBytes(StandardCharsets.UTF_8)); + } + + @Override + public boolean isStreaming() { + return false; + } + + @Override + public void close() throws IOException {} }; contentLength.set(randomLongBetween(0L, bufferLimit)); - consumer.onEntityEnclosed(entity, ContentType.APPLICATION_JSON); + response.setEntity(entity); + + final EntityDetails details = new BasicEntityDetails(4096, ContentType.APPLICATION_JSON); + consumer.consumeResponse(response, details, httpContext, null); contentLength.set(randomLongBetween(bufferLimit + 1, MAX_TEST_BUFFER_SIZE)); try { - consumer.onEntityEnclosed(entity, ContentType.APPLICATION_JSON); + consumer.consumeResponse(response, details, httpContext, null); } catch (ContentTooLongException e) { assertEquals( "entity content is too long [" + entity.getContentLength() + "] for the configured buffer limit [" + bufferLimit + "]", diff --git a/client/rest/src/test/java/org/opensearch/client/HostsTrackingFailureListener.java b/client/rest/src/test/java/org/opensearch/client/HostsTrackingFailureListener.java index 2b256e7205397..0e454c6f919f5 100644 --- a/client/rest/src/test/java/org/opensearch/client/HostsTrackingFailureListener.java +++ b/client/rest/src/test/java/org/opensearch/client/HostsTrackingFailureListener.java @@ -32,7 +32,7 @@ package org.opensearch.client; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import java.util.HashSet; import java.util.List; diff --git a/client/rest/src/test/java/org/opensearch/client/NodeSelectorTests.java b/client/rest/src/test/java/org/opensearch/client/NodeSelectorTests.java index 65a831e59bfb0..cfc95f0281bcc 100644 --- a/client/rest/src/test/java/org/opensearch/client/NodeSelectorTests.java +++ b/client/rest/src/test/java/org/opensearch/client/NodeSelectorTests.java @@ -32,7 +32,7 @@ package org.opensearch.client; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.client.Node.Roles; import java.util.ArrayList; diff --git a/client/rest/src/test/java/org/opensearch/client/NodeTests.java b/client/rest/src/test/java/org/opensearch/client/NodeTests.java index 352296fa3024a..748bec5fb7de5 100644 --- a/client/rest/src/test/java/org/opensearch/client/NodeTests.java +++ 
b/client/rest/src/test/java/org/opensearch/client/NodeTests.java @@ -32,7 +32,7 @@ package org.opensearch.client; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.client.Node.Roles; import java.util.Arrays; @@ -48,7 +48,9 @@ import static java.util.Collections.singletonMap; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; +import static org.hamcrest.CoreMatchers.equalTo; public class NodeTests extends RestClientTestCase { public void testToString() { @@ -161,4 +163,9 @@ public void testEqualsAndHashCode() { ) ); } + + public void testIsSearchNode() { + Roles searchRole = new Roles(Collections.singleton("search")); + assertThat(searchRole.isSearch(), equalTo(true)); + } } diff --git a/client/rest/src/test/java/org/opensearch/client/PreferHasAttributeNodeSelectorTests.java b/client/rest/src/test/java/org/opensearch/client/PreferHasAttributeNodeSelectorTests.java index 0135cde573743..7dde1b96b3b45 100644 --- a/client/rest/src/test/java/org/opensearch/client/PreferHasAttributeNodeSelectorTests.java +++ b/client/rest/src/test/java/org/opensearch/client/PreferHasAttributeNodeSelectorTests.java @@ -32,7 +32,7 @@ package org.opensearch.client; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.client.Node.Roles; import java.util.ArrayList; diff --git a/client/rest/src/test/java/org/opensearch/client/RequestLoggerTests.java b/client/rest/src/test/java/org/opensearch/client/RequestLoggerTests.java index 3c317db1b72d9..8dea2ad922bd6 100644 --- a/client/rest/src/test/java/org/opensearch/client/RequestLoggerTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RequestLoggerTests.java @@ -32,27 +32,29 @@ package org.opensearch.client; -import org.apache.http.Header; -import org.apache.http.HttpEntity; -import org.apache.http.HttpEntityEnclosingRequest; -import org.apache.http.HttpHost; -import org.apache.http.ProtocolVersion; -import org.apache.http.client.methods.HttpHead; -import org.apache.http.client.methods.HttpOptions; -import org.apache.http.client.methods.HttpPatch; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; -import org.apache.http.client.methods.HttpTrace; -import org.apache.http.client.methods.HttpUriRequest; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.InputStreamEntity; -import org.apache.http.entity.StringEntity; -import org.apache.http.message.BasicHeader; -import org.apache.http.message.BasicHttpResponse; -import org.apache.http.message.BasicStatusLine; -import org.apache.http.nio.entity.NByteArrayEntity; -import org.apache.http.nio.entity.NStringEntity; -import org.apache.http.util.EntityUtils; +import org.apache.hc.client5.http.classic.methods.HttpDelete; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpHead; +import org.apache.hc.client5.http.classic.methods.HttpOptions; +import org.apache.hc.client5.http.classic.methods.HttpPatch; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; +import org.apache.hc.client5.http.classic.methods.HttpTrace; +import org.apache.hc.client5.http.classic.methods.HttpUriRequest; +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.ContentType; +import 
org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.ProtocolVersion; +import org.apache.hc.core5.http.io.entity.ByteArrayEntity; +import org.apache.hc.core5.http.io.entity.EntityUtils; +import org.apache.hc.core5.http.io.entity.InputStreamEntity; +import org.apache.hc.core5.http.io.entity.StringEntity; +import org.apache.hc.core5.http.message.BasicClassicHttpResponse; +import org.apache.hc.core5.http.message.BasicHeader; +import org.apache.hc.core5.http.message.StatusLine; import java.io.ByteArrayInputStream; import java.io.IOException; @@ -66,8 +68,8 @@ import static org.junit.Assert.assertThat; public class RequestLoggerTests extends RestClientTestCase { - public void testTraceRequest() throws IOException, URISyntaxException { - HttpHost host = new HttpHost("localhost", 9200, randomBoolean() ? "http" : "https"); + public void testTraceRequest() throws IOException, URISyntaxException, ParseException { + HttpHost host = new HttpHost(randomBoolean() ? "http" : "https", "localhost", 9200); String expectedEndpoint = "/index/type/_api"; URI uri; if (randomBoolean()) { @@ -77,11 +79,10 @@ public void testTraceRequest() throws IOException, URISyntaxException { } HttpUriRequest request = randomHttpRequest(uri); String expected = "curl -iX " + request.getMethod() + " '" + host + expectedEndpoint + "'"; - boolean hasBody = request instanceof HttpEntityEnclosingRequest && randomBoolean(); + boolean hasBody = !(request instanceof HttpTrace) && randomBoolean(); String requestBody = "{ \"field\": \"value\" }"; if (hasBody) { expected += " -d '" + requestBody + "'"; - HttpEntityEnclosingRequest enclosingRequest = (HttpEntityEnclosingRequest) request; HttpEntity entity; switch (randomIntBetween(0, 4)) { case 0: @@ -94,10 +95,10 @@ public void testTraceRequest() throws IOException, URISyntaxException { ); break; case 2: - entity = new NStringEntity(requestBody, ContentType.APPLICATION_JSON); + entity = new StringEntity(requestBody, ContentType.APPLICATION_JSON); break; case 3: - entity = new NByteArrayEntity(requestBody.getBytes(StandardCharsets.UTF_8), ContentType.APPLICATION_JSON); + entity = new ByteArrayEntity(requestBody.getBytes(StandardCharsets.UTF_8), ContentType.APPLICATION_JSON); break; case 4: // Evil entity without a charset @@ -106,24 +107,24 @@ public void testTraceRequest() throws IOException, URISyntaxException { default: throw new UnsupportedOperationException(); } - enclosingRequest.setEntity(entity); + request.setEntity(entity); } String traceRequest = RequestLogger.buildTraceRequest(request, host); assertThat(traceRequest, equalTo(expected)); if (hasBody) { // check that the body is still readable as most entities are not repeatable - String body = EntityUtils.toString(((HttpEntityEnclosingRequest) request).getEntity(), StandardCharsets.UTF_8); + String body = EntityUtils.toString(request.getEntity(), StandardCharsets.UTF_8); assertThat(body, equalTo(requestBody)); } } - public void testTraceResponse() throws IOException { + public void testTraceResponse() throws IOException, ParseException { ProtocolVersion protocolVersion = new ProtocolVersion("HTTP", 1, 1); int statusCode = randomIntBetween(200, 599); String reasonPhrase = "REASON"; - BasicStatusLine statusLine = new BasicStatusLine(protocolVersion, statusCode, reasonPhrase); + StatusLine statusLine = new StatusLine(protocolVersion, statusCode, reasonPhrase); String expected = "# " + 
statusLine.toString(); - BasicHttpResponse httpResponse = new BasicHttpResponse(statusLine); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(statusCode, reasonPhrase); int numHeaders = randomIntBetween(0, 3); for (int i = 0; i < numHeaders; i++) { httpResponse.setHeader("header" + i, "value"); @@ -192,13 +193,13 @@ private static HttpUriRequest randomHttpRequest(URI uri) { int requestType = randomIntBetween(0, 7); switch (requestType) { case 0: - return new HttpGetWithEntity(uri); + return new HttpGet(uri); case 1: return new HttpPost(uri); case 2: return new HttpPut(uri); case 3: - return new HttpDeleteWithEntity(uri); + return new HttpDelete(uri); case 4: return new HttpHead(uri); case 5: diff --git a/client/rest/src/test/java/org/opensearch/client/RequestOptionsTests.java b/client/rest/src/test/java/org/opensearch/client/RequestOptionsTests.java index aaa40db1442ee..a7f9a48c73393 100644 --- a/client/rest/src/test/java/org/opensearch/client/RequestOptionsTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RequestOptionsTests.java @@ -32,8 +32,9 @@ package org.opensearch.client; -import org.apache.http.Header; -import org.apache.http.client.config.RequestConfig; +import org.apache.hc.client5.http.config.RequestConfig; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.util.Timeout; import org.opensearch.client.HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory; import java.util.ArrayList; @@ -108,15 +109,15 @@ public void testSetRequestBuilder() { RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder(); RequestConfig.Builder requestConfigBuilder = RequestConfig.custom(); - int socketTimeout = 10000; - int connectTimeout = 100; - requestConfigBuilder.setSocketTimeout(socketTimeout).setConnectTimeout(connectTimeout); + Timeout responseTimeout = Timeout.ofMilliseconds(10000); + Timeout connectTimeout = Timeout.ofMilliseconds(100); + requestConfigBuilder.setResponseTimeout(responseTimeout).setConnectTimeout(connectTimeout); RequestConfig requestConfig = requestConfigBuilder.build(); builder.setRequestConfig(requestConfig); RequestOptions options = builder.build(); assertSame(options.getRequestConfig(), requestConfig); - assertEquals(options.getRequestConfig().getSocketTimeout(), socketTimeout); + assertEquals(options.getRequestConfig().getResponseTimeout(), responseTimeout); assertEquals(options.getRequestConfig().getConnectTimeout(), connectTimeout); } diff --git a/client/rest/src/test/java/org/opensearch/client/RequestTests.java b/client/rest/src/test/java/org/opensearch/client/RequestTests.java index ba15c0d0b733c..d11982e9f9642 100644 --- a/client/rest/src/test/java/org/opensearch/client/RequestTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RequestTests.java @@ -32,15 +32,17 @@ package org.opensearch.client; -import org.apache.http.HttpEntity; -import org.apache.http.entity.ByteArrayEntity; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; -import org.apache.http.nio.entity.NStringEntity; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.io.entity.ByteArrayEntity; +import org.apache.hc.core5.http.io.entity.InputStreamEntity; +import org.apache.hc.core5.http.io.entity.StringEntity; import org.opensearch.client.HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory; +import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; 
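
The RequestOptionsTests hunk above captures the new timeout API: HttpClient 5 replaces bare millisecond ints with `Timeout` objects, and what 4.x called the socket timeout is now configured as a response timeout. A minimal sketch (class name is hypothetical):

```java
import org.apache.hc.client5.http.config.RequestConfig;
import org.apache.hc.core5.util.Timeout;

public final class TimeoutDemo {
    public static void main(String[] args) {
        // 4.x: RequestConfig.custom().setConnectTimeout(100).setSocketTimeout(10000)
        RequestConfig config = RequestConfig.custom()
            .setConnectTimeout(Timeout.ofMilliseconds(100))
            .setResponseTimeout(Timeout.ofMilliseconds(10000))
            .build();
        System.out.println(config.getConnectTimeout() + " / " + config.getResponseTimeout());
    }
}
```
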
+import java.nio.charset.StandardCharsets; import java.util.HashMap; import java.util.Map; @@ -133,7 +135,7 @@ public void testSetJsonEntity() throws IOException { final String json = randomAsciiLettersOfLengthBetween(1, 100); request.setJsonEntity(json); - assertEquals(ContentType.APPLICATION_JSON.toString(), request.getEntity().getContentType().getValue()); + assertEquals(ContentType.APPLICATION_JSON.toString(), request.getEntity().getContentType()); ByteArrayOutputStream os = new ByteArrayOutputStream(); request.getEntity().writeTo(os); assertEquals(json, new String(os.toByteArray(), ContentType.APPLICATION_JSON.getCharset())); @@ -201,7 +203,10 @@ private static Request randomRequest() { randomFrom( new HttpEntity[] { new StringEntity(randomAsciiAlphanumOfLength(10), ContentType.APPLICATION_JSON), - new NStringEntity(randomAsciiAlphanumOfLength(10), ContentType.APPLICATION_JSON), + new InputStreamEntity( + new ByteArrayInputStream(randomAsciiAlphanumOfLength(10).getBytes(StandardCharsets.UTF_8)), + ContentType.APPLICATION_JSON + ), new ByteArrayEntity(randomBytesOfLength(40), ContentType.APPLICATION_JSON) } ) ); diff --git a/client/rest/src/test/java/org/opensearch/client/ResponseExceptionTests.java b/client/rest/src/test/java/org/opensearch/client/ResponseExceptionTests.java index 8ecd3e1a29c99..dfbf105637962 100644 --- a/client/rest/src/test/java/org/opensearch/client/ResponseExceptionTests.java +++ b/client/rest/src/test/java/org/opensearch/client/ResponseExceptionTests.java @@ -32,19 +32,17 @@ package org.opensearch.client; -import org.apache.http.HttpEntity; -import org.apache.http.HttpHost; -import org.apache.http.HttpResponse; -import org.apache.http.ProtocolVersion; -import org.apache.http.RequestLine; -import org.apache.http.StatusLine; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.InputStreamEntity; -import org.apache.http.entity.StringEntity; -import org.apache.http.message.BasicHttpResponse; -import org.apache.http.message.BasicRequestLine; -import org.apache.http.message.BasicStatusLine; -import org.apache.http.util.EntityUtils; +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.ProtocolVersion; +import org.apache.hc.core5.http.io.entity.EntityUtils; +import org.apache.hc.core5.http.io.entity.InputStreamEntity; +import org.apache.hc.core5.http.io.entity.StringEntity; +import org.apache.hc.core5.http.message.BasicClassicHttpResponse; +import org.apache.hc.core5.http.message.RequestLine; import java.io.ByteArrayInputStream; import java.io.IOException; @@ -57,10 +55,9 @@ public class ResponseExceptionTests extends RestClientTestCase { - public void testResponseException() throws IOException { + public void testResponseException() throws IOException, ParseException { ProtocolVersion protocolVersion = new ProtocolVersion("http", 1, 1); - StatusLine statusLine = new BasicStatusLine(protocolVersion, 500, "Internal Server Error"); - HttpResponse httpResponse = new BasicHttpResponse(statusLine); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(500, "Internal Server Error"); String responseBody = "{\"error\":{\"root_cause\": {}}}"; boolean hasBody = getRandom().nextBoolean(); @@ -78,7 +75,7 @@ public void testResponseException() throws IOException { httpResponse.setEntity(entity); } - RequestLine requestLine = new 
BasicRequestLine("GET", "/", protocolVersion); + RequestLine requestLine = new RequestLine("GET", "/", protocolVersion); HttpHost httpHost = new HttpHost("localhost", 9200); Response response = new Response(requestLine, httpHost, httpResponse); ResponseException responseException = new ResponseException(response); diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientBuilderIntegTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientBuilderIntegTests.java index 10bf9568c8798..f5e1735042e66 100644 --- a/client/rest/src/test/java/org/opensearch/client/RestClientBuilderIntegTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientBuilderIntegTests.java @@ -36,7 +36,8 @@ import com.sun.net.httpserver.HttpHandler; import com.sun.net.httpserver.HttpsConfigurator; import com.sun.net.httpserver.HttpsServer; -import org.apache.http.HttpHost; + +import org.apache.hc.core5.http.HttpHost; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -117,7 +118,7 @@ public void testBuilderUsesDefaultSSLContext() throws Exception { private RestClient buildRestClient() { InetSocketAddress address = httpsServer.getAddress(); - return RestClient.builder(new HttpHost(address.getHostString(), address.getPort(), "https")).build(); + return RestClient.builder(new HttpHost("https", address.getHostString(), address.getPort())).build(); } private static SSLContext getSslContext() throws Exception { diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientBuilderTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientBuilderTests.java index ac81cd1132a2f..7165174e688e1 100644 --- a/client/rest/src/test/java/org/opensearch/client/RestClientBuilderTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientBuilderTests.java @@ -32,11 +32,12 @@ package org.opensearch.client; -import org.apache.http.Header; -import org.apache.http.HttpHost; -import org.apache.http.client.config.RequestConfig; -import org.apache.http.impl.nio.client.HttpAsyncClientBuilder; -import org.apache.http.message.BasicHeader; +import org.apache.hc.client5.http.config.RequestConfig; +import org.apache.hc.client5.http.impl.async.HttpAsyncClientBuilder; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.message.BasicHeader; +import org.apache.hc.core5.util.Timeout; import java.io.IOException; import java.util.Base64; @@ -271,7 +272,7 @@ public RequestConfig.Builder customizeRequestConfig(RequestConfig.Builder reques RequestConfig requestConfig = requestConfigBuilder.build(); assertEquals(RequestConfig.DEFAULT.getConnectionRequestTimeout(), requestConfig.getConnectionRequestTimeout()); // this way we get notified if the default ever changes - assertEquals(-1, requestConfig.getConnectionRequestTimeout()); + assertEquals(Timeout.ofMinutes(3), requestConfig.getConnectionRequestTimeout()); return requestConfigBuilder; } }); diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientCompressionTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientCompressionTests.java index e8b7742044f67..bf2c19b8127a1 100644 --- a/client/rest/src/test/java/org/opensearch/client/RestClientCompressionTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientCompressionTests.java @@ -11,10 +11,11 @@ import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; import com.sun.net.httpserver.HttpServer; -import org.apache.http.HttpEntity; 
-import org.apache.http.HttpHost; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; + +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.io.entity.StringEntity; import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; @@ -108,7 +109,7 @@ private static byte[] readAll(InputStream in) throws IOException { private RestClient createClient(boolean enableCompression, boolean chunkedEnabled) { InetSocketAddress address = httpServer.getAddress(); - return RestClient.builder(new HttpHost(address.getHostString(), address.getPort(), "http")) + return RestClient.builder(new HttpHost("http", address.getHostString(), address.getPort())) .setCompressionEnabled(enableCompression) .setChunkedEnabled(chunkedEnabled) .build(); diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientGzipCompressionTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientGzipCompressionTests.java index 8c4d993517fee..fdcb65ff101c9 100644 --- a/client/rest/src/test/java/org/opensearch/client/RestClientGzipCompressionTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientGzipCompressionTests.java @@ -35,10 +35,11 @@ import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; import com.sun.net.httpserver.HttpServer; -import org.apache.http.HttpEntity; -import org.apache.http.HttpHost; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; + +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.io.entity.StringEntity; import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; @@ -126,7 +127,7 @@ private static byte[] readAll(InputStream in) throws IOException { private RestClient createClient(boolean enableCompression) { InetSocketAddress address = httpServer.getAddress(); - return RestClient.builder(new HttpHost(address.getHostString(), address.getPort(), "http")) + return RestClient.builder(new HttpHost("http", address.getHostString(), address.getPort())) .setCompressionEnabled(enableCompression) .build(); } @@ -184,7 +185,7 @@ public void testCompressingClientSync() throws Exception { public void testCompressingClientAsync() throws Exception { InetSocketAddress address = httpServer.getAddress(); - RestClient restClient = RestClient.builder(new HttpHost(address.getHostString(), address.getPort(), "http")) + RestClient restClient = RestClient.builder(new HttpHost("http", address.getHostString(), address.getPort())) .setCompressionEnabled(true) .build(); diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientMultipleHostsIntegTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientMultipleHostsIntegTests.java index 277446191a36e..8c62533072c70 100644 --- a/client/rest/src/test/java/org/opensearch/client/RestClientMultipleHostsIntegTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientMultipleHostsIntegTests.java @@ -35,7 +35,8 @@ import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; import com.sun.net.httpserver.HttpServer; -import org.apache.http.HttpHost; + +import org.apache.hc.core5.http.HttpHost; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; @@ -56,6 +57,7 @@ import static 
org.opensearch.client.RestClientTestUtil.getAllStatusCodes; import static org.opensearch.client.RestClientTestUtil.randomErrorNoRetryStatusCode; import static org.opensearch.client.RestClientTestUtil.randomOkStatusCode; +import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.instanceOf; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertThat; @@ -63,7 +65,7 @@ import static org.junit.Assert.fail; /** - * Integration test to check interaction between {@link RestClient} and {@link org.apache.http.client.HttpClient}. + * Integration test to check interaction between {@link RestClient} and {@link org.apache.hc.client5.http.classic.HttpClient}. * Works against real http servers, multiple hosts. Also tests failover by randomly shutting down hosts. */ public class RestClientMultipleHostsIntegTests extends RestClientTestCase { @@ -299,7 +301,7 @@ public void testNodeSelector() throws Exception { } catch (ConnectException e) { // Windows isn't consistent here. Sometimes the message is even null! if (false == System.getProperty("os.name").startsWith("Windows")) { - assertEquals("Connection refused", e.getMessage()); + assertThat(e.getMessage(), containsString("Connection refused")); } } } else { diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientMultipleHostsTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientMultipleHostsTests.java index d88d4f4afd9b1..62574e5ed6d5a 100644 --- a/client/rest/src/test/java/org/opensearch/client/RestClientMultipleHostsTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientMultipleHostsTests.java @@ -33,9 +33,10 @@ package org.opensearch.client; import com.carrotsearch.randomizedtesting.generators.RandomNumbers; -import org.apache.http.Header; -import org.apache.http.HttpHost; -import org.apache.http.impl.nio.client.CloseableHttpAsyncClient; + +import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpHost; import org.junit.After; import java.io.IOException; diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostIntegTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostIntegTests.java index 0500d282a506d..beee1c5ca21a0 100644 --- a/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostIntegTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostIntegTests.java @@ -36,30 +36,34 @@ import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; import com.sun.net.httpserver.HttpServer; -import org.apache.http.Consts; -import org.apache.http.Header; -import org.apache.http.HttpHost; -import org.apache.http.HttpResponse; -import org.apache.http.auth.AuthScope; -import org.apache.http.auth.UsernamePasswordCredentials; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpRequestBase; -import org.apache.http.entity.ContentType; -import org.apache.http.impl.client.BasicCredentialsProvider; -import org.apache.http.impl.client.TargetAuthenticationStrategy; -import org.apache.http.impl.nio.client.CloseableHttpAsyncClient; -import org.apache.http.impl.nio.client.HttpAsyncClientBuilder; -import org.apache.http.message.BasicHeader; -import org.apache.http.nio.entity.NStringEntity; -import org.apache.http.util.EntityUtils; + +import org.apache.hc.client5.http.auth.AuthScope; +import 
org.apache.hc.client5.http.auth.UsernamePasswordCredentials; +import org.apache.hc.client5.http.classic.methods.HttpUriRequestBase; +import org.apache.hc.client5.http.impl.DefaultAuthenticationStrategy; +import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient; +import org.apache.hc.client5.http.impl.async.HttpAsyncClientBuilder; +import org.apache.hc.client5.http.impl.auth.BasicCredentialsProvider; +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.io.entity.EntityUtils; +import org.apache.hc.core5.http.io.entity.StringEntity; +import org.apache.hc.core5.http.message.BasicHeader; +import org.apache.hc.core5.http.nio.AsyncResponseConsumer; +import org.apache.hc.core5.net.URIBuilder; import org.junit.After; import org.junit.Before; +import org.opensearch.client.http.HttpUriRequestProducer; +import org.opensearch.client.nio.HeapBufferedAsyncResponseConsumer; import java.io.IOException; import java.io.InputStreamReader; import java.io.OutputStream; import java.net.InetAddress; import java.net.InetSocketAddress; +import java.nio.charset.StandardCharsets; import java.util.Arrays; import java.util.HashSet; import java.util.List; @@ -86,7 +90,7 @@ import static org.junit.Assert.fail; /** - * Integration test to check interaction between {@link RestClient} and {@link org.apache.http.client.HttpClient}. + * Integration test to check interaction between {@link RestClient} and {@link org.apache.hc.client5.http.classic.HttpClient}. * Works against a real http server, one single host. */ public class RestClientSingleHostIntegTests extends RestClientTestCase { @@ -147,7 +151,7 @@ private static class ResponseHandler implements HttpHandler { public void handle(HttpExchange httpExchange) throws IOException { // copy request body to response body so we can verify it was sent StringBuilder body = new StringBuilder(); - try (InputStreamReader reader = new InputStreamReader(httpExchange.getRequestBody(), Consts.UTF_8)) { + try (InputStreamReader reader = new InputStreamReader(httpExchange.getRequestBody(), StandardCharsets.UTF_8)) { char[] buffer = new char[256]; int read; while ((read = reader.read(buffer)) != -1) { @@ -164,7 +168,7 @@ public void handle(HttpExchange httpExchange) throws IOException { httpExchange.sendResponseHeaders(statusCode, body.length() == 0 ? 
-1 : body.length()); if (body.length() > 0) { try (OutputStream out = httpExchange.getResponseBody()) { - out.write(body.toString().getBytes(Consts.UTF_8)); + out.write(body.toString().getBytes(StandardCharsets.UTF_8)); } } httpExchange.close(); @@ -172,18 +176,20 @@ public void handle(HttpExchange httpExchange) throws IOException { } private RestClient createRestClient(final boolean useAuth, final boolean usePreemptiveAuth) { - // provide the username/password for every request - final BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider(); - credentialsProvider.setCredentials(AuthScope.ANY, new UsernamePasswordCredentials("user", "pass")); - - final RestClientBuilder restClientBuilder = RestClient.builder( - new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort()) - ).setDefaultHeaders(defaultHeaders); + final HttpHost httpHost = new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort()); + final RestClientBuilder restClientBuilder = RestClient.builder(httpHost).setDefaultHeaders(defaultHeaders); if (pathPrefix.length() > 0) { restClientBuilder.setPathPrefix(pathPrefix); } if (useAuth) { + // provide the username/password for every request + final BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider(); + credentialsProvider.setCredentials( + new AuthScope(httpHost, null, "Basic"), + new UsernamePasswordCredentials("user", "pass".toCharArray()) + ); + restClientBuilder.setHttpClientConfigCallback(new RestClientBuilder.HttpClientConfigCallback() { @Override public HttpAsyncClientBuilder customizeHttpClient(final HttpAsyncClientBuilder httpClientBuilder) { @@ -191,7 +197,7 @@ public HttpAsyncClientBuilder customizeHttpClient(final HttpAsyncClientBuilder h // disable preemptive auth by ignoring any authcache httpClientBuilder.disableAuthCaching(); // don't use the "persistent credentials strategy" - httpClientBuilder.setTargetAuthenticationStrategy(new TargetAuthenticationStrategy()); + httpClientBuilder.setTargetAuthenticationStrategy(DefaultAuthenticationStrategy.INSTANCE); } return httpClientBuilder.setDefaultCredentialsProvider(credentialsProvider); @@ -220,7 +226,7 @@ public void testManyAsyncRequests() throws Exception { final List exceptions = new CopyOnWriteArrayList<>(); for (int i = 0; i < iters; i++) { Request request = new Request("PUT", "/200"); - request.setEntity(new NStringEntity("{}", ContentType.APPLICATION_JSON)); + request.setEntity(new StringEntity("{}", ContentType.APPLICATION_JSON)); restClient.performRequestAsync(request, new ResponseListener() { @Override public void onSuccess(Response response) { @@ -271,7 +277,7 @@ public void onFailure(Exception exception) { /** * This test verifies some assumptions that we rely upon around the way the async http client works when reusing the same request - * throughout multiple retries, and the use of the {@link HttpRequestBase#abort()} method. + * throughout multiple retries, and the use of the {@link HttpUriRequestBase#abort()} method. * In fact the low-level REST client reuses the same request instance throughout multiple retries, and relies on the http client * to set the future ref to the request properly so that when abort is called, the proper future gets cancelled. 
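
The credentials setup above also changes shape in 5.x: `AuthScope.ANY` is gone, scopes are built from a host (plus optional realm and scheme name), and passwords are `char[]` so they can be cleared after use. A minimal sketch of the pattern this test adopts (class name is hypothetical):

```java
import org.apache.hc.client5.http.auth.AuthScope;
import org.apache.hc.client5.http.auth.UsernamePasswordCredentials;
import org.apache.hc.client5.http.impl.auth.BasicCredentialsProvider;
import org.apache.hc.core5.http.HttpHost;

public final class CredentialsDemo {
    public static void main(String[] args) {
        HttpHost host = new HttpHost("localhost", 9200);
        BasicCredentialsProvider provider = new BasicCredentialsProvider();
        provider.setCredentials(
            new AuthScope(host, null, "Basic"),                            // was AuthScope.ANY
            new UsernamePasswordCredentials("user", "pass".toCharArray())  // was a String password
        );
    }
}
```
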
*/ @@ -279,7 +285,10 @@ public void testRequestResetAndAbort() throws Exception { try (CloseableHttpAsyncClient client = HttpAsyncClientBuilder.create().build()) { client.start(); HttpHost httpHost = new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort()); - HttpGet httpGet = new HttpGet(pathPrefix + "/200"); + HttpUriRequestBase httpGet = new HttpUriRequestBase( + "GET", + new URIBuilder().setHttpHost(httpHost).setPath(pathPrefix + "/200").build() + ); // calling abort before the request is sent is a no-op httpGet.abort(); @@ -288,8 +297,11 @@ public void testRequestResetAndAbort() throws Exception { { httpGet.reset(); assertFalse(httpGet.isAborted()); + + Future future = client.execute(getRequestProducer(httpGet, httpHost), getResponseConsumer(), null); + httpGet.setDependency((org.apache.hc.core5.concurrent.Cancellable) future); httpGet.abort(); - Future future = client.execute(httpHost, httpGet, null); + try { future.get(); fail("expected cancellation exception"); @@ -300,8 +312,9 @@ public void testRequestResetAndAbort() throws Exception { } { httpGet.reset(); - Future future = client.execute(httpHost, httpGet, null); + Future future = client.execute(getRequestProducer(httpGet, httpHost), getResponseConsumer(), null); assertFalse(httpGet.isAborted()); + httpGet.setDependency((org.apache.hc.core5.concurrent.Cancellable) future); httpGet.abort(); assertTrue(httpGet.isAborted()); try { @@ -315,9 +328,9 @@ public void testRequestResetAndAbort() throws Exception { { httpGet.reset(); assertFalse(httpGet.isAborted()); - Future future = client.execute(httpHost, httpGet, null); + Future future = client.execute(getRequestProducer(httpGet, httpHost), getResponseConsumer(), null); assertFalse(httpGet.isAborted()); - assertEquals(200, future.get().getStatusLine().getStatusCode()); + assertEquals(200, future.get().getCode()); assertFalse(future.isCancelled()); } } @@ -325,7 +338,7 @@ public void testRequestResetAndAbort() throws Exception { /** * End to end test for headers. We test it explicitly against a real http client as there are different ways - * to set/add headers to the {@link org.apache.http.client.HttpClient}. + * to set/add headers to the {@link org.apache.hc.client5.http.classic.HttpClient}. * Exercises the test http server ability to send back whatever headers it received. */ public void testHeaders() throws Exception { @@ -365,7 +378,7 @@ public void testHeaders() throws Exception { /** * End to end test for delete with body. We test it explicitly as it is not supported - * out of the box by {@link org.apache.http.client.HttpClient}. + * out of the box by {@link org.apache.hc.client5.http.classic.HttpClient}. * Exercises the test http server ability to send back whatever body it received. */ public void testDeleteWithBody() throws Exception { @@ -374,7 +387,7 @@ public void testDeleteWithBody() throws Exception { /** * End to end test for get with body. We test it explicitly as it is not supported - * out of the box by {@link org.apache.http.client.HttpClient}. + * out of the box by {@link org.apache.hc.client5.http.classic.HttpClient}. * Exercises the test http server ability to send back whatever body it received. 
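
testRequestResetAndAbort above also illustrates the new execute/abort wiring: 5.x executes a producer/consumer pair rather than a (host, request) pair, and `abort()` only cancels the in-flight future once the request has been linked to it via `setDependency`. A rough sketch under those assumptions, reusing the producer and consumer this PR adds (host, path, and buffer size are placeholders; the `Cancellable` cast mirrors the one used in the test):

```java
import java.net.URI;
import java.util.concurrent.Future;

import org.apache.hc.client5.http.classic.methods.HttpUriRequestBase;
import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient;
import org.apache.hc.client5.http.impl.async.HttpAsyncClientBuilder;
import org.apache.hc.core5.concurrent.Cancellable;
import org.apache.hc.core5.http.ClassicHttpResponse;
import org.apache.hc.core5.http.HttpHost;
import org.opensearch.client.http.HttpUriRequestProducer;
import org.opensearch.client.nio.HeapBufferedAsyncResponseConsumer;

public final class AbortDemo {
    public static void main(String[] args) throws Exception {
        try (CloseableHttpAsyncClient client = HttpAsyncClientBuilder.create().build()) {
            client.start();
            HttpHost host = new HttpHost("localhost", 9200);
            HttpUriRequestBase get = new HttpUriRequestBase("GET", URI.create("http://localhost:9200/200"));
            Future<ClassicHttpResponse> future = client.execute(
                HttpUriRequestProducer.create(get, host),
                new HeapBufferedAsyncResponseConsumer(1024),
                null
            );
            get.setDependency((Cancellable) future); // link the request to its future, as the test does
            get.abort();                             // now cancels the future above
        }
    }
}
```
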
*/ public void testGetWithBody() throws Exception { @@ -410,7 +423,7 @@ public void testEncodeParams() throws Exception { Request request = new Request("PUT", "/200"); request.addParameter("routing", "foo bar"); Response response = RestClientSingleHostTests.performRequestSyncOrAsync(restClient, request); - assertEquals(pathPrefix + "/200?routing=foo+bar", response.getRequestLine().getUri()); + assertEquals(pathPrefix + "/200?routing=foo%20bar", response.getRequestLine().getUri()); } { Request request = new Request("PUT", "/200"); @@ -540,4 +553,13 @@ private Response bodyTest(RestClient restClient, String method, int statusCode, return esResponse; } + + private AsyncResponseConsumer getResponseConsumer() { + return new HeapBufferedAsyncResponseConsumer(1024); + } + + private HttpUriRequestProducer getRequestProducer(HttpUriRequestBase request, HttpHost host) { + return HttpUriRequestProducer.create(request, host); + + } } diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostTests.java index e5ce5eb91ad5a..f46a91aa910f8 100644 --- a/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostTests.java @@ -34,38 +34,42 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.http.ConnectionClosedException; -import org.apache.http.Header; -import org.apache.http.HttpEntity; -import org.apache.http.HttpEntityEnclosingRequest; -import org.apache.http.HttpHost; -import org.apache.http.HttpRequest; -import org.apache.http.HttpResponse; -import org.apache.http.ProtocolVersion; -import org.apache.http.StatusLine; -import org.apache.http.client.methods.HttpHead; -import org.apache.http.client.methods.HttpOptions; -import org.apache.http.client.methods.HttpPatch; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; -import org.apache.http.client.methods.HttpTrace; -import org.apache.http.client.methods.HttpUriRequest; -import org.apache.http.client.protocol.HttpClientContext; -import org.apache.http.client.utils.URIBuilder; -import org.apache.http.concurrent.FutureCallback; -import org.apache.http.conn.ConnectTimeoutException; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; -import org.apache.http.impl.nio.client.CloseableHttpAsyncClient; -import org.apache.http.message.BasicHttpResponse; -import org.apache.http.message.BasicStatusLine; -import org.apache.http.nio.protocol.HttpAsyncRequestProducer; -import org.apache.http.nio.protocol.HttpAsyncResponseConsumer; -import org.apache.http.util.EntityUtils; +import org.apache.hc.client5.http.ConnectTimeoutException; +import org.apache.hc.client5.http.classic.methods.HttpDelete; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpHead; +import org.apache.hc.client5.http.classic.methods.HttpOptions; +import org.apache.hc.client5.http.classic.methods.HttpPatch; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; +import org.apache.hc.client5.http.classic.methods.HttpTrace; +import org.apache.hc.client5.http.classic.methods.HttpUriRequest; +import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient; +import org.apache.hc.core5.concurrent.FutureCallback; +import 
org.apache.hc.core5.function.Supplier; +import org.apache.hc.core5.http.ClassicHttpRequest; +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.ConnectionClosedException; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.HttpRequest; +import org.apache.hc.core5.http.io.entity.EntityUtils; +import org.apache.hc.core5.http.io.entity.StringEntity; +import org.apache.hc.core5.http.message.BasicClassicHttpResponse; +import org.apache.hc.core5.http.nio.AsyncPushConsumer; +import org.apache.hc.core5.http.nio.AsyncRequestProducer; +import org.apache.hc.core5.http.nio.AsyncResponseConsumer; +import org.apache.hc.core5.http.nio.HandlerFactory; +import org.apache.hc.core5.http.protocol.HttpContext; +import org.apache.hc.core5.io.CloseMode; +import org.apache.hc.core5.net.URIBuilder; +import org.apache.hc.core5.reactor.IOReactorStatus; +import org.apache.hc.core5.util.TimeValue; import org.junit.After; import org.junit.Before; -import org.mockito.ArgumentCaptor; -import org.mockito.stubbing.Answer; +import org.opensearch.client.http.HttpUriRequestProducer; import javax.net.ssl.SSLHandshakeException; import java.io.IOException; @@ -85,6 +89,7 @@ import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.atomic.LongAdder; import static java.util.Collections.singletonList; import static org.opensearch.client.RestClientTestUtil.getAllErrorStatusCodes; @@ -100,12 +105,6 @@ import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; -import static org.mockito.Mockito.any; -import static org.mockito.Mockito.nullable; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; /** * Tests for basic functionality of {@link RestClient} against one single host: tests http requests being sent, headers, @@ -122,10 +121,17 @@ public class RestClientSingleHostTests extends RestClientTestCase { private CloseableHttpAsyncClient httpClient; private HostsTrackingFailureListener failureListener; private boolean strictDeprecationMode; + private LongAdder requests; + private AtomicReference requestProducerCapture; @Before public void createRestClient() { - httpClient = mockHttpClient(exec); + requests = new LongAdder(); + requestProducerCapture = new AtomicReference<>(); + httpClient = mockHttpClient(exec, (target, requestProducer, responseConsumer, pushHandlerFactory, context, callback) -> { + requests.increment(); + requestProducerCapture.set(requestProducer); + }); defaultHeaders = RestClientTestUtil.randomHeaders(getRandom(), "Header-default"); node = new Node(new HttpHost("localhost", 9200)); failureListener = new HostsTrackingFailureListener(); @@ -143,41 +149,78 @@ public void createRestClient() { ); } + interface CloseableHttpAsyncClientListener { + void onExecute( + HttpHost target, + AsyncRequestProducer requestProducer, + AsyncResponseConsumer responseConsumer, + HandlerFactory pushHandlerFactory, + HttpContext context, + FutureCallback callback + ); + } + @SuppressWarnings("unchecked") - static CloseableHttpAsyncClient mockHttpClient(final ExecutorService exec) { - CloseableHttpAsyncClient httpClient = mock(CloseableHttpAsyncClient.class); - when( - 
httpClient.execute( - any(HttpAsyncRequestProducer.class), - any(HttpAsyncResponseConsumer.class), - any(HttpClientContext.class), - nullable(FutureCallback.class) - ) - ).thenAnswer((Answer>) invocationOnMock -> { - final HttpAsyncRequestProducer requestProducer = (HttpAsyncRequestProducer) invocationOnMock.getArguments()[0]; - final FutureCallback futureCallback = (FutureCallback) invocationOnMock.getArguments()[3]; - // Call the callback asynchronous to better simulate how async http client works - return exec.submit(() -> { - if (futureCallback != null) { - try { - HttpResponse httpResponse = responseOrException(requestProducer); - futureCallback.completed(httpResponse); - } catch (Exception e) { - futureCallback.failed(e); + static CloseableHttpAsyncClient mockHttpClient(final ExecutorService exec, final CloseableHttpAsyncClientListener... listeners) { + CloseableHttpAsyncClient httpClient = new CloseableHttpAsyncClient() { + @Override + public void close() throws IOException {} + + @Override + public void close(CloseMode closeMode) {} + + @Override + public void start() {} + + @Override + public void register(String hostname, String uriPattern, Supplier supplier) {} + + @Override + public void initiateShutdown() {} + + @Override + public IOReactorStatus getStatus() { + return null; + } + + @Override + protected Future doExecute( + HttpHost target, + AsyncRequestProducer requestProducer, + AsyncResponseConsumer responseConsumer, + HandlerFactory pushHandlerFactory, + HttpContext context, + FutureCallback callback + ) { + Arrays.stream(listeners) + .forEach(l -> l.onExecute(target, requestProducer, responseConsumer, pushHandlerFactory, context, callback)); + // Call the callback asynchronous to better simulate how async http client works + return exec.submit(() -> { + if (callback != null) { + try { + ClassicHttpResponse httpResponse = responseOrException(requestProducer); + callback.completed((T) httpResponse); + } catch (Exception e) { + callback.failed(e); + } + return null; } - return null; - } - return responseOrException(requestProducer); - }); - }); + return (T) responseOrException(requestProducer); + }); + } + + @Override + public void awaitShutdown(TimeValue waitTime) throws InterruptedException {} + }; + return httpClient; } - private static HttpResponse responseOrException(HttpAsyncRequestProducer requestProducer) throws Exception { - final HttpUriRequest request = (HttpUriRequest) requestProducer.generateRequest(); - final HttpHost httpHost = requestProducer.getTarget(); + private static ClassicHttpResponse responseOrException(AsyncRequestProducer requestProducer) throws Exception { + final ClassicHttpRequest request = getRequest(requestProducer); + final HttpHost httpHost = new HttpHost(request.getAuthority()); // return the desired status code or exception depending on the path - switch (request.getURI().getPath()) { + switch (request.getRequestUri()) { case "/soe": throw new SocketTimeoutException(httpHost.toString()); case "/coe": @@ -193,20 +236,17 @@ private static HttpResponse responseOrException(HttpAsyncRequestProducer request case "/runtime": throw new RuntimeException(); default: - int statusCode = Integer.parseInt(request.getURI().getPath().substring(1)); - StatusLine statusLine = new BasicStatusLine(new ProtocolVersion("http", 1, 1), statusCode, ""); + int statusCode = Integer.parseInt(request.getRequestUri().substring(1)); - final HttpResponse httpResponse = new BasicHttpResponse(statusLine); + final ClassicHttpResponse httpResponse = new 
BasicClassicHttpResponse(statusCode, ""); // return the same body that was sent - if (request instanceof HttpEntityEnclosingRequest) { - HttpEntity entity = ((HttpEntityEnclosingRequest) request).getEntity(); - if (entity != null) { - assertTrue("the entity is not repeatable, cannot set it to the response directly", entity.isRepeatable()); - httpResponse.setEntity(entity); - } + HttpEntity entity = request.getEntity(); + if (entity != null) { + assertTrue("the entity is not repeatable, cannot set it to the response directly", entity.isRepeatable()); + httpResponse.setEntity(entity); } // return the same headers that were sent - httpResponse.setHeaders(request.getAllHeaders()); + httpResponse.setHeaders(request.getHeaders()); return httpResponse; } } @@ -224,26 +264,20 @@ public void shutdownExec() { */ @SuppressWarnings("unchecked") public void testInternalHttpRequest() throws Exception { - ArgumentCaptor requestArgumentCaptor = ArgumentCaptor.forClass(HttpAsyncRequestProducer.class); int times = 0; for (String httpMethod : getHttpMethods()) { - HttpUriRequest expectedRequest = performRandomRequest(httpMethod); - verify(httpClient, times(++times)).execute( - requestArgumentCaptor.capture(), - any(HttpAsyncResponseConsumer.class), - any(HttpClientContext.class), - nullable(FutureCallback.class) - ); - HttpUriRequest actualRequest = (HttpUriRequest) requestArgumentCaptor.getValue().generateRequest(); - assertEquals(expectedRequest.getURI(), actualRequest.getURI()); - assertEquals(expectedRequest.getClass(), actualRequest.getClass()); - assertArrayEquals(expectedRequest.getAllHeaders(), actualRequest.getAllHeaders()); - if (expectedRequest instanceof HttpEntityEnclosingRequest) { - HttpEntity expectedEntity = ((HttpEntityEnclosingRequest) expectedRequest).getEntity(); - if (expectedEntity != null) { - HttpEntity actualEntity = ((HttpEntityEnclosingRequest) actualRequest).getEntity(); - assertEquals(EntityUtils.toString(expectedEntity), EntityUtils.toString(actualEntity)); - } + ClassicHttpRequest expectedRequest = performRandomRequest(httpMethod); + assertThat(requests.intValue(), equalTo(++times)); + + ClassicHttpRequest actualRequest = getRequest(requestProducerCapture.get()); + assertEquals(expectedRequest.getRequestUri(), actualRequest.getRequestUri()); + assertEquals(expectedRequest.getMethod(), actualRequest.getMethod()); + assertArrayEquals(expectedRequest.getHeaders(), actualRequest.getHeaders()); + + HttpEntity expectedEntity = expectedRequest.getEntity(); + if (expectedEntity != null) { + HttpEntity actualEntity = actualRequest.getEntity(); + assertEquals(EntityUtils.toString(expectedEntity), EntityUtils.toString(actualEntity)); } } } @@ -414,14 +448,14 @@ public void testBody() throws Exception { } } } - for (String method : Arrays.asList("HEAD", "OPTIONS", "TRACE")) { + for (String method : Arrays.asList("TRACE")) { Request request = new Request(method, "/" + randomStatusCode(getRandom())); request.setEntity(entity); try { performRequestSyncOrAsync(restClient, request); fail("request should have failed"); - } catch (UnsupportedOperationException e) { - assertThat(e.getMessage(), equalTo(method + " with body is not supported")); + } catch (IllegalStateException e) { + assertThat(e.getMessage(), equalTo(method + " requests may not include an entity.")); } } } @@ -587,10 +621,10 @@ private HttpUriRequest performRandomRequest(String method) throws Exception { HttpUriRequest expectedRequest; switch (method) { case "DELETE": - expectedRequest = new HttpDeleteWithEntity(uri); + 
expectedRequest = new HttpDelete(uri); break; case "GET": - expectedRequest = new HttpGetWithEntity(uri); + expectedRequest = new HttpGet(uri); break; case "HEAD": expectedRequest = new HttpHead(uri); @@ -614,14 +648,14 @@ private HttpUriRequest performRandomRequest(String method) throws Exception { throw new UnsupportedOperationException("method not supported: " + method); } - if (expectedRequest instanceof HttpEntityEnclosingRequest && getRandom().nextBoolean()) { + if (getRandom().nextBoolean() && !(expectedRequest instanceof HttpTrace /* no entity */)) { HttpEntity entity = new StringEntity(randomAsciiAlphanumOfLengthBetween(10, 100), ContentType.APPLICATION_JSON); - ((HttpEntityEnclosingRequest) expectedRequest).setEntity(entity); + expectedRequest.setEntity(entity); request.setEntity(entity); } final Set uniqueNames = new HashSet<>(); - if (randomBoolean()) { + if (randomBoolean() && !(expectedRequest instanceof HttpTrace /* no entity */)) { Header[] headers = RestClientTestUtil.randomHeaders(getRandom(), "Header"); RequestOptions.Builder options = request.getOptions().toBuilder(); for (Header header : headers) { @@ -698,4 +732,9 @@ private static void assertExceptionStackContainsCallingMethod(Throwable t) { t.printStackTrace(new PrintWriter(stack)); fail("didn't find the calling method (looks like " + myMethod + ") in:\n" + stack); } + + private static ClassicHttpRequest getRequest(AsyncRequestProducer requestProducer) throws NoSuchFieldException, IllegalAccessException { + assertThat(requestProducer, instanceOf(HttpUriRequestProducer.class)); + return ((HttpUriRequestProducer) requestProducer).getRequest(); + } } diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientTests.java index ca761dcb6b9b6..dd51da3a30d8c 100644 --- a/client/rest/src/test/java/org/opensearch/client/RestClientTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientTests.java @@ -32,12 +32,13 @@ package org.opensearch.client; -import org.apache.http.Header; -import org.apache.http.HttpHost; -import org.apache.http.client.AuthCache; -import org.apache.http.impl.auth.BasicScheme; -import org.apache.http.impl.client.BasicAuthCache; -import org.apache.http.impl.nio.client.CloseableHttpAsyncClient; +import org.apache.hc.client5.http.auth.AuthCache; +import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient; +import org.apache.hc.client5.http.impl.auth.BasicAuthCache; +import org.apache.hc.client5.http.impl.auth.BasicScheme; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.reactor.IOReactorStatus; import org.opensearch.client.RestClient.NodeTuple; import java.io.IOException; @@ -410,10 +411,10 @@ public void testIsRunning() { CloseableHttpAsyncClient client = mock(CloseableHttpAsyncClient.class); RestClient restClient = new RestClient(client, new Header[] {}, nodes, null, null, null, false, false); - when(client.isRunning()).thenReturn(true); + when(client.getStatus()).thenReturn(IOReactorStatus.ACTIVE); assertTrue(restClient.isRunning()); - when(client.isRunning()).thenReturn(false); + when(client.getStatus()).thenReturn(IOReactorStatus.INACTIVE); assertFalse(restClient.isRunning()); } diff --git a/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java b/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java index 066419844f048..b2807d35d230e 100644 --- 
a/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java +++ b/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java @@ -32,23 +32,30 @@ package org.opensearch.client.documentation; -import org.apache.http.Header; -import org.apache.http.HttpEntity; -import org.apache.http.HttpHost; -import org.apache.http.RequestLine; -import org.apache.http.auth.AuthScope; -import org.apache.http.auth.UsernamePasswordCredentials; -import org.apache.http.client.CredentialsProvider; -import org.apache.http.client.config.RequestConfig; -import org.apache.http.entity.ContentType; -import org.apache.http.impl.client.BasicCredentialsProvider; -import org.apache.http.impl.nio.client.HttpAsyncClientBuilder; -import org.apache.http.impl.nio.reactor.IOReactorConfig; -import org.apache.http.message.BasicHeader; -import org.apache.http.nio.entity.NStringEntity; -import org.apache.http.ssl.SSLContextBuilder; -import org.apache.http.ssl.SSLContexts; -import org.apache.http.util.EntityUtils; +import org.apache.hc.client5.http.auth.AuthScope; +import org.apache.hc.client5.http.auth.UsernamePasswordCredentials; +import org.apache.hc.client5.http.config.RequestConfig; +import org.apache.hc.client5.http.impl.async.HttpAsyncClientBuilder; +import org.apache.hc.client5.http.impl.auth.BasicCredentialsProvider; +import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManager; +import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManagerBuilder; +import org.apache.hc.client5.http.ssl.ClientTlsStrategyBuilder; +import org.apache.hc.core5.function.Factory; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; +import org.apache.hc.core5.http.io.entity.StringEntity; +import org.apache.hc.core5.http.message.BasicHeader; +import org.apache.hc.core5.http.message.RequestLine; +import org.apache.hc.core5.http.nio.ssl.TlsStrategy; +import org.apache.hc.core5.reactor.IOReactorConfig; +import org.apache.hc.core5.reactor.ssl.TlsDetails; +import org.apache.hc.core5.ssl.SSLContextBuilder; +import org.apache.hc.core5.ssl.SSLContexts; +import org.apache.hc.core5.util.Timeout; import org.opensearch.client.Cancellable; import org.opensearch.client.HttpAsyncResponseConsumerFactory; import org.opensearch.client.Node; @@ -62,6 +69,8 @@ import org.opensearch.client.RestClientBuilder.HttpClientConfigCallback; import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLEngine; + import java.io.IOException; import java.io.InputStream; import java.nio.charset.StandardCharsets; @@ -109,12 +118,12 @@ public class RestClientDocumentation { // end::rest-client-options-singleton @SuppressWarnings("unused") - public void usage() throws IOException, InterruptedException { + public void usage() throws IOException, InterruptedException, ParseException { //tag::rest-client-init RestClient restClient = RestClient.builder( - new HttpHost("localhost", 9200, "http"), - new HttpHost("localhost", 9201, "http")).build(); + new HttpHost("http", "localhost", 9200), + new HttpHost("http", "localhost", 9201)).build(); //end::rest-client-init //tag::rest-client-close @@ -124,7 +133,7 @@ public void usage() throws IOException, InterruptedException { { //tag::rest-client-init-default-headers RestClientBuilder builder = RestClient.builder( - new 
HttpHost("localhost", 9200, "http")); + new HttpHost("http", "localhost", 9200)); Header[] defaultHeaders = new Header[]{new BasicHeader("header", "value")}; builder.setDefaultHeaders(defaultHeaders); // <1> //end::rest-client-init-default-headers @@ -132,14 +141,14 @@ public void usage() throws IOException, InterruptedException { { //tag::rest-client-init-node-selector RestClientBuilder builder = RestClient.builder( - new HttpHost("localhost", 9200, "http")); + new HttpHost("http", "localhost", 9200)); builder.setNodeSelector(NodeSelector.SKIP_DEDICATED_CLUSTER_MANAGERS); // <1> //end::rest-client-init-node-selector } { //tag::rest-client-init-allocation-aware-selector RestClientBuilder builder = RestClient.builder( - new HttpHost("localhost", 9200, "http")); + new HttpHost("http", "localhost", 9200)); builder.setNodeSelector(new NodeSelector() { // <1> @Override public void select(Iterable nodes) { @@ -173,7 +182,7 @@ public void select(Iterable nodes) { { //tag::rest-client-init-failure-listener RestClientBuilder builder = RestClient.builder( - new HttpHost("localhost", 9200, "http")); + new HttpHost("http", "localhost", 9200)); builder.setFailureListener(new RestClient.FailureListener() { @Override public void onFailure(Node node) { @@ -185,13 +194,13 @@ public void onFailure(Node node) { { //tag::rest-client-init-request-config-callback RestClientBuilder builder = RestClient.builder( - new HttpHost("localhost", 9200, "http")); + new HttpHost("http", "localhost", 9200)); builder.setRequestConfigCallback( new RestClientBuilder.RequestConfigCallback() { @Override public RequestConfig.Builder customizeRequestConfig( RequestConfig.Builder requestConfigBuilder) { - return requestConfigBuilder.setSocketTimeout(10000); // <1> + return requestConfigBuilder.setResponseTimeout(Timeout.ofMilliseconds(10000)); // <1> } }); //end::rest-client-init-request-config-callback @@ -199,13 +208,13 @@ public RequestConfig.Builder customizeRequestConfig( { //tag::rest-client-init-client-config-callback RestClientBuilder builder = RestClient.builder( - new HttpHost("localhost", 9200, "http")); + new HttpHost("http", "localhost", 9200)); builder.setHttpClientConfigCallback(new HttpClientConfigCallback() { @Override public HttpAsyncClientBuilder customizeHttpClient( HttpAsyncClientBuilder httpClientBuilder) { return httpClientBuilder.setProxy( - new HttpHost("proxy", 9000, "http")); // <1> + new HttpHost("http", "proxy", 9000)); // <1> } }); //end::rest-client-init-client-config-callback @@ -244,7 +253,7 @@ public void onFailure(Exception exception) { request.addParameter("pretty", "true"); //end::rest-client-parameters //tag::rest-client-body - request.setEntity(new NStringEntity( + request.setEntity(new StringEntity( "{\"json\":\"text\"}", ContentType.APPLICATION_JSON)); //end::rest-client-body @@ -334,8 +343,8 @@ public void commonConfiguration() throws Exception { public RequestConfig.Builder customizeRequestConfig( RequestConfig.Builder requestConfigBuilder) { return requestConfigBuilder - .setConnectTimeout(5000) - .setSocketTimeout(60000); + .setConnectTimeout(Timeout.ofMilliseconds(5000)) + .setResponseTimeout(Timeout.ofMilliseconds(60000)); } }); //end::rest-client-config-timeouts @@ -343,8 +352,8 @@ public RequestConfig.Builder customizeRequestConfig( { //tag::rest-client-config-request-options-timeouts RequestConfig requestConfig = RequestConfig.custom() - .setConnectTimeout(5000) - .setSocketTimeout(60000) + .setConnectTimeout(Timeout.ofMilliseconds(5000)) + 
.setResponseTimeout(Timeout.ofMilliseconds(60000)) .build(); RequestOptions options = RequestOptions.DEFAULT.toBuilder() .setRequestConfig(requestConfig) @@ -359,7 +368,7 @@ public RequestConfig.Builder customizeRequestConfig( @Override public HttpAsyncClientBuilder customizeHttpClient( HttpAsyncClientBuilder httpClientBuilder) { - return httpClientBuilder.setDefaultIOReactorConfig( + return httpClientBuilder.setIOReactorConfig( IOReactorConfig.custom() .setIoThreadCount(1) .build()); @@ -369,10 +378,9 @@ public HttpAsyncClientBuilder customizeHttpClient( } { //tag::rest-client-config-basic-auth - final CredentialsProvider credentialsProvider = - new BasicCredentialsProvider(); - credentialsProvider.setCredentials(AuthScope.ANY, - new UsernamePasswordCredentials("user", "password")); + final BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider(); + credentialsProvider.setCredentials(new AuthScope(new HttpHost("localhost", 9200)), + new UsernamePasswordCredentials("user", "password".toCharArray())); RestClientBuilder builder = RestClient.builder( new HttpHost("localhost", 9200)) @@ -388,10 +396,10 @@ public HttpAsyncClientBuilder customizeHttpClient( } { //tag::rest-client-config-disable-preemptive-auth - final CredentialsProvider credentialsProvider = + final BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider(); - credentialsProvider.setCredentials(AuthScope.ANY, - new UsernamePasswordCredentials("user", "password")); + credentialsProvider.setCredentials(new AuthScope(new HttpHost("localhost", 9200)), + new UsernamePasswordCredentials("user", "password".toCharArray())); RestClientBuilder builder = RestClient.builder( new HttpHost("localhost", 9200)) @@ -418,12 +426,27 @@ public HttpAsyncClientBuilder customizeHttpClient( .loadTrustMaterial(truststore, null); final SSLContext sslContext = sslBuilder.build(); RestClientBuilder builder = RestClient.builder( - new HttpHost("localhost", 9200, "https")) + new HttpHost("https", "localhost", 9200)) .setHttpClientConfigCallback(new HttpClientConfigCallback() { @Override public HttpAsyncClientBuilder customizeHttpClient( HttpAsyncClientBuilder httpClientBuilder) { - return httpClientBuilder.setSSLContext(sslContext); + final TlsStrategy tlsStrategy = ClientTlsStrategyBuilder.create() + .setSslContext(sslContext) + // See https://issues.apache.org/jira/browse/HTTPCLIENT-2219 + .setTlsDetailsFactory(new Factory() { + @Override + public TlsDetails create(final SSLEngine sslEngine) { + return new TlsDetails(sslEngine.getSession(), sslEngine.getApplicationProtocol()); + } + }) + .build(); + + final PoolingAsyncClientConnectionManager connectionManager = PoolingAsyncClientConnectionManagerBuilder.create() + .setTlsStrategy(tlsStrategy) + .build(); + + return httpClientBuilder.setConnectionManager(connectionManager); } }); //end::rest-client-config-encrypted-communication @@ -444,12 +467,27 @@ public HttpAsyncClientBuilder customizeHttpClient( .loadTrustMaterial(trustStore, null); final SSLContext sslContext = sslContextBuilder.build(); RestClient.builder( - new HttpHost("localhost", 9200, "https")) + new HttpHost("https", "localhost", 9200)) .setHttpClientConfigCallback(new HttpClientConfigCallback() { @Override public HttpAsyncClientBuilder customizeHttpClient( HttpAsyncClientBuilder httpClientBuilder) { - return httpClientBuilder.setSSLContext(sslContext); + final TlsStrategy tlsStrategy = ClientTlsStrategyBuilder.create() + .setSslContext(sslContext) + // See please 
https://issues.apache.org/jira/browse/HTTPCLIENT-2219 + .setTlsDetailsFactory(new Factory() { + @Override + public TlsDetails create(final SSLEngine sslEngine) { + return new TlsDetails(sslEngine.getSession(), sslEngine.getApplicationProtocol()); + } + }) + .build(); + + final PoolingAsyncClientConnectionManager connectionManager = PoolingAsyncClientConnectionManagerBuilder.create() + .setTlsStrategy(tlsStrategy) + .build(); + + return httpClientBuilder.setConnectionManager(connectionManager); } }); //end::rest-client-config-trust-ca-pem @@ -473,12 +511,20 @@ public HttpAsyncClientBuilder customizeHttpClient( .loadKeyMaterial(keyStore, keyStorePass.toCharArray()); final SSLContext sslContext = sslBuilder.build(); RestClientBuilder builder = RestClient.builder( - new HttpHost("localhost", 9200, "https")) + new HttpHost("https", "localhost", 9200)) .setHttpClientConfigCallback(new HttpClientConfigCallback() { @Override public HttpAsyncClientBuilder customizeHttpClient( HttpAsyncClientBuilder httpClientBuilder) { - return httpClientBuilder.setSSLContext(sslContext); + final TlsStrategy tlsStrategy = ClientTlsStrategyBuilder.create() + .setSslContext(sslContext) + .build(); + + final PoolingAsyncClientConnectionManager connectionManager = PoolingAsyncClientConnectionManagerBuilder.create() + .setTlsStrategy(tlsStrategy) + .build(); + + return httpClientBuilder.setConnectionManager(connectionManager); } }); //end::rest-client-config-mutual-tls-authentication @@ -486,7 +532,7 @@ public HttpAsyncClientBuilder customizeHttpClient( { //tag::rest-client-auth-bearer-token RestClientBuilder builder = RestClient.builder( - new HttpHost("localhost", 9200, "http")); + new HttpHost("http", "localhost", 9200)); Header[] defaultHeaders = new Header[]{new BasicHeader("Authorization", "Bearer u6iuAxZ0RG1Kcm5jVFI4eU4tZU9aVFEwT2F3")}; @@ -502,7 +548,7 @@ public HttpAsyncClientBuilder customizeHttpClient( (apiKeyId + ":" + apiKeySecret) .getBytes(StandardCharsets.UTF_8)); RestClientBuilder builder = RestClient.builder( - new HttpHost("localhost", 9200, "http")); + new HttpHost("http", "localhost", 9200)); Header[] defaultHeaders = new Header[]{new BasicHeader("Authorization", "ApiKey " + apiKeyAuth)}; diff --git a/client/sniffer/build.gradle b/client/sniffer/build.gradle index b7cb0d87c02d9..eb3306cf2cea2 100644 --- a/client/sniffer/build.gradle +++ b/client/sniffer/build.gradle @@ -38,8 +38,8 @@ archivesBaseName = 'opensearch-rest-client-sniffer' dependencies { api project(":client:rest") - api "org.apache.httpcomponents:httpclient:${versions.httpclient}" - api "org.apache.httpcomponents:httpcore:${versions.httpcore}" + api "org.apache.httpcomponents.client5:httpclient5:${versions.httpclient5}" + api "org.apache.httpcomponents.core5:httpcore5:${versions.httpcore5}" api "commons-codec:commons-codec:${versions.commonscodec}" api "commons-logging:commons-logging:${versions.commonslogging}" api "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" @@ -84,6 +84,7 @@ testingConventions { } thirdPartyAudit.ignoreMissingClasses( + 'org.conscrypt.Conscrypt', //commons-logging optional dependencies 'org.apache.avalon.framework.logger.Logger', 'org.apache.log.Hierarchy', diff --git a/client/sniffer/licenses/httpclient-4.5.13.jar.sha1 b/client/sniffer/licenses/httpclient-4.5.13.jar.sha1 deleted file mode 100644 index 3281e21595b39..0000000000000 --- a/client/sniffer/licenses/httpclient-4.5.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e5f6cae5ca7ecaac1ec2827a9e2d65ae2869cada \ No newline at end of file diff --git 
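The TLS snippets above repeat a single pattern: build a `TlsStrategy` from the `SSLContext`, wrap it in a pooling async connection manager, and hand that to the client builder, with an explicit `TlsDetails` factory as the HTTPCLIENT-2219 workaround. A condensed sketch of that pattern (hypothetical helper, assuming HttpClient 5.1):

```java
import javax.net.ssl.SSLContext;

import org.apache.hc.client5.http.impl.async.HttpAsyncClientBuilder;
import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManager;
import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManagerBuilder;
import org.apache.hc.client5.http.ssl.ClientTlsStrategyBuilder;
import org.apache.hc.core5.http.nio.ssl.TlsStrategy;
import org.apache.hc.core5.reactor.ssl.TlsDetails;

final class TlsSetup {
    // Replaces HttpClient 4's httpClientBuilder.setSSLContext(sslContext).
    static HttpAsyncClientBuilder withTls(HttpAsyncClientBuilder builder, SSLContext sslContext) {
        TlsStrategy tlsStrategy = ClientTlsStrategyBuilder.create()
            .setSslContext(sslContext)
            // see https://issues.apache.org/jira/browse/HTTPCLIENT-2219
            .setTlsDetailsFactory(sslEngine -> new TlsDetails(sslEngine.getSession(), sslEngine.getApplicationProtocol()))
            .build();
        PoolingAsyncClientConnectionManager connectionManager = PoolingAsyncClientConnectionManagerBuilder.create()
            .setTlsStrategy(tlsStrategy)
            .build();
        return builder.setConnectionManager(connectionManager);
    }
}
```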
a/client/sniffer/licenses/httpclient5-5.1.3.jar.sha1 b/client/sniffer/licenses/httpclient5-5.1.3.jar.sha1 new file mode 100644 index 0000000000000..b18cf050ac8df --- /dev/null +++ b/client/sniffer/licenses/httpclient5-5.1.3.jar.sha1 @@ -0,0 +1 @@ +13c984b7b881afcff3a7f0bb95878724a48a4b66 \ No newline at end of file diff --git a/client/sniffer/licenses/httpcore-4.4.15.jar.sha1 b/client/sniffer/licenses/httpcore-4.4.15.jar.sha1 deleted file mode 100644 index 42a03b5d7a376..0000000000000 --- a/client/sniffer/licenses/httpcore-4.4.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7f2e0c573eaa7a74bac2e89b359e1f73d92a0a1d \ No newline at end of file diff --git a/client/sniffer/licenses/httpcore5-5.1.4.jar.sha1 b/client/sniffer/licenses/httpcore5-5.1.4.jar.sha1 new file mode 100644 index 0000000000000..c8981fb933736 --- /dev/null +++ b/client/sniffer/licenses/httpcore5-5.1.4.jar.sha1 @@ -0,0 +1 @@ +92538a62a4aacf96c9ea8992346a453e83da85fc \ No newline at end of file diff --git a/client/sniffer/src/main/java/org/opensearch/client/sniff/OpenSearchNodesSniffer.java b/client/sniffer/src/main/java/org/opensearch/client/sniff/OpenSearchNodesSniffer.java index c1a0fcf9a8acf..e6696c1fc4039 100644 --- a/client/sniffer/src/main/java/org/opensearch/client/sniff/OpenSearchNodesSniffer.java +++ b/client/sniffer/src/main/java/org/opensearch/client/sniff/OpenSearchNodesSniffer.java @@ -37,8 +37,8 @@ import com.fasterxml.jackson.core.JsonToken; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.http.HttpEntity; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.client.Node; import org.opensearch.client.Node.Roles; import org.opensearch.client.Request; @@ -192,12 +192,12 @@ private static Node readNode(String nodeId, JsonParser parser, Scheme scheme) th publishAddressAsURI = URI.create(scheme + "://" + address); host = publishAddressAsURI.getHost(); } - publishedHost = new HttpHost(host, publishAddressAsURI.getPort(), publishAddressAsURI.getScheme()); + publishedHost = new HttpHost(publishAddressAsURI.getScheme(), host, publishAddressAsURI.getPort()); } else if (parser.currentToken() == JsonToken.START_ARRAY && "bound_address".equals(parser.getCurrentName())) { while (parser.nextToken() != JsonToken.END_ARRAY) { URI boundAddressAsURI = URI.create(scheme + "://" + parser.getValueAsString()); boundHosts.add( - new HttpHost(boundAddressAsURI.getHost(), boundAddressAsURI.getPort(), boundAddressAsURI.getScheme()) + new HttpHost(boundAddressAsURI.getScheme(), boundAddressAsURI.getHost(), boundAddressAsURI.getPort()) ); } } else if (parser.getCurrentToken() == JsonToken.START_OBJECT) { diff --git a/client/sniffer/src/test/java/org/opensearch/client/sniff/MockNodesSniffer.java b/client/sniffer/src/test/java/org/opensearch/client/sniff/MockNodesSniffer.java index cbf349e534deb..9b5e89fbeb038 100644 --- a/client/sniffer/src/test/java/org/opensearch/client/sniff/MockNodesSniffer.java +++ b/client/sniffer/src/test/java/org/opensearch/client/sniff/MockNodesSniffer.java @@ -32,7 +32,7 @@ package org.opensearch.client.sniff; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.client.Node; import java.util.Collections; diff --git a/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferParseTests.java b/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferParseTests.java index 
58b60ac13dee8..fd38eceee6224 100644 --- a/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferParseTests.java +++ b/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferParseTests.java @@ -33,10 +33,11 @@ package org.opensearch.client.sniff; import com.fasterxml.jackson.core.JsonFactory; -import org.apache.http.HttpEntity; -import org.apache.http.HttpHost; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.InputStreamEntity; + +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.io.entity.InputStreamEntity; import org.opensearch.client.Node; import org.opensearch.client.Node.Roles; import org.opensearch.client.RestClientTestCase; diff --git a/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferTests.java b/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferTests.java index 1d06e9353726d..b678fb050e8f8 100644 --- a/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferTests.java +++ b/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferTests.java @@ -40,14 +40,13 @@ import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; import com.sun.net.httpserver.HttpServer; -import org.apache.http.Consts; -import org.apache.http.HttpHost; -import org.apache.http.client.methods.HttpGet; import org.opensearch.client.Node; import org.opensearch.client.Response; import org.opensearch.client.ResponseException; import org.opensearch.client.RestClient; import org.opensearch.client.RestClientTestCase; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.core5.http.HttpHost; import org.junit.After; import org.junit.Before; @@ -56,6 +55,7 @@ import java.io.StringWriter; import java.net.InetAddress; import java.net.InetSocketAddress; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -181,7 +181,7 @@ public void handle(HttpExchange httpExchange) throws IOException { String nodesInfoBody = sniffResponse.nodesInfoBody; httpExchange.sendResponseHeaders(sniffResponse.nodesInfoResponseCode, nodesInfoBody.length()); try (OutputStream out = httpExchange.getResponseBody()) { - out.write(nodesInfoBody.getBytes(Consts.UTF_8)); + out.write(nodesInfoBody.getBytes(StandardCharsets.UTF_8)); return; } } @@ -210,14 +210,14 @@ private static SniffResponse buildSniffResponse(OpenSearchNodesSniffer.Scheme sc String nodeId = RandomStrings.randomAsciiOfLengthBetween(getRandom(), 5, 10); String host = "host" + i; int port = RandomNumbers.randomIntBetween(getRandom(), 9200, 9299); - HttpHost publishHost = new HttpHost(host, port, scheme.toString()); + HttpHost publishHost = new HttpHost(scheme.toString(), host, port); Set boundHosts = new HashSet<>(); boundHosts.add(publishHost); if (randomBoolean()) { int bound = between(1, 5); for (int b = 0; b < bound; b++) { - boundHosts.add(new HttpHost(host + b, port, scheme.toString())); + boundHosts.add(new HttpHost(scheme.toString(), host + b, port)); } } diff --git a/client/sniffer/src/test/java/org/opensearch/client/sniff/SniffOnFailureListenerTests.java b/client/sniffer/src/test/java/org/opensearch/client/sniff/SniffOnFailureListenerTests.java index e4d1058282f5c..faab6babcaca6 100644 --- 
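A mechanical change that recurs throughout the sniffer diff: HttpCore 5 moved the scheme to the first `HttpHost` constructor argument. A tiny sketch of the conversion used when parsing publish and bound addresses (hypothetical helper):

```java
import java.net.URI;

import org.apache.hc.core5.http.HttpHost;

final class Hosts {
    // HttpClient 4: new HttpHost(host, port, scheme)
    // HttpCore 5:   new HttpHost(scheme, host, port)
    static HttpHost fromAddress(String scheme, String address) {
        URI uri = URI.create(scheme + "://" + address);
        return new HttpHost(uri.getScheme(), uri.getHost(), uri.getPort());
    }
}
```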
a/client/sniffer/src/test/java/org/opensearch/client/sniff/SniffOnFailureListenerTests.java +++ b/client/sniffer/src/test/java/org/opensearch/client/sniff/SniffOnFailureListenerTests.java @@ -32,7 +32,7 @@ package org.opensearch.client.sniff; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.client.Node; import org.opensearch.client.RestClient; import org.opensearch.client.RestClientTestCase; diff --git a/client/sniffer/src/test/java/org/opensearch/client/sniff/SnifferBuilderTests.java b/client/sniffer/src/test/java/org/opensearch/client/sniff/SnifferBuilderTests.java index 25a3162e238ed..24ee540aa6364 100644 --- a/client/sniffer/src/test/java/org/opensearch/client/sniff/SnifferBuilderTests.java +++ b/client/sniffer/src/test/java/org/opensearch/client/sniff/SnifferBuilderTests.java @@ -33,7 +33,8 @@ package org.opensearch.client.sniff; import com.carrotsearch.randomizedtesting.generators.RandomNumbers; -import org.apache.http.HttpHost; + +import org.apache.hc.core5.http.HttpHost; import org.opensearch.client.RestClient; import org.opensearch.client.RestClientTestCase; diff --git a/client/sniffer/src/test/java/org/opensearch/client/sniff/SnifferTests.java b/client/sniffer/src/test/java/org/opensearch/client/sniff/SnifferTests.java index 304243e73c078..36923281dde6b 100644 --- a/client/sniffer/src/test/java/org/opensearch/client/sniff/SnifferTests.java +++ b/client/sniffer/src/test/java/org/opensearch/client/sniff/SnifferTests.java @@ -32,12 +32,12 @@ package org.opensearch.client.sniff; -import org.apache.http.HttpHost; import org.opensearch.client.Node; import org.opensearch.client.RestClient; import org.opensearch.client.RestClientTestCase; import org.opensearch.client.sniff.Sniffer.DefaultScheduler; import org.opensearch.client.sniff.Sniffer.Scheduler; +import org.apache.hc.core5.http.HttpHost; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; diff --git a/client/sniffer/src/test/java/org/opensearch/client/sniff/documentation/SnifferDocumentation.java b/client/sniffer/src/test/java/org/opensearch/client/sniff/documentation/SnifferDocumentation.java index 3b612aab80851..8f3e446d8aefb 100644 --- a/client/sniffer/src/test/java/org/opensearch/client/sniff/documentation/SnifferDocumentation.java +++ b/client/sniffer/src/test/java/org/opensearch/client/sniff/documentation/SnifferDocumentation.java @@ -32,7 +32,7 @@ package org.opensearch.client.sniff.documentation; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.client.Node; import org.opensearch.client.RestClient; import org.opensearch.client.sniff.OpenSearchNodesSniffer; @@ -69,7 +69,7 @@ public void usage() throws IOException { { //tag::sniffer-init RestClient restClient = RestClient.builder( - new HttpHost("localhost", 9200, "http")) + new HttpHost("http", "localhost", 9200)) .build(); Sniffer sniffer = Sniffer.builder(restClient).build(); //end::sniffer-init @@ -82,7 +82,7 @@ public void usage() throws IOException { { //tag::sniffer-interval RestClient restClient = RestClient.builder( - new HttpHost("localhost", 9200, "http")) + new HttpHost("http", "localhost", 9200)) .build(); Sniffer sniffer = Sniffer.builder(restClient) .setSniffIntervalMillis(60000).build(); @@ -105,7 +105,7 @@ public void usage() throws IOException { { //tag::sniffer-https RestClient restClient = RestClient.builder( - new HttpHost("localhost", 9200, "http")) + new HttpHost("http", "localhost", 9200)) .build(); NodesSniffer 
nodesSniffer = new OpenSearchNodesSniffer( restClient, @@ -118,7 +118,7 @@ public void usage() throws IOException { { //tag::sniff-request-timeout RestClient restClient = RestClient.builder( - new HttpHost("localhost", 9200, "http")) + new HttpHost("http", "localhost", 9200)) .build(); NodesSniffer nodesSniffer = new OpenSearchNodesSniffer( restClient, @@ -131,7 +131,7 @@ public void usage() throws IOException { { //tag::custom-nodes-sniffer RestClient restClient = RestClient.builder( - new HttpHost("localhost", 9200, "http")) + new HttpHost("http", "localhost", 9200)) .build(); NodesSniffer nodesSniffer = new NodesSniffer() { @Override diff --git a/client/test/build.gradle b/client/test/build.gradle index 07d874cf01ea7..13e9bd6b9e34a 100644 --- a/client/test/build.gradle +++ b/client/test/build.gradle @@ -35,7 +35,7 @@ sourceCompatibility = JavaVersion.VERSION_11 group = "${group}.client.test" dependencies { - api "org.apache.httpcomponents:httpcore:${versions.httpcore}" + api "org.apache.httpcomponents.core5:httpcore5:${versions.httpcore5}" api "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" api "junit:junit:${versions.junit}" api "org.hamcrest:hamcrest:${versions.hamcrest}" diff --git a/client/test/src/main/java/org/opensearch/client/RestClientTestCase.java b/client/test/src/main/java/org/opensearch/client/RestClientTestCase.java index 2b3e867929e27..b4eacdbf88827 100644 --- a/client/test/src/main/java/org/opensearch/client/RestClientTestCase.java +++ b/client/test/src/main/java/org/opensearch/client/RestClientTestCase.java @@ -43,7 +43,8 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope; import com.carrotsearch.randomizedtesting.annotations.ThreadLeakZombies; import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; -import org.apache.http.Header; + +import org.apache.hc.core5.http.Header; import java.util.ArrayList; import java.util.HashMap; diff --git a/client/test/src/main/java/org/opensearch/client/RestClientTestUtil.java b/client/test/src/main/java/org/opensearch/client/RestClientTestUtil.java index aeba9bde4bff4..6a01ed30e0c63 100644 --- a/client/test/src/main/java/org/opensearch/client/RestClientTestUtil.java +++ b/client/test/src/main/java/org/opensearch/client/RestClientTestUtil.java @@ -35,8 +35,9 @@ import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import com.carrotsearch.randomizedtesting.generators.RandomStrings; -import org.apache.http.Header; -import org.apache.http.message.BasicHeader; + +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.message.BasicHeader; import java.util.ArrayList; import java.util.Arrays; diff --git a/distribution/README.md b/distribution/README.md new file mode 100644 index 0000000000000..b9e948b625659 --- /dev/null +++ b/distribution/README.md @@ -0,0 +1,12 @@ +# Distributions +This subproject contains the necessary tooling to build the various distributions. +Note that some of this can only be run on the specific architecture and does not support cross-compile. + +The following distributions are being built: +* Archives (`*.zip`, `*.tar`): these form the basis for all other OpenSearch distributions +* Packages (`*.deb`, `*.rpm`): specific package formats for some Linux distributions +* Docker images +* Backwards compatibility tests: used internally for version compatibility testing, not for public consumption + +## With or Without JDK? 
+For each supported platform there should be both a target bundled with a JDK and a target without a bundled JDK. diff --git a/distribution/archives/build.gradle b/distribution/archives/build.gradle index 1376b8d419f6e..587175eef4008 100644 --- a/distribution/archives/build.gradle +++ b/distribution/archives/build.gradle @@ -137,6 +137,13 @@ distribution_archives { } } + noJdkLinuxArm64Tar { + archiveClassifier = 'no-jdk-linux-arm64' + content { + archiveFiles(modulesFiles('linux-arm64'), 'tar', 'linux', 'arm64', false) + } + } + linuxTar { archiveClassifier = 'linux-x64' content { @@ -151,6 +158,8 @@ distribution_archives { } } + // Should really be `no-jdk-linux-s390x` as it ships without a JDK, however it seems that the build can't handle + // the absence of the `linux-s390x` target. linuxS390xTar { archiveClassifier = 'linux-s390x' content { diff --git a/distribution/archives/integ-test-zip/src/test/java/org/opensearch/test/rest/WaitForRefreshAndCloseIT.java b/distribution/archives/integ-test-zip/src/test/java/org/opensearch/test/rest/WaitForRefreshAndCloseIT.java index 37ffe32d19509..07576dacffb03 100644 --- a/distribution/archives/integ-test-zip/src/test/java/org/opensearch/test/rest/WaitForRefreshAndCloseIT.java +++ b/distribution/archives/integ-test-zip/src/test/java/org/opensearch/test/rest/WaitForRefreshAndCloseIT.java @@ -32,13 +32,14 @@ package org.opensearch.test.rest; -import org.apache.http.util.EntityUtils; import org.opensearch.action.ActionFuture; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.client.Request; import org.opensearch.client.Response; import org.opensearch.client.ResponseException; import org.opensearch.client.ResponseListener; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.junit.After; import org.junit.Before; @@ -145,6 +146,8 @@ public void onSuccess(Response response) { future.onResponse(EntityUtils.toString(response.getEntity())); } catch (IOException e) { future.onFailure(e); + } catch (ParseException e) { + future.onFailure(e); } } diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle index df3049d7684c4..d9db3448104c8 100644 --- a/distribution/packages/build.gradle +++ b/distribution/packages/build.gradle @@ -350,6 +350,10 @@ tasks.register('buildArm64Deb', Deb) { configure(commonDebConfig(true, 'arm64')) } +tasks.register('buildNoJdkArm64Deb', Deb) { + configure(commonDebConfig(false, 'arm64')) +} + tasks.register('buildDeb', Deb) { configure(commonDebConfig(true, 'x64')) } @@ -387,6 +391,10 @@ tasks.register('buildArm64Rpm', Rpm) { configure(commonRpmConfig(true, 'arm64')) } +tasks.register('buildNoJdkArm64Rpm', Rpm) { + configure(commonRpmConfig(false, 'arm64')) +} + tasks.register('buildRpm', Rpm) { configure(commonRpmConfig(true, 'x64')) } diff --git a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.13.4.2.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.13.4.2.jar.sha1 new file mode 100644 index 0000000000000..a7782e8aac18d --- /dev/null +++ b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.13.4.2.jar.sha1 @@ -0,0 +1 @@ +325c06bdfeb628cfb80ebaaf1a26cc1eb558a585 \ No newline at end of file diff --git a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.13.4.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.13.4.jar.sha1 deleted file mode 100644 index fcc6491d1f78d..0000000000000 --- 
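The `WaitForRefreshAndCloseIT` change above is needed because HttpCore 5's `EntityUtils.toString` declares `ParseException` in addition to `IOException`. A minimal sketch of reading a response body under that signature (hypothetical helper):

```java
import java.io.IOException;

import org.apache.hc.core5.http.HttpEntity;
import org.apache.hc.core5.http.ParseException;
import org.apache.hc.core5.http.io.entity.EntityUtils;

final class Bodies {
    // Both checked exceptions must be caught or declared with HttpCore 5.
    static String read(HttpEntity entity) throws IOException {
        try {
            return EntityUtils.toString(entity);
        } catch (ParseException e) {
            throw new IOException("unparseable response entity", e);
        }
    }
}
```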
a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.13.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -98b0edfa8e4084078f10b7b356c300ded4a71491 \ No newline at end of file diff --git a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/ValidateInputTask.java b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/ValidateInputTask.java index f32581cd1791c..95e055cedda43 100644 --- a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/ValidateInputTask.java +++ b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/ValidateInputTask.java @@ -31,7 +31,7 @@ public void accept(final Tuple input) { // check if the elasticsearch version is supported if (taskInput.getVersion().isPresent()) { final Version version = taskInput.getVersion().get(); - if (version.equals(LegacyESVersion.V_7_10_2) == false) { + if (version.equals(LegacyESVersion.fromId(7100299)) == false) { throw new RuntimeException( String.format(Locale.getDefault(), "The installed version %s of elasticsearch is not supported.", version) ); diff --git a/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/ValidateInputTaskTests.java b/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/ValidateInputTaskTests.java index 07cb19b132f31..195c57e5b457f 100644 --- a/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/ValidateInputTaskTests.java +++ b/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/ValidateInputTaskTests.java @@ -39,7 +39,7 @@ public void setTask() { public void testUnsupportedEsVersion() { TaskInput taskInput = new TaskInput(env); - taskInput.setVersion(LegacyESVersion.V_7_10_1); + taskInput.setVersion(LegacyESVersion.fromId(7100199)); final RuntimeException e = expectThrows(RuntimeException.class, () -> task.accept(new Tuple<>(taskInput, terminal))); @@ -51,7 +51,7 @@ public void testGetSummaryFields() { taskInput.setEsConfig(PathUtils.get("es_home")); taskInput.setCluster("some-cluster"); taskInput.setNode("some-node"); - taskInput.setVersion(LegacyESVersion.V_7_10_2); + taskInput.setVersion(LegacyESVersion.fromId(7100299)); taskInput.setBaseUrl("some-url"); taskInput.setPlugins(Arrays.asList("plugin-1", "plugin-2")); diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java index 57865e15d523a..1ee4eff6ba055 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java @@ -124,7 +124,7 @@ import org.apache.lucene.analysis.tr.TurkishAnalyzer; import org.apache.lucene.analysis.util.ElisionFilter; import org.apache.lucene.util.SetOnce; -import org.opensearch.LegacyESVersion; +import org.opensearch.Version; import org.opensearch.client.Client; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.service.ClusterService; @@ -347,7 +347,12 @@ public Map> getTokenizers() { tokenizers.put("simple_pattern_split", SimplePatternSplitTokenizerFactory::new); tokenizers.put("thai", ThaiTokenizerFactory::new); tokenizers.put("nGram", (IndexSettings indexSettings, Environment environment, String name, Settings settings) -> { - if (indexSettings.getIndexVersionCreated().onOrAfter(LegacyESVersion.V_7_6_0)) { + if 
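`LegacyESVersion.fromId(7100299)` denotes 7.10.2: legacy version ids pack major, minor, revision, and build into two decimal digits each, with 99 conventionally marking a release build. A sketch of the decoding, assuming that layout:

```java
// Decodes packed legacy version ids, e.g. 7100299 -> "7.10.2 (build 99)".
final class LegacyVersionId {
    static String describe(int id) {
        int major = id / 1_000_000;
        int minor = (id / 10_000) % 100;
        int revision = (id / 100) % 100;
        int build = id % 100; // 99 = release; lower values were alpha/beta/RC
        return major + "." + minor + "." + revision + " (build " + build + ")";
    }
}
```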
(indexSettings.getIndexVersionCreated().onOrAfter(Version.V_3_0_0)) { + throw new IllegalArgumentException( + "The [nGram] tokenizer name was deprecated pre 1.0. " + + "Please use the tokenizer name to [ngram] for indices created in versions 3.0 or higher instead." + ); + } else { deprecationLogger.deprecate( "nGram_tokenizer_deprecation", "The [nGram] tokenizer name is deprecated and will be removed in a future version. " @@ -358,7 +363,12 @@ public Map> getTokenizers() { }); tokenizers.put("ngram", NGramTokenizerFactory::new); tokenizers.put("edgeNGram", (IndexSettings indexSettings, Environment environment, String name, Settings settings) -> { - if (indexSettings.getIndexVersionCreated().onOrAfter(LegacyESVersion.V_7_6_0)) { + if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_3_0_0)) { + throw new IllegalArgumentException( + "The [edgeNGram] tokenizer name was deprecated pre 1.0. " + + "Please use the tokenizer name to [edge_ngram] for indices created in versions 3.0 or higher instead." + ); + } else { deprecationLogger.deprecate( "edgeNGram_tokenizer_deprecation", "The [edgeNGram] tokenizer name is deprecated and will be removed in a future version. " @@ -485,19 +495,10 @@ public List getPreConfiguredTokenFilters() { filters.add(PreConfiguredTokenFilter.singleton("dutch_stem", false, input -> new SnowballFilter(input, new DutchStemmer()))); filters.add(PreConfiguredTokenFilter.singleton("edge_ngram", false, false, input -> new EdgeNGramTokenFilter(input, 1))); filters.add(PreConfiguredTokenFilter.openSearchVersion("edgeNGram", false, false, (reader, version) -> { - if (version.onOrAfter(LegacyESVersion.V_7_0_0)) { - throw new IllegalArgumentException( - "The [edgeNGram] token filter name was deprecated in 6.4 and cannot be used in new indices. " - + "Please change the filter name to [edge_ngram] instead." - ); - } else { - deprecationLogger.deprecate( - "edgeNGram_deprecation", - "The [edgeNGram] token filter name is deprecated and will be removed in a future version. " - + "Please change the filter name to [edge_ngram] instead." - ); - } - return new EdgeNGramTokenFilter(reader, 1); + throw new IllegalArgumentException( + "The [edgeNGram] token filter name was deprecated in 6.4 and cannot be used in new indices. " + + "Please change the filter name to [edge_ngram] instead." + ); })); filters.add( PreConfiguredTokenFilter.singleton("elision", true, input -> new ElisionFilter(input, FrenchAnalyzer.DEFAULT_ARTICLES)) @@ -524,19 +525,10 @@ public List getPreConfiguredTokenFilters() { ); filters.add(PreConfiguredTokenFilter.singleton("ngram", false, false, reader -> new NGramTokenFilter(reader, 1, 2, false))); filters.add(PreConfiguredTokenFilter.openSearchVersion("nGram", false, false, (reader, version) -> { - if (version.onOrAfter(LegacyESVersion.V_7_0_0)) { - throw new IllegalArgumentException( - "The [nGram] token filter name was deprecated in 6.4 and cannot be used in new indices. " - + "Please change the filter name to [ngram] instead." - ); - } else { - deprecationLogger.deprecate( - "nGram_deprecation", - "The [nGram] token filter name is deprecated and will be removed in a future version. " - + "Please change the filter name to [ngram] instead." - ); - } - return new NGramTokenFilter(reader, 1, 2, false); + throw new IllegalArgumentException( + "The [nGram] token filter name was deprecated in 6.4 and cannot be used in new indices. " + + "Please change the filter name to [ngram] instead." 
+ ); })); filters.add(PreConfiguredTokenFilter.singleton("persian_normalization", true, PersianNormalizationFilter::new)); filters.add(PreConfiguredTokenFilter.singleton("porter_stem", false, PorterStemFilter::new)); @@ -581,18 +573,22 @@ public List getPreConfiguredTokenFilters() { ) ) ); - filters.add(PreConfiguredTokenFilter.openSearchVersion("word_delimiter_graph", false, false, (input, version) -> { - boolean adjustOffsets = version.onOrAfter(LegacyESVersion.V_7_3_0); - return new WordDelimiterGraphFilter( - input, - adjustOffsets, - WordDelimiterIterator.DEFAULT_WORD_DELIM_TABLE, - WordDelimiterGraphFilter.GENERATE_WORD_PARTS | WordDelimiterGraphFilter.GENERATE_NUMBER_PARTS - | WordDelimiterGraphFilter.SPLIT_ON_CASE_CHANGE | WordDelimiterGraphFilter.SPLIT_ON_NUMERICS - | WordDelimiterGraphFilter.STEM_ENGLISH_POSSESSIVE, - null - ); - })); + filters.add( + PreConfiguredTokenFilter.openSearchVersion( + "word_delimiter_graph", + false, + false, + (input, version) -> new WordDelimiterGraphFilter( + input, + true, + WordDelimiterIterator.DEFAULT_WORD_DELIM_TABLE, + WordDelimiterGraphFilter.GENERATE_WORD_PARTS | WordDelimiterGraphFilter.GENERATE_NUMBER_PARTS + | WordDelimiterGraphFilter.SPLIT_ON_CASE_CHANGE | WordDelimiterGraphFilter.SPLIT_ON_NUMERICS + | WordDelimiterGraphFilter.STEM_ENGLISH_POSSESSIVE, + null + ) + ) + ); return filters; } @@ -606,12 +602,12 @@ public List getPreConfiguredTokenizers() { tokenizers.add(PreConfiguredTokenizer.singleton("letter", LetterTokenizer::new)); tokenizers.add(PreConfiguredTokenizer.singleton("whitespace", WhitespaceTokenizer::new)); tokenizers.add(PreConfiguredTokenizer.singleton("ngram", NGramTokenizer::new)); - tokenizers.add(PreConfiguredTokenizer.openSearchVersion("edge_ngram", (version) -> { - if (version.onOrAfter(LegacyESVersion.V_7_3_0)) { - return new EdgeNGramTokenizer(NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE, NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE); - } - return new EdgeNGramTokenizer(EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE); - })); + tokenizers.add( + PreConfiguredTokenizer.openSearchVersion( + "edge_ngram", + (version) -> new EdgeNGramTokenizer(NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE, NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE) + ) + ); tokenizers.add(PreConfiguredTokenizer.singleton("pattern", () -> new PatternTokenizer(Regex.compile("\\W+", null), -1))); tokenizers.add(PreConfiguredTokenizer.singleton("thai", ThaiTokenizer::new)); // TODO deprecate and remove in API @@ -620,7 +616,12 @@ public List getPreConfiguredTokenizers() { // Temporary shim for aliases. TODO deprecate after they are moved tokenizers.add(PreConfiguredTokenizer.openSearchVersion("nGram", (version) -> { - if (version.onOrAfter(LegacyESVersion.V_7_6_0)) { + if (version.onOrAfter(Version.V_3_0_0)) { + throw new IllegalArgumentException( + "The [nGram] tokenizer name was deprecated pre 1.0. " + + "Please use the tokenizer name to [ngram] for indices created in versions 3.0 or higher instead." + ); + } else { deprecationLogger.deprecate( "nGram_tokenizer_deprecation", "The [nGram] tokenizer name is deprecated and will be removed in a future version. " @@ -630,17 +631,19 @@ public List getPreConfiguredTokenizers() { return new NGramTokenizer(); })); tokenizers.add(PreConfiguredTokenizer.openSearchVersion("edgeNGram", (version) -> { - if (version.onOrAfter(LegacyESVersion.V_7_6_0)) { + if (version.onOrAfter(Version.V_3_0_0)) { + throw new IllegalArgumentException( + "The [edgeNGram] tokenizer name was deprecated pre 1.0. 
" + + "Please use the tokenizer name to [edge_ngram] for indices created in versions 3.0 or higher instead." + ); + } else { deprecationLogger.deprecate( "edgeNGram_tokenizer_deprecation", "The [edgeNGram] tokenizer name is deprecated and will be removed in a future version. " + "Please change the tokenizer name to [edge_ngram] instead." ); } - if (version.onOrAfter(LegacyESVersion.V_7_3_0)) { - return new EdgeNGramTokenizer(NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE, NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE); - } - return new EdgeNGramTokenizer(EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE); + return new EdgeNGramTokenizer(NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE, NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE); })); tokenizers.add(PreConfiguredTokenizer.singleton("PathHierarchy", PathHierarchyTokenizer::new)); diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/ConcatenateGraphTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/ConcatenateGraphTokenFilterFactory.java index 0d1a2b185d1d3..7c1c15ef74e30 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/ConcatenateGraphTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/ConcatenateGraphTokenFilterFactory.java @@ -11,7 +11,6 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.miscellaneous.ConcatenateGraphFilter; import org.apache.lucene.util.automaton.TooComplexToDeterminizeException; -import org.opensearch.LegacyESVersion; import org.opensearch.common.settings.Settings; import org.opensearch.env.Environment; import org.opensearch.index.IndexSettings; @@ -24,11 +23,6 @@ * max_graph_expansions is 100 as the default value of 10_000 seems to be unnecessarily large and preserve_separator is false. * *
<ul> - * <li>preserve_separator: - * For LegacyESVersion lesser than {@link LegacyESVersion#V_7_6_0} i.e. lucene versions lesser - * than {@link org.apache.lucene.util.Version#LUCENE_8_4_0} - * Whether {@link ConcatenateGraphFilter#SEP_LABEL} should separate the input tokens in the concatenated token. - * </li> * <li>token_separator: * Separator to use for concatenation. Must be a String with a single character or empty. * If not present, {@link ConcatenateGraphTokenFilterFactory#DEFAULT_TOKEN_SEPARATOR} will be used. @@ -59,17 +53,11 @@ public class ConcatenateGraphTokenFilterFactory extends AbstractTokenFilterFacto ConcatenateGraphTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); - if (indexSettings.getIndexVersionCreated().onOrAfter(LegacyESVersion.V_7_6_0)) { // i.e. Lucene 8.4.0 - String separator = settings.get("token_separator", DEFAULT_TOKEN_SEPARATOR); - if (separator.length() > 1) { - throw new IllegalArgumentException("token_separator must be either empty or a single character"); - } - tokenSeparator = separator.length() == 0 ? null : separator.charAt(0); // null means no separator while concatenating - } else { - boolean preserveSep = settings.getAsBoolean("preserve_separator", ConcatenateGraphFilter.DEFAULT_PRESERVE_SEP); - tokenSeparator = preserveSep ? ConcatenateGraphFilter.DEFAULT_TOKEN_SEPARATOR : null; + String separator = settings.get("token_separator", DEFAULT_TOKEN_SEPARATOR); + if (separator.length() > 1) { + throw new IllegalArgumentException("token_separator must be either empty or a single character"); } - + tokenSeparator = separator.length() == 0 ? null : separator.charAt(0); // null means no separator while concatenating maxGraphExpansions = settings.getAsInt("max_graph_expansions", DEFAULT_MAX_GRAPH_EXPANSIONS); preservePositionIncrements = settings.getAsBoolean("preserve_position_increments", DEFAULT_PRESERVE_POSITION_INCREMENTS); } diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/EnglishPluralStemFilter.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/EnglishPluralStemFilter.java new file mode 100644 index 0000000000000..c30318a31527b --- /dev/null +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/EnglishPluralStemFilter.java @@ -0,0 +1,182 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details.
+ */ + +package org.opensearch.analysis.common; + +import org.apache.lucene.analysis.TokenFilter; +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.en.EnglishMinimalStemFilter; +import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; +import org.apache.lucene.analysis.tokenattributes.KeywordAttribute; + +import java.io.IOException; + +public final class EnglishPluralStemFilter extends TokenFilter { + private final EnglishPluralStemmer stemmer = new EnglishPluralStemmer(); + private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); + private final KeywordAttribute keywordAttr = addAttribute(KeywordAttribute.class); + + public EnglishPluralStemFilter(TokenStream input) { + super(input); + } + + @Override + public boolean incrementToken() throws IOException { + if (input.incrementToken()) { + if (!keywordAttr.isKeyword()) { + final int newlen = stemmer.stem(termAtt.buffer(), termAtt.length()); + termAtt.setLength(newlen); + } + return true; + } else { + return false; + } + } + + /** + * Plural stemmer for English based on the {@link EnglishMinimalStemFilter} + * <p> + * This stemmer removes plurals but beyond EnglishMinimalStemFilter adds + * four new suffix rules to remove dangling e characters: + * <p> + * <ul> + * <li>xes - "boxes" becomes "box"</li> + * <li>sses - "dresses" becomes "dress"</li> + * <li>shes - "dishes" becomes "dish"</li> + * <li>tches - "watches" becomes "watch"</li> + * </ul> + * See https://github.com/elastic/elasticsearch/issues/42892 + * <p> + * In addition the s stemmer logic is amended so that: + * <p> + * <ul> + * <li>ees->ee so that bees matches bee</li> + * <li>ies->y only on longer words so that ties matches tie</li> + * <li>oes->o rule so that tomatoes matches tomato but retains e for some words, e.g. shoes to shoe</li> + * </ul>
    + */ + public static class EnglishPluralStemmer { + + // Words ending in oes that retain the e when stemmed + public static final char[][] oesExceptions = { "shoes".toCharArray(), "canoes".toCharArray(), "oboes".toCharArray() }; + // Words ending in ches that retain the e when stemmed + public static final char[][] chesExceptions = { + "cliches".toCharArray(), + "avalanches".toCharArray(), + "mustaches".toCharArray(), + "moustaches".toCharArray(), + "quiches".toCharArray(), + "headaches".toCharArray(), + "heartaches".toCharArray(), + "porsches".toCharArray(), + "tranches".toCharArray(), + "caches".toCharArray() }; + + @SuppressWarnings("fallthrough") + public int stem(char s[], int len) { + if (len < 3 || s[len - 1] != 's') return len; + + switch (s[len - 2]) { + case 'u': + case 's': + return len; + case 'e': + // Modified ies->y logic from original s-stemmer - only work on strings > 4 + // so spies -> spy still but pies->pie. + // The original code also special-cased aies and eies for no good reason as far as I can tell. + // ( no words of consequence - eg http://www.thefreedictionary.com/words-that-end-in-aies ) + if (len > 4 && s[len - 3] == 'i') { + s[len - 3] = 'y'; + return len - 2; + } + + // Suffix rules to remove any dangling "e" + if (len > 3) { + // xes (but >1 prefix so we can stem "boxes->box" but keep "axes->axe") + if (len > 4 && s[len - 3] == 'x') { + return len - 2; + } + // oes + if (len > 3 && s[len - 3] == 'o') { + if (isException(s, len, oesExceptions)) { + // Only remove the S + return len - 1; + } + // Remove the es + return len - 2; + } + if (len > 4) { + // shes/sses + if (s[len - 4] == 's' && (s[len - 3] == 'h' || s[len - 3] == 's')) { + return len - 2; + } + + // ches + if (len > 4) { + if (s[len - 4] == 'c' && s[len - 3] == 'h') { + if (isException(s, len, chesExceptions)) { + // Only remove the S + return len - 1; + } + // Remove the es + return len - 2; + + } + } + } + } + + default: + return len - 1; + } + } + + private boolean isException(char[] s, int len, char[][] exceptionsList) { + for (char[] oesRule : exceptionsList) { + int rulePos = oesRule.length - 1; + int sPos = len - 1; + boolean matched = true; + while (rulePos >= 0 && sPos >= 0) { + if (oesRule[rulePos] != s[sPos]) { + matched = false; + break; + } + rulePos--; + sPos--; + } + if (matched) { + return true; + } + } + return false; + } + } + +} diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/NGramTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/NGramTokenFilterFactory.java index 218bb74b84667..a6adf680a454c 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/NGramTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/NGramTokenFilterFactory.java @@ -34,7 +34,6 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.ngram.NGramTokenFilter; -import org.opensearch.LegacyESVersion; import org.opensearch.common.settings.Settings; import org.opensearch.env.Environment; import org.opensearch.index.IndexSettings; @@ -54,25 +53,15 @@ public class NGramTokenFilterFactory extends AbstractTokenFilterFactory { this.maxGram = settings.getAsInt("max_gram", 2); int ngramDiff = maxGram - minGram; if (ngramDiff > maxAllowedNgramDiff) { - if (indexSettings.getIndexVersionCreated().onOrAfter(LegacyESVersion.V_7_0_0)) { - throw new IllegalArgumentException( - "The difference between max_gram and min_gram in NGram Tokenizer must be 
less than or equal to: [" - + maxAllowedNgramDiff - + "] but was [" - + ngramDiff - + "]. This limit can be set by changing the [" - + IndexSettings.MAX_NGRAM_DIFF_SETTING.getKey() - + "] index level setting." - ); - } else { - deprecationLogger.deprecate( - "ngram_big_difference", - "Deprecated big difference between max_gram and min_gram in NGram Tokenizer," - + "expected difference must be less than or equal to: [" - + maxAllowedNgramDiff - + "]" - ); - } + throw new IllegalArgumentException( + "The difference between max_gram and min_gram in NGram Tokenizer must be less than or equal to: [" + + maxAllowedNgramDiff + + "] but was [" + + ngramDiff + + "]. This limit can be set by changing the [" + + IndexSettings.MAX_NGRAM_DIFF_SETTING.getKey() + + "] index level setting." + ); } preserveOriginal = settings.getAsBoolean(PRESERVE_ORIG_KEY, false); } diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/StemmerTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/StemmerTokenFilterFactory.java index 5d96f01265cf6..fc045447e159e 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/StemmerTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/StemmerTokenFilterFactory.java @@ -154,6 +154,8 @@ public TokenStream create(TokenStream tokenStream) { return new SnowballFilter(tokenStream, new EnglishStemmer()); } else if ("minimal_english".equalsIgnoreCase(language) || "minimalEnglish".equalsIgnoreCase(language)) { return new EnglishMinimalStemFilter(tokenStream); + } else if ("plural_english".equalsIgnoreCase(language) || "pluralEnglish".equalsIgnoreCase(language)) { + return new EnglishPluralStemFilter(tokenStream); } else if ("possessive_english".equalsIgnoreCase(language) || "possessiveEnglish".equalsIgnoreCase(language)) { return new EnglishPossessiveFilter(tokenStream); diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/StemmerTokenFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/StemmerTokenFilterFactoryTests.java index 2cd7b74cd8c35..18d3727475065 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/StemmerTokenFilterFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/StemmerTokenFilterFactoryTests.java @@ -111,6 +111,83 @@ public void testPorter2FilterFactory() throws IOException { } } + public void testEnglishPluralFilter() throws IOException { + int iters = scaledRandomIntBetween(20, 100); + for (int i = 0; i < iters; i++) { + + Version v = VersionUtils.randomVersion(random()); + Settings settings = Settings.builder() + .put("index.analysis.filter.my_plurals.type", "stemmer") + .put("index.analysis.filter.my_plurals.language", "plural_english") + .put("index.analysis.analyzer.my_plurals.tokenizer", "whitespace") + .put("index.analysis.analyzer.my_plurals.filter", "my_plurals") + .put(SETTING_VERSION_CREATED, v) + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .build(); + + OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings, PLUGIN); + TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_plurals"); + assertThat(tokenFilter, instanceOf(StemmerTokenFilterFactory.class)); + Tokenizer tokenizer = new WhitespaceTokenizer(); + tokenizer.setReader(new StringReader("dresses")); + TokenStream create = 
tokenFilter.create(tokenizer); + IndexAnalyzers indexAnalyzers = analysis.indexAnalyzers; + NamedAnalyzer analyzer = indexAnalyzers.get("my_plurals"); + assertThat(create, instanceOf(EnglishPluralStemFilter.class)); + + // Check old EnglishMinimalStemmer ("S" stemmer) logic + assertAnalyzesTo(analyzer, "phones", new String[] { "phone" }); + assertAnalyzesTo(analyzer, "horses", new String[] { "horse" }); + assertAnalyzesTo(analyzer, "cameras", new String[] { "camera" }); + + // The orginal s stemmer gives up on stemming oes words because English has no fixed rule for the stem + // (see https://howtospell.co.uk/making-O-words-plural ) + // This stemmer removes the es but retains e for a small number of exceptions + assertAnalyzesTo(analyzer, "mosquitoes", new String[] { "mosquito" }); + assertAnalyzesTo(analyzer, "heroes", new String[] { "hero" }); + // oes exceptions that retain the e. + assertAnalyzesTo(analyzer, "shoes", new String[] { "shoe" }); + assertAnalyzesTo(analyzer, "horseshoes", new String[] { "horseshoe" }); + assertAnalyzesTo(analyzer, "canoes", new String[] { "canoe" }); + assertAnalyzesTo(analyzer, "oboes", new String[] { "oboe" }); + + // Check improved EnglishPluralStemFilter logic + // sses + assertAnalyzesTo(analyzer, "dresses", new String[] { "dress" }); + assertAnalyzesTo(analyzer, "possess", new String[] { "possess" }); + assertAnalyzesTo(analyzer, "possesses", new String[] { "possess" }); + // xes + assertAnalyzesTo(analyzer, "boxes", new String[] { "box" }); + assertAnalyzesTo(analyzer, "axes", new String[] { "axe" }); + // shes + assertAnalyzesTo(analyzer, "dishes", new String[] { "dish" }); + assertAnalyzesTo(analyzer, "washes", new String[] { "wash" }); + // ees + assertAnalyzesTo(analyzer, "employees", new String[] { "employee" }); + assertAnalyzesTo(analyzer, "bees", new String[] { "bee" }); + // tch + assertAnalyzesTo(analyzer, "watches", new String[] { "watch" }); + assertAnalyzesTo(analyzer, "itches", new String[] { "itch" }); + // ies->y but only for length >4 + assertAnalyzesTo(analyzer, "spies", new String[] { "spy" }); + assertAnalyzesTo(analyzer, "ties", new String[] { "tie" }); + assertAnalyzesTo(analyzer, "lies", new String[] { "lie" }); + assertAnalyzesTo(analyzer, "pies", new String[] { "pie" }); + assertAnalyzesTo(analyzer, "dies", new String[] { "die" }); + + assertAnalyzesTo(analyzer, "lunches", new String[] { "lunch" }); + assertAnalyzesTo(analyzer, "avalanches", new String[] { "avalanche" }); + assertAnalyzesTo(analyzer, "headaches", new String[] { "headache" }); + assertAnalyzesTo(analyzer, "caches", new String[] { "cache" }); + assertAnalyzesTo(analyzer, "beaches", new String[] { "beach" }); + assertAnalyzesTo(analyzer, "britches", new String[] { "britch" }); + assertAnalyzesTo(analyzer, "cockroaches", new String[] { "cockroach" }); + assertAnalyzesTo(analyzer, "cliches", new String[] { "cliche" }); + assertAnalyzesTo(analyzer, "quiches", new String[] { "quiche" }); + + } + } + public void testMultipleLanguagesThrowsException() throws IOException { Version v = VersionUtils.randomVersion(random()); Settings settings = Settings.builder() diff --git a/modules/geo/src/main/java/org/opensearch/geo/GeoModulePlugin.java b/modules/geo/src/main/java/org/opensearch/geo/GeoModulePlugin.java index 77abba7f54677..8ca1d2a0c214f 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/GeoModulePlugin.java +++ b/modules/geo/src/main/java/org/opensearch/geo/GeoModulePlugin.java @@ -35,9 +35,8 @@ import 
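To see the new `plural_english` stemmer in isolation, a standalone driver (hypothetical, not part of the PR) can call the inner `EnglishPluralStemmer` directly:

```java
import org.opensearch.analysis.common.EnglishPluralStemFilter;

final class StemDemo {
    // Stems a single word in place and returns the shortened string.
    static String stem(String word) {
        char[] buffer = word.toCharArray();
        int length = new EnglishPluralStemFilter.EnglishPluralStemmer().stem(buffer, buffer.length);
        return new String(buffer, 0, length);
    }

    public static void main(String[] args) {
        System.out.println(stem("dresses")); // dress (sses rule)
        System.out.println(stem("boxes"));   // box   (xes rule)
        System.out.println(stem("shoes"));   // shoe  (oes exception keeps the e)
        System.out.println(stem("spies"));   // spy   (ies->y, word longer than 4)
    }
}
```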
org.opensearch.geo.search.aggregations.bucket.composite.GeoTileGridValuesSourceBuilder; import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder; import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; -import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregator; -import org.opensearch.geo.search.aggregations.bucket.geogrid.InternalGeoHashGrid; -import org.opensearch.geo.search.aggregations.bucket.geogrid.InternalGeoTileGrid; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoHashGrid; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGrid; import org.opensearch.geo.search.aggregations.metrics.GeoBounds; import org.opensearch.geo.search.aggregations.metrics.GeoBoundsAggregationBuilder; import org.opensearch.geo.search.aggregations.metrics.GeoBoundsGeoShapeAggregator; @@ -78,18 +77,18 @@ public List getAggregations() { GeoHashGridAggregationBuilder.NAME, GeoHashGridAggregationBuilder::new, GeoHashGridAggregationBuilder.PARSER - ).addResultReader(InternalGeoHashGrid::new).setAggregatorRegistrar(GeoHashGridAggregationBuilder::registerAggregators); + ).addResultReader(GeoHashGrid::new).setAggregatorRegistrar(GeoHashGridAggregationBuilder::registerAggregators); final AggregationSpec geoTileGrid = new AggregationSpec( GeoTileGridAggregationBuilder.NAME, GeoTileGridAggregationBuilder::new, GeoTileGridAggregationBuilder.PARSER - ).addResultReader(InternalGeoTileGrid::new).setAggregatorRegistrar(GeoTileGridAggregationBuilder::registerAggregators); + ).addResultReader(GeoTileGrid::new).setAggregatorRegistrar(GeoTileGridAggregationBuilder::registerAggregators); return List.of(geoBounds, geoHashGrid, geoTileGrid); } /** - * Registering the {@link GeoTileGridAggregator} in the {@link CompositeAggregation}. + * Registering the geotile grid in the {@link CompositeAggregation}. 
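For orientation, a minimal sketch of what this registration enables on the request side: a geotile grid used as a composite value source. The source name, field name, and sizes below are hypothetical.

    import java.util.List;
    import org.opensearch.geo.search.aggregations.bucket.composite.GeoTileGridValuesSourceBuilder;
    import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder;

    static CompositeAggregationBuilder geotileComposite() {
        // Page over precision-6 geotile cells of the (hypothetical) "location" field.
        GeoTileGridValuesSourceBuilder tiles = new GeoTileGridValuesSourceBuilder("tiles").field("location").precision(6);
        return new CompositeAggregationBuilder("by_tile", List.of(tiles)).size(100);
    }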
* * @return a {@link List} of {@link CompositeAggregationSpec} */ diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilder.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilder.java index 84d5943da287f..9e671118637b9 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilder.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilder.java @@ -33,7 +33,6 @@ package org.opensearch.geo.search.aggregations.bucket.composite; import org.apache.lucene.index.IndexReader; -import org.opensearch.LegacyESVersion; import org.opensearch.common.ParseField; import org.opensearch.common.geo.GeoBoundingBox; import org.opensearch.common.geo.GeoPoint; @@ -175,9 +174,7 @@ public static void register(ValuesSourceRegistry.Builder builder) { public GeoTileGridValuesSourceBuilder(StreamInput in) throws IOException { super(in); this.precision = in.readInt(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_6_0)) { - this.geoBoundingBox = new GeoBoundingBox(in); - } + this.geoBoundingBox = new GeoBoundingBox(in); } public GeoTileGridValuesSourceBuilder precision(int precision) { @@ -198,9 +195,7 @@ public GeoTileGridValuesSourceBuilder format(String format) { @Override protected void innerWriteTo(StreamOutput out) throws IOException { out.writeInt(precision); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_6_0)) { - geoBoundingBox.writeTo(out); - } + geoBoundingBox.writeTo(out); } @Override diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoGrid.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BaseGeoGrid.java similarity index 72% rename from modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoGrid.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BaseGeoGrid.java index 9dbed7b27307a..b58c19a7186e6 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoGrid.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BaseGeoGrid.java @@ -54,30 +54,30 @@ * All geo-grid hash-encoding in a grid are of the same precision and held internally as a single long * for efficiency's sake. * - * @opensearch.internal + * @opensearch.api */ -public abstract class InternalGeoGrid extends InternalMultiBucketAggregation< - InternalGeoGrid, - InternalGeoGridBucket> implements GeoGrid { +public abstract class BaseGeoGrid extends InternalMultiBucketAggregation + implements + GeoGrid { protected final int requiredSize; - protected final List buckets; + protected final List buckets; - InternalGeoGrid(String name, int requiredSize, List buckets, Map metadata) { + protected BaseGeoGrid(String name, int requiredSize, List buckets, Map metadata) { super(name, metadata); this.requiredSize = requiredSize; this.buckets = buckets; } - abstract Writeable.Reader getBucketReader(); + protected abstract Writeable.Reader getBucketReader(); /** * Read from a stream. 
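These stream constructors now read every field unconditionally, since the LegacyESVersion gates are gone; the reader and the writer must stay mirror images, or the transport stream desynchronizes mid-message. A minimal sketch of that symmetry with a hypothetical Writeable (field names invented):

    import java.io.IOException;
    import org.opensearch.common.geo.GeoBoundingBox;
    import org.opensearch.common.io.stream.StreamInput;
    import org.opensearch.common.io.stream.StreamOutput;
    import org.opensearch.common.io.stream.Writeable;

    final class GridWireExample implements Writeable {
        private final int precision;
        private final GeoBoundingBox boundingBox;

        GridWireExample(StreamInput in) throws IOException {
            precision = in.readVInt();
            boundingBox = new GeoBoundingBox(in); // formerly guarded by an onOrAfter(...) version check
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeVInt(precision);   // same order as the reader
            boundingBox.writeTo(out);   // same gate-free condition as the reader
        }
    }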
*/ - public InternalGeoGrid(StreamInput in) throws IOException { + public BaseGeoGrid(StreamInput in) throws IOException { super(in); requiredSize = readSize(in); - buckets = (List) in.readList(getBucketReader()); + buckets = (List) in.readList(getBucketReader()); } @Override @@ -86,24 +86,24 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeList(buckets); } - abstract InternalGeoGrid create(String name, int requiredSize, List buckets, Map metadata); + protected abstract BaseGeoGrid create(String name, int requiredSize, List buckets, Map metadata); @Override - public List getBuckets() { + public List getBuckets() { return unmodifiableList(buckets); } @Override - public InternalGeoGrid reduce(List aggregations, ReduceContext reduceContext) { - LongObjectPagedHashMap> buckets = null; + public BaseGeoGrid reduce(List aggregations, ReduceContext reduceContext) { + LongObjectPagedHashMap> buckets = null; for (InternalAggregation aggregation : aggregations) { - InternalGeoGrid grid = (InternalGeoGrid) aggregation; + BaseGeoGrid grid = (BaseGeoGrid) aggregation; if (buckets == null) { buckets = new LongObjectPagedHashMap<>(grid.buckets.size(), reduceContext.bigArrays()); } for (Object obj : grid.buckets) { - InternalGeoGridBucket bucket = (InternalGeoGridBucket) obj; - List existingBuckets = buckets.get(bucket.hashAsLong()); + BaseGeoGridBucket bucket = (BaseGeoGridBucket) obj; + List existingBuckets = buckets.get(bucket.hashAsLong()); if (existingBuckets == null) { existingBuckets = new ArrayList<>(aggregations.size()); buckets.put(bucket.hashAsLong(), existingBuckets); @@ -113,13 +113,13 @@ public InternalGeoGrid reduce(List aggregations, ReduceCont } final int size = Math.toIntExact(reduceContext.isFinalReduce() == false ? buckets.size() : Math.min(requiredSize, buckets.size())); - BucketPriorityQueue ordered = new BucketPriorityQueue<>(size); - for (LongObjectPagedHashMap.Cursor> cursor : buckets) { - List sameCellBuckets = cursor.value; + BucketPriorityQueue ordered = new BucketPriorityQueue<>(size); + for (LongObjectPagedHashMap.Cursor> cursor : buckets) { + List sameCellBuckets = cursor.value; ordered.insertWithOverflow(reduceBucket(sameCellBuckets, reduceContext)); } buckets.close(); - InternalGeoGridBucket[] list = new InternalGeoGridBucket[ordered.size()]; + BaseGeoGridBucket[] list = new BaseGeoGridBucket[ordered.size()]; for (int i = ordered.size() - 1; i >= 0; i--) { list[i] = ordered.pop(); } @@ -128,11 +128,11 @@ public InternalGeoGrid reduce(List aggregations, ReduceCont } @Override - protected InternalGeoGridBucket reduceBucket(List buckets, ReduceContext context) { + protected BaseGeoGridBucket reduceBucket(List buckets, ReduceContext context) { assert buckets.size() > 0; List aggregationsList = new ArrayList<>(buckets.size()); long docCount = 0; - for (InternalGeoGridBucket bucket : buckets) { + for (BaseGeoGridBucket bucket : buckets) { docCount += bucket.docCount; aggregationsList.add(bucket.aggregations); } @@ -140,12 +140,12 @@ protected InternalGeoGridBucket reduceBucket(List buckets return createBucket(buckets.get(0).hashAsLong, docCount, aggs); } - abstract B createBucket(long hashAsLong, long docCount, InternalAggregations aggregations); + protected abstract B createBucket(long hashAsLong, long docCount, InternalAggregations aggregations); @Override public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { builder.startArray(CommonFields.BUCKETS.getPreferredName()); - for (InternalGeoGridBucket bucket : 
buckets) { + for (BaseGeoGridBucket bucket : buckets) { bucket.toXContent(builder, params); } builder.endArray(); @@ -168,7 +168,7 @@ public boolean equals(Object obj) { if (obj == null || getClass() != obj.getClass()) return false; if (super.equals(obj) == false) return false; - InternalGeoGrid other = (InternalGeoGrid) obj; + BaseGeoGrid other = (BaseGeoGrid) obj; return Objects.equals(requiredSize, other.requiredSize) && Objects.equals(buckets, other.buckets); } diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BaseGeoGridBucket.java similarity index 87% rename from modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BaseGeoGridBucket.java index 93fcdbd098400..f362d2b3d33d6 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BaseGeoGridBucket.java @@ -45,12 +45,12 @@ /** * Base implementation of geogrid aggs * - * @opensearch.internal + * @opensearch.api */ -public abstract class InternalGeoGridBucket extends InternalMultiBucketAggregation.InternalBucket +public abstract class BaseGeoGridBucket extends InternalMultiBucketAggregation.InternalBucket implements GeoGrid.Bucket, - Comparable { + Comparable { protected long hashAsLong; protected long docCount; @@ -58,7 +58,7 @@ public abstract class InternalGeoGridBucket ext long bucketOrd; - public InternalGeoGridBucket(long hashAsLong, long docCount, InternalAggregations aggregations) { + public BaseGeoGridBucket(long hashAsLong, long docCount, InternalAggregations aggregations) { this.docCount = docCount; this.aggregations = aggregations; this.hashAsLong = hashAsLong; @@ -67,7 +67,7 @@ public InternalGeoGridBucket(long hashAsLong, long docCount, InternalAggregation /** * Read from a stream. 
*/ - public InternalGeoGridBucket(StreamInput in) throws IOException { + public BaseGeoGridBucket(StreamInput in) throws IOException { hashAsLong = in.readLong(); docCount = in.readVLong(); aggregations = InternalAggregations.readFrom(in); @@ -80,7 +80,7 @@ public void writeTo(StreamOutput out) throws IOException { aggregations.writeTo(out); } - long hashAsLong() { + public long hashAsLong() { return hashAsLong; } @@ -95,7 +95,7 @@ public Aggregations getAggregations() { } @Override - public int compareTo(InternalGeoGridBucket other) { + public int compareTo(BaseGeoGridBucket other) { if (this.hashAsLong > other.hashAsLong) { return 1; } @@ -119,7 +119,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - InternalGeoGridBucket bucket = (InternalGeoGridBucket) o; + BaseGeoGridBucket bucket = (BaseGeoGridBucket) o; return hashAsLong == bucket.hashAsLong && docCount == bucket.docCount && Objects.equals(aggregations, bucket.aggregations); } diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BucketPriorityQueue.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BucketPriorityQueue.java index 70d0552b3e80b..83fcdf4f66424 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BucketPriorityQueue.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BucketPriorityQueue.java @@ -38,14 +38,14 @@ * * @opensearch.internal */ -class BucketPriorityQueue extends PriorityQueue { +class BucketPriorityQueue extends PriorityQueue { BucketPriorityQueue(int size) { super(size); } @Override - protected boolean lessThan(InternalGeoGridBucket o1, InternalGeoGridBucket o2) { + protected boolean lessThan(BaseGeoGridBucket o1, BaseGeoGridBucket o2) { int cmp = Long.compare(o2.getDocCount(), o1.getDocCount()); if (cmp == 0) { cmp = o2.compareTo(o1); diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/CellIdSource.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/CellIdSource.java index d40029e9a762d..89ce288770185 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/CellIdSource.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/CellIdSource.java @@ -43,7 +43,7 @@ * Wrapper class to help convert {@link MultiGeoPointValues} * to numeric long values for bucketing. * - * @opensearch.internal + * @opensearch.api */ public class CellIdSource extends ValuesSource.Numeric { private final ValuesSource.GeoPoint valuesSource; diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGrid.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGrid.java index 4ae888640efc8..b2fe6e33ef95c 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGrid.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGrid.java @@ -39,13 +39,13 @@ * A geo-grid aggregation. Defines multiple buckets, each representing a cell in a geo-grid of a specific * precision. * - * @opensearch.internal + * @opensearch.api */ public interface GeoGrid extends MultiBucketsAggregation { /** * A bucket that is associated with a geo-grid cell. 
The key of the bucket is - * the {@link InternalGeoGridBucket#getKeyAsString()} of the cell + * the {@link BaseGeoGridBucket#getKeyAsString()} of the cell */ interface Bucket extends MultiBucketsAggregation.Bucket {} diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java index 4a904b3aa2b16..abc892396fbf7 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java @@ -32,7 +32,6 @@ package org.opensearch.geo.search.aggregations.bucket.geogrid; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.common.ParseField; import org.opensearch.common.geo.GeoBoundingBox; @@ -58,9 +57,9 @@ import java.util.function.Function; /** - * Base Aggregation Builder for geohash_grid and geotile_grid aggs + * Base Aggregation Builder for geogrid aggs * - * @opensearch.internal + * @opensearch.api */ public abstract class GeoGridAggregationBuilder extends ValuesSourceAggregationBuilder { /* recognized field names in JSON */ @@ -125,9 +124,7 @@ public GeoGridAggregationBuilder(StreamInput in) throws IOException { precision = in.readVInt(); requiredSize = in.readVInt(); shardSize = in.readVInt(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_6_0)) { - geoBoundingBox = new GeoBoundingBox(in); - } + geoBoundingBox = new GeoBoundingBox(in); } @Override @@ -140,9 +137,7 @@ protected void innerWriteTo(StreamOutput out) throws IOException { out.writeVInt(precision); out.writeVInt(requiredSize); out.writeVInt(shardSize); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_6_0)) { - geoBoundingBox.writeTo(out); - } + geoBoundingBox.writeTo(out); } /** diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregator.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregator.java index 909772c61a960..db07ac8f947e5 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregator.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregator.java @@ -55,16 +55,16 @@ /** * Aggregates data expressed as longs (for efficiency's sake) but formats results as aggregation-specific strings. * - * @opensearch.internal + * @opensearch.api */ -public abstract class GeoGridAggregator extends BucketsAggregator { +public abstract class GeoGridAggregator extends BucketsAggregator { protected final int requiredSize; protected final int shardSize; protected final ValuesSource.Numeric valuesSource; protected final LongKeyedBucketOrds bucketOrds; - GeoGridAggregator( + protected GeoGridAggregator( String name, AggregatorFactories factories, ValuesSource.Numeric valuesSource, @@ -118,23 +118,23 @@ public void collect(int doc, long owningBucketOrd) throws IOException { }; } - abstract T buildAggregation(String name, int requiredSize, List buckets, Map metadata); + protected abstract T buildAggregation(String name, int requiredSize, List buckets, Map metadata); /** * This method is used to return a re-usable instance of the bucket when building * the aggregation. 
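The shard-level top-N selection below leans on Lucene's PriorityQueue: the queue head is always the current minimum, so insertWithOverflow evicts and returns the smallest element once the queue is full, which is exactly the object the aggregator recycles as its "spare" bucket. A minimal sketch of that pattern, with a hypothetical element type standing in for a bucket:

    import org.apache.lucene.util.PriorityQueue;

    // Keeps the N largest counts seen so far; smaller elements fall out of the head.
    final class TopCounts extends PriorityQueue<long[]> {
        TopCounts(int size) {
            super(size);
        }

        @Override
        protected boolean lessThan(long[] a, long[] b) {
            return a[0] < b[0];
        }
    }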
- * @return a new {@link InternalGeoGridBucket} implementation with empty parameters + * @return a new {@link BaseGeoGridBucket} implementation with empty parameters */ - abstract InternalGeoGridBucket newEmptyBucket(); + protected abstract BaseGeoGridBucket newEmptyBucket(); @Override public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { - InternalGeoGridBucket[][] topBucketsPerOrd = new InternalGeoGridBucket[owningBucketOrds.length][]; + BaseGeoGridBucket[][] topBucketsPerOrd = new BaseGeoGridBucket[owningBucketOrds.length][]; for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) { int size = (int) Math.min(bucketOrds.bucketsInOrd(owningBucketOrds[ordIdx]), shardSize); - BucketPriorityQueue ordered = new BucketPriorityQueue<>(size); - InternalGeoGridBucket spare = null; + BucketPriorityQueue ordered = new BucketPriorityQueue<>(size); + BaseGeoGridBucket spare = null; LongKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrds[ordIdx]); while (ordsEnum.next()) { if (spare == null) { @@ -149,7 +149,7 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I spare = ordered.insertWithOverflow(spare); } - topBucketsPerOrd[ordIdx] = new InternalGeoGridBucket[ordered.size()]; + topBucketsPerOrd[ordIdx] = new BaseGeoGridBucket[ordered.size()]; for (int i = ordered.size() - 1; i >= 0; --i) { topBucketsPerOrd[ordIdx][i] = ordered.pop(); } @@ -163,7 +163,7 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I } @Override - public InternalGeoGrid buildEmptyAggregation() { + public BaseGeoGrid buildEmptyAggregation() { return buildAggregation(name, requiredSize, Collections.emptyList(), metadata()); } diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGrid.java similarity index 70% rename from modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGrid.java index ff1247300939a..aa1d5504ad24f 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGrid.java @@ -43,40 +43,40 @@ * All geohashes in a grid are of the same precision and held internally as a single long * for efficiency's sake. 
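With the result type public under its new name (note the @opensearch.internal to @opensearch.api promotion), response-side code can address it directly. A minimal sketch, assuming a SearchResponse from a query that ran a geohash_grid aggregation named "grid" (both hypothetical):

    import org.opensearch.action.search.SearchResponse;
    import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoGrid;
    import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoHashGrid;

    static void printCells(SearchResponse response) {
        GeoHashGrid grid = response.getAggregations().get("grid");
        for (GeoGrid.Bucket bucket : grid.getBuckets()) {
            System.out.println(bucket.getKeyAsString() + " -> " + bucket.getDocCount());
        }
    }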
* - * @opensearch.internal + * @opensearch.api */ -public class InternalGeoHashGrid extends InternalGeoGrid { +public class GeoHashGrid extends BaseGeoGrid { - InternalGeoHashGrid(String name, int requiredSize, List buckets, Map metadata) { + GeoHashGrid(String name, int requiredSize, List buckets, Map metadata) { super(name, requiredSize, buckets, metadata); } - public InternalGeoHashGrid(StreamInput in) throws IOException { + public GeoHashGrid(StreamInput in) throws IOException { super(in); } @Override - public InternalGeoGrid create(List buckets) { - return new InternalGeoHashGrid(name, requiredSize, buckets, metadata); + public BaseGeoGrid create(List buckets) { + return new GeoHashGrid(name, requiredSize, buckets, metadata); } @Override - public InternalGeoGridBucket createBucket(InternalAggregations aggregations, InternalGeoGridBucket prototype) { + public BaseGeoGridBucket createBucket(InternalAggregations aggregations, BaseGeoGridBucket prototype) { return new InternalGeoHashGridBucket(prototype.hashAsLong, prototype.docCount, aggregations); } @Override - InternalGeoGrid create(String name, int requiredSize, List buckets, Map metadata) { - return new InternalGeoHashGrid(name, requiredSize, buckets, metadata); + protected BaseGeoGrid create(String name, int requiredSize, List buckets, Map metadata) { + return new GeoHashGrid(name, requiredSize, buckets, metadata); } @Override - InternalGeoHashGridBucket createBucket(long hashAsLong, long docCount, InternalAggregations aggregations) { + protected InternalGeoHashGridBucket createBucket(long hashAsLong, long docCount, InternalAggregations aggregations) { return new InternalGeoHashGridBucket(hashAsLong, docCount, aggregations); } @Override - Reader getBucketReader() { + protected Reader getBucketReader() { return InternalGeoHashGridBucket::new; } diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java index bbaf9613fb216..760d7d643c0a5 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java @@ -51,7 +51,7 @@ /** * Aggregation Builder for geohash_grid * - * @opensearch.internal + * @opensearch.api */ public class GeoHashGridAggregationBuilder extends GeoGridAggregationBuilder { public static final String NAME = "geohash_grid"; diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java index 6ca7a4d8a9cb8..9ff9fe7d8f9ba 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java @@ -47,9 +47,9 @@ * * @opensearch.internal */ -public class GeoHashGridAggregator extends GeoGridAggregator { +class GeoHashGridAggregator extends GeoGridAggregator { - public GeoHashGridAggregator( + GeoHashGridAggregator( String name, AggregatorFactories factories, ValuesSource.Numeric valuesSource, @@ -64,16 +64,17 @@ public GeoHashGridAggregator( } @Override - InternalGeoHashGrid buildAggregation(String name, int requiredSize, List buckets, Map 
metadata) { - return new InternalGeoHashGrid(name, requiredSize, buckets, metadata); + protected GeoHashGrid buildAggregation(String name, int requiredSize, List buckets, Map metadata) { + return new GeoHashGrid(name, requiredSize, buckets, metadata); } @Override - public InternalGeoHashGrid buildEmptyAggregation() { - return new InternalGeoHashGrid(name, requiredSize, Collections.emptyList(), metadata()); + public GeoHashGrid buildEmptyAggregation() { + return new GeoHashGrid(name, requiredSize, Collections.emptyList(), metadata()); } - InternalGeoGridBucket newEmptyBucket() { + @Override + protected BaseGeoGridBucket newEmptyBucket() { return new InternalGeoHashGridBucket(0, 0, null); } } diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java index 1914c07e831f7..898a7d82a4dec 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java @@ -58,7 +58,7 @@ * * @opensearch.internal */ -public class GeoHashGridAggregatorFactory extends ValuesSourceAggregatorFactory { +class GeoHashGridAggregatorFactory extends ValuesSourceAggregatorFactory { private final int precision; private final int requiredSize; @@ -86,7 +86,7 @@ public class GeoHashGridAggregatorFactory extends ValuesSourceAggregatorFactory @Override protected Aggregator createUnmapped(SearchContext searchContext, Aggregator parent, Map metadata) throws IOException { - final InternalAggregation aggregation = new InternalGeoHashGrid(name, requiredSize, emptyList(), metadata); + final InternalAggregation aggregation = new GeoHashGrid(name, requiredSize, emptyList(), metadata); return new NonCollectingAggregator(name, searchContext, parent, factories, metadata) { @Override public InternalAggregation buildEmptyAggregation() { diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoTileGrid.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGrid.java similarity index 70% rename from modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoTileGrid.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGrid.java index fa544b5893f0c..91c523c80855e 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoTileGrid.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGrid.java @@ -43,40 +43,40 @@ * All geohashes in a grid are of the same precision and held internally as a single long * for efficiency's sake. 
* - * @opensearch.internal + * @opensearch.api */ -public class InternalGeoTileGrid extends InternalGeoGrid { +public class GeoTileGrid extends BaseGeoGrid { - InternalGeoTileGrid(String name, int requiredSize, List buckets, Map metadata) { + GeoTileGrid(String name, int requiredSize, List buckets, Map metadata) { super(name, requiredSize, buckets, metadata); } - public InternalGeoTileGrid(StreamInput in) throws IOException { + public GeoTileGrid(StreamInput in) throws IOException { super(in); } @Override - public InternalGeoGrid create(List buckets) { - return new InternalGeoTileGrid(name, requiredSize, buckets, metadata); + public BaseGeoGrid create(List buckets) { + return new GeoTileGrid(name, requiredSize, buckets, metadata); } @Override - public InternalGeoGridBucket createBucket(InternalAggregations aggregations, InternalGeoGridBucket prototype) { + public BaseGeoGridBucket createBucket(InternalAggregations aggregations, BaseGeoGridBucket prototype) { return new InternalGeoTileGridBucket(prototype.hashAsLong, prototype.docCount, aggregations); } @Override - InternalGeoGrid create(String name, int requiredSize, List buckets, Map metadata) { - return new InternalGeoTileGrid(name, requiredSize, buckets, metadata); + protected BaseGeoGrid create(String name, int requiredSize, List buckets, Map metadata) { + return new GeoTileGrid(name, requiredSize, buckets, metadata); } @Override - InternalGeoTileGridBucket createBucket(long hashAsLong, long docCount, InternalAggregations aggregations) { + protected InternalGeoTileGridBucket createBucket(long hashAsLong, long docCount, InternalAggregations aggregations) { return new InternalGeoTileGridBucket(hashAsLong, docCount, aggregations); } @Override - Reader getBucketReader() { + protected Reader getBucketReader() { return InternalGeoTileGridBucket::new; } diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java index 76ad515f34fe5..0f1f87bdc57fa 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java @@ -51,7 +51,7 @@ /** * Aggregation Builder for geotile_grid agg * - * @opensearch.internal + * @opensearch.api */ public class GeoTileGridAggregationBuilder extends GeoGridAggregationBuilder { public static final String NAME = "geotile_grid"; diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregator.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregator.java index a205a9afde41e..8faed4e9cd2d4 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregator.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregator.java @@ -48,9 +48,9 @@ * * @opensearch.internal */ -public class GeoTileGridAggregator extends GeoGridAggregator { +class GeoTileGridAggregator extends GeoGridAggregator { - public GeoTileGridAggregator( + GeoTileGridAggregator( String name, AggregatorFactories factories, ValuesSource.Numeric valuesSource, @@ -65,16 +65,17 @@ public GeoTileGridAggregator( } @Override - InternalGeoTileGrid buildAggregation(String name, int requiredSize, List buckets, Map 
metadata) { - return new InternalGeoTileGrid(name, requiredSize, buckets, metadata); + protected GeoTileGrid buildAggregation(String name, int requiredSize, List buckets, Map metadata) { + return new GeoTileGrid(name, requiredSize, buckets, metadata); } @Override - public InternalGeoTileGrid buildEmptyAggregation() { - return new InternalGeoTileGrid(name, requiredSize, Collections.emptyList(), metadata()); + public GeoTileGrid buildEmptyAggregation() { + return new GeoTileGrid(name, requiredSize, Collections.emptyList(), metadata()); } - InternalGeoGridBucket newEmptyBucket() { + @Override + protected BaseGeoGridBucket newEmptyBucket() { return new InternalGeoTileGridBucket(0, 0, null); } } diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java index b830988a3d410..6eb73727ad6c8 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java @@ -57,7 +57,7 @@ * * @opensearch.internal */ -public class GeoTileGridAggregatorFactory extends ValuesSourceAggregatorFactory { +class GeoTileGridAggregatorFactory extends ValuesSourceAggregatorFactory { private final int precision; private final int requiredSize; @@ -85,7 +85,7 @@ public class GeoTileGridAggregatorFactory extends ValuesSourceAggregatorFactory @Override protected Aggregator createUnmapped(SearchContext searchContext, Aggregator parent, Map metadata) throws IOException { - final InternalAggregation aggregation = new InternalGeoTileGrid(name, requiredSize, Collections.emptyList(), metadata); + final InternalAggregation aggregation = new GeoTileGrid(name, requiredSize, Collections.emptyList(), metadata); return new NonCollectingAggregator(name, searchContext, parent, factories, metadata) { @Override public InternalAggregation buildEmptyAggregation() { diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoHashGridBucket.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoHashGridBucket.java index 659909e868651..6e7ed8a679681 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoHashGridBucket.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoHashGridBucket.java @@ -43,7 +43,7 @@ * * @opensearch.internal */ -public class InternalGeoHashGridBucket extends InternalGeoGridBucket { +class InternalGeoHashGridBucket extends BaseGeoGridBucket { InternalGeoHashGridBucket(long hashAsLong, long docCount, InternalAggregations aggregations) { super(hashAsLong, docCount, aggregations); } @@ -51,7 +51,7 @@ public class InternalGeoHashGridBucket extends InternalGeoGridBucket { +class InternalGeoTileGridBucket extends BaseGeoGridBucket { InternalGeoTileGridBucket(long hashAsLong, long docCount, InternalAggregations aggregations) { super(hashAsLong, docCount, aggregations); } @@ -52,7 +52,7 @@ public class InternalGeoTileGridBucket extends InternalGeoGridBucket implements GeoGrid { @@ -63,7 +63,7 @@ public static ObjectParser createParser( return parser; } - protected void setName(String name) { + public void setName(String name) { super.setName(name); } } diff --git 
a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoGridBucket.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoGridBucket.java index 80124cda50b19..cbe3a2ee89dd7 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoGridBucket.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoGridBucket.java @@ -40,7 +40,7 @@ /** * A single geo grid bucket result parsed between nodes * - * @opensearch.internal + * @opensearch.api */ public abstract class ParsedGeoGridBucket extends ParsedMultiBucketAggregation.ParsedBucket implements GeoGrid.Bucket { diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGrid.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGrid.java index 109524e755c4d..343149f8e19ab 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGrid.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGrid.java @@ -42,7 +42,7 @@ * * @opensearch.internal */ -public class ParsedGeoHashGrid extends ParsedGeoGrid { +class ParsedGeoHashGrid extends ParsedGeoGrid { private static final ObjectParser PARSER = createParser( ParsedGeoHashGrid::new, diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGridBucket.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGridBucket.java index 4e6e454b08324..6704273f45580 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGridBucket.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGridBucket.java @@ -41,7 +41,7 @@ * * @opensearch.internal */ -public class ParsedGeoHashGridBucket extends ParsedGeoGridBucket { +class ParsedGeoHashGridBucket extends ParsedGeoGridBucket { @Override public GeoPoint getKey() { diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoTileGrid.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoTileGrid.java index 8734c96a15578..cb64a0e153e87 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoTileGrid.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoTileGrid.java @@ -42,7 +42,7 @@ * * @opensearch.internal */ -public class ParsedGeoTileGrid extends ParsedGeoGrid { +class ParsedGeoTileGrid extends ParsedGeoGrid { private static final ObjectParser PARSER = createParser( ParsedGeoTileGrid::new, diff --git a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridAggregationCompositeAggregatorTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridAggregationCompositeAggregatorTests.java index 3c7c292f9d193..bc7fde8d66d0a 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridAggregationCompositeAggregatorTests.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridAggregationCompositeAggregatorTests.java @@ -17,7 +17,6 @@ import org.opensearch.common.geo.GeoPoint; import 
org.opensearch.geo.GeoModulePlugin; import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; -import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregator; import org.opensearch.index.mapper.GeoPointFieldMapper; import org.opensearch.plugins.SearchPlugin; import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder; @@ -31,7 +30,7 @@ import java.util.Map; /** - * Testing the {@link GeoTileGridAggregator} as part of CompositeAggregation. + * Testing the geo tile grid as part of CompositeAggregation. */ public class GeoTileGridAggregationCompositeAggregatorTests extends BaseCompositeAggregatorTestCase { diff --git a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java index d6153637f656d..5ec10a7f4f7cf 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java @@ -73,7 +73,7 @@ import static org.hamcrest.Matchers.equalTo; -public abstract class GeoGridAggregatorTestCase extends AggregatorTestCase { +public abstract class GeoGridAggregatorTestCase extends AggregatorTestCase { private static final String FIELD_NAME = "location"; protected static final double GEOHASH_TOLERANCE = 1E-5D; @@ -201,9 +201,9 @@ public void testAsSubAgg() throws IOException { Consumer verify = (terms) -> { Map> actual = new TreeMap<>(); for (StringTerms.Bucket tb : terms.getBuckets()) { - InternalGeoGrid gg = tb.getAggregations().get("gg"); + BaseGeoGrid gg = tb.getAggregations().get("gg"); Map sub = new TreeMap<>(); - for (InternalGeoGridBucket ggb : gg.getBuckets()) { + for (BaseGeoGridBucket ggb : gg.getBuckets()) { sub.put(ggb.getKeyAsString(), ggb.getDocCount()); } actual.put(tb.getKeyAsString(), sub); @@ -299,7 +299,7 @@ private void testCase( String field, int precision, GeoBoundingBox geoBoundingBox, - Consumer> verify, + Consumer> verify, CheckedConsumer buildIndex ) throws IOException { testCase(query, precision, geoBoundingBox, verify, buildIndex, createBuilder("_name").field(field)); @@ -309,7 +309,7 @@ private void testCase( Query query, int precision, GeoBoundingBox geoBoundingBox, - Consumer> verify, + Consumer> verify, CheckedConsumer buildIndex, GeoGridAggregationBuilder aggregationBuilder ) throws IOException { @@ -333,7 +333,7 @@ private void testCase( aggregator.preCollection(); indexSearcher.search(query, aggregator); aggregator.postCollection(); - verify.accept((InternalGeoGrid) aggregator.buildTopLevel()); + verify.accept((BaseGeoGrid) aggregator.buildTopLevel()); indexReader.close(); directory.close(); diff --git a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridTestCase.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridTestCase.java index 432736a2b43fe..2a655239997b6 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridTestCase.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridTestCase.java @@ -50,16 +50,16 @@ import static org.hamcrest.Matchers.equalTo; -public abstract class GeoGridTestCase> extends - InternalMultiBucketAggregationTestCase { +public abstract class GeoGridTestCase> extends 
InternalMultiBucketAggregationTestCase< + T> { /** - * Instantiate a {@link InternalGeoGrid}-derived class using the same parameters as constructor. + * Instantiate a {@link BaseGeoGrid}-derived class using the same parameters as constructor. */ - protected abstract T createInternalGeoGrid(String name, int size, List buckets, Map metadata); + protected abstract T createInternalGeoGrid(String name, int size, List buckets, Map metadata); /** - * Instantiate a {@link InternalGeoGridBucket}-derived class using the same parameters as constructor. + * Instantiate a {@link BaseGeoGridBucket}-derived class using the same parameters as constructor. */ protected abstract B createInternalGeoGridBucket(Long key, long docCount, InternalAggregations aggregations); @@ -117,7 +117,7 @@ protected List getNamedXContents() { protected T createTestInstance(String name, Map metadata, InternalAggregations aggregations) { final int precision = randomPrecision(); int size = randomNumberOfBuckets(); - List buckets = new ArrayList<>(size); + List buckets = new ArrayList<>(size); for (int i = 0; i < size; i++) { double latitude = randomDoubleBetween(-90.0, 90.0, false); double longitude = randomDoubleBetween(-180.0, 180.0, false); @@ -176,7 +176,7 @@ protected Class implementationClass() { protected T mutateInstance(T instance) { String name = instance.getName(); int size = instance.getRequiredSize(); - List buckets = instance.getBuckets(); + List buckets = instance.getBuckets(); Map metadata = instance.getMetadata(); switch (between(0, 3)) { case 0: @@ -206,7 +206,7 @@ protected T mutateInstance(T instance) { } public void testCreateFromBuckets() { - InternalGeoGrid original = createTestInstance(); + BaseGeoGrid original = createTestInstance(); assertThat(original, equalTo(original.create(original.buckets))); } } diff --git a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridTests.java index c84c6ef5ec076..ada943b6dd369 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridTests.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridTests.java @@ -37,16 +37,11 @@ import java.util.List; import java.util.Map; -public class GeoHashGridTests extends GeoGridTestCase { +public class GeoHashGridTests extends GeoGridTestCase { @Override - protected InternalGeoHashGrid createInternalGeoGrid( - String name, - int size, - List buckets, - Map metadata - ) { - return new InternalGeoHashGrid(name, size, buckets, metadata); + protected GeoHashGrid createInternalGeoGrid(String name, int size, List buckets, Map metadata) { + return new GeoHashGrid(name, size, buckets, metadata); } @Override diff --git a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridTests.java index ead67e0455d94..b59e9ec2cff53 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridTests.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridTests.java @@ -37,16 +37,11 @@ import java.util.List; import java.util.Map; -public class GeoTileGridTests extends GeoGridTestCase { +public class GeoTileGridTests extends GeoGridTestCase { @Override - protected InternalGeoTileGrid 
createInternalGeoGrid( - String name, - int size, - List buckets, - Map metadata - ) { - return new InternalGeoTileGrid(name, size, buckets, metadata); + protected GeoTileGrid createInternalGeoGrid(String name, int size, List buckets, Map metadata) { + return new GeoTileGrid(name, size, buckets, metadata); } @Override diff --git a/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationBuilders.java b/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationBuilders.java index c0d7e51047c6b..706c73e7416f5 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationBuilders.java +++ b/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationBuilders.java @@ -10,8 +10,8 @@ import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder; import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; -import org.opensearch.geo.search.aggregations.bucket.geogrid.InternalGeoHashGrid; -import org.opensearch.geo.search.aggregations.bucket.geogrid.InternalGeoTileGrid; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoHashGrid; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGrid; import org.opensearch.geo.search.aggregations.metrics.GeoBounds; import org.opensearch.geo.search.aggregations.metrics.GeoBoundsAggregationBuilder; @@ -24,14 +24,14 @@ public static GeoBoundsAggregationBuilder geoBounds(String name) { } /** - * Create a new {@link InternalGeoHashGrid} aggregation with the given name. + * Create a new {@link GeoHashGrid} aggregation with the given name. */ public static GeoHashGridAggregationBuilder geohashGrid(String name) { return new GeoHashGridAggregationBuilder(name); } /** - * Create a new {@link InternalGeoTileGrid} aggregation with the given name. + * Create a new {@link GeoTileGrid} aggregation with the given name. 
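A minimal sketch of the two helpers in use; the field name and precisions are hypothetical:

    import org.opensearch.search.builder.SearchSourceBuilder;

    static SearchSourceBuilder gridSearch() {
        return new SearchSourceBuilder()
            .aggregation(AggregationBuilders.geohashGrid("hash_grid").field("location").precision(4))
            .aggregation(AggregationBuilders.geotileGrid("tile_grid").field("location").precision(6));
    }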
*/ public static GeoTileGridAggregationBuilder geotileGrid(String name) { return new GeoTileGridAggregationBuilder(name); diff --git a/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationInspectionHelper.java b/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationInspectionHelper.java index 3473cf2d94b76..89debdf5abd95 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationInspectionHelper.java +++ b/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationInspectionHelper.java @@ -8,7 +8,7 @@ package org.opensearch.geo.tests.common; -import org.opensearch.geo.search.aggregations.bucket.geogrid.InternalGeoGrid; +import org.opensearch.geo.search.aggregations.bucket.geogrid.BaseGeoGrid; import org.opensearch.geo.search.aggregations.metrics.InternalGeoBounds; public class AggregationInspectionHelper { @@ -17,7 +17,7 @@ public static boolean hasValue(InternalGeoBounds agg) { return (agg.topLeft() == null && agg.bottomRight() == null) == false; } - public static boolean hasValue(InternalGeoGrid agg) { + public static boolean hasValue(BaseGeoGrid agg) { return agg.getBuckets().stream().anyMatch(bucket -> bucket.getDocCount() > 0); } } diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/GrokProcessorGetAction.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/GrokProcessorGetAction.java index bb587350f4256..518323e0901cf 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/GrokProcessorGetAction.java +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/GrokProcessorGetAction.java @@ -31,7 +31,6 @@ package org.opensearch.ingest.common; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequest; import org.opensearch.action.ActionRequestValidationException; @@ -79,7 +78,7 @@ public Request(boolean sorted) { Request(StreamInput in) throws IOException { super(in); - this.sorted = in.getVersion().onOrAfter(LegacyESVersion.V_7_10_0) ? 
in.readBoolean() : false; + this.sorted = in.readBoolean(); } @Override @@ -90,9 +89,7 @@ public ActionRequestValidationException validate() { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_10_0)) { - out.writeBoolean(sorted); - } + out.writeBoolean(sorted); } public boolean sorted() { diff --git a/modules/ingest-geoip/licenses/jackson-databind-2.13.4.2.jar.sha1 b/modules/ingest-geoip/licenses/jackson-databind-2.13.4.2.jar.sha1 new file mode 100644 index 0000000000000..a7782e8aac18d --- /dev/null +++ b/modules/ingest-geoip/licenses/jackson-databind-2.13.4.2.jar.sha1 @@ -0,0 +1 @@ +325c06bdfeb628cfb80ebaaf1a26cc1eb558a585 \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/jackson-databind-2.13.4.jar.sha1 b/modules/ingest-geoip/licenses/jackson-databind-2.13.4.jar.sha1 deleted file mode 100644 index fcc6491d1f78d..0000000000000 --- a/modules/ingest-geoip/licenses/jackson-databind-2.13.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -98b0edfa8e4084078f10b7b356c300ded4a71491 \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-9.3.jar.sha1 b/modules/lang-expression/licenses/asm-9.3.jar.sha1 deleted file mode 100644 index 71d3966a6f6f9..0000000000000 --- a/modules/lang-expression/licenses/asm-9.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8e6300ef51c1d801a7ed62d07cd221aca3a90640 \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-9.4.jar.sha1 b/modules/lang-expression/licenses/asm-9.4.jar.sha1 new file mode 100644 index 0000000000000..75f2b0fe9a112 --- /dev/null +++ b/modules/lang-expression/licenses/asm-9.4.jar.sha1 @@ -0,0 +1 @@ +b4e0e2d2e023aa317b7cfcfc916377ea348e07d1 \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-commons-9.3.jar.sha1 b/modules/lang-expression/licenses/asm-commons-9.3.jar.sha1 deleted file mode 100644 index fd7cd4943a57c..0000000000000 --- a/modules/lang-expression/licenses/asm-commons-9.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1f2a432d1212f5c352ae607d7b61dcae20c20af5 \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-commons-9.4.jar.sha1 b/modules/lang-expression/licenses/asm-commons-9.4.jar.sha1 new file mode 100644 index 0000000000000..e0e2a2f4e63e9 --- /dev/null +++ b/modules/lang-expression/licenses/asm-commons-9.4.jar.sha1 @@ -0,0 +1 @@ +8fc2810ddbcbbec0a8bbccb3f8eda58321839912 \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-tree-9.3.jar.sha1 b/modules/lang-expression/licenses/asm-tree-9.3.jar.sha1 deleted file mode 100644 index 238f0006424d3..0000000000000 --- a/modules/lang-expression/licenses/asm-tree-9.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -78d2ecd61318b5a58cd04fb237636c0e86b77d97 \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-tree-9.4.jar.sha1 b/modules/lang-expression/licenses/asm-tree-9.4.jar.sha1 new file mode 100644 index 0000000000000..50ce6d740aab7 --- /dev/null +++ b/modules/lang-expression/licenses/asm-tree-9.4.jar.sha1 @@ -0,0 +1 @@ +a99175a17d7fdc18cbcbd0e8ea6a5d276844190a \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-9.4.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.4.0.jar.sha1 deleted file mode 100644 index 2b647c1270e14..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-9.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -19749e264805171009836cbedecc5494b13cd920 \ No newline at end of file diff --git 
a/modules/lang-expression/licenses/lucene-expressions-9.5.0-snapshot-a4ef70f.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.5.0-snapshot-a4ef70f.jar.sha1 new file mode 100644 index 0000000000000..0e1f3e37f508a --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-9.5.0-snapshot-a4ef70f.jar.sha1 @@ -0,0 +1 @@ +c92a0928724b04224157ce2d3e105953f57f94db \ No newline at end of file diff --git a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MultiSearchTemplateResponse.java b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MultiSearchTemplateResponse.java index 1802d03e20942..7c2c403fdd487 100644 --- a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MultiSearchTemplateResponse.java +++ b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MultiSearchTemplateResponse.java @@ -32,7 +32,6 @@ package org.opensearch.script.mustache; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionResponse; import org.opensearch.action.search.MultiSearchResponse; @@ -125,11 +124,7 @@ public String toString() { MultiSearchTemplateResponse(StreamInput in) throws IOException { super(in); items = in.readArray(Item::new, Item[]::new); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - tookInMillis = in.readVLong(); - } else { - tookInMillis = -1L; - } + tookInMillis = in.readVLong(); } MultiSearchTemplateResponse(Item[] items, long tookInMillis) { @@ -159,9 +154,7 @@ public TimeValue getTook() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeArray(items); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeVLong(tookInMillis); - } + out.writeVLong(tookInMillis); } @Override diff --git a/modules/lang-painless/licenses/asm-9.3.jar.sha1 b/modules/lang-painless/licenses/asm-9.3.jar.sha1 deleted file mode 100644 index 71d3966a6f6f9..0000000000000 --- a/modules/lang-painless/licenses/asm-9.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8e6300ef51c1d801a7ed62d07cd221aca3a90640 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-9.4.jar.sha1 b/modules/lang-painless/licenses/asm-9.4.jar.sha1 new file mode 100644 index 0000000000000..75f2b0fe9a112 --- /dev/null +++ b/modules/lang-painless/licenses/asm-9.4.jar.sha1 @@ -0,0 +1 @@ +b4e0e2d2e023aa317b7cfcfc916377ea348e07d1 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-analysis-9.3.jar.sha1 b/modules/lang-painless/licenses/asm-analysis-9.3.jar.sha1 deleted file mode 100644 index f5a04d0196823..0000000000000 --- a/modules/lang-painless/licenses/asm-analysis-9.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4b071f211b37c38e0e9f5998550197c8593f6ad8 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-analysis-9.4.jar.sha1 b/modules/lang-painless/licenses/asm-analysis-9.4.jar.sha1 new file mode 100644 index 0000000000000..850a070775e4d --- /dev/null +++ b/modules/lang-painless/licenses/asm-analysis-9.4.jar.sha1 @@ -0,0 +1 @@ +0a5fec9dfc039448d4fd098fbaffcaf55373b223 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-commons-9.3.jar.sha1 b/modules/lang-painless/licenses/asm-commons-9.3.jar.sha1 deleted file mode 100644 index fd7cd4943a57c..0000000000000 --- a/modules/lang-painless/licenses/asm-commons-9.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1f2a432d1212f5c352ae607d7b61dcae20c20af5 \ No newline at end of file diff --git 
a/modules/lang-painless/licenses/asm-commons-9.4.jar.sha1 b/modules/lang-painless/licenses/asm-commons-9.4.jar.sha1 new file mode 100644 index 0000000000000..e0e2a2f4e63e9 --- /dev/null +++ b/modules/lang-painless/licenses/asm-commons-9.4.jar.sha1 @@ -0,0 +1 @@ +8fc2810ddbcbbec0a8bbccb3f8eda58321839912 \ No newline at end of file
diff --git a/modules/lang-painless/licenses/asm-tree-9.3.jar.sha1 b/modules/lang-painless/licenses/asm-tree-9.3.jar.sha1 deleted file mode 100644 index 238f0006424d3..0000000000000 --- a/modules/lang-painless/licenses/asm-tree-9.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -78d2ecd61318b5a58cd04fb237636c0e86b77d97 \ No newline at end of file
diff --git a/modules/lang-painless/licenses/asm-tree-9.4.jar.sha1 b/modules/lang-painless/licenses/asm-tree-9.4.jar.sha1 new file mode 100644 index 0000000000000..50ce6d740aab7 --- /dev/null +++ b/modules/lang-painless/licenses/asm-tree-9.4.jar.sha1 @@ -0,0 +1 @@ +a99175a17d7fdc18cbcbd0e8ea6a5d276844190a \ No newline at end of file
diff --git a/modules/lang-painless/licenses/asm-util-9.3.jar.sha1 b/modules/lang-painless/licenses/asm-util-9.3.jar.sha1 deleted file mode 100644 index 8859c317794ba..0000000000000 --- a/modules/lang-painless/licenses/asm-util-9.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9595bc05510d0bd4b610188b77333fe4851a1975 \ No newline at end of file
diff --git a/modules/lang-painless/licenses/asm-util-9.4.jar.sha1 b/modules/lang-painless/licenses/asm-util-9.4.jar.sha1 new file mode 100644 index 0000000000000..8c5854f41bcda --- /dev/null +++ b/modules/lang-painless/licenses/asm-util-9.4.jar.sha1 @@ -0,0 +1 @@ +ab1e0a84b72561dbaf1ee260321e72148ebf4b19 \ No newline at end of file
diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/Compiler.java b/modules/lang-painless/src/main/java/org/opensearch/painless/Compiler.java index 7b9efa4deb207..35c676653fdc3 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/Compiler.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/Compiler.java @@ -212,8 +212,9 @@ private static void addFactoryMethod(Map<String, Class<?>> additionalClasses, Cl } additionalClasses.put(factoryClass.getName(), factoryClass); - for (int i = 0; i < factoryMethod.getParameterTypes().length; ++i) { - Class<?> parameterClazz = factoryMethod.getParameterTypes()[i]; + final Class<?>[] parameterTypes = factoryMethod.getParameterTypes(); + for (int i = 0; i < parameterTypes.length; ++i) { + Class<?> parameterClazz = parameterTypes[i]; additionalClasses.put(parameterClazz.getName(), parameterClazz); } }
diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessScriptEngine.java b/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessScriptEngine.java index ca6e68706709a..e9edfb73c740c 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessScriptEngine.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessScriptEngine.java @@ -195,11 +195,12 @@ private Type generateStatefulFactory(Loader loader, ScriptContext<?> context } } - for (int count = 0; count < newFactory.getParameterTypes().length; ++count) { + final Class<?>[] parameterTypes = newFactory.getParameterTypes(); + for (int count = 0; count < parameterTypes.length; ++count) { writer.visitField( Opcodes.ACC_PRIVATE | Opcodes.ACC_FINAL, "$arg" + count, - Type.getType(newFactory.getParameterTypes()[count]).getDescriptor(), + Type.getType(parameterTypes[count]).getDescriptor(), null, null ).visitEnd(); @@ -211,7 +212,7 @@ private Type generateStatefulFactory(Loader loader, ScriptContext<?> context ); org.objectweb.asm.commons.Method init = new org.objectweb.asm.commons.Method( "<init>", - MethodType.methodType(void.class, newFactory.getParameterTypes()).toMethodDescriptorString() + MethodType.methodType(void.class, parameterTypes).toMethodDescriptorString() ); GeneratorAdapter constructor = new GeneratorAdapter( @@ -223,10 +224,10 @@ private Type generateStatefulFactory(Loader loader, ScriptContext<?> context constructor.loadThis(); constructor.invokeConstructor(OBJECT_TYPE, base); - for (int count = 0; count < newFactory.getParameterTypes().length; ++count) { + for (int count = 0; count < parameterTypes.length; ++count) { constructor.loadThis(); constructor.loadArg(count); - constructor.putField(Type.getType("L" + className + ";"), "$arg" + count, Type.getType(newFactory.getParameterTypes()[count])); + constructor.putField(Type.getType("L" + className + ";"), "$arg" + count, Type.getType(parameterTypes[count])); } constructor.returnValue(); @@ -247,7 +248,7 @@ private Type generateStatefulFactory(Loader loader, ScriptContext<?> context MethodType.methodType(newInstance.getReturnType(), newInstance.getParameterTypes()).toMethodDescriptorString() ); - List<Class<?>> parameters = new ArrayList<>(Arrays.asList(newFactory.getParameterTypes())); + List<Class<?>> parameters = new ArrayList<>(Arrays.asList(parameterTypes)); parameters.addAll(Arrays.asList(newInstance.getParameterTypes())); org.objectweb.asm.commons.Method constru = new org.objectweb.asm.commons.Method( @@ -264,9 +265,9 @@ private Type generateStatefulFactory(Loader loader, ScriptContext<?> context adapter.newInstance(WriterConstants.CLASS_TYPE); adapter.dup(); - for (int count = 0; count < newFactory.getParameterTypes().length; ++count) { + for (int count = 0; count < parameterTypes.length; ++count) { adapter.loadThis(); - adapter.getField(Type.getType("L" + className + ";"), "$arg" + count, Type.getType(newFactory.getParameterTypes()[count])); + adapter.getField(Type.getType("L" + className + ";"), "$arg" + count, Type.getType(parameterTypes[count])); } adapter.loadArgs(); @@ -334,13 +335,14 @@ private <T> T generateFactory(Loader loader, ScriptContext<T> context, Type clas } } + final Class<?>[] parameterTypes = reflect.getParameterTypes(); org.objectweb.asm.commons.Method instance = new org.objectweb.asm.commons.Method( reflect.getName(), - MethodType.methodType(reflect.getReturnType(), reflect.getParameterTypes()).toMethodDescriptorString() + MethodType.methodType(reflect.getReturnType(), parameterTypes).toMethodDescriptorString() ); org.objectweb.asm.commons.Method constru = new org.objectweb.asm.commons.Method( "<init>", - MethodType.methodType(void.class, reflect.getParameterTypes()).toMethodDescriptorString() + MethodType.methodType(void.class, parameterTypes).toMethodDescriptorString() ); GeneratorAdapter adapter = new GeneratorAdapter( @@ -421,9 +423,7 @@ private <T> T generateFactory(Loader loader, ScriptContext<T> context, Type clas private void writeNeedsMethods(Class<?> clazz, ClassWriter writer, Set<String> extractedVariables) { for (Method method : clazz.getMethods()) { - if (method.getName().startsWith("needs") - && method.getReturnType().equals(boolean.class) - && method.getParameterTypes().length == 0) { + if (method.getName().startsWith("needs") && method.getReturnType().equals(boolean.class) && method.getParameterCount() == 0) { String name = method.getName(); name = name.substring(5); name = Character.toLowerCase(name.charAt(0)) + name.substring(1);
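(Editorial aside, not part of the change set: the hoisting in the two painless hunks above pays off because java.lang.reflect.Method.getParameterTypes() returns a fresh defensive copy of the parameter array on every call, while Method.getParameterCount() reads the arity without allocating at all. A minimal illustration of the same pattern, assuming nothing beyond the JDK:

import java.lang.reflect.Method;

class ParameterTypesDemo {
    static void describe(Method method) {
        // getParameterTypes() clones its backing array on each call,
        // so hoist the result into a local instead of calling it per iteration.
        final Class<?>[] parameterTypes = method.getParameterTypes();
        for (int i = 0; i < parameterTypes.length; ++i) {
            System.out.println(i + " -> " + parameterTypes[i].getName());
        }
        // When only the arity matters, getParameterCount() avoids the copy entirely.
        System.out.println("arity = " + method.getParameterCount());
    }
}

diff --git 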
a/modules/lang-painless/src/main/java/org/opensearch/painless/ScriptClassInfo.java b/modules/lang-painless/src/main/java/org/opensearch/painless/ScriptClassInfo.java index e80f92442680a..26dcb4adabea3 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/ScriptClassInfo.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/ScriptClassInfo.java @@ -88,7 +88,7 @@ public ScriptClassInfo(PainlessLookup painlessLookup, Class<?> baseClass) { + "] has more than one." ); } - } else if (m.getName().startsWith("needs") && m.getReturnType() == boolean.class && m.getParameterTypes().length == 0) { + } else if (m.getName().startsWith("needs") && m.getReturnType() == boolean.class && m.getParameterCount() == 0) { needsMethods.add(new org.objectweb.asm.commons.Method(m.getName(), NEEDS_PARAMETER_METHOD_TYPE.toMethodDescriptorString())); } else if (m.getName().startsWith("get") && m.getName().equals("getClass") == false @@ -124,7 +124,7 @@ public ScriptClassInfo(PainlessLookup painlessLookup, Class<?> baseClass) { FunctionTable.LocalFunction defConverter = null; for (java.lang.reflect.Method m : baseClass.getMethods()) { if (m.getName().startsWith("convertFrom") - && m.getParameterTypes().length == 1 + && m.getParameterCount() == 1 && m.getReturnType() == returnType && Modifier.isStatic(m.getModifiers())) {
diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/Walker.java b/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/Walker.java index 719a69a9977e7..c03b4199ce8d9 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/Walker.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/Walker.java @@ -248,10 +248,6 @@ private Location location(ParserRuleContext ctx) { return new Location(sourceName, ctx.getStart().getStartIndex()); } - private Location location(TerminalNode tn) { - return new Location(sourceName, tn.getSymbol().getStartIndex()); - } - @Override public ANode visitSource(SourceContext ctx) { List<SFunction> functions = new ArrayList<>();
diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessLookupBuilder.java b/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessLookupBuilder.java index e43d1beb9b25b..e79eda975f417 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessLookupBuilder.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessLookupBuilder.java @@ -2168,9 +2168,10 @@ private void generateBridgeMethod(PainlessClassBuilder painlessClassBuilder, Pai bridgeMethodWriter.loadArg(0); } - for (int typeParameterCount = 0; typeParameterCount < javaMethod.getParameterTypes().length; ++typeParameterCount) { + final Class<?>[] typeParameters = javaMethod.getParameterTypes(); + for (int typeParameterCount = 0; typeParameterCount < typeParameters.length; ++typeParameterCount) { bridgeMethodWriter.loadArg(typeParameterCount + bridgeTypeParameterOffset); - Class<?> typeParameter = javaMethod.getParameterTypes()[typeParameterCount]; + Class<?> typeParameter = typeParameters[typeParameterCount]; if (typeParameter == Byte.class) bridgeMethodWriter.invokeStatic(DEF_UTIL_TYPE, DEF_TO_B_BYTE_IMPLICIT); else if (typeParameter == Short.class) bridgeMethodWriter.invokeStatic(DEF_UTIL_TYPE, DEF_TO_B_SHORT_IMPLICIT);
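(Aside: the getParameterCount() rewrites above sit inside a small reflective convention; a painless script base class exposes zero-argument boolean methods named needsX(), and the compiler derives the variable name from the method name. A self-contained sketch of that discovery step, reconstructed from the hunks above rather than copied from the PR:

import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.List;

class NeedsMethodScan {
    // needsScore() -> "score", needsCtx() -> "ctx", and so on.
    static List<String> neededVariables(Class<?> baseClass) {
        List<String> needed = new ArrayList<>();
        for (Method m : baseClass.getMethods()) {
            if (m.getName().startsWith("needs") && m.getReturnType() == boolean.class && m.getParameterCount() == 0) {
                String name = m.getName().substring(5);
                needed.add(Character.toLowerCase(name.charAt(0)) + name.substring(1));
            }
        }
        return needed;
    }
}

diff --git a/modules/opensearch-dashboards/src/javaRestTest/java/org/opensearch/dashboards/OpenSearchDashboardsSystemIndexIT.java 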
b/modules/opensearch-dashboards/src/javaRestTest/java/org/opensearch/dashboards/OpenSearchDashboardsSystemIndexIT.java index 2584a9b41f14d..10ee9393b343f 100644 --- a/modules/opensearch-dashboards/src/javaRestTest/java/org/opensearch/dashboards/OpenSearchDashboardsSystemIndexIT.java +++ b/modules/opensearch-dashboards/src/javaRestTest/java/org/opensearch/dashboards/OpenSearchDashboardsSystemIndexIT.java @@ -34,7 +34,10 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; -import org.apache.http.util.EntityUtils; + +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; +import org.opensearch.OpenSearchParseException; import org.opensearch.client.Request; import org.opensearch.client.Response; import org.opensearch.common.xcontent.XContentHelper; @@ -73,7 +76,7 @@ public void testCreateIndex() throws IOException { assertThat(response.getStatusLine().getStatusCode(), is(200)); } - public void testAliases() throws IOException { + public void testAliases() throws IOException, ParseException { assumeFalse("In this test, .opensearch_dashboards is the alias name", ".opensearch_dashboards".equals(indexName)); Request request = new Request("PUT", "/_opensearch_dashboards/" + indexName); Response response = client().performRequest(request); @@ -96,7 +99,7 @@ public void testBulkToOpenSearchDashboardsIndex() throws IOException { assertThat(response.getStatusLine().getStatusCode(), is(200)); } - public void testRefresh() throws IOException { + public void testRefresh() throws IOException, ParseException { Request request = new Request("POST", "/_opensearch_dashboards/_bulk"); request.setJsonEntity("{ \"index\" : { \"_index\" : \"" + indexName + "\", \"_id\" : \"1\" } }\n{ \"foo\" : \"bar\" }\n"); Response response = client().performRequest(request); @@ -114,7 +117,7 @@ public void testRefresh() throws IOException { assertThat(responseBody, containsString("bar")); } - public void testGetFromOpenSearchDashboardsIndex() throws IOException { + public void testGetFromOpenSearchDashboardsIndex() throws IOException, ParseException { Request request = new Request("POST", "/_opensearch_dashboards/_bulk"); request.setJsonEntity("{ \"index\" : { \"_index\" : \"" + indexName + "\", \"_id\" : \"1\" } }\n{ \"foo\" : \"bar\" }\n"); request.addParameter("refresh", "true"); @@ -130,7 +133,7 @@ public void testGetFromOpenSearchDashboardsIndex() throws IOException { assertThat(responseBody, containsString("bar")); } - public void testMultiGetFromOpenSearchDashboardsIndex() throws IOException { + public void testMultiGetFromOpenSearchDashboardsIndex() throws IOException, ParseException { Request request = new Request("POST", "/_opensearch_dashboards/_bulk"); request.setJsonEntity( "{ \"index\" : { \"_index\" : \"" @@ -163,7 +166,7 @@ public void testMultiGetFromOpenSearchDashboardsIndex() throws IOException { assertThat(responseBody, containsString("tag")); } - public void testSearchFromOpenSearchDashboardsIndex() throws IOException { + public void testSearchFromOpenSearchDashboardsIndex() throws IOException, ParseException { Request request = new Request("POST", "/_opensearch_dashboards/_bulk"); request.setJsonEntity( "{ \"index\" : { \"_index\" : \"" @@ -241,7 +244,7 @@ public void testUpdateIndexSettings() throws IOException { assertThat(response.getStatusLine().getStatusCode(), is(200)); } - public void testGetIndex() throws IOException { + public void testGetIndex() throws IOException, 
ParseException { Request request = new Request("PUT", "/_opensearch_dashboards/" + indexName); Response response = client().performRequest(request); assertThat(response.getStatusLine().getStatusCode(), is(200)); @@ -278,7 +281,7 @@ public void testIndexingAndUpdatingDocs() throws IOException { assertThat(response.getStatusLine().getStatusCode(), is(200)); } - public void testScrollingDocs() throws IOException { + public void testScrollingDocs() throws IOException, OpenSearchParseException, ParseException { Request request = new Request("POST", "/_opensearch_dashboards/_bulk"); request.setJsonEntity( "{ \"index\" : { \"_index\" : \"" diff --git a/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RankEvalRequest.java b/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RankEvalRequest.java index 66db397865a0b..d38307fc2194a 100644 --- a/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RankEvalRequest.java +++ b/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RankEvalRequest.java @@ -32,7 +32,6 @@ package org.opensearch.index.rankeval; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionRequest; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.IndicesRequest; @@ -69,9 +68,7 @@ public RankEvalRequest(RankEvalSpec rankingEvaluationSpec, String[] indices) { rankingEvaluationSpec = new RankEvalSpec(in); indices = in.readStringArray(); indicesOptions = IndicesOptions.readIndicesOptions(in); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_6_0)) { - searchType = SearchType.fromId(in.readByte()); - } + searchType = SearchType.fromId(in.readByte()); } RankEvalRequest() {} @@ -150,9 +147,7 @@ public void writeTo(StreamOutput out) throws IOException { rankingEvaluationSpec.writeTo(out); out.writeStringArray(indices); indicesOptions.writeIndicesOptions(out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_6_0)) { - out.writeByte(searchType.id()); - } + out.writeByte(searchType.id()); } @Override diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollAction.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollAction.java index 43adffc6f7671..bb1a9d190313f 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollAction.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollAction.java @@ -33,7 +33,8 @@ package org.opensearch.index.reindex; import java.util.Optional; -import org.apache.http.HttpRequestInterceptor; + +import org.apache.hc.core5.http.HttpRequestInterceptor; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.action.ActionListener; diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexSslConfig.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexSslConfig.java index 34fcd245289be..0e0e387b78e38 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexSslConfig.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexSslConfig.java @@ -32,10 +32,12 @@ package org.opensearch.index.reindex; -import org.apache.http.conn.ssl.DefaultHostnameVerifier; -import org.apache.http.conn.ssl.NoopHostnameVerifier; -import org.apache.http.nio.conn.ssl.SSLIOSessionStrategy; -import org.opensearch.common.Strings; +import 
org.apache.hc.client5.http.ssl.ClientTlsStrategyBuilder; +import org.apache.hc.client5.http.ssl.DefaultHostnameVerifier; +import org.apache.hc.client5.http.ssl.NoopHostnameVerifier; +import org.apache.hc.core5.function.Factory; +import org.apache.hc.core5.http.nio.ssl.TlsStrategy; +import org.apache.hc.core5.reactor.ssl.TlsDetails; import org.opensearch.common.settings.SecureSetting; import org.opensearch.common.settings.SecureString; import org.opensearch.common.settings.Setting; @@ -50,6 +52,8 @@ import javax.net.ssl.HostnameVerifier; import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLEngine; + import java.io.IOException; import java.io.UncheckedIOException; import java.nio.file.Path; @@ -161,16 +165,31 @@ private void reload() { } /** - * Encapsulate the loaded SSL configuration as a HTTP-client {@link SSLIOSessionStrategy}. + * Encapsulate the loaded SSL configuration as a HTTP-client {@link TlsStrategy}. * The returned strategy is immutable, but successive calls will return different objects that may have different * configurations if the underlying key/certificate files are modified. */ - SSLIOSessionStrategy getStrategy() { + TlsStrategy getStrategy() { final HostnameVerifier hostnameVerifier = configuration.getVerificationMode().isHostnameVerificationEnabled() ? new DefaultHostnameVerifier() : new NoopHostnameVerifier(); - final String[] protocols = configuration.getSupportedProtocols().toArray(Strings.EMPTY_ARRAY); - final String[] cipherSuites = configuration.getCipherSuites().toArray(Strings.EMPTY_ARRAY); - return new SSLIOSessionStrategy(context, protocols, cipherSuites, hostnameVerifier); + + final String[] protocols = configuration.getSupportedProtocols().toArray(new String[0]); + final String[] cipherSuites = configuration.getCipherSuites().toArray(new String[0]); + + return ClientTlsStrategyBuilder.create() + .setSslContext(context) + .setHostnameVerifier(hostnameVerifier) + .setCiphers(cipherSuites) + .setTlsVersions(protocols) + // See https://issues.apache.org/jira/browse/HTTPCLIENT-2219 + .setTlsDetailsFactory(new Factory<SSLEngine, TlsDetails>() { + @Override + public TlsDetails create(final SSLEngine sslEngine) { + return new TlsDetails(sslEngine.getSession(), sslEngine.getApplicationProtocol()); + } + }) + .build(); + } }
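(A brief note on the hunk above, outside the diff itself: in Apache HttpClient 5 the async TLS layer consults TlsDetails for the negotiated ALPN protocol, and HTTPCLIENT-2219, linked in the code, is the upstream issue that makes the explicit setTlsDetailsFactory workaround necessary on some JSSE implementations. A minimal, self-contained sketch of building such a strategy; the SSLContext here is the JVM default purely for illustration, whereas ReindexSslConfig loads its own key/trust material:

import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLEngine;

import org.apache.hc.client5.http.ssl.ClientTlsStrategyBuilder;
import org.apache.hc.core5.http.nio.ssl.TlsStrategy;
import org.apache.hc.core5.reactor.ssl.TlsDetails;

class TlsStrategySketch {
    static TlsStrategy defaultStrategy() throws Exception {
        return ClientTlsStrategyBuilder.create()
            .setSslContext(SSLContext.getDefault())
            // Workaround for https://issues.apache.org/jira/browse/HTTPCLIENT-2219:
            // surface the session and ALPN protocol explicitly.
            .setTlsDetailsFactory(
                (SSLEngine sslEngine) -> new TlsDetails(sslEngine.getSession(), sslEngine.getApplicationProtocol())
            )
            .build();
    }
}

The resulting strategy is then attached through PoolingAsyncClientConnectionManagerBuilder.setTlsStrategy(...), which is exactly what the Reindexer change below does; that same migration also switches passwords to char[] and timeouts to Timeout values.)

diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/Reindexer.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/Reindexer.java index 8ade055d10f60..aa9accbd90e21 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/Reindexer.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/Reindexer.java @@ -33,15 +33,18 @@ package org.opensearch.index.reindex; import java.util.Optional; -import org.apache.http.Header; -import org.apache.http.HttpHost; -import org.apache.http.HttpRequestInterceptor; -import org.apache.http.auth.AuthScope; -import org.apache.http.auth.UsernamePasswordCredentials; -import org.apache.http.client.CredentialsProvider; -import org.apache.http.impl.client.BasicCredentialsProvider; -import org.apache.http.impl.nio.reactor.IOReactorConfig; -import org.apache.http.message.BasicHeader; + +import org.apache.hc.client5.http.auth.AuthScope; +import org.apache.hc.client5.http.auth.UsernamePasswordCredentials; +import org.apache.hc.client5.http.impl.auth.BasicCredentialsProvider; +import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManager; +import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManagerBuilder; +import org.apache.hc.core5.http.Header; 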
+import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.HttpRequestInterceptor; +import org.apache.hc.core5.http.message.BasicHeader; +import org.apache.hc.core5.reactor.IOReactorConfig; +import org.apache.hc.core5.util.Timeout; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.action.ActionListener; @@ -202,21 +205,23 @@ static RestClient buildRestClient( for (Map.Entry<String, String> header : remoteInfo.getHeaders().entrySet()) { clientHeaders[i++] = new BasicHeader(header.getKey(), header.getValue()); } - final RestClientBuilder builder = RestClient.builder( - new HttpHost(remoteInfo.getHost(), remoteInfo.getPort(), remoteInfo.getScheme()) - ).setDefaultHeaders(clientHeaders).setRequestConfigCallback(c -> { - c.setConnectTimeout(Math.toIntExact(remoteInfo.getConnectTimeout().millis())); - c.setSocketTimeout(Math.toIntExact(remoteInfo.getSocketTimeout().millis())); + final HttpHost httpHost = new HttpHost(remoteInfo.getScheme(), remoteInfo.getHost(), remoteInfo.getPort()); + final RestClientBuilder builder = RestClient.builder(httpHost).setDefaultHeaders(clientHeaders).setRequestConfigCallback(c -> { + c.setConnectTimeout(Timeout.ofMilliseconds(Math.toIntExact(remoteInfo.getConnectTimeout().millis()))); + c.setResponseTimeout(Timeout.ofMilliseconds(Math.toIntExact(remoteInfo.getSocketTimeout().millis()))); return c; }).setHttpClientConfigCallback(c -> { // Enable basic auth if it is configured if (remoteInfo.getUsername() != null) { - UsernamePasswordCredentials creds = new UsernamePasswordCredentials(remoteInfo.getUsername(), remoteInfo.getPassword()); - CredentialsProvider credentialsProvider = new BasicCredentialsProvider(); - credentialsProvider.setCredentials(AuthScope.ANY, creds); + UsernamePasswordCredentials creds = new UsernamePasswordCredentials( + remoteInfo.getUsername(), + remoteInfo.getPassword().toCharArray() + ); + BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider(); + credentialsProvider.setCredentials(new AuthScope(httpHost, null, "Basic"), creds); c.setDefaultCredentialsProvider(credentialsProvider); } else { - restInterceptor.ifPresent(interceptor -> c.addInterceptorLast(interceptor)); + restInterceptor.ifPresent(interceptor -> c.addRequestInterceptorLast(interceptor)); } // Stick the task id in the thread name so we can track down tasks from stack traces AtomicInteger threads = new AtomicInteger(); @@ -227,8 +232,13 @@ static RestClient buildRestClient( return t; }); // Limit ourselves to one reactor thread because for now the search process is single threaded. 
- c.setDefaultIOReactorConfig(IOReactorConfig.custom().setIoThreadCount(1).build()); - c.setSSLStrategy(sslConfig.getStrategy()); + c.setIOReactorConfig(IOReactorConfig.custom().setIoThreadCount(1).build()); + + final PoolingAsyncClientConnectionManager connectionManager = PoolingAsyncClientConnectionManagerBuilder.create() + .setTlsStrategy(sslConfig.getStrategy()) + .build(); + + c.setConnectionManager(connectionManager); return c; }); if (Strings.hasLength(remoteInfo.getPathPrefix()) && "/".equals(remoteInfo.getPathPrefix()) == false) { diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteRequestBuilders.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteRequestBuilders.java index 8467fbdeacd0e..873bd7c3b48cb 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteRequestBuilders.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteRequestBuilders.java @@ -32,8 +32,8 @@ package org.opensearch.index.reindex.remote; -import org.apache.http.entity.ContentType; -import org.apache.http.nio.entity.NStringEntity; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.io.entity.StringEntity; import org.opensearch.OpenSearchException; import org.opensearch.Version; import org.opensearch.action.search.SearchRequest; @@ -240,7 +240,7 @@ static Request scroll(String scroll, TimeValue keepAlive, Version remoteVersion) if (remoteVersion.before(Version.fromId(2000099))) { // Versions before 2.0.0 extract the plain scroll_id from the body - request.setEntity(new NStringEntity(scroll, ContentType.TEXT_PLAIN)); + request.setEntity(new StringEntity(scroll, ContentType.TEXT_PLAIN)); return request; } @@ -258,7 +258,7 @@ static Request clearScroll(String scroll, Version remoteVersion) { if (remoteVersion.before(Version.fromId(2000099))) { // Versions before 2.0.0 extract the plain scroll_id from the body - request.setEntity(new NStringEntity(scroll, ContentType.TEXT_PLAIN)); + request.setEntity(new StringEntity(scroll, ContentType.TEXT_PLAIN)); return request; } try (XContentBuilder entity = JsonXContent.contentBuilder()) { diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSource.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSource.java index be691243ecf84..3a943450a1a89 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSource.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSource.java @@ -32,10 +32,11 @@ package org.opensearch.index.reindex.remote; -import org.apache.http.ContentTooLongException; -import org.apache.http.HttpEntity; -import org.apache.http.entity.ContentType; -import org.apache.http.util.EntityUtils; +import org.apache.hc.core5.http.ContentTooLongException; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; @@ -199,7 +200,7 @@ public void onSuccess(org.opensearch.client.Response response) { InputStream content = responseEntity.getContent(); XContentType xContentType = null; if (responseEntity.getContentType() != null) { - final String mimeType = 
ContentType.parse(responseEntity.getContentType().getValue()).getMimeType(); + final String mimeType = ContentType.parse(responseEntity.getContentType()).getMimeType(); xContentType = XContentType.fromMediaType(mimeType); } if (xContentType == null) { @@ -284,7 +285,11 @@ private static String bodyMessage(@Nullable HttpEntity entity) throws IOException if (entity == null) { return "No error body."; } else { - return "body=" + EntityUtils.toString(entity); + try { + return "body=" + EntityUtils.toString(entity); + } catch (final ParseException ex) { + throw new IOException(ex); + } } } }
diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/spi/ReindexRestInterceptorProvider.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/spi/ReindexRestInterceptorProvider.java index 034981c969b4b..0646c9b5d8705 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/spi/ReindexRestInterceptorProvider.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/spi/ReindexRestInterceptorProvider.java @@ -6,7 +6,8 @@ package org.opensearch.index.reindex.spi; import java.util.Optional; -import org.apache.http.HttpRequestInterceptor; + +import org.apache.hc.core5.http.HttpRequestInterceptor; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.index.reindex.ReindexRequest;
diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/RoundTripTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/RoundTripTests.java index 6239946852cf8..edd301603250a 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/RoundTripTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/RoundTripTests.java @@ -152,25 +152,6 @@ private void randomRequest(AbstractBulkIndexByScrollRequest<?> request) { request.setScript(random().nextBoolean() ? null : randomScript()); } - private void assertRequestEquals(Version version, ReindexRequest request, ReindexRequest tripped) { - assertRequestEquals((AbstractBulkIndexByScrollRequest<?>) request, (AbstractBulkIndexByScrollRequest<?>) tripped); - assertEquals(request.getDestination().version(), tripped.getDestination().version()); - assertEquals(request.getDestination().index(), tripped.getDestination().index()); - if (request.getRemoteInfo() == null) { - assertNull(tripped.getRemoteInfo()); - } else { - assertNotNull(tripped.getRemoteInfo()); - assertEquals(request.getRemoteInfo().getScheme(), tripped.getRemoteInfo().getScheme()); - assertEquals(request.getRemoteInfo().getHost(), tripped.getRemoteInfo().getHost()); - assertEquals(request.getRemoteInfo().getQuery(), tripped.getRemoteInfo().getQuery()); - assertEquals(request.getRemoteInfo().getUsername(), tripped.getRemoteInfo().getUsername()); - assertEquals(request.getRemoteInfo().getPassword(), tripped.getRemoteInfo().getPassword()); - assertEquals(request.getRemoteInfo().getHeaders(), tripped.getRemoteInfo().getHeaders()); - assertEquals(request.getRemoteInfo().getSocketTimeout(), tripped.getRemoteInfo().getSocketTimeout()); - assertEquals(request.getRemoteInfo().getConnectTimeout(), tripped.getRemoteInfo().getConnectTimeout()); - } - } - private void assertRequestEquals(AbstractBulkIndexByScrollRequest<?> request, AbstractBulkIndexByScrollRequest<?> tripped) { assertRequestEquals((AbstractBulkByScrollRequest<?>) request, (AbstractBulkByScrollRequest<?>) tripped); assertEquals(request.getScript(), tripped.getScript());
diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteRequestBuildersTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteRequestBuildersTests.java index c349bc54bcbd9..e7af54a0563d3 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteRequestBuildersTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteRequestBuildersTests.java @@ -32,8 +32,8 @@ package org.opensearch.index.reindex.remote; -import org.apache.http.HttpEntity; -import org.apache.http.entity.ContentType; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; import org.opensearch.Version; import org.opensearch.action.search.SearchRequest; import org.opensearch.client.Request; @@ -245,7 +245,7 @@ public void testInitialSearchEntity() throws IOException { searchRequest.source(new SearchSourceBuilder()); String query = "{\"match_all\":{}}"; HttpEntity entity = initialSearch(searchRequest, new BytesArray(query), remoteVersion).getEntity(); - assertEquals(ContentType.APPLICATION_JSON.toString(), entity.getContentType().getValue()); + assertEquals(ContentType.APPLICATION_JSON.toString(), entity.getContentType()); if (remoteVersion.onOrAfter(Version.fromId(1000099))) { assertEquals( "{\"query\":" + query + ",\"_source\":true}", @@ -261,7 +261,7 @@ public void testInitialSearchEntity() throws IOException { // Source filtering is included if set up searchRequest.source().fetchSource(new String[] { "in1", "in2" }, new String[] { "out" }); entity = initialSearch(searchRequest, new BytesArray(query), remoteVersion).getEntity(); - assertEquals(ContentType.APPLICATION_JSON.toString(), entity.getContentType().getValue()); + assertEquals(ContentType.APPLICATION_JSON.toString(), entity.getContentType()); assertEquals( "{\"query\":" + query + ",\"_source\":{\"includes\":[\"in1\",\"in2\"],\"excludes\":[\"out\"]}}",
Streams.copyToString(new InputStreamReader(entity.getContent(), StandardCharsets.UTF_8)) @@ -287,7 +287,7 @@ public void testScrollParams() { public void testScrollEntity() throws IOException { String scroll = randomAlphaOfLength(30); HttpEntity entity = scroll(scroll, timeValueMillis(between(1, 1000)), Version.fromString("5.0.0")).getEntity(); - assertEquals(ContentType.APPLICATION_JSON.toString(), entity.getContentType().getValue()); + assertEquals(ContentType.APPLICATION_JSON.toString(), entity.getContentType()); assertThat( Streams.copyToString(new InputStreamReader(entity.getContent(), StandardCharsets.UTF_8)), containsString("\"" + scroll + "\"") @@ -295,14 +295,14 @@ public void testScrollEntity() throws IOException { // Test with version < 2.0.0 entity = scroll(scroll, timeValueMillis(between(1, 1000)), Version.fromId(1070499)).getEntity(); - assertEquals(ContentType.TEXT_PLAIN.toString(), entity.getContentType().getValue()); + assertEquals(ContentType.TEXT_PLAIN.toString(), entity.getContentType()); assertEquals(scroll, Streams.copyToString(new InputStreamReader(entity.getContent(), StandardCharsets.UTF_8))); } public void testClearScroll() throws IOException { String scroll = randomAlphaOfLength(30); Request request = clearScroll(scroll, Version.fromString("5.0.0")); - assertEquals(ContentType.APPLICATION_JSON.toString(), request.getEntity().getContentType().getValue()); + assertEquals(ContentType.APPLICATION_JSON.toString(), request.getEntity().getContentType()); assertThat( Streams.copyToString(new InputStreamReader(request.getEntity().getContent(), StandardCharsets.UTF_8)), containsString("\"" + scroll + "\"") @@ -311,7 +311,7 @@ public void testClearScroll() throws IOException { // Test with version < 2.0.0 request = clearScroll(scroll, Version.fromId(1070499)); - assertEquals(ContentType.TEXT_PLAIN.toString(), request.getEntity().getContentType().getValue()); + assertEquals(ContentType.TEXT_PLAIN.toString(), request.getEntity().getContentType()); assertEquals(scroll, Streams.copyToString(new InputStreamReader(request.getEntity().getContent(), StandardCharsets.UTF_8))); assertThat(request.getParameters().keySet(), empty()); } diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSourceTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSourceTests.java index 337bc67796f8e..c0e2bd14f55bc 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSourceTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSourceTests.java @@ -32,31 +32,14 @@ package org.opensearch.index.reindex.remote; -import org.apache.http.ContentTooLongException; -import org.apache.http.HttpEntity; -import org.apache.http.HttpEntityEnclosingRequest; -import org.apache.http.HttpHost; -import org.apache.http.HttpResponse; -import org.apache.http.ProtocolVersion; -import org.apache.http.StatusLine; -import org.apache.http.client.protocol.HttpClientContext; -import org.apache.http.concurrent.FutureCallback; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.InputStreamEntity; -import org.apache.http.entity.StringEntity; -import org.apache.http.impl.nio.client.CloseableHttpAsyncClient; -import org.apache.http.impl.nio.client.HttpAsyncClientBuilder; -import org.apache.http.message.BasicHttpResponse; -import org.apache.http.message.BasicStatusLine; -import org.apache.http.nio.protocol.HttpAsyncRequestProducer; 
-import org.apache.http.nio.protocol.HttpAsyncResponseConsumer; import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchStatusException; import org.opensearch.Version; import org.opensearch.action.bulk.BackoffPolicy; import org.opensearch.action.search.SearchRequest; -import org.opensearch.client.HeapBufferedAsyncResponseConsumer; import org.opensearch.client.RestClient; +import org.opensearch.client.http.HttpUriRequestProducer; +import org.opensearch.client.nio.HeapBufferedAsyncResponseConsumer; import org.opensearch.common.ParsingException; import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.io.FileSystemUtils; @@ -74,13 +57,32 @@ import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; +import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient; +import org.apache.hc.client5.http.impl.async.HttpAsyncClientBuilder; +import org.apache.hc.core5.concurrent.FutureCallback; +import org.apache.hc.core5.function.Supplier; +import org.apache.hc.core5.http.ClassicHttpRequest; +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.ContentTooLongException; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.io.entity.InputStreamEntity; +import org.apache.hc.core5.http.io.entity.StringEntity; +import org.apache.hc.core5.http.message.BasicClassicHttpResponse; +import org.apache.hc.core5.http.nio.AsyncPushConsumer; +import org.apache.hc.core5.http.nio.AsyncRequestProducer; +import org.apache.hc.core5.http.nio.AsyncResponseConsumer; +import org.apache.hc.core5.http.nio.HandlerFactory; +import org.apache.hc.core5.http.protocol.HttpContext; +import org.apache.hc.core5.io.CloseMode; +import org.apache.hc.core5.reactor.IOReactorStatus; import org.junit.After; import org.junit.Before; -import org.mockito.invocation.InvocationOnMock; -import org.mockito.stubbing.Answer; import java.io.IOException; import java.io.InputStreamReader; +import java.io.UncheckedIOException; import java.net.URL; import java.nio.charset.StandardCharsets; import java.util.Queue; @@ -97,7 +99,6 @@ import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; -import static org.mockito.Mockito.any; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; @@ -444,24 +445,49 @@ public void testWrapExceptionToPreserveStatus() throws IOException { @SuppressWarnings({ "unchecked", "rawtypes" }) public void testTooLargeResponse() throws Exception { ContentTooLongException tooLong = new ContentTooLongException("too long!"); - CloseableHttpAsyncClient httpClient = mock(CloseableHttpAsyncClient.class); - when( - httpClient.execute( - any(HttpAsyncRequestProducer.class), - any(HttpAsyncResponseConsumer.class), - any(HttpClientContext.class), - any(FutureCallback.class) - ) - ).then(new Answer<Future<HttpResponse>>() { + CloseableHttpAsyncClient httpClient = new CloseableHttpAsyncClient() { + + @Override + public void close() throws IOException {} + + @Override + public void close(CloseMode closeMode) {} + + @Override + public void start() {} + + @Override + public void register(String hostname, String uriPattern, Supplier<AsyncPushConsumer> supplier) {} + + @Override + public void initiateShutdown() {} + @Override - public Future<HttpResponse> answer(InvocationOnMock invocationOnMock) throws Throwable { - HeapBufferedAsyncResponseConsumer consumer = (HeapBufferedAsyncResponseConsumer) invocationOnMock.getArguments()[1]; - FutureCallback<HttpResponse> callback = (FutureCallback<HttpResponse>) invocationOnMock.getArguments()[3]; - assertEquals(new ByteSizeValue(100, ByteSizeUnit.MB).bytesAsInt(), consumer.getBufferLimit()); + public IOReactorStatus getStatus() { + return null; + } + + @Override + protected <T> Future<T> doExecute( + HttpHost target, + AsyncRequestProducer requestProducer, + AsyncResponseConsumer<T> responseConsumer, + HandlerFactory<AsyncPushConsumer> pushHandlerFactory, + HttpContext context, + FutureCallback<T> callback + ) { + assertEquals( + new ByteSizeValue(100, ByteSizeUnit.MB).bytesAsInt(), + ((HeapBufferedAsyncResponseConsumer) responseConsumer).getBufferLimit() + ); callback.failed(tooLong); return null; } - }); + + @Override + public void awaitShutdown(org.apache.hc.core5.util.TimeValue waitTime) throws InterruptedException {} + }; + RemoteScrollableHitSource source = sourceWithMockedClient(true, httpClient); Throwable e = expectThrows(RuntimeException.class, source::start); @@ -539,46 +565,68 @@ private RemoteScrollableHitSource sourceWithMockedRemoteCall(boolean mockRemoteV } } - CloseableHttpAsyncClient httpClient = mock(CloseableHttpAsyncClient.class); - when( - httpClient.execute( - any(HttpAsyncRequestProducer.class), - any(HttpAsyncResponseConsumer.class), - any(HttpClientContext.class), - any(FutureCallback.class) - ) - ).thenAnswer(new Answer<Future<HttpResponse>>() { - + final CloseableHttpAsyncClient httpClient = new CloseableHttpAsyncClient() { int responseCount = 0; @Override - public Future<HttpResponse> answer(InvocationOnMock invocationOnMock) throws Throwable { - // Throw away the current thread context to simulate running async httpclient's thread pool - threadPool.getThreadContext().stashContext(); - HttpAsyncRequestProducer requestProducer = (HttpAsyncRequestProducer) invocationOnMock.getArguments()[0]; - FutureCallback<HttpResponse> futureCallback = (FutureCallback<HttpResponse>) invocationOnMock.getArguments()[3]; - HttpEntityEnclosingRequest request = (HttpEntityEnclosingRequest) requestProducer.generateRequest(); - URL resource = resources[responseCount]; - String path = paths[responseCount++]; - ProtocolVersion protocolVersion = new ProtocolVersion("http", 1, 1); - if (path.startsWith("fail:")) { - String body = Streams.copyToString(new InputStreamReader(request.getEntity().getContent(), StandardCharsets.UTF_8)); - if (path.equals("fail:rejection.json")) { - StatusLine statusLine = new BasicStatusLine(protocolVersion, RestStatus.TOO_MANY_REQUESTS.getStatus(), ""); - BasicHttpResponse httpResponse = new BasicHttpResponse(statusLine); - futureCallback.completed(httpResponse); + public void close(CloseMode closeMode) {} + + @Override + public void close() throws IOException {} + + @Override + public void start() {} + + @Override + public IOReactorStatus getStatus() { + return null; + } + + @Override + public void awaitShutdown(org.apache.hc.core5.util.TimeValue waitTime) throws InterruptedException {} + + @Override + public void initiateShutdown() {} + + @Override + protected <T> Future<T> doExecute( + HttpHost target, + AsyncRequestProducer requestProducer, + AsyncResponseConsumer<T> responseConsumer, + HandlerFactory<AsyncPushConsumer> pushHandlerFactory, + HttpContext context, + FutureCallback<T> callback + ) { + try { + // Throw away the current thread context to simulate running async httpclient's thread pool + threadPool.getThreadContext().stashContext(); + ClassicHttpRequest request = getRequest(requestProducer); + URL resource = resources[responseCount]; + String path = paths[responseCount++]; + if (path.startsWith("fail:")) { + String body = Streams.copyToString(new InputStreamReader(request.getEntity().getContent(), StandardCharsets.UTF_8)); + if (path.equals("fail:rejection.json")) { + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(RestStatus.TOO_MANY_REQUESTS.getStatus(), ""); + callback.completed((T) httpResponse); + } else { + callback.failed(new RuntimeException(body)); + } } else { - futureCallback.failed(new RuntimeException(body)); + BasicClassicHttpResponse httpResponse = new BasicClassicHttpResponse(200, ""); + httpResponse.setEntity(new InputStreamEntity(FileSystemUtils.openFileURLStream(resource), contentType)); + callback.completed((T) httpResponse); } - } else { - StatusLine statusLine = new BasicStatusLine(protocolVersion, 200, ""); - HttpResponse httpResponse = new BasicHttpResponse(statusLine); - httpResponse.setEntity(new InputStreamEntity(FileSystemUtils.openFileURLStream(resource), contentType)); - futureCallback.completed(httpResponse); + return null; + } catch (IOException ex) { + throw new UncheckedIOException(ex); } - return null; } - }); + + @Override + public void register(String hostname, String uriPattern, Supplier<AsyncPushConsumer> supplier) {} + + }; + return sourceWithMockedClient(mockRemoteVersion, httpClient); } @@ -649,4 +697,9 @@ private <T extends Exception> T expectListenerFailure(Class<T> expectedExcept assertNotNull(exception.get()); return exception.get(); } + + private static ClassicHttpRequest getRequest(AsyncRequestProducer requestProducer) { + assertThat(requestProducer, instanceOf(HttpUriRequestProducer.class)); + return ((HttpUriRequestProducer) requestProducer).getRequest(); + } }
diff --git a/modules/repository-url/src/yamlRestTest/java/org/opensearch/repositories/url/RepositoryURLClientYamlTestSuiteIT.java b/modules/repository-url/src/yamlRestTest/java/org/opensearch/repositories/url/RepositoryURLClientYamlTestSuiteIT.java index 3d0c09fb2288c..cbadcba5ef6f0 100644 --- a/modules/repository-url/src/yamlRestTest/java/org/opensearch/repositories/url/RepositoryURLClientYamlTestSuiteIT.java +++ b/modules/repository-url/src/yamlRestTest/java/org/opensearch/repositories/url/RepositoryURLClientYamlTestSuiteIT.java @@ -34,9 +34,6 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; -import org.apache.http.HttpEntity; -import org.apache.http.entity.ContentType; -import org.apache.http.nio.entity.NStringEntity; import org.opensearch.client.Request; import org.opensearch.client.Response; import org.opensearch.common.Strings; @@ -49,6 +46,9 @@ import org.opensearch.rest.RestStatus; import org.opensearch.test.rest.yaml.ClientYamlTestCandidate; import org.opensearch.test.rest.yaml.OpenSearchClientYamlSuiteTestCase; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.io.entity.StringEntity; import org.junit.Before; import java.io.IOException; @@ -144,7 +144,7 @@ private static HttpEntity buildRepositorySettings(final String type, final Setti builder.endObject(); } builder.endObject(); - return new NStringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON); + return new StringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON); } } }
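(One recurring detail in the reindex and repository-url changes above, noted outside the diff: core5's EntityUtils.toString declares ParseException in addition to IOException, which is why the test signatures gained ParseException and why RemoteScrollableHitSource wraps it. A small self-contained sketch of that wrapping convention; the helper name is mine:

import java.io.IOException;

import org.apache.hc.core5.http.HttpEntity;
import org.apache.hc.core5.http.ParseException;
import org.apache.hc.core5.http.io.entity.EntityUtils;

final class EntityBodies {
    private EntityBodies() {}

    // Callers that only deal in IOException can rewrap core5's ParseException,
    // mirroring what bodyMessage(...) does in RemoteScrollableHitSource.
    static String bodyOrEmpty(HttpEntity entity) throws IOException {
        if (entity == null) {
            return "";
        }
        try {
            return EntityUtils.toString(entity);
        } catch (ParseException ex) {
            throw new IOException(ex);
        }
    }
}

diff --git a/modules/transport-netty4/build.gradle index 5d2047d7f18a2..9e0d9955a65a1 100644 --- a/modules/transport-netty4/build.gradle +++ 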
b/modules/transport-netty4/build.gradle @@ -156,6 +156,12 @@ thirdPartyAudit { 'org.bouncycastle.cert.X509v3CertificateBuilder', 'org.bouncycastle.cert.jcajce.JcaX509CertificateConverter', 'org.bouncycastle.operator.jcajce.JcaContentSignerBuilder', + 'org.bouncycastle.openssl.PEMEncryptedKeyPair', + 'org.bouncycastle.openssl.PEMParser', + 'org.bouncycastle.openssl.jcajce.JcaPEMKeyConverter', + 'org.bouncycastle.openssl.jcajce.JceOpenSSLPKCS8DecryptorProviderBuilder', + 'org.bouncycastle.openssl.jcajce.JcePEMDecryptorProviderBuilder', + 'org.bouncycastle.pkcs.PKCS8EncryptedPrivateKeyInfo', // from io.netty.handler.ssl.JettyNpnSslEngine (netty) 'org.eclipse.jetty.npn.NextProtoNego$ClientProvider', diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.79.Final.jar.sha1 deleted file mode 100644 index 8e9e4d0b7f754..0000000000000 --- a/modules/transport-netty4/licenses/netty-buffer-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6c014412b599489b1db27c6bc08d8a46da94e397 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.84.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.84.Final.jar.sha1 new file mode 100644 index 0000000000000..25a6f9ecf50b6 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-buffer-4.1.84.Final.jar.sha1 @@ -0,0 +1 @@ +a6b8cf57cfffc28d8e33f8175788a99401f576d9 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.79.Final.jar.sha1 deleted file mode 100644 index c0920231d79a8..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -18f5b02af7ca611978bc28f2cb58cbb3b9b0f0ef \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.84.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.84.Final.jar.sha1 new file mode 100644 index 0000000000000..032a8f1ed954e --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-4.1.84.Final.jar.sha1 @@ -0,0 +1 @@ +4f60f56c4cd17db114f01dc64aa465a2905240f5 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.79.Final.jar.sha1 deleted file mode 100644 index a3f650da5abbd..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -882c70bc0a30a98bf3ce477f043e967ac026044c \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.84.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.84.Final.jar.sha1 new file mode 100644 index 0000000000000..1e985edfce65e --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.84.Final.jar.sha1 @@ -0,0 +1 @@ +78628e78087d3da6c3d2b22aa67798d3f3adcd68 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.79.Final.jar.sha1 deleted file mode 100644 index f2989024cfce1..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http2-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0eeffab0cd5efb699d5e4ab9b694d32fef6694b3 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.84.Final.jar.sha1 
b/modules/transport-netty4/licenses/netty-codec-http2-4.1.84.Final.jar.sha1 new file mode 100644 index 0000000000000..5fe8c5420cd74 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http2-4.1.84.Final.jar.sha1 @@ -0,0 +1 @@ +5a0178b9689493fd612cd40481034469f4bd14cc \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.79.Final.jar.sha1 deleted file mode 100644 index faa7b099406a3..0000000000000 --- a/modules/transport-netty4/licenses/netty-common-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2814bd465731355323aba0fdd22163bfce638a75 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.84.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.84.Final.jar.sha1 new file mode 100644 index 0000000000000..beaa2cce654c3 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-common-4.1.84.Final.jar.sha1 @@ -0,0 +1 @@ +90c84ec7f1108ae164810cf46694a5ec7ce738fc \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.79.Final.jar.sha1 deleted file mode 100644 index 8e314f164da69..0000000000000 --- a/modules/transport-netty4/licenses/netty-handler-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2dc22423c8ed19906615fb936a5fcb7db14a4e6c \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.84.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.84.Final.jar.sha1 new file mode 100644 index 0000000000000..afd28b451ba12 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-handler-4.1.84.Final.jar.sha1 @@ -0,0 +1 @@ +69cd93e2c321f04fc0a18eb5af519565373d0873 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.79.Final.jar.sha1 deleted file mode 100644 index af550935bb911..0000000000000 --- a/modules/transport-netty4/licenses/netty-resolver-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -55ecb1ff4464b56564a90824a741c3911264aaa4 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.84.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.84.Final.jar.sha1 new file mode 100644 index 0000000000000..07aa37fc76524 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-resolver-4.1.84.Final.jar.sha1 @@ -0,0 +1 @@ +b6f808e331cf843d2a7ff62042cf9b5343e2ff25 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.79.Final.jar.sha1 deleted file mode 100644 index c6e18efb3ad3d..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6cc2b49749b4fbcc39c687027e04e65e857552a9 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.84.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.84.Final.jar.sha1 new file mode 100644 index 0000000000000..5e12ada3f5c10 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-4.1.84.Final.jar.sha1 @@ -0,0 +1 @@ +acd9947d0a951b1f7021c7adc393df936b1ecbf0 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1 
b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1 deleted file mode 100644 index 7f984663dfa85..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -731937caec938b77b39df932a8da8aaca8d5ec05 \ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.84.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.84.Final.jar.sha1 new file mode 100644 index 0000000000000..6273c55f3acbd --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.84.Final.jar.sha1 @@ -0,0 +1 @@ +e51601ddb88ee646a97ff04db38d45c22c29aee8 \ No newline at end of file
diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/rest/discovery/Zen2RestApiIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/rest/discovery/Zen2RestApiIT.java index 96e21e0e05ff7..fbac1f1c52e95 100644 --- a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/rest/discovery/Zen2RestApiIT.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/rest/discovery/Zen2RestApiIT.java @@ -32,7 +32,6 @@ package org.opensearch.rest.discovery; -import org.apache.http.HttpHost; import org.opensearch.OpenSearchNetty4IntegTestCase; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.client.Client; @@ -49,9 +48,11 @@ import org.opensearch.http.HttpServerTransport; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalTestCluster; +import org.apache.hc.core5.http.HttpHost; import org.hamcrest.Matchers; import java.io.IOException; +import java.net.URISyntaxException; import java.util.Collections; import java.util.List; @@ -124,6 +125,8 @@ public Settings onNodeStopped(String nodeName) throws IOException { .get(); assertFalse(nodeName, clusterHealthResponse.isTimedOut()); return Settings.EMPTY; + } catch (final URISyntaxException ex) { + throw new IOException(ex); } finally { restClient.setNodes(allNodes); }
diff --git a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java index e3fde75e5b551..fcc9ab295c6c7 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java @@ -413,18 +413,19 @@ protected void channelRead0(ChannelHandlerContext ctx, HttpMessage msg) throws Exception // If this handler is hit then no upgrade has been attempted and the client is just talking HTTP final ChannelPipeline pipeline = ctx.pipeline(); pipeline.addAfter(ctx.name(), "handler", getRequestHandler()); - pipeline.replace(this, "aggregator", aggregator); + pipeline.replace(this, "decoder_compress", new HttpContentDecompressor()); - ch.pipeline().addLast("decoder_compress", new HttpContentDecompressor()); - ch.pipeline().addLast("encoder", new HttpResponseEncoder()); + pipeline.addAfter("decoder_compress", "aggregator", aggregator); if (handlingSettings.isCompression()) { - ch.pipeline() - .addAfter("aggregator", "encoder_compress", new HttpContentCompressor(handlingSettings.getCompressionLevel())); + pipeline.addAfter( + "aggregator", + "encoder_compress", + new HttpContentCompressor(handlingSettings.getCompressionLevel()) + ); } - ch.pipeline().addBefore("handler", "request_creator", requestCreator); - ch.pipeline().addBefore("handler", "response_creator", responseCreator); - ch.pipeline() - .addBefore("handler", "pipelining", new Netty4HttpPipeliningHandler(logger, transport.pipeliningMaxEvents)); + pipeline.addBefore("handler", "request_creator", requestCreator); + pipeline.addBefore("handler", "response_creator", responseCreator); + pipeline.addBefore("handler", "pipelining", new Netty4HttpPipeliningHandler(logger, transport.pipeliningMaxEvents)); ctx.fireChannelRead(ReferenceCountUtil.retain(msg)); }
diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.4.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.4.0.jar.sha1 deleted file mode 100644 index 0038e3153b139..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -aa0f250558375922f3091820361156e514fe1842 \ No newline at end of file
diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.5.0-snapshot-a4ef70f.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.5.0-snapshot-a4ef70f.jar.sha1 new file mode 100644 index 0000000000000..a49a0749a9e4a --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.5.0-snapshot-a4ef70f.jar.sha1 @@ -0,0 +1 @@ +a7c38619d8f2cc48f792e007aa25b430f4f25698 \ No newline at end of file
diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.4.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.4.0.jar.sha1 deleted file mode 100644 index ec8c78a580543..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -32eb1ad367ab1289804aeed95ea7216711a7764d \ No newline at end of file
diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.5.0-snapshot-a4ef70f.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.5.0-snapshot-a4ef70f.jar.sha1 new file mode 100644 index 0000000000000..709bcf84faf06 --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.5.0-snapshot-a4ef70f.jar.sha1 @@ -0,0 +1 @@ +6243383e5fbcf87551ded4c1b48b69a4276bb748 \ No newline at end of file
diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.4.0.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.4.0.jar.sha1 deleted file mode 100644 index 438585ee3afa8..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -63661714be65f882a921d281965b0779fd487b90 \ No newline at end of file
diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.5.0-snapshot-a4ef70f.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.5.0-snapshot-a4ef70f.jar.sha1 new file mode 100644 index 0000000000000..0c4d7b7a2755c --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.5.0-snapshot-a4ef70f.jar.sha1 @@ -0,0 +1 @@ +91d1560bc927f1a431bb92e47fda9395d3b3e551 \ No newline at end of file
diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.4.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.4.0.jar.sha1 deleted file mode 100644 index 019a98dc594b3..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1034d876551fc21f7835b456dab01db21b9a4af6 \ No newline at end of file
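(Returning briefly to the Netty4HttpServerTransport hunk above: the rewrite pins HttpContentDecompressor ahead of the aggregator, so compressed request bodies are inflated before aggregation, and adds the response compressor after the aggregator only when compression is enabled. A rough standalone sketch of that ordering; the handler names mirror the diff, while the aggregator limit is illustrative:

import io.netty.channel.ChannelPipeline;
import io.netty.handler.codec.http.HttpContentCompressor;
import io.netty.handler.codec.http.HttpContentDecompressor;
import io.netty.handler.codec.http.HttpObjectAggregator;

final class PipelineWiring {
    private PipelineWiring() {}

    static void wire(ChannelPipeline pipeline, boolean compression, int compressionLevel) {
        // Inbound: decompress first, then aggregate the inflated chunks.
        pipeline.addLast("decoder_compress", new HttpContentDecompressor());
        pipeline.addLast("aggregator", new HttpObjectAggregator(64 * 1024));
        if (compression) {
            // Outbound: compress responses; placed after the aggregator, as in the diff.
            pipeline.addAfter("aggregator", "encoder_compress", new HttpContentCompressor(compressionLevel));
        }
    }
}

diff --git 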
a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.5.0-snapshot-a4ef70f.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.5.0-snapshot-a4ef70f.jar.sha1 new file mode 100644 index 0000000000000..82524cbdb4ada --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.5.0-snapshot-a4ef70f.jar.sha1 @@ -0,0 +1 @@ +26bbfd1a796d62006dff9c7e32d31a0397a8025e \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.4.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.4.0.jar.sha1 deleted file mode 100644 index cc8e31b7a248f..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f704ee4b14e2fe2622bb983f04b36a32df8fd4a7 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.5.0-snapshot-a4ef70f.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.5.0-snapshot-a4ef70f.jar.sha1 new file mode 100644 index 0000000000000..af6b600d22090 --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.5.0-snapshot-a4ef70f.jar.sha1 @@ -0,0 +1 @@ +a1a26c04e24d9a8573e6bd9a0bacad184821dd33 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.4.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.4.0.jar.sha1 deleted file mode 100644 index 25e115b84308d..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a95ff17b51da6b3da641fa4053e5ee9ea2ff5daf \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.5.0-snapshot-a4ef70f.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.5.0-snapshot-a4ef70f.jar.sha1 new file mode 100644 index 0000000000000..ea5680869c187 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.5.0-snapshot-a4ef70f.jar.sha1 @@ -0,0 +1 @@ +19aa9eff0e0671fd91eb435a2e2fa29dec52cf5c \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.4.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.4.0.jar.sha1 deleted file mode 100644 index 7cc4a7131f866..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -13e1ae2c760d8c0d7990ffe3296e46d9d8e6f842 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.5.0-snapshot-a4ef70f.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.5.0-snapshot-a4ef70f.jar.sha1 new file mode 100644 index 0000000000000..4f81941a1746e --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.5.0-snapshot-a4ef70f.jar.sha1 @@ -0,0 +1 @@ +05ff979dfe3ded901ccd72d5a5d66349286c44bf \ No newline at end of file diff --git a/plugins/discovery-azure-classic/build.gradle b/plugins/discovery-azure-classic/build.gradle index 8ca9491f834a6..c88d19f0e2806 100644 --- a/plugins/discovery-azure-classic/build.gradle +++ b/plugins/discovery-azure-classic/build.gradle @@ -59,7 +59,7 @@ dependencies { api "com.sun.jersey:jersey-client:${versions.jersey}" api "com.sun.jersey:jersey-core:${versions.jersey}" api "com.sun.jersey:jersey-json:${versions.jersey}" - api 'org.codehaus.jettison:jettison:1.5.1' + api "org.codehaus.jettison:jettison:${versions.jettison}" api 
'com.sun.xml.bind:jaxb-impl:2.2.3-1' // HACK: javax.xml.bind was removed from default modules in java 9, so we pull the api in here, diff --git a/plugins/discovery-ec2/licenses/jackson-databind-2.13.4.2.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-databind-2.13.4.2.jar.sha1 new file mode 100644 index 0000000000000..a7782e8aac18d --- /dev/null +++ b/plugins/discovery-ec2/licenses/jackson-databind-2.13.4.2.jar.sha1 @@ -0,0 +1 @@ +325c06bdfeb628cfb80ebaaf1a26cc1eb558a585 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-databind-2.13.4.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-databind-2.13.4.jar.sha1 deleted file mode 100644 index fcc6491d1f78d..0000000000000 --- a/plugins/discovery-ec2/licenses/jackson-databind-2.13.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -98b0edfa8e4084078f10b7b356c300ded4a71491 \ No newline at end of file diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index 8f952f7619ac1..7bf67769cda10 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -38,7 +38,7 @@ opensearchplugin { } versions << [ - 'tika' : '2.4.0', + 'tika' : '2.5.0', 'pdfbox': '2.0.25', 'poi' : '5.2.2', 'mime4j': '0.8.3' diff --git a/plugins/ingest-attachment/licenses/tika-core-2.4.0.jar.sha1 b/plugins/ingest-attachment/licenses/tika-core-2.4.0.jar.sha1 deleted file mode 100644 index 373b7ec63138a..0000000000000 --- a/plugins/ingest-attachment/licenses/tika-core-2.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -97b2454943127857a8304319be658d6d7ff4fff1 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-core-2.5.0.jar.sha1 b/plugins/ingest-attachment/licenses/tika-core-2.5.0.jar.sha1 new file mode 100644 index 0000000000000..419f01c631375 --- /dev/null +++ b/plugins/ingest-attachment/licenses/tika-core-2.5.0.jar.sha1 @@ -0,0 +1 @@ +7f9f35e4827726b062ac2b0ad0fd361837a50ac9 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.4.0.jar.sha1 b/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.4.0.jar.sha1 deleted file mode 100644 index cf724f4ee1de4..0000000000000 --- a/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -57901d6088b0e34999e25af6b363ccec959b5e61 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.5.0.jar.sha1 b/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.5.0.jar.sha1 new file mode 100644 index 0000000000000..a9e47ff8a8a86 --- /dev/null +++ b/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.5.0.jar.sha1 @@ -0,0 +1 @@ +649574dca8f19d991ac25894c40284446dc5cf50 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.4.0.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.4.0.jar.sha1 deleted file mode 100644 index ec03a055a6f6d..0000000000000 --- a/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -83522360364a93e819eaec74f393bc56ed1d466a \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.5.0.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.5.0.jar.sha1 new file mode 100644 index 0000000000000..d648183868034 --- /dev/null +++ b/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.5.0.jar.sha1 @@ -0,0 +1 @@ 
+2b9268511c34d8a1098f0565438cb8077fcf845d \ No newline at end of file diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 8ca151d1e90db..063851b3a7edf 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -44,7 +44,7 @@ opensearchplugin { } dependencies { - api 'com.azure:azure-core:1.31.0' + api 'com.azure:azure-core:1.33.0' api 'com.azure:azure-storage-common:12.18.1' api 'com.azure:azure-core-http-netty:1.12.4' api "io.netty:netty-codec-dns:${versions.netty}" @@ -58,8 +58,8 @@ dependencies { api 'org.reactivestreams:reactive-streams:1.0.4' api 'io.projectreactor:reactor-core:3.4.23' api 'io.projectreactor.netty:reactor-netty:1.0.18' - api 'io.projectreactor.netty:reactor-netty-core:1.0.22' - api 'io.projectreactor.netty:reactor-netty-http:1.0.23' + api 'io.projectreactor.netty:reactor-netty-core:1.0.24' + api 'io.projectreactor.netty:reactor-netty-http:1.0.24' api "org.slf4j:slf4j-api:${versions.slf4j}" api "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" @@ -67,7 +67,7 @@ dependencies { api "com.fasterxml.jackson.dataformat:jackson-dataformat-xml:${versions.jackson}" api "com.fasterxml.jackson.module:jackson-module-jaxb-annotations:${versions.jackson}" api 'org.codehaus.woodstox:stax2-api:4.2.1' - implementation 'com.fasterxml.woodstox:woodstox-core:6.2.8' + implementation "com.fasterxml.woodstox:woodstox-core:${versions.woodstox}" runtimeOnly 'com.google.guava:guava:31.1-jre' api 'org.apache.commons:commons-lang3:3.12.0' testImplementation project(':test:fixtures:azure-fixture') diff --git a/plugins/repository-azure/licenses/azure-core-1.31.0.jar.sha1 b/plugins/repository-azure/licenses/azure-core-1.31.0.jar.sha1 deleted file mode 100644 index 6a5076b3da301..0000000000000 --- a/plugins/repository-azure/licenses/azure-core-1.31.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -39f18dae02237f90f1cd23b56701d7f9d9525531 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-core-1.33.0.jar.sha1 b/plugins/repository-azure/licenses/azure-core-1.33.0.jar.sha1 new file mode 100644 index 0000000000000..9077fc4ebf84b --- /dev/null +++ b/plugins/repository-azure/licenses/azure-core-1.33.0.jar.sha1 @@ -0,0 +1 @@ +93f105c2e923f0ab90521cc0e6e729b9c8304ad8 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-databind-2.13.4.2.jar.sha1 b/plugins/repository-azure/licenses/jackson-databind-2.13.4.2.jar.sha1 new file mode 100644 index 0000000000000..a7782e8aac18d --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-databind-2.13.4.2.jar.sha1 @@ -0,0 +1 @@ +325c06bdfeb628cfb80ebaaf1a26cc1eb558a585 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-databind-2.13.4.jar.sha1 b/plugins/repository-azure/licenses/jackson-databind-2.13.4.jar.sha1 deleted file mode 100644 index fcc6491d1f78d..0000000000000 --- a/plugins/repository-azure/licenses/jackson-databind-2.13.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -98b0edfa8e4084078f10b7b356c300ded4a71491 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.79.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.79.Final.jar.sha1 deleted file mode 100644 index a1753b194ea31..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-dns-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6c19c46f9529791964f636c93cfaca0556f0d5d0 \ No 
newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.84.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.84.Final.jar.sha1 new file mode 100644 index 0000000000000..f27ecd081f65d --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-dns-4.1.84.Final.jar.sha1 @@ -0,0 +1 @@ +48ce1da1bc12b830f6ffcdc5f0329639eb11e2fb \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.79.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.79.Final.jar.sha1 deleted file mode 100644 index f2989024cfce1..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-http2-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0eeffab0cd5efb699d5e4ab9b694d32fef6694b3 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.84.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.84.Final.jar.sha1 new file mode 100644 index 0000000000000..5fe8c5420cd74 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-http2-4.1.84.Final.jar.sha1 @@ -0,0 +1 @@ +5a0178b9689493fd612cd40481034469f4bd14cc \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.79.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.79.Final.jar.sha1 deleted file mode 100644 index 913f0e7685c86..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-socks-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -794a5937cdb1871c4ae350610752dec2929dc1d6 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.84.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.84.Final.jar.sha1 new file mode 100644 index 0000000000000..1eef1b7841930 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-socks-4.1.84.Final.jar.sha1 @@ -0,0 +1 @@ +8cef741b42de5a1b21a8313fffcf2b518138c00b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.79.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.79.Final.jar.sha1 deleted file mode 100644 index dbb072f3f665f..0000000000000 --- a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -054aace8683de7893cf28d4aab72cd60f49b5700 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.84.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.84.Final.jar.sha1 new file mode 100644 index 0000000000000..0c3ed9425f8b7 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.84.Final.jar.sha1 @@ -0,0 +1 @@ +8d9f2282f4da2486eed7797bc8622437eda7ce65 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.79.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.79.Final.jar.sha1 deleted file mode 100644 index a5d1be00d9c29..0000000000000 --- a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8eb9be9b6a66a03f5f4df67fe559cb676493d167 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.84.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.84.Final.jar.sha1 new file mode 100644 index 0000000000000..2835332c51158 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.84.Final.jar.sha1 @@ -0,0 +1 @@ 
+3755d26967afca20b925c07d41e6ed3ec38c6822 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1 deleted file mode 100644 index 7f984663dfa85..0000000000000 --- a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -731937caec938b77b39df932a8da8aaca8d5ec05 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.84.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.84.Final.jar.sha1 new file mode 100644 index 0000000000000..6273c55f3acbd --- /dev/null +++ b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.84.Final.jar.sha1 @@ -0,0 +1 @@ +e51601ddb88ee646a97ff04db38d45c22c29aee8 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.0.22.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.0.22.jar.sha1 deleted file mode 100644 index 4c82e37d27043..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-core-1.0.22.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5c2a258ac71e525c65f2e3a0bcf458b6c79bbc16 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.0.24.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.0.24.jar.sha1 new file mode 100644 index 0000000000000..3f5c8670c6c00 --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-core-1.0.24.jar.sha1 @@ -0,0 +1 @@ +feaecb39237170aafb23935e9b383e8dda281379 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.0.23.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.0.23.jar.sha1 deleted file mode 100644 index 0b26b80fe3915..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-http-1.0.23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -63932f2b675f451135986b3723a12d45e818b170 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.0.24.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.0.24.jar.sha1 new file mode 100644 index 0000000000000..aa0ca72e38cd0 --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-http-1.0.24.jar.sha1 @@ -0,0 +1 @@ +2fac480a17f752335318f103ab91427bdfb7716a \ No newline at end of file diff --git a/plugins/repository-azure/licenses/woodstox-core-6.2.8.jar.sha1 b/plugins/repository-azure/licenses/woodstox-core-6.2.8.jar.sha1 deleted file mode 100644 index ae65cdebf26de..0000000000000 --- a/plugins/repository-azure/licenses/woodstox-core-6.2.8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -670748292899c53b1963730d9eb7f8ab71314e90 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/woodstox-core-6.4.0.jar.sha1 b/plugins/repository-azure/licenses/woodstox-core-6.4.0.jar.sha1 new file mode 100644 index 0000000000000..cac5f37205956 --- /dev/null +++ b/plugins/repository-azure/licenses/woodstox-core-6.4.0.jar.sha1 @@ -0,0 +1 @@ +c47579857bbf12c85499f431d4ecf27d77976b7c \ No newline at end of file diff --git a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java index e8417f9ceaf2c..9ebebc5b25a3e 100644 --- 
a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java +++ b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java @@ -40,8 +40,6 @@ import fixture.azure.AzureHttpHandler; import reactor.core.scheduler.Schedulers; -import org.apache.http.HttpStatus; - import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.common.Strings; import org.opensearch.common.SuppressForbidden; @@ -63,7 +61,6 @@ import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; - import org.junit.After; import org.junit.AfterClass; import org.junit.Before; @@ -486,28 +483,4 @@ private static Optional getRangeEnd(HttpExchange exchange) { return Optional.of(Math.toIntExact(rangeEnd)); } - private static void sendIncompleteContent(HttpExchange exchange, byte[] bytes) throws IOException { - final int rangeStart = getRangeStart(exchange); - assertThat(rangeStart, lessThan(bytes.length)); - final Optional rangeEnd = getRangeEnd(exchange); - final int length; - if (rangeEnd.isPresent()) { - // adapt range end to be compliant to https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35 - final int effectiveRangeEnd = Math.min(rangeEnd.get(), bytes.length - 1); - length = effectiveRangeEnd - rangeStart; - } else { - length = bytes.length - rangeStart - 1; - } - exchange.getResponseHeaders().add("Content-Type", "application/octet-stream"); - exchange.getResponseHeaders().add("x-ms-blob-content-length", String.valueOf(length)); - exchange.getResponseHeaders().add("x-ms-blob-type", "blockblob"); - exchange.sendResponseHeaders(HttpStatus.SC_OK, length); - final int bytesToSend = randomIntBetween(0, length - 1); - if (bytesToSend > 0) { - exchange.getResponseBody().write(bytes, rangeStart, bytesToSend); - } - if (randomBoolean()) { - exchange.getResponseBody().flush(); - } - } } diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index 097e96fcd8fdc..05e879547a4b0 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -66,7 +66,7 @@ dependencies { api 'com.google.api:gax:2.17.0' api 'org.threeten:threetenbp:1.4.4' api 'com.google.protobuf:protobuf-java-util:3.20.0' - api 'com.google.protobuf:protobuf-java:3.19.3' + api 'com.google.protobuf:protobuf-java:3.21.7' api 'com.google.code.gson:gson:2.9.0' api 'com.google.api.grpc:proto-google-common-protos:2.8.0' api 'com.google.api.grpc:proto-google-iam-v1:0.12.0' diff --git a/plugins/repository-gcs/licenses/protobuf-java-3.19.3.jar.sha1 b/plugins/repository-gcs/licenses/protobuf-java-3.19.3.jar.sha1 deleted file mode 100644 index 655ecd1f1c1c9..0000000000000 --- a/plugins/repository-gcs/licenses/protobuf-java-3.19.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4b57f1b1b9e281231c3fcfc039ce3021e29ff570 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/protobuf-java-3.21.7.jar.sha1 b/plugins/repository-gcs/licenses/protobuf-java-3.21.7.jar.sha1 new file mode 100644 index 0000000000000..faa673a23ef41 --- /dev/null +++ b/plugins/repository-gcs/licenses/protobuf-java-3.21.7.jar.sha1 @@ -0,0 +1 @@ +96cfc7147192f1de72c3d7d06972155ffb7d180c \ No newline at end of file diff --git a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRetryingInputStream.java 
b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRetryingInputStream.java index 72d3e37466d09..5448799e7f81b 100644 --- a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRetryingInputStream.java +++ b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRetryingInputStream.java @@ -42,7 +42,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.LegacyESVersion; import org.opensearch.SpecialPermission; import org.opensearch.common.SuppressForbidden; import org.opensearch.core.internal.io.IOUtils; @@ -61,7 +60,7 @@ /** * Wrapper around reads from GCS that will retry blob downloads that fail part-way through, resuming from where the failure occurred. * This should be handled by the SDK but it isn't today. This should be revisited in the future (e.g. before removing - * the {@link LegacyESVersion#V_7_0_0} version constant) and removed if the SDK handles retries itself in the future. + * the {@code LegacyESVersion#V_7_0_0} version constant) and removed if the SDK handles retries itself in the future. */ class GoogleCloudStorageRetryingInputStream extends InputStream { diff --git a/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java b/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java index 616a1ae9feb4f..6850b204e0112 100644 --- a/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java +++ b/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java @@ -37,8 +37,8 @@ import com.google.cloud.storage.StorageOptions; import com.sun.net.httpserver.HttpHandler; import fixture.gcs.FakeOAuth2HttpHandler; -import org.apache.http.HttpStatus; +import org.apache.hc.core5.http.HttpStatus; import org.opensearch.common.Nullable; import org.opensearch.common.Strings; import org.opensearch.common.SuppressForbidden; diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 6fd91f78a63e6..f6363f24ff783 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -65,11 +65,11 @@ dependencies { } api 'org.apache.htrace:htrace-core4:4.2.0-incubating' api "org.apache.logging.log4j:log4j-core:${versions.log4j}" - api 'org.apache.avro:avro:1.11.0' + api 'org.apache.avro:avro:1.11.1' api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" api 'com.google.code.gson:gson:2.9.0' runtimeOnly 'com.google.guava:guava:31.1-jre' - api 'com.google.protobuf:protobuf-java:3.21.4' + api 'com.google.protobuf:protobuf-java:3.21.7' api "commons-logging:commons-logging:${versions.commonslogging}" api 'commons-cli:commons-cli:1.5.0' api "commons-codec:commons-codec:${versions.commonscodec}" @@ -85,7 +85,7 @@ dependencies { api 'net.minidev:json-smart:2.4.8' api 'org.apache.zookeeper:zookeeper:3.8.0' api "io.netty:netty-all:${versions.netty}" - implementation 'com.fasterxml.woodstox:woodstox-core:6.3.0' + implementation "com.fasterxml.woodstox:woodstox-core:${versions.woodstox}" implementation 'org.codehaus.woodstox:stax2-api:4.2.1' hdfsFixture project(':test:fixtures:hdfs-fixture') diff --git a/plugins/repository-hdfs/licenses/avro-1.11.0.jar.sha1 
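The GoogleCloudStorageRetryingInputStream hunk above only demotes a `{@link}` to `{@code}` so the `LegacyESVersion#V_7_0_0` constant can eventually be removed, but the class's documented behavior, re-opening a blob download from the last good offset when a read fails part-way, is the same pattern the deleted Azure `sendIncompleteContent` test helper used to exercise. A generic, hedged sketch of that resume-on-failure idea, with a hypothetical `BlobReader` standing in for the GCS/S3 SDK call:

```java
import java.io.IOException;
import java.io.InputStream;

/** Hypothetical supplier of a stream that starts at a given byte offset. */
interface BlobReader {
    InputStream openAt(long offset) throws IOException;
}

/**
 * Sketch of the retry-and-resume pattern used by the repository plugins'
 * *RetryingInputStream classes: on a read failure, reopen the blob at the
 * current position instead of failing the whole download.
 */
class RetryingInputStreamSketch extends InputStream {
    private final BlobReader reader;
    private final int maxRetries;
    private InputStream current;
    private long position; // bytes successfully handed to the caller so far

    RetryingInputStreamSketch(BlobReader reader, int maxRetries) throws IOException {
        this.reader = reader;
        this.maxRetries = maxRetries;
        this.current = reader.openAt(0);
    }

    @Override
    public int read() throws IOException {
        for (int attempt = 0; ; attempt++) {
            try {
                int b = current.read();
                if (b != -1) {
                    position++;
                }
                return b;
            } catch (IOException e) {
                if (attempt >= maxRetries) {
                    throw e;
                }
                // Resume from where the failure occurred, as the class javadoc describes.
                try { current.close(); } catch (IOException ignored) {}
                current = reader.openAt(position);
            }
        }
    }

    @Override
    public void close() throws IOException {
        current.close();
    }
}
```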
b/plugins/repository-hdfs/licenses/avro-1.11.0.jar.sha1 deleted file mode 100644 index 9a0601879a1fc..0000000000000 --- a/plugins/repository-hdfs/licenses/avro-1.11.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2b0c58e5b450d4f4931456952ad9520cae9c896c \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/avro-1.11.1.jar.sha1 b/plugins/repository-hdfs/licenses/avro-1.11.1.jar.sha1 new file mode 100644 index 0000000000000..f03424516b44e --- /dev/null +++ b/plugins/repository-hdfs/licenses/avro-1.11.1.jar.sha1 @@ -0,0 +1 @@ +81af5d4b9bdaaf4ba41bcb0df5241355ec34c630 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/jackson-databind-2.13.4.2.jar.sha1 b/plugins/repository-hdfs/licenses/jackson-databind-2.13.4.2.jar.sha1 new file mode 100644 index 0000000000000..a7782e8aac18d --- /dev/null +++ b/plugins/repository-hdfs/licenses/jackson-databind-2.13.4.2.jar.sha1 @@ -0,0 +1 @@ +325c06bdfeb628cfb80ebaaf1a26cc1eb558a585 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/jackson-databind-2.13.4.jar.sha1 b/plugins/repository-hdfs/licenses/jackson-databind-2.13.4.jar.sha1 deleted file mode 100644 index fcc6491d1f78d..0000000000000 --- a/plugins/repository-hdfs/licenses/jackson-databind-2.13.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -98b0edfa8e4084078f10b7b356c300ded4a71491 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.79.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.79.Final.jar.sha1 deleted file mode 100644 index 724950db96f09..0000000000000 --- a/plugins/repository-hdfs/licenses/netty-all-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1c53cffaa14d61de523b167377843e35807292a7 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.84.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.84.Final.jar.sha1 new file mode 100644 index 0000000000000..14003104a623f --- /dev/null +++ b/plugins/repository-hdfs/licenses/netty-all-4.1.84.Final.jar.sha1 @@ -0,0 +1 @@ +f1a994d19e9971ba6f1b8abf4ebf912a21cec983 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/protobuf-java-3.21.4.jar.sha1 b/plugins/repository-hdfs/licenses/protobuf-java-3.21.4.jar.sha1 deleted file mode 100644 index f232c9a449547..0000000000000 --- a/plugins/repository-hdfs/licenses/protobuf-java-3.21.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9947febd7a6d0695726c78f603a149b7b7c108e0 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/protobuf-java-3.21.7.jar.sha1 b/plugins/repository-hdfs/licenses/protobuf-java-3.21.7.jar.sha1 new file mode 100644 index 0000000000000..faa673a23ef41 --- /dev/null +++ b/plugins/repository-hdfs/licenses/protobuf-java-3.21.7.jar.sha1 @@ -0,0 +1 @@ +96cfc7147192f1de72c3d7d06972155ffb7d180c \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/woodstox-core-6.3.0.jar.sha1 b/plugins/repository-hdfs/licenses/woodstox-core-6.3.0.jar.sha1 deleted file mode 100644 index ebd85df98b39e..0000000000000 --- a/plugins/repository-hdfs/licenses/woodstox-core-6.3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -03c1df4164b107ee22ad4f24bd453ec78a0efd95 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/woodstox-core-6.4.0.jar.sha1 b/plugins/repository-hdfs/licenses/woodstox-core-6.4.0.jar.sha1 new file mode 100644 index 0000000000000..cac5f37205956 --- /dev/null +++ b/plugins/repository-hdfs/licenses/woodstox-core-6.4.0.jar.sha1 @@ -0,0 +1 @@ +c47579857bbf12c85499f431d4ecf27d77976b7c \ No newline 
at end of file diff --git a/plugins/repository-s3/licenses/jackson-databind-2.13.4.2.jar.sha1 b/plugins/repository-s3/licenses/jackson-databind-2.13.4.2.jar.sha1 new file mode 100644 index 0000000000000..a7782e8aac18d --- /dev/null +++ b/plugins/repository-s3/licenses/jackson-databind-2.13.4.2.jar.sha1 @@ -0,0 +1 @@ +325c06bdfeb628cfb80ebaaf1a26cc1eb558a585 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jackson-databind-2.13.4.jar.sha1 b/plugins/repository-s3/licenses/jackson-databind-2.13.4.jar.sha1 deleted file mode 100644 index fcc6491d1f78d..0000000000000 --- a/plugins/repository-s3/licenses/jackson-databind-2.13.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -98b0edfa8e4084078f10b7b356c300ded4a71491 \ No newline at end of file diff --git a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java index b35d4080a413a..3c3f2887469b3 100644 --- a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -36,8 +36,6 @@ import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; import fixture.s3.S3HttpHandler; -import org.opensearch.action.ActionRunnable; -import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.service.ClusterService; @@ -45,29 +43,21 @@ import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStore; -import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.regex.Regex; import org.opensearch.common.settings.MockSecureSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.NamedXContentRegistry; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.plugins.Plugin; -import org.opensearch.repositories.RepositoriesService; -import org.opensearch.repositories.RepositoryData; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.repositories.blobstore.OpenSearchMockAPIBasedRepositoryIntegTestCase; -import org.opensearch.snapshots.SnapshotId; -import org.opensearch.snapshots.SnapshotsService; import org.opensearch.snapshots.mockstore.BlobStoreWrapper; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.threadpool.ThreadPool; import java.io.IOException; -import java.io.InputStream; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -75,8 +65,6 @@ import java.util.Map; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.startsWith; @SuppressForbidden(reason = "this test uses a HttpServer to emulate an S3 endpoint") @@ -84,8 +72,6 @@ @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST) public class S3BlobStoreRepositoryTests extends OpenSearchMockAPIBasedRepositoryIntegTestCase { - 
private static final TimeValue TEST_COOLDOWN_PERIOD = TimeValue.timeValueSeconds(10L); - private String region; private String signerOverride; @@ -158,56 +144,6 @@ protected Settings nodeSettings(int nodeOrdinal) { return builder.build(); } - public void testEnforcedCooldownPeriod() throws IOException { - final String repoName = createRepository( - randomName(), - Settings.builder().put(repositorySettings()).put(S3Repository.COOLDOWN_PERIOD.getKey(), TEST_COOLDOWN_PERIOD).build() - ); - - final SnapshotId fakeOldSnapshot = client().admin() - .cluster() - .prepareCreateSnapshot(repoName, "snapshot-old") - .setWaitForCompletion(true) - .setIndices() - .get() - .getSnapshotInfo() - .snapshotId(); - final RepositoriesService repositoriesService = internalCluster().getCurrentClusterManagerNodeInstance(RepositoriesService.class); - final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(repoName); - final RepositoryData repositoryData = getRepositoryData(repository); - final RepositoryData modifiedRepositoryData = repositoryData.withVersions( - Collections.singletonMap(fakeOldSnapshot, SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION.minimumCompatibilityVersion()) - ); - final BytesReference serialized = BytesReference.bytes( - modifiedRepositoryData.snapshotsToXContent(XContentFactory.jsonBuilder(), SnapshotsService.OLD_SNAPSHOT_FORMAT) - ); - PlainActionFuture.get(f -> repository.threadPool().generic().execute(ActionRunnable.run(f, () -> { - try (InputStream stream = serialized.streamInput()) { - repository.blobStore() - .blobContainer(repository.basePath()) - .writeBlobAtomic( - BlobStoreRepository.INDEX_FILE_PREFIX + modifiedRepositoryData.getGenId(), - stream, - serialized.length(), - true - ); - } - }))); - - final String newSnapshotName = "snapshot-new"; - final long beforeThrottledSnapshot = repository.threadPool().relativeTimeInNanos(); - client().admin().cluster().prepareCreateSnapshot(repoName, newSnapshotName).setWaitForCompletion(true).setIndices().get(); - assertThat(repository.threadPool().relativeTimeInNanos() - beforeThrottledSnapshot, greaterThan(TEST_COOLDOWN_PERIOD.getNanos())); - - final long beforeThrottledDelete = repository.threadPool().relativeTimeInNanos(); - client().admin().cluster().prepareDeleteSnapshot(repoName, newSnapshotName).get(); - assertThat(repository.threadPool().relativeTimeInNanos() - beforeThrottledDelete, greaterThan(TEST_COOLDOWN_PERIOD.getNanos())); - - final long beforeFastDelete = repository.threadPool().relativeTimeInNanos(); - client().admin().cluster().prepareDeleteSnapshot(repoName, fakeOldSnapshot.getName()).get(); - assertThat(repository.threadPool().relativeTimeInNanos() - beforeFastDelete, lessThan(TEST_COOLDOWN_PERIOD.getNanos())); - } - /** * S3RepositoryPlugin that allows to disable chunked encoding and to set a low threshold between single upload and multipart upload. 
*/ diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java index c8377949a6842..f80e5743edbc0 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java @@ -35,10 +35,8 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.ActionListener; -import org.opensearch.action.ActionRunnable; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.metadata.RepositoryMetadata; @@ -52,7 +50,6 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.unit.ByteSizeUnit; import org.opensearch.common.unit.ByteSizeValue; -import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.NamedXContentRegistry; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.monitor.jvm.JvmInfo; @@ -62,13 +59,10 @@ import org.opensearch.repositories.blobstore.MeteredBlobStoreRepository; import org.opensearch.snapshots.SnapshotId; import org.opensearch.snapshots.SnapshotInfo; -import org.opensearch.snapshots.SnapshotsService; import org.opensearch.threadpool.Scheduler; -import org.opensearch.threadpool.ThreadPool; import java.util.Collection; import java.util.Map; -import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; @@ -182,24 +176,6 @@ class S3Repository extends MeteredBlobStoreRepository { static final Setting CLIENT_NAME = new Setting<>("client", "default", Function.identity()); - /** - * Artificial delay to introduce after a snapshot finalization or delete has finished so long as the repository is still using the - * backwards compatible snapshot format from before - * {@link org.opensearch.snapshots.SnapshotsService#SHARD_GEN_IN_REPO_DATA_VERSION} ({@link LegacyESVersion#V_7_6_0}). - * This delay is necessary so that the eventually consistent nature of AWS S3 does not randomly result in repository corruption when - * doing repository operations in rapid succession on a repository in the old metadata format. - * This setting should not be adjusted in production when working with an AWS S3 backed repository. Doing so risks the repository - * becoming silently corrupted. To get rid of this waiting period, either create a new S3 repository or remove all snapshots older than - * {@link LegacyESVersion#V_7_6_0} from the repository which will trigger an upgrade of the repository metadata to the new - * format and disable the cooldown period. - */ - static final Setting COOLDOWN_PERIOD = Setting.timeSetting( - "cooldown_period", - new TimeValue(3, TimeUnit.MINUTES), - new TimeValue(0, TimeUnit.MILLISECONDS), - Setting.Property.Dynamic - ); - /** * Specifies the path within bucket to repository data. Defaults to root directory. */ @@ -223,12 +199,6 @@ class S3Repository extends MeteredBlobStoreRepository { private final RepositoryMetadata repositoryMetadata; - /** - * Time period to delay repository operations by after finalizing or deleting a snapshot. - * See {@link #COOLDOWN_PERIOD} for details. 
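The `COOLDOWN_PERIOD` machinery being deleted here worked by wrapping a completion listener so that its result was delivered only after a fixed delay on the snapshot thread pool, throttling back-to-back repository operations against the legacy metadata format. A hedged sketch of that wrap-with-delay pattern, using plain JDK types in place of OpenSearch's `ActionListener` and `ThreadPool`:

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;

/**
 * Sketch of the pattern the deleted delayedListener implemented: defer a
 * callback's completion by a fixed cooldown so rapid-fire operations
 * cannot overlap. JDK types stand in for ActionListener/ThreadPool.
 */
final class DelayedCallbacks {
    private DelayedCallbacks() {}

    static <T> Consumer<T> delayed(Consumer<T> delegate, long cooldownMillis, ScheduledExecutorService scheduler) {
        // On completion, schedule the real callback after the cooldown
        // instead of invoking it inline.
        return response -> scheduler.schedule(() -> delegate.accept(response), cooldownMillis, TimeUnit.MILLISECONDS);
    }

    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        Consumer<String> listener = delayed(r -> System.out.println("completed: " + r), 3_000, scheduler);
        listener.accept("snapshot-finalized"); // prints roughly three seconds later
        Thread.sleep(3_500);
        scheduler.shutdown();
    }
}
```

With the legacy snapshot format no longer supported, the delay serves no purpose, which is why both the setting and the wrapper are removed outright rather than deprecated.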
- */ - private final TimeValue coolDown; - /** * Constructs an s3 backed repository */ @@ -296,8 +266,6 @@ class S3Repository extends MeteredBlobStoreRepository { ); } - coolDown = COOLDOWN_PERIOD.get(metadata.settings()); - logger.debug( "using bucket [{}], chunk_size [{}], server_side_encryption [{}], buffer_size [{}], cannedACL [{}], storageClass [{}]", bucket, @@ -334,9 +302,6 @@ public void finalizeSnapshot( Function stateTransformer, ActionListener listener ) { - if (SnapshotsService.useShardGenerations(repositoryMetaVersion) == false) { - listener = delayedListener(listener); - } super.finalizeSnapshot( shardGenerations, repositoryStateId, @@ -355,59 +320,9 @@ public void deleteSnapshots( Version repositoryMetaVersion, ActionListener listener ) { - if (SnapshotsService.useShardGenerations(repositoryMetaVersion) == false) { - listener = delayedListener(listener); - } super.deleteSnapshots(snapshotIds, repositoryStateId, repositoryMetaVersion, listener); } - /** - * Wraps given listener such that it is executed with a delay of {@link #coolDown} on the snapshot thread-pool after being invoked. - * See {@link #COOLDOWN_PERIOD} for details. - */ - private ActionListener delayedListener(ActionListener listener) { - final ActionListener wrappedListener = ActionListener.runBefore(listener, () -> { - final Scheduler.Cancellable cancellable = finalizationFuture.getAndSet(null); - assert cancellable != null; - }); - return new ActionListener() { - @Override - public void onResponse(T response) { - logCooldownInfo(); - final Scheduler.Cancellable existing = finalizationFuture.getAndSet( - threadPool.schedule( - ActionRunnable.wrap(wrappedListener, l -> l.onResponse(response)), - coolDown, - ThreadPool.Names.SNAPSHOT - ) - ); - assert existing == null : "Already have an ongoing finalization " + finalizationFuture; - } - - @Override - public void onFailure(Exception e) { - logCooldownInfo(); - final Scheduler.Cancellable existing = finalizationFuture.getAndSet( - threadPool.schedule(ActionRunnable.wrap(wrappedListener, l -> l.onFailure(e)), coolDown, ThreadPool.Names.SNAPSHOT) - ); - assert existing == null : "Already have an ongoing finalization " + finalizationFuture; - } - }; - } - - private void logCooldownInfo() { - logger.info( - "Sleeping for [{}] after modifying repository [{}] because it contains snapshots older than version [{}]" - + " and therefore is using a backwards compatible metadata format that requires this cooldown period to avoid " - + "repository corruption. 
To get rid of this message and move to the new repository metadata format, either remove " - + "all snapshots older than version [{}] from the repository or create a new repository at an empty location.", - coolDown, - metadata.name(), - SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION, - SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION - ); - } - @Override protected S3BlobStore createBlobStore() { return new S3BlobStore(service, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass, repositoryMetadata); diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java index 388f5b8d74a2b..f751d63232f79 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java @@ -40,7 +40,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.LegacyESVersion; import org.opensearch.core.internal.io.IOUtils; import java.io.IOException; @@ -52,7 +51,7 @@ /** * Wrapper around an S3 object that will retry the {@link GetObjectRequest} if the download fails part-way through, resuming from where * the failure occurred. This should be handled by the SDK but it isn't today. This should be revisited in the future (e.g. before removing - * the {@link LegacyESVersion#V_7_0_0} version constant) and removed when the SDK handles retries itself. + * the {@code LegacyESVersion#V_7_0_0} version constant) and removed when the SDK handles retries itself. * * See https://github.com/aws/aws-sdk-java/issues/856 for the related SDK issue */ diff --git a/plugins/transport-nio/build.gradle b/plugins/transport-nio/build.gradle index a7e8c42a4e2d3..c5b401de60c8c 100644 --- a/plugins/transport-nio/build.gradle +++ b/plugins/transport-nio/build.gradle @@ -83,6 +83,12 @@ thirdPartyAudit { 'org.bouncycastle.cert.X509v3CertificateBuilder', 'org.bouncycastle.cert.jcajce.JcaX509CertificateConverter', 'org.bouncycastle.operator.jcajce.JcaContentSignerBuilder', + 'org.bouncycastle.openssl.PEMEncryptedKeyPair', + 'org.bouncycastle.openssl.PEMParser', + 'org.bouncycastle.openssl.jcajce.JcaPEMKeyConverter', + 'org.bouncycastle.openssl.jcajce.JceOpenSSLPKCS8DecryptorProviderBuilder', + 'org.bouncycastle.openssl.jcajce.JcePEMDecryptorProviderBuilder', + 'org.bouncycastle.pkcs.PKCS8EncryptedPrivateKeyInfo', // from io.netty.handler.ssl.JettyNpnSslEngine (netty) 'org.eclipse.jetty.npn.NextProtoNego$ClientProvider', diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.79.Final.jar.sha1 deleted file mode 100644 index 8e9e4d0b7f754..0000000000000 --- a/plugins/transport-nio/licenses/netty-buffer-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6c014412b599489b1db27c6bc08d8a46da94e397 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.84.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.84.Final.jar.sha1 new file mode 100644 index 0000000000000..25a6f9ecf50b6 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-buffer-4.1.84.Final.jar.sha1 @@ -0,0 +1 @@ +a6b8cf57cfffc28d8e33f8175788a99401f576d9 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.79.Final.jar.sha1 
b/plugins/transport-nio/licenses/netty-codec-4.1.79.Final.jar.sha1 deleted file mode 100644 index c0920231d79a8..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -18f5b02af7ca611978bc28f2cb58cbb3b9b0f0ef \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.84.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.84.Final.jar.sha1 new file mode 100644 index 0000000000000..032a8f1ed954e --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-4.1.84.Final.jar.sha1 @@ -0,0 +1 @@ +4f60f56c4cd17db114f01dc64aa465a2905240f5 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.79.Final.jar.sha1 deleted file mode 100644 index a3f650da5abbd..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-http-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -882c70bc0a30a98bf3ce477f043e967ac026044c \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.84.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.84.Final.jar.sha1 new file mode 100644 index 0000000000000..1e985edfce65e --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-http-4.1.84.Final.jar.sha1 @@ -0,0 +1 @@ +78628e78087d3da6c3d2b22aa67798d3f3adcd68 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.79.Final.jar.sha1 deleted file mode 100644 index faa7b099406a3..0000000000000 --- a/plugins/transport-nio/licenses/netty-common-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2814bd465731355323aba0fdd22163bfce638a75 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.84.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.84.Final.jar.sha1 new file mode 100644 index 0000000000000..beaa2cce654c3 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-common-4.1.84.Final.jar.sha1 @@ -0,0 +1 @@ +90c84ec7f1108ae164810cf46694a5ec7ce738fc \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.79.Final.jar.sha1 deleted file mode 100644 index 8e314f164da69..0000000000000 --- a/plugins/transport-nio/licenses/netty-handler-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2dc22423c8ed19906615fb936a5fcb7db14a4e6c \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.84.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.84.Final.jar.sha1 new file mode 100644 index 0000000000000..afd28b451ba12 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-handler-4.1.84.Final.jar.sha1 @@ -0,0 +1 @@ +69cd93e2c321f04fc0a18eb5af519565373d0873 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.79.Final.jar.sha1 deleted file mode 100644 index af550935bb911..0000000000000 --- a/plugins/transport-nio/licenses/netty-resolver-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -55ecb1ff4464b56564a90824a741c3911264aaa4 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.84.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.84.Final.jar.sha1 new file mode 100644 index 0000000000000..07aa37fc76524 --- /dev/null +++ 
b/plugins/transport-nio/licenses/netty-resolver-4.1.84.Final.jar.sha1 @@ -0,0 +1 @@ +b6f808e331cf843d2a7ff62042cf9b5343e2ff25 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.79.Final.jar.sha1 deleted file mode 100644 index c6e18efb3ad3d..0000000000000 --- a/plugins/transport-nio/licenses/netty-transport-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6cc2b49749b4fbcc39c687027e04e65e857552a9 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.84.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.84.Final.jar.sha1 new file mode 100644 index 0000000000000..5e12ada3f5c10 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-transport-4.1.84.Final.jar.sha1 @@ -0,0 +1 @@ +acd9947d0a951b1f7021c7adc393df936b1ecbf0 \ No newline at end of file diff --git a/qa/ccs-unavailable-clusters/src/test/java/org/opensearch/search/CrossClusterSearchUnavailableClusterIT.java b/qa/ccs-unavailable-clusters/src/test/java/org/opensearch/search/CrossClusterSearchUnavailableClusterIT.java index 5691154882c9f..ebed71d90df9a 100644 --- a/qa/ccs-unavailable-clusters/src/test/java/org/opensearch/search/CrossClusterSearchUnavailableClusterIT.java +++ b/qa/ccs-unavailable-clusters/src/test/java/org/opensearch/search/CrossClusterSearchUnavailableClusterIT.java @@ -32,9 +32,9 @@ package org.opensearch.search; -import org.apache.http.HttpEntity; -import org.apache.http.entity.ContentType; -import org.apache.http.nio.entity.NStringEntity; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.io.entity.StringEntity; import org.apache.lucene.search.TotalHits; import org.opensearch.OpenSearchException; import org.opensearch.Version; @@ -343,7 +343,7 @@ private static HttpEntity buildUpdateSettingsRequestBody(Map set builder.endObject(); requestBody = Strings.toString(builder); } - return new NStringEntity(requestBody, ContentType.APPLICATION_JSON); + return new StringEntity(requestBody, ContentType.APPLICATION_JSON); } private static class HighLevelClient extends RestHighLevelClient { diff --git a/qa/die-with-dignity/src/javaRestTest/java/org/opensearch/qa/die_with_dignity/DieWithDignityIT.java b/qa/die-with-dignity/src/javaRestTest/java/org/opensearch/qa/die_with_dignity/DieWithDignityIT.java index ec891ef8d44ef..aedb05e8dcbd5 100644 --- a/qa/die-with-dignity/src/javaRestTest/java/org/opensearch/qa/die_with_dignity/DieWithDignityIT.java +++ b/qa/die-with-dignity/src/javaRestTest/java/org/opensearch/qa/die_with_dignity/DieWithDignityIT.java @@ -98,15 +98,6 @@ public void testDieWithDignity() throws Exception { } } - private boolean containsAll(String line, String... 
subStrings) { - for (String subString : subStrings) { - if (line.matches(subString) == false) { - return false; - } - } - return true; - } - private void debugLogs(Path path) throws IOException { try (BufferedReader reader = Files.newBufferedReader(path)) { reader.lines().forEach(line -> logger.info(line)); diff --git a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/FullClusterRestartIT.java index 714d8a252579f..48b2460f61ff8 100644 --- a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/FullClusterRestartIT.java @@ -32,8 +32,6 @@ package org.opensearch.upgrades; -import org.apache.http.util.EntityUtils; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.client.Request; import org.opensearch.client.Response; @@ -53,6 +51,8 @@ import org.opensearch.test.XContentTestUtils; import org.opensearch.test.rest.OpenSearchRestTestCase; import org.opensearch.test.rest.yaml.ObjectPath; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.junit.Before; import java.io.IOException; @@ -71,7 +71,6 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.singletonList; import static java.util.Collections.singletonMap; -import static org.opensearch.cluster.metadata.IndexNameExpressionResolver.SYSTEM_INDEX_ENFORCEMENT_VERSION; import static org.opensearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING; import static org.opensearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; @@ -286,7 +285,7 @@ public void testClusterState() throws Exception { } - public void testShrink() throws IOException { + public void testShrink() throws IOException, NumberFormatException, ParseException { String shrunkenIndex = index + "_shrunk"; int numDocs; if (isRunningAgainstOldCluster()) { @@ -329,9 +328,6 @@ public void testShrink() throws IOException { client().performRequest(updateSettingsRequest); Request shrinkIndexRequest = new Request("PUT", "/" + index + "/_shrink/" + shrunkenIndex); - if (getOldClusterVersion().before(LegacyESVersion.V_7_0_0)) { - shrinkIndexRequest.addParameter("copy_settings", "true"); - } shrinkIndexRequest.setJsonEntity("{\"settings\": {\"index.number_of_shards\": 1}}"); client().performRequest(shrinkIndexRequest); @@ -359,7 +355,7 @@ public void testShrink() throws IOException { assertEquals(numDocs, totalHits); } - public void testShrinkAfterUpgrade() throws IOException { + public void testShrinkAfterUpgrade() throws IOException, ParseException { String shrunkenIndex = index + "_shrunk"; int numDocs; if (isRunningAgainstOldCluster()) { @@ -447,7 +443,7 @@ public void testShrinkAfterUpgrade() throws IOException { *
<li>Make sure the document count is correct</li> * </ol> */ - public void testRollover() throws IOException { + public void testRollover() throws IOException, ParseException { if (isRunningAgainstOldCluster()) { Request createIndex = new Request("PUT", "/" + index + "-000001"); createIndex.setJsonEntity("{" @@ -529,7 +525,7 @@ void assertBasicSearchWorks(int count) throws IOException { } } - void assertAllSearchWorks(int count) throws IOException { + void assertAllSearchWorks(int count) throws IOException, ParseException { logger.info("--> testing _all search"); Map response = entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_search"))); assertNoFailures(response); @@ -626,14 +622,14 @@ void assertStoredBinaryFields(int count) throws Exception { } } - static String toStr(Response response) throws IOException { + static String toStr(Response response) throws IOException, ParseException { return EntityUtils.toString(response.getEntity()); } /** * Tests that a single document survives. Super basic smoke test. */ - public void testSingleDoc() throws IOException { + public void testSingleDoc() throws IOException, ParseException { String docLocation = "/" + index + "/" + type + "/1"; String doc = "{\"test\": \"test\"}"; @@ -795,7 +791,7 @@ public void testRecovery() throws Exception { * old and new versions. All of the snapshots include an index, a template, * and some routing configuration. */ - public void testSnapshotRestore() throws IOException { + public void testSnapshotRestore() throws IOException, ParseException { int count; if (isRunningAgainstOldCluster()) { // Create the index @@ -1006,12 +1002,8 @@ public void testClosedIndices() throws Exception { closeIndex(index); } - if (getOldClusterVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - ensureGreenLongWait(index); - assertClosedIndex(index, true); - } else { - assertClosedIndex(index, false); - } + ensureGreenLongWait(index); + assertClosedIndex(index, true); if (isRunningAgainstOldCluster() == false) { openIndex(index); @@ -1067,7 +1059,7 @@ private void assertClosedIndex(final String index, final boolean checkRoutingTab { } } - private void checkSnapshot(final String snapshotName, final int count, final Version tookOnVersion) throws IOException { + private void checkSnapshot(final String snapshotName, final int count, final Version tookOnVersion) throws IOException, ParseException { // Check the snapshot metadata, especially the version Request listSnapshotRequest = new Request("GET", "/_snapshot/repo/" + snapshotName); Map listSnapshotResponse = entityAsMap(client().performRequest(listSnapshotRequest)); @@ -1186,7 +1178,7 @@ private void indexDocument(String id) throws IOException { assertOK(client().performRequest(indexRequest)); } - private int countOfIndexedRandomDocuments() throws IOException { + private int countOfIndexedRandomDocuments() throws IOException, NumberFormatException, ParseException { return Integer.parseInt(loadInfoDocument(index + "_count")); } @@ -1201,7 +1193,7 @@ private void saveInfoDocument(String id, String value) throws IOException { client().performRequest(request); } - private String loadInfoDocument(String id) throws IOException { + private String loadInfoDocument(String id) throws IOException, ParseException { Request request = new Request("GET", "/info/_doc/" + id); request.addParameter("filter_path", "_source"); String doc = toStr(client().performRequest(request)); @@ -1253,7 +1245,7 @@ public void testPeerRecoveryRetentionLeases() throws Exception { settings.startObject("settings"); 
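The widened `throws` clauses above, like the `NStringEntity`-to-`StringEntity` swap in CrossClusterSearchUnavailableClusterIT, all stem from the Apache HttpComponents 4-to-5 migration: HttpCore 5's `EntityUtils.toString` adds a checked `ParseException`, and `HttpHost.create` now declares `URISyntaxException`. A small sketch of the three idioms, assuming only `httpcore5` on the classpath (class and method names here are made up):

```java
import java.io.IOException;
import java.net.URISyntaxException;

import org.apache.hc.core5.http.ContentType;
import org.apache.hc.core5.http.HttpEntity;
import org.apache.hc.core5.http.HttpHost;
import org.apache.hc.core5.http.ParseException;
import org.apache.hc.core5.http.io.entity.EntityUtils;
import org.apache.hc.core5.http.io.entity.StringEntity;

/** Illustrative HttpComponents 4 -> 5 migration idioms. */
final class Hc5MigrationSketch {
    private Hc5MigrationSketch() {}

    /** HC5 drops NStringEntity; StringEntity plus an explicit ContentType replaces it. */
    static HttpEntity jsonBody(String json) {
        return new StringEntity(json, ContentType.APPLICATION_JSON);
    }

    /** HC5's EntityUtils.toString declares ParseException on top of IOException,
     *  which is why the test helpers above now propagate both. */
    static String bodyAsString(HttpEntity entity) throws IOException, ParseException {
        return EntityUtils.toString(entity);
    }

    /** HC5's HttpHost.create throws URISyntaxException; wrapping it in IOException
     *  keeps IOException-only call sites compiling, as the Zen2RestApiIT hunk does. */
    static HttpHost host(String spec) throws IOException {
        try {
            return HttpHost.create(spec);
        } catch (URISyntaxException ex) {
            throw new IOException(ex);
        }
    }
}
```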
settings.field("number_of_shards", between(1, 5)); settings.field("number_of_replicas", between(0, 1)); - if (randomBoolean() || getOldClusterVersion().before(LegacyESVersion.V_7_0_0)) { + if (randomBoolean()) { // this is the default after v7.0.0, but is required before that settings.field("soft_deletes.enabled", true); } @@ -1436,10 +1428,6 @@ public void testSystemIndexMetadataIsUpgraded() throws Exception { // make sure .tasks index exists Request getTasksIndex = new Request("GET", "/.tasks"); getTasksIndex.addParameter("allow_no_indices", "false"); - if (getOldClusterVersion().before(LegacyESVersion.V_7_0_0)) { - getTasksIndex.addParameter("include_type_name", "false"); - } - getTasksIndex.setOptions(expectVersionSpecificWarnings(v -> { v.current(systemIndexWarning); v.compatible(systemIndexWarning); @@ -1451,20 +1439,6 @@ public void testSystemIndexMetadataIsUpgraded() throws Exception { throw new AssertionError(".tasks index does not exist yet"); } }); - - // If we are on 7.x create an alias that includes both a system index and a non-system index so we can be sure it gets - // upgraded properly. If we're already on 8.x, skip this part of the test. - if (minimumNodeVersion().before(SYSTEM_INDEX_ENFORCEMENT_VERSION)) { - // Create an alias to make sure it gets upgraded properly - Request putAliasRequest = new Request("POST", "/_aliases"); - putAliasRequest.setJsonEntity("{\n" + - " \"actions\": [\n" + - " {\"add\": {\"index\": \".tasks\", \"alias\": \"test-system-alias\"}},\n" + - " {\"add\": {\"index\": \"test_index_reindex\", \"alias\": \"test-system-alias\"}}\n" + - " ]\n" + - "}"); - assertThat(client().performRequest(putAliasRequest).getStatusLine().getStatusCode(), is(200)); - } } else { assertBusy(() -> { Request clusterStateRequest = new Request("GET", "/_cluster/state/metadata"); @@ -1479,21 +1453,8 @@ public void testSystemIndexMetadataIsUpgraded() throws Exception { XContentTestUtils.JsonMapView tasksIndex = new XContentTestUtils.JsonMapView((Map) indices.get(".tasks")); assertThat(tasksIndex.get("system"), is(true)); - // If .tasks was created in a 7.x version, it should have an alias on it that we need to make sure got upgraded properly. 
final String tasksCreatedVersionString = tasksIndex.get("settings.index.version.created"); assertThat(tasksCreatedVersionString, notNullValue()); - final Version tasksCreatedVersion = Version.fromId(Integer.parseInt(tasksCreatedVersionString)); - if (tasksCreatedVersion.before(SYSTEM_INDEX_ENFORCEMENT_VERSION)) { - // Verify that the alias survived the upgrade - Request getAliasRequest = new Request("GET", "/_alias/test-system-alias"); - getAliasRequest.setOptions(expectVersionSpecificWarnings(v -> { - v.current(systemIndexWarning); - v.compatible(systemIndexWarning); - })); - Map aliasResponse = entityAsMap(client().performRequest(getAliasRequest)); - assertThat(aliasResponse, hasKey(".tasks")); - assertThat(aliasResponse, hasKey("test_index_reindex")); - } }); } } diff --git a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java index de042cb2b7634..44ed426e13782 100644 --- a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java +++ b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java @@ -32,7 +32,8 @@ package org.opensearch.upgrades; -import org.apache.http.util.EntityUtils; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.opensearch.LegacyESVersion; import org.opensearch.client.Request; import org.opensearch.client.Response; @@ -46,6 +47,7 @@ import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.index.mapper.MapperService; import org.opensearch.index.query.BoolQueryBuilder; import org.opensearch.index.query.ConstantScoreQueryBuilder; import org.opensearch.index.query.DisMaxQueryBuilder; @@ -157,7 +159,7 @@ private static void addCandidate(String querySource, QueryBuilder expectedQb) { } public void testQueryBuilderBWC() throws Exception { - final String type = getOldClusterVersion().before(LegacyESVersion.V_7_0_0) ? 
"doc" : "_doc"; + final String type = MapperService.SINGLE_MAPPING_NAME; String index = "queries"; if (isRunningAgainstOldCluster()) { XContentBuilder mappingsAndSettings = jsonBuilder(); @@ -234,7 +236,7 @@ public void testQueryBuilderBWC() throws Exception { } } - private static Map toMap(Response response) throws IOException { + private static Map toMap(Response response) throws IOException, ParseException { return toMap(EntityUtils.toString(response.getEntity())); } diff --git a/qa/mixed-cluster/build.gradle b/qa/mixed-cluster/build.gradle index 7a2c37639b93e..90aeb8faadf80 100644 --- a/qa/mixed-cluster/build.gradle +++ b/qa/mixed-cluster/build.gradle @@ -38,6 +38,10 @@ apply plugin: 'opensearch.standalone-test' apply from : "$rootDir/gradle/bwc-test.gradle" apply plugin: 'opensearch.rest-resources' +dependencies { + testImplementation project(":client:rest-high-level") +} + restResources { restTests { includeCore '*' diff --git a/qa/mixed-cluster/src/test/java/org/opensearch/backwards/ExceptionIT.java b/qa/mixed-cluster/src/test/java/org/opensearch/backwards/ExceptionIT.java index f85a94cc9f556..35f530f22a141 100644 --- a/qa/mixed-cluster/src/test/java/org/opensearch/backwards/ExceptionIT.java +++ b/qa/mixed-cluster/src/test/java/org/opensearch/backwards/ExceptionIT.java @@ -8,7 +8,9 @@ package org.opensearch.backwards; -import org.apache.http.util.EntityUtils; +import org.apache.hc.core5.http.HttpStatus; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.opensearch.Version; import org.opensearch.client.Node; import org.opensearch.client.Request; @@ -21,8 +23,6 @@ import java.util.Collections; import java.util.Map; -import static org.apache.http.HttpStatus.SC_NOT_FOUND; - public class ExceptionIT extends OpenSearchRestTestCase { public void testOpensearchException() throws Exception { logClusterNodes(); @@ -38,13 +38,13 @@ public void testOpensearchException() throws Exception { } catch (ResponseException e) { logger.debug(e.getMessage()); Response response = e.getResponse(); - assertEquals(SC_NOT_FOUND, response.getStatusLine().getStatusCode()); + assertEquals(HttpStatus.SC_NOT_FOUND, response.getStatusLine().getStatusCode()); assertEquals("no_such_index", ObjectPath.createFromResponse(response).evaluate("error.index")); } } } - private void logClusterNodes() throws IOException { + private void logClusterNodes() throws IOException, ParseException { ObjectPath objectPath = ObjectPath.createFromResponse(client().performRequest(new Request("GET", "_nodes"))); Map nodes = objectPath.evaluate("nodes"); // As of 2.0, 'GET _cat/master' API is deprecated to promote inclusive language. 
diff --git a/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java b/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java index 69c4f0110a3ff..4746ad35a9406 100644 --- a/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java +++ b/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java @@ -31,7 +31,7 @@ package org.opensearch.backwards; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.client.Request; @@ -50,6 +50,7 @@ import org.opensearch.test.rest.yaml.ObjectPath; import java.io.IOException; +import java.net.URISyntaxException; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; @@ -416,7 +417,7 @@ private List<Shard> buildShards(String index, Nodes nodes, RestClient client) th return shards; } - private Nodes buildNodeAndVersions() throws IOException { + private Nodes buildNodeAndVersions() throws IOException, URISyntaxException { Response response = client().performRequest(new Request("GET", "_nodes")); ObjectPath objectPath = ObjectPath.createFromResponse(response); Map<String, Object> nodesAsMap = objectPath.evaluate("nodes"); @@ -426,7 +427,7 @@ private Nodes buildNodeAndVersions() throws IOException { id, objectPath.evaluate("nodes." + id + ".name"), Version.fromString(objectPath.evaluate("nodes." + id + ".version")), - HttpHost.create(objectPath.evaluate("nodes." + id + ".http.publish_address")))); + HttpHost.create((String)objectPath.evaluate("nodes." + id + ".http.publish_address")))); } response = client().performRequest(new Request("GET", "_cluster/state")); nodes.setClusterManagerNodeId(ObjectPath.createFromResponse(response).evaluate("master_node")); diff --git a/qa/mixed-cluster/src/test/java/org/opensearch/backwards/SearchingIT.java b/qa/mixed-cluster/src/test/java/org/opensearch/backwards/SearchingIT.java new file mode 100644 index 0000000000000..839fe01ed81d6 --- /dev/null +++ b/qa/mixed-cluster/src/test/java/org/opensearch/backwards/SearchingIT.java @@ -0,0 +1,78 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.backwards; + +import org.apache.hc.core5.http.HttpHost; +import org.opensearch.LegacyESVersion; +import org.opensearch.Version; +import org.opensearch.action.get.MultiGetRequest; +import org.opensearch.action.get.MultiGetResponse; +import org.opensearch.backwards.IndexingIT.Nodes; +import org.opensearch.client.Request; +import org.opensearch.client.RequestOptions; +import org.opensearch.client.Response; +import org.opensearch.client.ResponseException; +import org.opensearch.client.RestClient; +import org.opensearch.client.RestHighLevelClient; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.Strings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.common.xcontent.support.XContentMapValues; +import org.opensearch.index.seqno.SeqNoStats; +import org.opensearch.rest.RestStatus; +import org.opensearch.test.rest.OpenSearchRestTestCase; +import org.opensearch.test.rest.yaml.ObjectPath; + +import java.io.IOException; +import java.net.URISyntaxException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; + +public class SearchingIT extends OpenSearchRestTestCase { + public void testMultiGet() throws Exception { + final Set<HttpHost> nodes = buildNodes(); + + final MultiGetRequest multiGetRequest = new MultiGetRequest(); + multiGetRequest.add("index", "id1"); + + try (RestHighLevelClient client = new RestHighLevelClient(RestClient.builder(nodes.toArray(HttpHost[]::new)))) { + MultiGetResponse response = client.mget(multiGetRequest, RequestOptions.DEFAULT); + assertEquals(1, response.getResponses().length); + + assertTrue(response.getResponses()[0].isFailed()); + assertNotNull(response.getResponses()[0].getFailure()); + assertEquals(response.getResponses()[0].getFailure().getId(), "id1"); + assertEquals(response.getResponses()[0].getFailure().getIndex(), "index"); + assertThat(response.getResponses()[0].getFailure().getMessage(), containsString("no such index [index]")); + } + } + + private Set<HttpHost> buildNodes() throws IOException, URISyntaxException { + Response response = client().performRequest(new Request("GET", "_nodes")); + ObjectPath objectPath = ObjectPath.createFromResponse(response); + Map<String, Object> nodesAsMap = objectPath.evaluate("nodes"); + final Set<HttpHost> nodes = new HashSet<>(); + for (String id : nodesAsMap.keySet()) { + nodes.add(HttpHost.create((String) objectPath.evaluate("nodes."
+ id + ".http.publish_address"))); + } + + return nodes; + } +} diff --git a/qa/os/build.gradle b/qa/os/build.gradle index 92c5e4f154ad8..9a1e6f781faec 100644 --- a/qa/os/build.gradle +++ b/qa/os/build.gradle @@ -42,6 +42,7 @@ dependencies { api "org.apache.httpcomponents:fluent-hc:${versions.httpclient}" api "org.apache.logging.log4j:log4j-api:${versions.log4j}" api "org.apache.logging.log4j:log4j-core:${versions.log4j}" + api "org.apache.logging.log4j:log4j-jul:${versions.log4j}" api "org.apache.logging.log4j:log4j-jcl:${versions.log4j}" api "commons-codec:commons-codec:${versions.commonscodec}" api "commons-logging:commons-logging:${versions.commonslogging}" diff --git a/qa/remote-clusters/src/test/java/org/opensearch/cluster/remote/test/AbstractMultiClusterRemoteTestCase.java b/qa/remote-clusters/src/test/java/org/opensearch/cluster/remote/test/AbstractMultiClusterRemoteTestCase.java index 61ccbab95850d..5f73144501f94 100644 --- a/qa/remote-clusters/src/test/java/org/opensearch/cluster/remote/test/AbstractMultiClusterRemoteTestCase.java +++ b/qa/remote-clusters/src/test/java/org/opensearch/cluster/remote/test/AbstractMultiClusterRemoteTestCase.java @@ -31,7 +31,7 @@ package org.opensearch.cluster.remote.test; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import org.junit.AfterClass; import org.junit.Before; import org.opensearch.action.admin.cluster.health.ClusterHealthRequest; @@ -104,8 +104,8 @@ private HighLevelClient(RestClient restClient) { private RestHighLevelClient buildClient(final String url) throws IOException { int portSeparator = url.lastIndexOf(':'); - HttpHost httpHost = new HttpHost(url.substring(0, portSeparator), - Integer.parseInt(url.substring(portSeparator + 1)), getProtocol()); + HttpHost httpHost = new HttpHost(getProtocol(), url.substring(0, portSeparator), + Integer.parseInt(url.substring(portSeparator + 1))); return new HighLevelClient(buildClient(restAdminSettings(), new HttpHost[]{httpHost})); } diff --git a/qa/repository-multi-version/src/test/java/org/opensearch/upgrades/MultiVersionRepositoryAccessIT.java b/qa/repository-multi-version/src/test/java/org/opensearch/upgrades/MultiVersionRepositoryAccessIT.java index c0b90626d7bad..49efce0516434 100644 --- a/qa/repository-multi-version/src/test/java/org/opensearch/upgrades/MultiVersionRepositoryAccessIT.java +++ b/qa/repository-multi-version/src/test/java/org/opensearch/upgrades/MultiVersionRepositoryAccessIT.java @@ -32,7 +32,6 @@ package org.opensearch.upgrades; -import org.opensearch.OpenSearchStatusException; import org.opensearch.action.admin.cluster.repositories.put.PutRepositoryRequest; import org.opensearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest; import org.opensearch.action.admin.cluster.snapshots.status.SnapshotStatus; @@ -42,20 +41,17 @@ import org.opensearch.client.Request; import org.opensearch.client.RequestOptions; import org.opensearch.client.Response; -import org.opensearch.client.ResponseException; import org.opensearch.client.RestClient; import org.opensearch.client.RestHighLevelClient; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.DeprecationHandler; import org.opensearch.common.xcontent.XContentParser; import org.opensearch.common.xcontent.json.JsonXContent; -import org.opensearch.snapshots.SnapshotsService; import org.opensearch.test.rest.OpenSearchRestTestCase; import java.io.IOException; import java.io.InputStream; import java.net.HttpURLConnection; -import java.util.Arrays; import 
java.util.List; import java.util.Map; import java.util.stream.Collectors; @@ -231,20 +227,10 @@ public void testUpgradeMovesRepoToNewMetaVersion() throws IOException { ensureSnapshotRestoreWorks(repoName, "snapshot-2", shards); } } else { - if (SnapshotsService.useIndexGenerations(minimumNodeVersion()) == false) { - assertThat(TEST_STEP, is(TestStep.STEP3_OLD_CLUSTER)); - final List<Class<? extends Exception>> expectedExceptions = - Arrays.asList(ResponseException.class, OpenSearchStatusException.class); - expectThrowsAnyOf(expectedExceptions, () -> listSnapshots(repoName)); - expectThrowsAnyOf(expectedExceptions, () -> deleteSnapshot(client, repoName, "snapshot-1")); - expectThrowsAnyOf(expectedExceptions, () -> deleteSnapshot(client, repoName, "snapshot-2")); - expectThrowsAnyOf(expectedExceptions, () -> createSnapshot(client, repoName, "snapshot-impossible", index)); - } else { - assertThat(listSnapshots(repoName), hasSize(2)); - if (TEST_STEP == TestStep.STEP4_NEW_CLUSTER) { - ensureSnapshotRestoreWorks(repoName, "snapshot-1", shards); - ensureSnapshotRestoreWorks(repoName, "snapshot-2", shards); - } + assertThat(listSnapshots(repoName), hasSize(2)); + if (TEST_STEP == TestStep.STEP4_NEW_CLUSTER) { + ensureSnapshotRestoreWorks(repoName, "snapshot-1", shards); + ensureSnapshotRestoreWorks(repoName, "snapshot-2", shards); } } } finally { diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java index f34e5f7bc121a..ed4bf11041c88 100644 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java +++ b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java @@ -31,7 +31,8 @@ package org.opensearch.upgrades; -import org.apache.http.util.EntityUtils; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.client.Request; @@ -61,7 +62,7 @@ */ public class IndexingIT extends AbstractRollingTestCase { - public void testIndexing() throws IOException { + public void testIndexing() throws IOException, ParseException { switch (CLUSTER_TYPE) { case OLD: break; @@ -181,18 +182,7 @@ public void testAutoIdWithOpTypeCreate() throws IOException { } } - if (minNodeVersion.before(LegacyESVersion.V_7_5_0)) { - ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(bulk)); - assertEquals(400, e.getResponse().getStatusLine().getStatusCode()); - assertThat(e.getMessage(), - // if request goes to 7.5+ node - either(containsString("optype create not supported for indexing requests without explicit id until")) - // if request goes to < 7.5 node - .or(containsString("an id must be provided if version type or value are set") - )); - } else { - client().performRequest(bulk); - } + client().performRequest(bulk); break; case UPGRADED: client().performRequest(bulk); @@ -214,7 +204,7 @@ private void bulk(String index, String valueSuffix, int count) throws IOExceptio client().performRequest(bulk); } - private void assertCount(String index, int count) throws IOException { + private void assertCount(String index, int count) throws IOException, ParseException { Request searchTestIndexRequest = new Request("POST", "/" + index + "/_search"); searchTestIndexRequest.addParameter(TOTAL_HITS_AS_INT_PARAM, "true"); searchTestIndexRequest.addParameter("filter_path", "hits.total"); diff --git
a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/JodaCompatibilityIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/JodaCompatibilityIT.java deleted file mode 100644 index 0ef1e3a5050af..0000000000000 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/JodaCompatibilityIT.java +++ /dev/null @@ -1,282 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.upgrades; - -import org.apache.http.HttpStatus; -import org.apache.http.util.EntityUtils; -import org.opensearch.LegacyESVersion; -import org.opensearch.client.Node; -import org.opensearch.client.Request; -import org.opensearch.client.RequestOptions; -import org.opensearch.client.Response; -import org.opensearch.client.WarningsHandler; -import org.opensearch.common.Booleans; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.search.DocValueFormat; -import org.junit.BeforeClass; - -import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.util.Collections; -import java.util.List; -import java.util.function.Consumer; - -import static org.opensearch.rest.action.search.RestSearchAction.TOTAL_HITS_AS_INT_PARAM; - -/** - * This is test is meant to verify that when upgrading from 6.x version to 7.7 or newer it is able to parse date fields with joda pattern. - * - * The test is indexing documents and searches with use of joda or java pattern. - * In order to make sure that serialization logic is used a search call is executed 3 times (using all nodes). - * It cannot be guaranteed that serialization logic will always be used as it might happen that - * all shards are allocated on the same node and client is connecting to it. - * Because of this warnings assertions have to be ignored. - * - * A special flag used when serializing {@link DocValueFormat.DateTime#writeTo DocValueFormat.DateTime::writeTo} - * is used to indicate that an index was created in 6.x and has a joda pattern. The same flag is read when - * {@link DocValueFormat.DateTime#DateTime(StreamInput)} deserializing. - * When upgrading from 7.0-7.6 to 7.7 there is no way to tell if a pattern was created in 6.x as this flag cannot be added. 
- * Hence a skip assume section in init() - * - * @see org.opensearch.search.DocValueFormat.DateTime - */ -public class JodaCompatibilityIT extends AbstractRollingTestCase { - - @BeforeClass - public static void init(){ - assumeTrue("upgrading from 7.0-7.6 will fail parsing joda formats", - UPGRADE_FROM_VERSION.before(LegacyESVersion.V_7_0_0)); - } - - public void testJodaBackedDocValueAndDateFields() throws Exception { - switch (CLUSTER_TYPE) { - case OLD: - Request createTestIndex = indexWithDateField("joda_time", "YYYY-MM-dd'T'HH:mm:ssZZ"); - createTestIndex.setOptions(ignoreWarnings()); - - Response resp = client().performRequest(createTestIndex); - assertEquals(HttpStatus.SC_OK, resp.getStatusLine().getStatusCode()); - - postNewDoc("joda_time", 1); - - break; - case MIXED: - int minute = Booleans.parseBoolean(System.getProperty("tests.first_round")) ? 2 : 3; - postNewDoc("joda_time", minute); - - Request search = dateRangeSearch("joda_time"); - search.setOptions(ignoreWarnings()); - - performOnAllNodes(search, r -> assertEquals(HttpStatus.SC_OK, r.getStatusLine().getStatusCode())); - break; - case UPGRADED: - postNewDoc("joda_time", 4); - - search = searchWithAgg("joda_time"); - search.setOptions(ignoreWarnings()); - //making sure all nodes were used for search - performOnAllNodes(search, r -> assertResponseHasAllDocuments(r)); - break; - } - } - - public void testJavaBackedDocValueAndDateFields() throws Exception { - switch (CLUSTER_TYPE) { - case OLD: - Request createTestIndex = indexWithDateField("java_time", "8yyyy-MM-dd'T'HH:mm:ssXXX"); - Response resp = client().performRequest(createTestIndex); - assertEquals(HttpStatus.SC_OK, resp.getStatusLine().getStatusCode()); - - postNewDoc("java_time", 1); - - break; - case MIXED: - int minute = Booleans.parseBoolean(System.getProperty("tests.first_round")) ? 
2 : 3; - postNewDoc("java_time", minute); - - Request search = dateRangeSearch("java_time"); - Response searchResp = client().performRequest(search); - assertEquals(HttpStatus.SC_OK, searchResp.getStatusLine().getStatusCode()); - break; - case UPGRADED: - postNewDoc("java_time", 4); - - search = searchWithAgg("java_time"); - //making sure all nodes were used for search - performOnAllNodes(search, r -> assertResponseHasAllDocuments(r)); - - break; - } - } - - private RequestOptions ignoreWarnings() { - RequestOptions.Builder options = RequestOptions.DEFAULT.toBuilder(); - options.setWarningsHandler(WarningsHandler.PERMISSIVE); - return options.build(); - } - - private void performOnAllNodes(Request search, Consumer consumer) throws IOException { - List nodes = client().getNodes(); - for (Node node : nodes) { - client().setNodes(Collections.singletonList(node)); - Response response = client().performRequest(search); - consumer.accept(response); - assertEquals(HttpStatus.SC_OK, response.getStatusLine().getStatusCode()); - } - client().setNodes(nodes); - } - - private void assertResponseHasAllDocuments(Response searchResp) { - assertEquals(HttpStatus.SC_OK, searchResp.getStatusLine().getStatusCode()); - try { - assertEquals(removeWhiteSpace("{" + - " \"_shards\": {" + - " \"total\": 3," + - " \"successful\": 3" + - " },"+ - " \"hits\": {" + - " \"total\": 4," + - " \"hits\": [" + - " {" + - " \"_source\": {" + - " \"datetime\": \"2020-01-01T00:00:01+01:00\"" + - " }" + - " }," + - " {" + - " \"_source\": {" + - " \"datetime\": \"2020-01-01T00:00:02+01:00\"" + - " }" + - " }," + - " {" + - " \"_source\": {" + - " \"datetime\": \"2020-01-01T00:00:03+01:00\"" + - " }" + - " }," + - " {" + - " \"_source\": {" + - " \"datetime\": \"2020-01-01T00:00:04+01:00\"" + - " }" + - " }" + - " ]" + - " }" + - "}"), - EntityUtils.toString(searchResp.getEntity(), StandardCharsets.UTF_8)); - } catch (IOException e) { - throw new AssertionError("Exception during response parising", e); - } - } - - private String removeWhiteSpace(String input) { - return input.replaceAll("[\\n\\r\\t\\ ]", ""); - } - - private Request dateRangeSearch(String endpoint) { - Request search = new Request("GET", endpoint+"/_search"); - search.addParameter(TOTAL_HITS_AS_INT_PARAM, "true"); - search.addParameter("filter_path", "hits.total,hits.hits._source.datetime,_shards.total,_shards.successful"); - search.setJsonEntity("" + - "{\n" + - " \"track_total_hits\": true,\n" + - " \"sort\": \"datetime\",\n" + - " \"query\": {\n" + - " \"range\": {\n" + - " \"datetime\": {\n" + - " \"gte\": \"2020-01-01T00:00:00+01:00\",\n" + - " \"lte\": \"2020-01-02T00:00:00+01:00\"\n" + - " }\n" + - " }\n" + - " }\n" + - "}\n" - ); - return search; - } - - private Request searchWithAgg(String endpoint) throws IOException { - Request search = new Request("GET", endpoint+"/_search"); - search.addParameter(TOTAL_HITS_AS_INT_PARAM, "true"); - search.addParameter("filter_path", "hits.total,hits.hits._source.datetime,_shards.total,_shards.successful"); - - search.setJsonEntity("{\n" + - " \"track_total_hits\": true,\n" + - " \"sort\": \"datetime\",\n" + - " \"query\": {\n" + - " \"range\": {\n" + - " \"datetime\": {\n" + - " \"gte\": \"2020-01-01T00:00:00+01:00\",\n" + - " \"lte\": \"2020-01-02T00:00:00+01:00\"\n" + - " }\n" + - " }\n" + - " },\n" + - " \"aggs\" : {\n" + - " \"docs_per_year\" : {\n" + - " \"date_histogram\" : {\n" + - " \"field\" : \"date\",\n" + - " \"calendar_interval\" : \"year\"\n" + - " }\n" + - " }\n" + - " }\n" + - "}\n" - ); - return 
search; - } - private Request indexWithDateField(String indexName, String format) { - Request createTestIndex = new Request("PUT", indexName); - createTestIndex.addParameter("include_type_name", "false"); - createTestIndex.setJsonEntity("{\n" + - " \"settings\": {\n" + - " \"index.number_of_shards\": 3\n" + - " },\n" + - " \"mappings\": {\n" + - " \"properties\": {\n" + - " \"datetime\": {\n" + - " \"type\": \"date\",\n" + - " \"format\": \"" + format + "\"\n" + - " }\n" + - " }\n" + - " }\n" + - "}" - ); - return createTestIndex; - } - - private void postNewDoc(String endpoint, int minute) throws IOException { - Request putDoc = new Request("POST", endpoint+"/_doc"); - putDoc.addParameter("refresh", "true"); - putDoc.addParameter("wait_for_active_shards", "all"); - putDoc.setJsonEntity("{\n" + - " \"datetime\": \"2020-01-01T00:00:0" + minute + "+01:00\"\n" + - "}" - ); - Response resp = client().performRequest(putDoc); - assertEquals(HttpStatus.SC_CREATED, resp.getStatusLine().getStatusCode()); - } -} diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/MappingIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/MappingIT.java deleted file mode 100644 index 07b1d67fde7ff..0000000000000 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/MappingIT.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.upgrades; - -import org.opensearch.LegacyESVersion; -import org.opensearch.client.Request; -import org.opensearch.client.Response; -import org.opensearch.common.xcontent.support.XContentMapValues; - -public class MappingIT extends AbstractRollingTestCase { - /** - * Create a mapping that explicitly disables the _all field (possible in 6x, see #37429) - * and check that it can be upgraded to 7x. - */ - public void testAllFieldDisable6x() throws Exception { - assumeTrue("_all", UPGRADE_FROM_VERSION.before(LegacyESVersion.V_7_0_0)); - switch (CLUSTER_TYPE) { - case OLD: - Request createTestIndex = new Request("PUT", "all-index"); - createTestIndex.addParameter("include_type_name", "false"); - createTestIndex.setJsonEntity( - "{ \"settings\": { \"index.number_of_shards\": 1 }, " + - "\"mappings\": {\"_all\": { \"enabled\": false }, \"properties\": { \"field\": { \"type\": \"text\" }}}}" - ); - createTestIndex.setOptions(expectWarnings("[_all] is deprecated in 6.0+ and will be removed in 7.0. 
As a replacement," + - " " + "you can use [copy_to] on mapping fields to create your own catch all field.")); - Response resp = client().performRequest(createTestIndex); - assertEquals(200, resp.getStatusLine().getStatusCode()); - break; - - default: - final Request request = new Request("GET", "all-index"); - Response response = client().performRequest(request); - assertEquals(200, response.getStatusLine().getStatusCode()); - Object enabled = XContentMapValues.extractValue("all-index.mappings._all.enabled", entityAsMap(response)); - assertNotNull(enabled); - assertEquals(false, enabled); - break; - } - } -} diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java index cbf91fa9d71e7..8c303eb5d0b55 100644 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java +++ b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java @@ -31,7 +31,6 @@ package org.opensearch.upgrades; -import org.apache.http.util.EntityUtils; import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.support.PlainActionFuture; @@ -47,8 +46,10 @@ import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.xcontent.support.XContentMapValues; import org.opensearch.index.IndexSettings; +import org.opensearch.index.mapper.MapperService; import org.opensearch.rest.RestStatus; import org.opensearch.test.rest.yaml.ObjectPath; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.hamcrest.Matcher; import org.hamcrest.Matchers; @@ -194,25 +195,6 @@ public void testRecoveryWithConcurrentIndexing() throws Exception { } } - private void assertDocCountOnAllCopies(String index, int expectedCount) throws Exception { - assertBusy(() -> { - Map state = entityAsMap(client().performRequest(new Request("GET", "/_cluster/state"))); - String xpath = "routing_table.indices." 
+ index + ".shards.0.node"; - @SuppressWarnings("unchecked") List assignedNodes = (List) XContentMapValues.extractValue(xpath, state); - assertNotNull(state.toString(), assignedNodes); - for (String assignedNode : assignedNodes) { - try { - assertCount(index, "_only_nodes:" + assignedNode, expectedCount); - } catch (ResponseException e) { - if (e.getMessage().contains("no data nodes with criteria [" + assignedNode + "found for shard: [" + index + "][0]")) { - throw new AssertionError(e); // shard is relocating - ask assert busy to retry - } - throw e; - } - } - }); - } - private void assertCount(final String index, final String preference, final int expectedCount) throws IOException { final int actualDocs; try { @@ -270,6 +252,7 @@ public void testRelocationWithConcurrentIndexing() throws Exception { updateIndexSettings(index, Settings.builder().put(INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none")); break; case MIXED: + // todo: verify this test can be removed in 3.0.0 final String newNode = getNodeId(v -> v.equals(Version.CURRENT)); final String oldNode = getNodeId(v -> v.before(Version.CURRENT)); // remove the replica and guaranteed the primary is placed on the old node @@ -348,11 +331,7 @@ public void testRecovery() throws Exception { if (randomBoolean()) { indexDocs(index, i, 1); // update } else if (randomBoolean()) { - if (getNodeId(v -> v.onOrAfter(LegacyESVersion.V_7_0_0)) == null) { - client().performRequest(new Request("DELETE", index + "/test/" + i)); - } else { - client().performRequest(new Request("DELETE", index + "/_doc/" + i)); - } + client().performRequest(new Request("DELETE", index + "/" + MapperService.SINGLE_MAPPING_NAME + "/" + i)); } } } @@ -458,15 +437,10 @@ public void testRecoveryClosedIndex() throws Exception { closeIndex(indexName); } - final Version indexVersionCreated = indexVersionCreated(indexName); - if (indexVersionCreated.onOrAfter(LegacyESVersion.V_7_2_0)) { - // index was created on a version that supports the replication of closed indices, - // so we expect the index to be closed and replicated - ensureGreen(indexName); - assertClosedIndex(indexName, true); - } else { - assertClosedIndex(indexName, false); - } + // index was created on a version that supports the replication of closed indices, + // so we expect the index to be closed and replicated + ensureGreen(indexName); + assertClosedIndex(indexName, true); } /** @@ -492,14 +466,10 @@ public void testCloseIndexDuringRollingUpgrade() throws Exception { closeIndex(indexName); } - if (minimumNodeVersion.onOrAfter(LegacyESVersion.V_7_2_0)) { - // index is created on a version that supports the replication of closed indices, - // so we expect the index to be closed and replicated - ensureGreen(indexName); - assertClosedIndex(indexName, true); - } else { - assertClosedIndex(indexName, false); - } + // index is created on a version that supports the replication of closed indices, + // so we expect the index to be closed and replicated + ensureGreen(indexName); + assertClosedIndex(indexName, true); } /** @@ -526,38 +496,20 @@ public void testClosedIndexNoopRecovery() throws Exception { closeIndex(indexName); } - final Version indexVersionCreated = indexVersionCreated(indexName); - if (indexVersionCreated.onOrAfter(LegacyESVersion.V_7_2_0)) { - // index was created on a version that supports the replication of closed indices, - // so we expect the index to be closed and replicated - ensureGreen(indexName); - assertClosedIndex(indexName, true); - if 
(minimumNodeVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - switch (CLUSTER_TYPE) { - case OLD: break; - case MIXED: - assertNoopRecoveries(indexName, s -> s.startsWith(CLUSTER_NAME + "-0")); - break; - case UPGRADED: - assertNoopRecoveries(indexName, s -> s.startsWith(CLUSTER_NAME)); - break; - } - } - } else { - assertClosedIndex(indexName, false); - } - - } - /** - * Returns the version in which the given index has been created - */ - private static Version indexVersionCreated(final String indexName) throws IOException { - final Request request = new Request("GET", "/" + indexName + "/_settings"); - final String versionCreatedSetting = indexName + ".settings.index.version.created"; - request.addParameter("filter_path", versionCreatedSetting); + // index was created on a version that supports the replication of closed indices, + // so we expect the index to be closed and replicated + ensureGreen(indexName); + assertClosedIndex(indexName, true); - final Response response = client().performRequest(request); - return Version.fromId(Integer.parseInt(ObjectPath.createFromResponse(response).evaluate(versionCreatedSetting))); + switch (CLUSTER_TYPE) { + case OLD: break; + case MIXED: + assertNoopRecoveries(indexName, s -> s.startsWith(CLUSTER_NAME + "-0")); + break; + case UPGRADED: + assertNoopRecoveries(indexName, s -> s.startsWith(CLUSTER_NAME)); + break; + } } /** @@ -605,20 +557,6 @@ private void assertClosedIndex(final String index, final boolean checkRoutingTab } } - @SuppressWarnings("unchecked") - private void assertPeerRecoveredFiles(String reason, String index, String targetNode, Matcher<Integer> sizeMatcher) throws IOException { - Map<String, Object> recoveryStats = entityAsMap(client().performRequest(new Request("GET", index + "/_recovery"))); - List<Map<String, Object>> shards = (List<Map<String, Object>>) XContentMapValues.extractValue(index + "."
+ "shards", recoveryStats); - for (Map<String, Object> shard : shards) { - if (Objects.equals(XContentMapValues.extractValue("type", shard), "PEER")) { - if (Objects.equals(XContentMapValues.extractValue("target.name", shard), targetNode)) { - Integer recoveredFileSize = (Integer) XContentMapValues.extractValue("index.files.recovered", shard); - assertThat(reason + " target node [" + targetNode + "] stats [" + recoveryStats + "]", recoveredFileSize, sizeMatcher); - } - } - } - } - @SuppressWarnings("unchecked") private void ensureGlobalCheckpointSynced(String index) throws Exception { assertBusy(() -> { @@ -782,12 +720,8 @@ public void testAutoExpandIndicesDuringRollingUpgrade() throws Exception { final int numberOfReplicas = Integer.parseInt( getIndexSettingsAsMap(indexName).get(IndexMetadata.SETTING_NUMBER_OF_REPLICAS).toString()); - if (minimumNodeVersion.onOrAfter(LegacyESVersion.V_7_6_0)) { - assertEquals(nodes.size() - 2, numberOfReplicas); - ensureGreen(indexName); - } else { - assertEquals(nodes.size() - 1, numberOfReplicas); - } + assertEquals(nodes.size() - 2, numberOfReplicas); + ensureGreen(indexName); } public void testSoftDeletesDisabledWarning() throws Exception { diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/SystemIndicesUpgradeIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/SystemIndicesUpgradeIT.java index c50af0084b000..8bebb3881e3fd 100644 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/SystemIndicesUpgradeIT.java +++ b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/SystemIndicesUpgradeIT.java @@ -40,7 +40,6 @@ import java.util.Map; -import static org.opensearch.cluster.metadata.IndexNameExpressionResolver.SYSTEM_INDEX_ENFORCEMENT_VERSION; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; @@ -59,13 +58,8 @@ public void testSystemIndicesUpgrades() throws Exception { Request bulk = new Request("POST", "/_bulk"); bulk.addParameter("refresh", "true"); - if (UPGRADE_FROM_VERSION.before(LegacyESVersion.V_7_0_0)) { - bulk.setJsonEntity("{\"index\": {\"_index\": \"test_index_old\", \"_type\" : \"_doc\"}}\n" + - "{\"f1\": \"v1\", \"f2\": \"v2\"}\n"); - } else { - bulk.setJsonEntity("{\"index\": {\"_index\": \"test_index_old\"}\n" + - "{\"f1\": \"v1\", \"f2\": \"v2\"}\n"); - } + bulk.setJsonEntity("{\"index\": {\"_index\": \"test_index_old\"}\n" + + "{\"f1\": \"v1\", \"f2\": \"v2\"}\n"); client().performRequest(bulk); // start a async reindex job @@ -91,10 +85,6 @@ public void testSystemIndicesUpgrades() throws Exception { // make sure .tasks index exists Request getTasksIndex = new Request("GET", "/.tasks"); getTasksIndex.addParameter("allow_no_indices", "false"); - if (UPGRADE_FROM_VERSION.before(LegacyESVersion.V_7_0_0)) { - getTasksIndex.addParameter("include_type_name", "false"); - } - getTasksIndex.setOptions(expectVersionSpecificWarnings(v -> { v.current(systemIndexWarning); v.compatible(systemIndexWarning); @@ -106,20 +96,6 @@ public void testSystemIndicesUpgrades() throws Exception { throw new AssertionError(".tasks index does not exist yet"); } }); - - // If we are on 7.x create an alias that includes both a system index and a non-system index so we can be sure it gets - // upgraded properly. If we're already on 8.x, skip this part of the test.
- if (minimumNodeVersion().before(SYSTEM_INDEX_ENFORCEMENT_VERSION)) { - // Create an alias to make sure it gets upgraded properly - Request putAliasRequest = new Request("POST", "/_aliases"); - putAliasRequest.setJsonEntity("{\n" + - " \"actions\": [\n" + - " {\"add\": {\"index\": \".tasks\", \"alias\": \"test-system-alias\"}},\n" + - " {\"add\": {\"index\": \"test_index_reindex\", \"alias\": \"test-system-alias\"}}\n" + - " ]\n" + - "}"); - assertThat(client().performRequest(putAliasRequest).getStatusLine().getStatusCode(), is(200)); - } } else if (CLUSTER_TYPE == ClusterType.UPGRADED) { assertBusy(() -> { Request clusterStateRequest = new Request("GET", "/_cluster/state/metadata"); @@ -134,21 +110,8 @@ public void testSystemIndicesUpgrades() throws Exception { JsonMapView tasksIndex = new JsonMapView((Map) indices.get(".tasks")); assertThat(tasksIndex.get("system"), is(true)); - // If .tasks was created in a 7.x version, it should have an alias on it that we need to make sure got upgraded properly. final String tasksCreatedVersionString = tasksIndex.get("settings.index.version.created"); assertThat(tasksCreatedVersionString, notNullValue()); - final Version tasksCreatedVersion = Version.fromId(Integer.parseInt(tasksCreatedVersionString)); - if (tasksCreatedVersion.before(SYSTEM_INDEX_ENFORCEMENT_VERSION)) { - // Verify that the alias survived the upgrade - Request getAliasRequest = new Request("GET", "/_alias/test-system-alias"); - getAliasRequest.setOptions(expectVersionSpecificWarnings(v -> { - v.current(systemIndexWarning); - v.compatible(systemIndexWarning); - })); - Map aliasResponse = entityAsMap(client().performRequest(getAliasRequest)); - assertThat(aliasResponse, hasKey(".tasks")); - assertThat(aliasResponse, hasKey("test_index_reindex")); - } }); } } diff --git a/qa/smoke-test-http/src/test/java/org/opensearch/http/DetailedErrorsDisabledIT.java b/qa/smoke-test-http/src/test/java/org/opensearch/http/DetailedErrorsDisabledIT.java index 6178167c98e98..0c845bb2d34e5 100644 --- a/qa/smoke-test-http/src/test/java/org/opensearch/http/DetailedErrorsDisabledIT.java +++ b/qa/smoke-test-http/src/test/java/org/opensearch/http/DetailedErrorsDisabledIT.java @@ -34,7 +34,8 @@ import java.io.IOException; -import org.apache.http.util.EntityUtils; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.opensearch.client.Request; import org.opensearch.client.Response; import org.opensearch.client.ResponseException; @@ -60,7 +61,7 @@ protected Settings nodeSettings(int nodeOrdinal) { .build(); } - public void testThatErrorTraceParamReturns400() throws IOException { + public void testThatErrorTraceParamReturns400() throws IOException, ParseException { Request request = new Request("DELETE", "/"); request.addParameter("error_trace", "true"); ResponseException e = expectThrows(ResponseException.class, () -> diff --git a/qa/smoke-test-http/src/test/java/org/opensearch/http/DetailedErrorsEnabledIT.java b/qa/smoke-test-http/src/test/java/org/opensearch/http/DetailedErrorsEnabledIT.java index 090a572ef0d6a..e2ccf86d31dbf 100644 --- a/qa/smoke-test-http/src/test/java/org/opensearch/http/DetailedErrorsEnabledIT.java +++ b/qa/smoke-test-http/src/test/java/org/opensearch/http/DetailedErrorsEnabledIT.java @@ -32,7 +32,8 @@ package org.opensearch.http; -import org.apache.http.util.EntityUtils; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.opensearch.client.Request; import 
org.opensearch.client.Response; import org.opensearch.client.ResponseException; @@ -47,7 +48,7 @@ */ public class DetailedErrorsEnabledIT extends HttpSmokeTestCase { - public void testThatErrorTraceWorksByDefault() throws IOException { + public void testThatErrorTraceWorksByDefault() throws IOException, ParseException { try { Request request = new Request("DELETE", "/"); request.addParameter("error_trace", "true"); diff --git a/qa/smoke-test-http/src/test/java/org/opensearch/http/HttpCompressionIT.java b/qa/smoke-test-http/src/test/java/org/opensearch/http/HttpCompressionIT.java index 1925ecc5cd346..5514fae996a39 100644 --- a/qa/smoke-test-http/src/test/java/org/opensearch/http/HttpCompressionIT.java +++ b/qa/smoke-test-http/src/test/java/org/opensearch/http/HttpCompressionIT.java @@ -31,9 +31,10 @@ package org.opensearch.http; -import org.apache.http.HttpHeaders; -import org.apache.http.client.entity.GzipDecompressingEntity; -import org.apache.http.util.EntityUtils; +import org.apache.hc.client5.http.entity.GzipDecompressingEntity; +import org.apache.hc.core5.http.HttpHeaders; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.opensearch.client.Request; import org.opensearch.client.RequestOptions; import org.opensearch.client.Response; @@ -56,7 +57,7 @@ public class HttpCompressionIT extends OpenSearchRestTestCase { " }\n" + "}"; - public void testCompressesResponseIfRequested() throws IOException { + public void testCompressesResponseIfRequested() throws IOException, ParseException { Request request = new Request("POST", "/company/_doc/2"); request.setJsonEntity(SAMPLE_DOCUMENT); Response response = client().performRequest(request); diff --git a/qa/smoke-test-http/src/test/java/org/opensearch/http/NoHandlerIT.java b/qa/smoke-test-http/src/test/java/org/opensearch/http/NoHandlerIT.java index c3d766abe96ca..8e6dea7edd0f8 100644 --- a/qa/smoke-test-http/src/test/java/org/opensearch/http/NoHandlerIT.java +++ b/qa/smoke-test-http/src/test/java/org/opensearch/http/NoHandlerIT.java @@ -32,7 +32,8 @@ package org.opensearch.http; -import org.apache.http.util.EntityUtils; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.opensearch.client.Request; import org.opensearch.client.RequestOptions; import org.opensearch.client.Response; @@ -46,7 +47,7 @@ public class NoHandlerIT extends HttpSmokeTestCase { - public void testNoHandlerRespectsAcceptHeader() throws IOException { + public void testNoHandlerRespectsAcceptHeader() throws IOException, ParseException { runTestNoHandlerRespectsAcceptHeader( "application/json", "application/json; charset=UTF-8", @@ -58,7 +59,7 @@ public void testNoHandlerRespectsAcceptHeader() throws IOException { } private void runTestNoHandlerRespectsAcceptHeader( - final String accept, final String contentType, final String expect) throws IOException { + final String accept, final String contentType, final String expect) throws IOException, ParseException { Request request = new Request("GET", "/foo/bar/baz/qux/quux"); RequestOptions.Builder options = request.getOptions().toBuilder(); options.addHeader("Accept", accept); diff --git a/qa/smoke-test-http/src/test/java/org/opensearch/http/RestHttpResponseHeadersIT.java b/qa/smoke-test-http/src/test/java/org/opensearch/http/RestHttpResponseHeadersIT.java index b8257272ba65b..74b85ace37b81 100644 --- a/qa/smoke-test-http/src/test/java/org/opensearch/http/RestHttpResponseHeadersIT.java +++ 
b/qa/smoke-test-http/src/test/java/org/opensearch/http/RestHttpResponseHeadersIT.java @@ -30,7 +30,7 @@ package org.opensearch.http; -import org.apache.http.util.EntityUtils; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.opensearch.client.Request; import org.opensearch.client.Response; import org.opensearch.client.ResponseException; diff --git a/qa/smoke-test-http/src/test/java/org/opensearch/http/SearchRestCancellationIT.java b/qa/smoke-test-http/src/test/java/org/opensearch/http/SearchRestCancellationIT.java index a13d406f7b133..42c7357de3f07 100644 --- a/qa/smoke-test-http/src/test/java/org/opensearch/http/SearchRestCancellationIT.java +++ b/qa/smoke-test-http/src/test/java/org/opensearch/http/SearchRestCancellationIT.java @@ -31,8 +31,8 @@ package org.opensearch.http; -import org.apache.http.entity.ContentType; -import org.apache.http.nio.entity.NByteArrayEntity; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.io.entity.ByteArrayEntity; import org.apache.logging.log4j.LogManager; import org.apache.lucene.util.SetOnce; import org.opensearch.action.admin.cluster.node.info.NodeInfo; @@ -109,7 +109,7 @@ public void testAutomaticCancellationMultiSearchDuringQueryPhase() throws Except new Script(ScriptType.INLINE, "mockscript", ScriptedBlockPlugin.SCRIPT_NAME, Collections.emptyMap())))); Request restRequest = new Request("POST", "/_msearch"); byte[] requestBody = MultiSearchRequest.writeMultiLineFormat(multiSearchRequest, contentType.xContent()); - restRequest.setEntity(new NByteArrayEntity(requestBody, createContentType(contentType))); + restRequest.setEntity(new ByteArrayEntity(requestBody, createContentType(contentType))); verifyCancellationDuringQueryPhase(MultiSearchAction.NAME, restRequest); } @@ -158,7 +158,7 @@ public void testAutomaticCancellationMultiSearchDuringFetchPhase() throws Except new Script(ScriptType.INLINE, "mockscript", ScriptedBlockPlugin.SCRIPT_NAME, Collections.emptyMap())))); Request restRequest = new Request("POST", "/_msearch"); byte[] requestBody = MultiSearchRequest.writeMultiLineFormat(multiSearchRequest, contentType.xContent()); - restRequest.setEntity(new NByteArrayEntity(requestBody, createContentType(contentType))); + restRequest.setEntity(new ByteArrayEntity(requestBody, createContentType(contentType))); verifyCancellationDuringFetchPhase(MultiSearchAction.NAME, restRequest); } diff --git a/qa/wildfly/src/main/java/org/opensearch/wildfly/transport/RestHighLevelClientProducer.java b/qa/wildfly/src/main/java/org/opensearch/wildfly/transport/RestHighLevelClientProducer.java index f85c3efcbb6e8..2b1abe45f7723 100644 --- a/qa/wildfly/src/main/java/org/opensearch/wildfly/transport/RestHighLevelClientProducer.java +++ b/qa/wildfly/src/main/java/org/opensearch/wildfly/transport/RestHighLevelClientProducer.java @@ -32,20 +32,22 @@ package org.opensearch.wildfly.transport; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.client.RestClient; import org.opensearch.client.RestHighLevelClient; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.io.PathUtils; import javax.enterprise.inject.Produces; + +import java.net.URISyntaxException; import java.nio.file.Path; @SuppressWarnings("unused") public final class RestHighLevelClientProducer { @Produces - public RestHighLevelClient createRestHighLevelClient() { + public RestHighLevelClient createRestHighLevelClient() throws URISyntaxException { String httpUri = System.getProperty("opensearch.uri"); 
return new RestHighLevelClient(RestClient.builder(HttpHost.create(httpUri))); diff --git a/qa/wildfly/src/test/java/org/opensearch/wildfly/WildflyIT.java b/qa/wildfly/src/test/java/org/opensearch/wildfly/WildflyIT.java index 7961ca69c2d29..2f2b355baedaf 100644 --- a/qa/wildfly/src/test/java/org/opensearch/wildfly/WildflyIT.java +++ b/qa/wildfly/src/test/java/org/opensearch/wildfly/WildflyIT.java @@ -32,14 +32,15 @@ package org.opensearch.wildfly; -import org.apache.http.client.methods.CloseableHttpResponse; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpPut; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; -import org.apache.http.impl.client.CloseableHttpClient; -import org.apache.http.impl.client.HttpClientBuilder; -import org.apache.http.util.EntityUtils; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpPut; +import org.apache.hc.client5.http.impl.classic.CloseableHttpClient; +import org.apache.hc.client5.http.impl.classic.CloseableHttpResponse; +import org.apache.hc.client5.http.impl.classic.HttpClientBuilder; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; +import org.apache.hc.core5.http.io.entity.StringEntity; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.tests.util.LuceneTestCase; @@ -78,7 +79,7 @@ private String buildBaseUrl() { return "http://localhost:" + port + "/example-app/transport"; } - public void testRestClient() throws URISyntaxException, IOException { + public void testRestClient() throws URISyntaxException, IOException, ParseException { final String baseUrl = buildBaseUrl(); try (CloseableHttpClient client = HttpClientBuilder.create().build()) { @@ -100,7 +101,7 @@ public void testRestClient() throws URISyntaxException, IOException { put.setEntity(new StringEntity(body, ContentType.APPLICATION_JSON)); try (CloseableHttpResponse response = client.execute(put)) { - int status = response.getStatusLine().getStatusCode(); + int status = response.getCode(); assertThat( "expected a 201 response but got: " + status + " - body: " + EntityUtils.toString(response.getEntity()), status, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_decommission_awareness.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_decommission_awareness.json new file mode 100644 index 0000000000000..13ea101169e60 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_decommission_awareness.json @@ -0,0 +1,19 @@ +{ + "cluster.delete_decommission_awareness": { + "documentation": { + "url": "https://opensearch.org/docs/latest/opensearch/rest-api/decommission/", + "description": "Delete any existing decommission." 
+ }, + "stability": "experimental", + "url": { + "paths": [ + { + "path": "/_cluster/decommission/awareness/", + "methods": [ + "DELETE" + ] + } + ] + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_decommission_awareness.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_decommission_awareness.json index 430f96921fbc2..302dea4ec31a7 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_decommission_awareness.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_decommission_awareness.json @@ -8,10 +8,16 @@ "url": { "paths": [ { - "path": "/_cluster/decommission/awareness/_status", - "methods": [ + "path":"/_cluster/decommission/awareness/{awareness_attribute_name}/_status", + "methods":[ "GET" - ] + ], + "parts":{ + "awareness_attribute_name":{ + "type":"string", + "description":"Awareness attribute name" + } + } } ] } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/80_geo_point.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/80_geo_point.yml new file mode 100644 index 0000000000000..7d6c2b835f1f7 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/80_geo_point.yml @@ -0,0 +1,151 @@ +setup: + - do: + indices.create: + index: test_1 + body: + settings: + number_of_replicas: 0 + mappings: + properties: + location: + type: geo_point + +--- +"Single point test": + - skip: + version: " - 2.3.99" + reason: "geojson format is supported in 2.4 and above" + - do: + bulk: + refresh: true + body: + - index: + _index: test_1 + _id: 1 + - location: + lon: 52.374081 + lat: 4.912350 + - index: + _index: test_1 + _id: 2 + - location: "4.901618,52.369219" + - index: + _index: test_1 + _id: 3 + - location: [ 52.371667, 4.914722 ] + - index: + _index: test_1 + _id: 4 + - location: "POINT (52.371667 4.914722)" + - index: + _index: test_1 + _id: 5 + - location: "t0v5zsq1gpzf" + - index: + _index: test_1 + _id: 6 + - location: + type: Point + coordinates: [ 52.371667, 4.914722 ] + + - do: + search: + index: test_1 + rest_total_hits_as_int: true + body: + query: + geo_shape: + location: + shape: + type: "envelope" + coordinates: [ [ 51, 5 ], [ 53, 3 ] ] + + - match: { hits.total: 6 } + + - do: + search: + index: test_1 + rest_total_hits_as_int: true + body: + query: + geo_shape: + location: + shape: + type: "envelope" + coordinates: [ [ 151, 15 ], [ 153, 13 ] ] + + - match: { hits.total: 0 } + +--- +"Multi points test": + - skip: + version: " - 2.3.99" + reason: "geojson format is supported in 2.4 and above" + - do: + bulk: + refresh: true + body: + - index: + _index: test_1 + _id: 1 + - location: + - {lon: 52.374081, lat: 4.912350} + - {lon: 152.374081, lat: 14.912350} + - index: + _index: test_1 + _id: 2 + - location: + - "4.901618,52.369219" + - "14.901618,152.369219" + - index: + _index: test_1 + _id: 3 + - location: + - [ 52.371667, 4.914722 ] + - [ 152.371667, 14.914722 ] + - index: + _index: test_1 + _id: 4 + - location: + - "POINT (52.371667 4.914722)" + - "POINT (152.371667 14.914722)" + - index: + _index: test_1 + _id: 5 + - location: + - "t0v5zsq1gpzf" + - "x6skg0zbhnum" + - index: + _index: test_1 + _id: 6 + - location: + - {type: Point, coordinates: [ 52.371667, 4.914722 ]} + - {type: Point, coordinates: [ 152.371667, 14.914722 ]} + + - do: + search: + index: test_1 + rest_total_hits_as_int: true + body: + query: + geo_shape: + location: + shape: + type: "envelope" + coordinates: [ [ 51, 5 ], [ 53, 3 ] ] + + - match: { hits.total: 6 } + + 
- do: + search: + index: test_1 + rest_total_hits_as_int: true + body: + query: + geo_shape: + location: + shape: + type: "envelope" + coordinates: [ [ 151, 15 ], [ 153, 13 ] ] + + - match: { hits.total: 6 } diff --git a/sandbox/plugins/concurrent-search/src/test/java/org/opensearch/search/query/QueryPhaseTests.java b/sandbox/plugins/concurrent-search/src/test/java/org/opensearch/search/query/QueryPhaseTests.java index 74cd4754efe44..2768a38cf673d 100644 --- a/sandbox/plugins/concurrent-search/src/test/java/org/opensearch/search/query/QueryPhaseTests.java +++ b/sandbox/plugins/concurrent-search/src/test/java/org/opensearch/search/query/QueryPhaseTests.java @@ -53,11 +53,9 @@ import org.apache.lucene.index.Term; import org.apache.lucene.queries.spans.SpanNearQuery; import org.apache.lucene.queries.spans.SpanTermQuery; -import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Collector; -import org.apache.lucene.search.CollectorManager; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.FieldComparator; @@ -76,7 +74,6 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; -import org.apache.lucene.search.TopFieldDocs; import org.apache.lucene.search.TotalHitCountCollector; import org.apache.lucene.search.TotalHits; import org.apache.lucene.search.Weight; @@ -1178,75 +1175,6 @@ public void search(List leaves, Weight weight, Collector coll }; } - // used to check that numeric long or date sort optimization was run - private static ContextIndexSearcher newOptimizedContextSearcher(IndexReader reader, int queryType, ExecutorService executor) - throws IOException { - return new ContextIndexSearcher( - reader, - IndexSearcher.getDefaultSimilarity(), - IndexSearcher.getDefaultQueryCache(), - IndexSearcher.getDefaultQueryCachingPolicy(), - true, - executor - ) { - - @Override - public void search( - Query query, - CollectorManager manager, - QuerySearchResult result, - DocValueFormat[] formats, - TotalHits totalHits - ) throws IOException { - assertTrue(query instanceof BooleanQuery); - List clauses = ((BooleanQuery) query).clauses(); - assertTrue(clauses.size() == 2); - assertTrue(clauses.get(0).getOccur() == Occur.FILTER); - assertTrue(clauses.get(1).getOccur() == Occur.SHOULD); - if (queryType == 0) { - assertTrue( - clauses.get(1).getQuery().getClass() == LongPoint.newDistanceFeatureQuery("random_field", 1, 1, 1).getClass() - ); - } - if (queryType == 1) assertTrue(clauses.get(1).getQuery() instanceof DocValuesFieldExistsQuery); - super.search(query, manager, result, formats, totalHits); - } - - @Override - public void search( - List leaves, - Weight weight, - @SuppressWarnings("rawtypes") CollectorManager manager, - QuerySearchResult result, - DocValueFormat[] formats, - TotalHits totalHits - ) throws IOException { - final Query query = weight.getQuery(); - assertTrue(query instanceof BooleanQuery); - List clauses = ((BooleanQuery) query).clauses(); - assertTrue(clauses.size() == 2); - assertTrue(clauses.get(0).getOccur() == Occur.FILTER); - assertTrue(clauses.get(1).getOccur() == Occur.SHOULD); - if (queryType == 0) { - assertTrue( - clauses.get(1).getQuery().getClass() == LongPoint.newDistanceFeatureQuery("random_field", 1, 1, 1).getClass() - ); - } - if (queryType == 1) assertTrue(clauses.get(1).getQuery() 
instanceof DocValuesFieldExistsQuery); - super.search(leaves, weight, manager, result, formats, totalHits); - } - - @Override - public void search(List leaves, Weight weight, Collector collector) throws IOException { - if (getExecutor() == null) { - assert (false); // should not be there, expected to search with CollectorManager - } else { - super.search(leaves, weight, collector); - } - } - }; - } - private static class TestTotalHitCountCollectorManager extends TotalHitCountCollectorManager { private int totalHits; private final TotalHitCountCollector collector; diff --git a/scripts/build.sh b/scripts/build.sh new file mode 100755 index 0000000000000..16906bf39fbc7 --- /dev/null +++ b/scripts/build.sh @@ -0,0 +1,161 @@ +#!/bin/bash + +# Copyright OpenSearch Contributors +# SPDX-License-Identifier: Apache-2.0 +# +# The OpenSearch Contributors require contributions made to +# this file be licensed under the Apache-2.0 license or a +# compatible open source license. + +set -ex + +function usage() { + echo "Usage: $0 [args]" + echo "" + echo "Arguments:" + echo -e "-v VERSION\t[Required] OpenSearch version." + echo -e "-q QUALIFIER\t[Optional] Version qualifier." + echo -e "-s SNAPSHOT\t[Optional] Build a snapshot, default is 'false'." + echo -e "-p PLATFORM\t[Optional] Platform, default is 'uname -s'." + echo -e "-a ARCHITECTURE\t[Optional] Build architecture, default is 'uname -m'." + echo -e "-d DISTRIBUTION\t[Optional] Distribution, default is 'tar'." + echo -e "-o OUTPUT\t[Optional] Output path, default is 'artifacts'." + echo -e "-h help" +} + +while getopts ":h:v:q:s:o:p:a:d:" arg; do + case $arg in + h) + usage + exit 1 + ;; + v) + VERSION=$OPTARG + ;; + q) + QUALIFIER=$OPTARG + ;; + s) + SNAPSHOT=$OPTARG + ;; + o) + OUTPUT=$OPTARG + ;; + p) + PLATFORM=$OPTARG + ;; + a) + ARCHITECTURE=$OPTARG + ;; + d) + DISTRIBUTION=$OPTARG + ;; + :) + echo "Error: -${OPTARG} requires an argument" + usage + exit 1 + ;; + ?) + echo "Invalid option: -${arg}" + exit 1 + ;; + esac +done + +if [ -z "$VERSION" ]; then + echo "Error: You must specify the OpenSearch version" + usage + exit 1 +fi + +[ -z "$OUTPUT" ] && OUTPUT=artifacts + +mkdir -p $OUTPUT/maven/org/opensearch + +# Build project and publish to maven local. +./gradlew publishToMavenLocal -Dbuild.snapshot=$SNAPSHOT -Dbuild.version_qualifier=$QUALIFIER + +# Publish to existing test repo, using this to stage release versions of the artifacts that can be released from the same build. 
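+# (The Nebula publication below writes into ./build/local-test-repo; the copy step that
+# follows the assemble stages it under $OUTPUT/maven for promotion.)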
+./gradlew publishNebulaPublicationToTestRepository -Dbuild.snapshot=$SNAPSHOT -Dbuild.version_qualifier=$QUALIFIER + +# Copy maven publications to be promoted +cp -r ./build/local-test-repo/org/opensearch "${OUTPUT}"/maven/org + +# Assemble distribution artifact +# see https://github.com/opensearch-project/OpenSearch/blob/main/settings.gradle#L34 for other distribution targets + +[ -z "$PLATFORM" ] && PLATFORM=$(uname -s | awk '{print tolower($0)}') +[ -z "$ARCHITECTURE" ] && ARCHITECTURE=`uname -m` +[ -z "$DISTRIBUTION" ] && DISTRIBUTION="tar" + +case $PLATFORM-$DISTRIBUTION-$ARCHITECTURE in + linux-tar-x64|darwin-tar-x64) + PACKAGE="tar" + EXT="tar.gz" + TYPE="archives" + TARGET="$PLATFORM-$PACKAGE" + SUFFIX="$PLATFORM-x64" + ;; + linux-tar-arm64|darwin-tar-arm64) + PACKAGE="tar" + EXT="tar.gz" + TYPE="archives" + TARGET="$PLATFORM-arm64-$PACKAGE" + SUFFIX="$PLATFORM-arm64" + ;; + linux-rpm-x64) + PACKAGE="rpm" + EXT="rpm" + TYPE="packages" + TARGET="rpm" + SUFFIX="x86_64" + ;; + linux-rpm-arm64) + PACKAGE="rpm" + EXT="rpm" + TYPE="packages" + TARGET="arm64-rpm" + SUFFIX="aarch64" + ;; + windows-zip-x64) + PACKAGE="zip" + EXT="zip" + TYPE="archives" + TARGET="$PLATFORM-$PACKAGE" + SUFFIX="$PLATFORM-x64" + ;; + windows-zip-arm64) + PACKAGE="zip" + EXT="zip" + TYPE="archives" + TARGET="$PLATFORM-arm64-$PACKAGE" + SUFFIX="$PLATFORM-arm64" + ;; + *) + echo "Unsupported platform-distribution-architecture combination: $PLATFORM-$DISTRIBUTION-$ARCHITECTURE" + exit 1 + ;; +esac + +echo "Building OpenSearch for $PLATFORM-$DISTRIBUTION-$ARCHITECTURE" + +./gradlew :distribution:$TYPE:$TARGET:assemble :distribution:$TYPE:no-jdk-$TARGET:assemble -Dbuild.snapshot=$SNAPSHOT -Dbuild.version_qualifier=$QUALIFIER + +# Copy artifact to dist folder in bundle build output +[[ "$SNAPSHOT" == "true" ]] && IDENTIFIER="-SNAPSHOT" +ARTIFACT_BUILD_NAME=`ls distribution/$TYPE/$TARGET/build/distributions/ | grep "opensearch-min.*$SUFFIX.$EXT"` +mkdir -p "${OUTPUT}/dist" +cp distribution/$TYPE/$TARGET/build/distributions/$ARTIFACT_BUILD_NAME "${OUTPUT}"/dist/$ARTIFACT_BUILD_NAME + +echo "Building core plugins..." +mkdir -p "${OUTPUT}/core-plugins" +cd plugins +../gradlew assemble -Dbuild.snapshot="$SNAPSHOT" -Dbuild.version_qualifier=$QUALIFIER +cd .. 
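+# For reference, a typical invocation of this script might be (values illustrative):
+#   ./scripts/build.sh -v 2.5.0 -s true -p linux -a x64 -d tar -o artifacts
+# Stage each assembled plugin zip (skipping the examples directory) under $OUTPUT/core-plugins.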
+for plugin in plugins/*; do + PLUGIN_NAME=$(basename "$plugin") + if [ -d "$plugin" ] && [ "examples" != "$PLUGIN_NAME" ]; then + PLUGIN_ARTIFACT_BUILD_NAME=`ls "$plugin"/build/distributions/ | grep "$PLUGIN_NAME.*$IDENTIFIER.zip"` + cp "$plugin"/build/distributions/"$PLUGIN_ARTIFACT_BUILD_NAME" "${OUTPUT}"/core-plugins/"$PLUGIN_ARTIFACT_BUILD_NAME" + fi +done diff --git a/server/build.gradle b/server/build.gradle index 9d9d12e798eab..d50be48afc023 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -129,6 +129,7 @@ dependencies { // logging api "org.apache.logging.log4j:log4j-api:${versions.log4j}" + api "org.apache.logging.log4j:log4j-jul:${versions.log4j}" api "org.apache.logging.log4j:log4j-core:${versions.log4j}", optional // jna diff --git a/server/licenses/jna-5.12.1.jar.sha1 b/server/licenses/jna-5.12.1.jar.sha1 deleted file mode 100644 index 0d42f248c1afd..0000000000000 --- a/server/licenses/jna-5.12.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b1e93a735caea94f503e95e6fe79bf9cdc1e985d \ No newline at end of file diff --git a/server/licenses/jna-5.5.0.jar.sha1 b/server/licenses/jna-5.5.0.jar.sha1 new file mode 100644 index 0000000000000..5621dfc743dd0 --- /dev/null +++ b/server/licenses/jna-5.5.0.jar.sha1 @@ -0,0 +1 @@ +0e0845217c4907822403912ad6828d8e0b256208 diff --git a/server/licenses/log4j-jul-2.17.1.jar.sha1 b/server/licenses/log4j-jul-2.17.1.jar.sha1 new file mode 100644 index 0000000000000..4afb381a696e9 --- /dev/null +++ b/server/licenses/log4j-jul-2.17.1.jar.sha1 @@ -0,0 +1 @@ +881333b463d47828eda7443b19811763367b1916 \ No newline at end of file diff --git a/client/rest/licenses/httpcore-nio-LICENSE.txt b/server/licenses/log4j-jul-LICENSE.txt similarity index 100% rename from client/rest/licenses/httpcore-nio-LICENSE.txt rename to server/licenses/log4j-jul-LICENSE.txt diff --git a/server/licenses/log4j-jul-NOTICE.txt b/server/licenses/log4j-jul-NOTICE.txt new file mode 100644 index 0000000000000..243a0391fb574 --- /dev/null +++ b/server/licenses/log4j-jul-NOTICE.txt @@ -0,0 +1,20 @@ +Apache Log4j +Copyright 1999-2021 Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + +ResolverUtil.java +Copyright 2005-2006 Tim Fennell + +Dumbster SMTP test server +Copyright 2004 Jason Paul Kitchen + +TypeUtil.java +Copyright 2002-2012 Ramnivas Laddad, Juergen Hoeller, Chris Beams + +picocli (http://picocli.info) +Copyright 2017 Remko Popma + +TimeoutBlockingWaitStrategy.java and parts of Util.java +Copyright 2011 LMAX Ltd. 
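The log4j-jul artifact added to server/build.gradle above is Log4j's adapter for java.util.logging. As a minimal sketch (the flag placement is illustrative, not taken from this change), JUL output is routed through Log4j by swapping the JUL LogManager before any logging is initialized:

    # Illustrative only: route java.util.logging through Log4j by swapping the JUL LogManager
    export OPENSEARCH_JAVA_OPTS="-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"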
diff --git a/server/licenses/lucene-analysis-common-9.4.0.jar.sha1 b/server/licenses/lucene-analysis-common-9.4.0.jar.sha1 deleted file mode 100644 index d4db2877c486b..0000000000000 --- a/server/licenses/lucene-analysis-common-9.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -02fbd4e87241411fcf5d34e92a50bee46ab164dc \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.5.0-snapshot-a4ef70f.jar.sha1 b/server/licenses/lucene-analysis-common-9.5.0-snapshot-a4ef70f.jar.sha1 new file mode 100644 index 0000000000000..e12c20e2a64b8 --- /dev/null +++ b/server/licenses/lucene-analysis-common-9.5.0-snapshot-a4ef70f.jar.sha1 @@ -0,0 +1 @@ +84d717ed509f8ce484c57fea720d8de2a6afdaa6 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.4.0.jar.sha1 b/server/licenses/lucene-backward-codecs-9.4.0.jar.sha1 deleted file mode 100644 index 1b7b53ef9fe70..0000000000000 --- a/server/licenses/lucene-backward-codecs-9.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -259863dfd107645de6146b3c87b4ecee66a4d43d \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.5.0-snapshot-a4ef70f.jar.sha1 b/server/licenses/lucene-backward-codecs-9.5.0-snapshot-a4ef70f.jar.sha1 new file mode 100644 index 0000000000000..e78e165acddb3 --- /dev/null +++ b/server/licenses/lucene-backward-codecs-9.5.0-snapshot-a4ef70f.jar.sha1 @@ -0,0 +1 @@ +087bcc11526f8dcc56742dd8188bd05ad0329161 \ No newline at end of file diff --git a/server/licenses/lucene-core-9.4.0.jar.sha1 b/server/licenses/lucene-core-9.4.0.jar.sha1 deleted file mode 100644 index 66f7f24485172..0000000000000 --- a/server/licenses/lucene-core-9.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cca1116f813c0f0c63acfac4c952baf29d46d76b \ No newline at end of file diff --git a/server/licenses/lucene-core-9.5.0-snapshot-a4ef70f.jar.sha1 b/server/licenses/lucene-core-9.5.0-snapshot-a4ef70f.jar.sha1 new file mode 100644 index 0000000000000..bd5fc52fb86c3 --- /dev/null +++ b/server/licenses/lucene-core-9.5.0-snapshot-a4ef70f.jar.sha1 @@ -0,0 +1 @@ +e949897fa24e14d2701a3c41fe27a4f094681b81 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.4.0.jar.sha1 b/server/licenses/lucene-grouping-9.4.0.jar.sha1 deleted file mode 100644 index fe79c0efd34e4..0000000000000 --- a/server/licenses/lucene-grouping-9.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -51bec1d5acc8ecaf9f50e047d3f86d60c7a958f4 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.5.0-snapshot-a4ef70f.jar.sha1 b/server/licenses/lucene-grouping-9.5.0-snapshot-a4ef70f.jar.sha1 new file mode 100644 index 0000000000000..17aa27ceac3bf --- /dev/null +++ b/server/licenses/lucene-grouping-9.5.0-snapshot-a4ef70f.jar.sha1 @@ -0,0 +1 @@ +6cb53ca55f7e313ed19852ae37fca4ad2e4caa0c \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.4.0.jar.sha1 b/server/licenses/lucene-highlighter-9.4.0.jar.sha1 deleted file mode 100644 index 54700f08a3fdb..0000000000000 --- a/server/licenses/lucene-highlighter-9.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c8cf8c9308d8fb18a927c7ed267a14ace3990a5f \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.5.0-snapshot-a4ef70f.jar.sha1 b/server/licenses/lucene-highlighter-9.5.0-snapshot-a4ef70f.jar.sha1 new file mode 100644 index 0000000000000..7f248580a6a49 --- /dev/null +++ b/server/licenses/lucene-highlighter-9.5.0-snapshot-a4ef70f.jar.sha1 @@ -0,0 +1 @@ +c7f650e33ac11e01bb5c2e35e4eb080a9ce245b8 \ No newline at end of file diff --git 
a/server/licenses/lucene-join-9.4.0.jar.sha1 b/server/licenses/lucene-join-9.4.0.jar.sha1 deleted file mode 100644 index 752006d3a66dd..0000000000000 --- a/server/licenses/lucene-join-9.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -99b2d3c8e137a6853a2503456897d47d4f18974b \ No newline at end of file diff --git a/server/licenses/lucene-join-9.5.0-snapshot-a4ef70f.jar.sha1 b/server/licenses/lucene-join-9.5.0-snapshot-a4ef70f.jar.sha1 new file mode 100644 index 0000000000000..88fef91bee929 --- /dev/null +++ b/server/licenses/lucene-join-9.5.0-snapshot-a4ef70f.jar.sha1 @@ -0,0 +1 @@ +914ea03f71043a9291623628396a97a4c1901f8c \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.4.0.jar.sha1 b/server/licenses/lucene-memory-9.4.0.jar.sha1 deleted file mode 100644 index 27b488699968e..0000000000000 --- a/server/licenses/lucene-memory-9.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -881cb214e79da14de35cb0e8e6779d2722828a96 \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.5.0-snapshot-a4ef70f.jar.sha1 b/server/licenses/lucene-memory-9.5.0-snapshot-a4ef70f.jar.sha1 new file mode 100644 index 0000000000000..f6422c2e72fda --- /dev/null +++ b/server/licenses/lucene-memory-9.5.0-snapshot-a4ef70f.jar.sha1 @@ -0,0 +1 @@ +e83ecf8c4f5991f8e4ea319fc9194c933e02f66d \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.4.0.jar.sha1 b/server/licenses/lucene-misc-9.4.0.jar.sha1 deleted file mode 100644 index f9924475b9acd..0000000000000 --- a/server/licenses/lucene-misc-9.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a126123e482e6bf2e7aea670d221a2a39d3277dc \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.5.0-snapshot-a4ef70f.jar.sha1 b/server/licenses/lucene-misc-9.5.0-snapshot-a4ef70f.jar.sha1 new file mode 100644 index 0000000000000..262190789814d --- /dev/null +++ b/server/licenses/lucene-misc-9.5.0-snapshot-a4ef70f.jar.sha1 @@ -0,0 +1 @@ +5adc5753c741847cd84cb11ebfcd613bedc11beb \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.4.0.jar.sha1 b/server/licenses/lucene-queries-9.4.0.jar.sha1 deleted file mode 100644 index 65e441bfdaf90..0000000000000 --- a/server/licenses/lucene-queries-9.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fe74dbfe9dba9ee9ee2cb80f151fde97fb4efd12 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.5.0-snapshot-a4ef70f.jar.sha1 b/server/licenses/lucene-queries-9.5.0-snapshot-a4ef70f.jar.sha1 new file mode 100644 index 0000000000000..f8bba3d90a0f1 --- /dev/null +++ b/server/licenses/lucene-queries-9.5.0-snapshot-a4ef70f.jar.sha1 @@ -0,0 +1 @@ +824272a064aa2fff1f952b5ae383e80aef4e45f8 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.4.0.jar.sha1 b/server/licenses/lucene-queryparser-9.4.0.jar.sha1 deleted file mode 100644 index 2d454942d52e1..0000000000000 --- a/server/licenses/lucene-queryparser-9.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -13f108a8572fcf0670c7df3ba8dbe1076d0e0dbe \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.5.0-snapshot-a4ef70f.jar.sha1 b/server/licenses/lucene-queryparser-9.5.0-snapshot-a4ef70f.jar.sha1 new file mode 100644 index 0000000000000..652ccd298c9d9 --- /dev/null +++ b/server/licenses/lucene-queryparser-9.5.0-snapshot-a4ef70f.jar.sha1 @@ -0,0 +1 @@ +d5bf983dfb6183b390bdc9d3b41b88b6ee6f780e \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.4.0.jar.sha1 b/server/licenses/lucene-sandbox-9.4.0.jar.sha1 deleted file mode 100644 index 4ebcf3f6edc8f..0000000000000 --- 
a/server/licenses/lucene-sandbox-9.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e7a676a12ea50dcbf64564f4e4022f939f0a627d \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.5.0-snapshot-a4ef70f.jar.sha1 b/server/licenses/lucene-sandbox-9.5.0-snapshot-a4ef70f.jar.sha1 new file mode 100644 index 0000000000000..b51328d19065a --- /dev/null +++ b/server/licenses/lucene-sandbox-9.5.0-snapshot-a4ef70f.jar.sha1 @@ -0,0 +1 @@ +fff58cc6b79887348b45c9d06bff39d055540738 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.4.0.jar.sha1 b/server/licenses/lucene-spatial-extras-9.4.0.jar.sha1 deleted file mode 100644 index c0f181ad19eb6..0000000000000 --- a/server/licenses/lucene-spatial-extras-9.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -84d956d1cb1458c51967af1c4acadd2a1f92634d \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.5.0-snapshot-a4ef70f.jar.sha1 b/server/licenses/lucene-spatial-extras-9.5.0-snapshot-a4ef70f.jar.sha1 new file mode 100644 index 0000000000000..37a22d637a051 --- /dev/null +++ b/server/licenses/lucene-spatial-extras-9.5.0-snapshot-a4ef70f.jar.sha1 @@ -0,0 +1 @@ +c391f1df56d63dff3c6543da15c87105f2106c86 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.4.0.jar.sha1 b/server/licenses/lucene-spatial3d-9.4.0.jar.sha1 deleted file mode 100644 index 3414f36b02bef..0000000000000 --- a/server/licenses/lucene-spatial3d-9.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -76887ca708f23b13613e45fb9e307c548b22c6da \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.5.0-snapshot-a4ef70f.jar.sha1 b/server/licenses/lucene-spatial3d-9.5.0-snapshot-a4ef70f.jar.sha1 new file mode 100644 index 0000000000000..b0c9924752852 --- /dev/null +++ b/server/licenses/lucene-spatial3d-9.5.0-snapshot-a4ef70f.jar.sha1 @@ -0,0 +1 @@ +794109c75534b1c3a19a29bcb66692f0e0708744 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.4.0.jar.sha1 b/server/licenses/lucene-suggest-9.4.0.jar.sha1 deleted file mode 100644 index 563a0b5ad966b..0000000000000 --- a/server/licenses/lucene-suggest-9.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -406c9c539f262449d3b1e57e7bc4302efeecaf6c \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.5.0-snapshot-a4ef70f.jar.sha1 b/server/licenses/lucene-suggest-9.5.0-snapshot-a4ef70f.jar.sha1 new file mode 100644 index 0000000000000..63f5d8123c2cf --- /dev/null +++ b/server/licenses/lucene-suggest-9.5.0-snapshot-a4ef70f.jar.sha1 @@ -0,0 +1 @@ +dc5fdd92541f4e78256152d3efc11bdb67ffdc91 \ No newline at end of file diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java index a44cf05a4bdc4..11d1af608fbee 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java @@ -89,6 +89,7 @@ public void testNodeCounts() { expectedCounts.put(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE.roleName(), 1); expectedCounts.put(DiscoveryNodeRole.INGEST_ROLE.roleName(), 1); expectedCounts.put(DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE.roleName(), 1); + expectedCounts.put(DiscoveryNodeRole.SEARCH_ROLE.roleName(), 0); expectedCounts.put(ClusterStatsNodes.Counts.COORDINATING_ONLY, 0); int numNodes = randomIntBetween(1, 5); @@ -160,6 +161,7 @@ public void 
testNodeCountsWithDeprecatedMasterRole() { expectedCounts.put(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE.roleName(), 1); expectedCounts.put(DiscoveryNodeRole.INGEST_ROLE.roleName(), 0); expectedCounts.put(DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE.roleName(), 0); + expectedCounts.put(DiscoveryNodeRole.SEARCH_ROLE.roleName(), 0); expectedCounts.put(ClusterStatsNodes.Counts.COORDINATING_ONLY, 0); ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get(); diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java new file mode 100644 index 0000000000000..14ec041b7464b --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java @@ -0,0 +1,252 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.coordination; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.junit.After; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateAction; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateRequest; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateResponse; +import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateAction; +import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateRequest; +import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateResponse; +import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionAction; +import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionRequest; +import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionResponse; +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; +import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterPutWeightedRoutingResponse; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.decommission.DecommissionAttribute; +import org.opensearch.cluster.decommission.DecommissionStatus; +import org.opensearch.cluster.decommission.DecommissioningFailedException; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.cluster.routing.WeightedRouting; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.Priority; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.plugins.Plugin; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.transport.MockTransportService; + +import java.util.Collection; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutionException; + +import static org.opensearch.test.NodeRoles.onlyRole; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoTimeout; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) 
+public class AwarenessAttributeDecommissionIT extends OpenSearchIntegTestCase {
+    private final Logger logger = LogManager.getLogger(AwarenessAttributeDecommissionIT.class);
+
+    @Override
+    protected Collection<Class<? extends Plugin>> nodePlugins() {
+        return Collections.singletonList(MockTransportService.TestPlugin.class);
+    }
+
+    @After
+    public void cleanup() throws Exception {
+        assertNoTimeout(client().admin().cluster().prepareHealth().get());
+    }
+
+    public void testDecommissionStatusUpdatePublishedToAllNodes() throws ExecutionException, InterruptedException {
+        Settings commonSettings = Settings.builder()
+            .put("cluster.routing.allocation.awareness.attributes", "zone")
+            .put("cluster.routing.allocation.awareness.force.zone.values", "a,b,c")
+            .build();
+
+        logger.info("--> start 3 cluster manager nodes on zones 'a' & 'b' & 'c'");
+        List<String> clusterManagerNodes = internalCluster().startNodes(
+            Settings.builder()
+                .put(commonSettings)
+                .put("node.attr.zone", "a")
+                .put(onlyRole(commonSettings, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE))
+                .build(),
+            Settings.builder()
+                .put(commonSettings)
+                .put("node.attr.zone", "b")
+                .put(onlyRole(commonSettings, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE))
+                .build(),
+            Settings.builder()
+                .put(commonSettings)
+                .put("node.attr.zone", "c")
+                .put(onlyRole(commonSettings, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE))
+                .build()
+        );
+
+        logger.info("--> start 3 data nodes on zones 'a' & 'b' & 'c'");
+        List<String> dataNodes = internalCluster().startNodes(
+            Settings.builder()
+                .put(commonSettings)
+                .put("node.attr.zone", "a")
+                .put(onlyRole(commonSettings, DiscoveryNodeRole.DATA_ROLE))
+                .build(),
+            Settings.builder()
+                .put(commonSettings)
+                .put("node.attr.zone", "b")
+                .put(onlyRole(commonSettings, DiscoveryNodeRole.DATA_ROLE))
+                .build(),
+            Settings.builder()
+                .put(commonSettings)
+                .put("node.attr.zone", "c")
+                .put(onlyRole(commonSettings, DiscoveryNodeRole.DATA_ROLE))
+                .build()
+        );
+
+        ensureStableCluster(6);
+
+        logger.info("--> setting shard routing weights for weighted round robin");
+        Map<String, Double> weights = Map.of("a", 1.0, "b", 1.0, "c", 0.0);
+        WeightedRouting weightedRouting = new WeightedRouting("zone", weights);
+
+        ClusterPutWeightedRoutingResponse weightedRoutingResponse = client().admin()
+            .cluster()
+            .prepareWeightedRouting()
+            .setWeightedRouting(weightedRouting)
+            .get();
+        assertTrue(weightedRoutingResponse.isAcknowledged());
+
+        logger.info("--> starting decommissioning nodes in zone {}", 'c');
+        DecommissionAttribute decommissionAttribute = new DecommissionAttribute("zone", "c");
+        // Set no delay so the decommission is applied immediately
+        DecommissionRequest decommissionRequest = new DecommissionRequest(decommissionAttribute);
+        decommissionRequest.setNoDelay(true);
+        DecommissionResponse decommissionResponse = client().execute(DecommissionAction.INSTANCE, decommissionRequest).get();
+        assertTrue(decommissionResponse.isAcknowledged());
+
+        logger.info("--> Received decommissioning nodes in zone {}", 'c');
+        // Keep some delay for scheduler to invoke decommission flow
+        Thread.sleep(500);
+
+        // Will wait for all events to complete
+        client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get();
+
+        logger.info("--> Received LANGUID event");
+
+        // assert that decommission status is successful
+        GetDecommissionStateResponse response = client(clusterManagerNodes.get(0)).execute(
+            GetDecommissionStateAction.INSTANCE,
+            new GetDecommissionStateRequest(decommissionAttribute.attributeName())
+        ).get();
+        assertEquals(response.getAttributeValue(),
decommissionAttribute.attributeValue()); + assertEquals(DecommissionStatus.SUCCESSFUL, response.getDecommissionStatus()); + + logger.info("--> Decommission status is successful"); + ClusterState clusterState = client(clusterManagerNodes.get(0)).admin().cluster().prepareState().execute().actionGet().getState(); + assertEquals(4, clusterState.nodes().getSize()); + + logger.info("--> Got cluster state with 4 nodes."); + // assert status on nodes that are part of cluster currently + Iterator discoveryNodeIterator = clusterState.nodes().getNodes().valuesIt(); + DiscoveryNode clusterManagerNodeAfterDecommission = null; + while (discoveryNodeIterator.hasNext()) { + // assert no node has decommissioned attribute + DiscoveryNode node = discoveryNodeIterator.next(); + assertNotEquals(node.getAttributes().get("zone"), "c"); + if (node.isClusterManagerNode()) { + clusterManagerNodeAfterDecommission = node; + } + // assert all the nodes has status as SUCCESSFUL + ClusterService localNodeClusterService = internalCluster().getInstance(ClusterService.class, node.getName()); + assertEquals( + localNodeClusterService.state().metadata().decommissionAttributeMetadata().status(), + DecommissionStatus.SUCCESSFUL + ); + } + assertNotNull("Cluster Manager not found after decommission", clusterManagerNodeAfterDecommission); + logger.info("--> Cluster Manager node found after decommission"); + + // assert status on decommissioned node + // Here we will verify that until it got kicked out, it received appropriate status updates + // decommissioned nodes hence will have status as IN_PROGRESS as it will be kicked out later after this + // and won't receive status update to SUCCESSFUL + String randomDecommissionedNode = randomFrom(clusterManagerNodes.get(2), dataNodes.get(2)); + ClusterService decommissionedNodeClusterService = internalCluster().getInstance(ClusterService.class, randomDecommissionedNode); + assertEquals( + decommissionedNodeClusterService.state().metadata().decommissionAttributeMetadata().status(), + DecommissionStatus.IN_PROGRESS + ); + logger.info("--> Verified the decommissioned node Has in progress state."); + + // Will wait for all events to complete + client(clusterManagerNodeAfterDecommission.getName()).admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); + logger.info("--> Got LANGUID event"); + // Recommissioning the zone back to gracefully succeed the test once above tests succeeds + DeleteDecommissionStateResponse deleteDecommissionStateResponse = client(clusterManagerNodeAfterDecommission.getName()).execute( + DeleteDecommissionStateAction.INSTANCE, + new DeleteDecommissionStateRequest() + ).get(); + assertTrue(deleteDecommissionStateResponse.isAcknowledged()); + logger.info("--> Deleting decommission done."); + + // will wait for cluster to stabilise with a timeout of 2 min (findPeerInterval for decommissioned nodes) + // as by then all nodes should have joined the cluster + ensureStableCluster(6, TimeValue.timeValueMinutes(2)); + } + + public void testDecommissionFailedWhenAttributeNotWeighedAway() throws Exception { + Settings commonSettings = Settings.builder() + .put("cluster.routing.allocation.awareness.attributes", "zone") + .put("cluster.routing.allocation.awareness.force.zone.values", "a,b,c") + .build(); + // Start 3 cluster manager eligible nodes + internalCluster().startClusterManagerOnlyNodes(3, Settings.builder().put(commonSettings).build()); + // start 3 data nodes + internalCluster().startDataOnlyNodes(3, 
Settings.builder().put(commonSettings).build()); + ensureStableCluster(6); + ClusterHealthResponse health = client().admin() + .cluster() + .prepareHealth() + .setWaitForEvents(Priority.LANGUID) + .setWaitForGreenStatus() + .setWaitForNodes(Integer.toString(6)) + .execute() + .actionGet(); + assertFalse(health.isTimedOut()); + + DecommissionAttribute decommissionAttribute = new DecommissionAttribute("zone", "c"); + DecommissionRequest decommissionRequest = new DecommissionRequest(decommissionAttribute); + decommissionRequest.setNoDelay(true); + assertBusy(() -> { + DecommissioningFailedException ex = expectThrows( + DecommissioningFailedException.class, + () -> client().execute(DecommissionAction.INSTANCE, decommissionRequest).actionGet() + ); + assertTrue( + ex.getMessage() + .contains("no weights are set to the attribute. Please set appropriate weights before triggering decommission action") + ); + }); + + logger.info("--> setting shard routing weights for weighted round robin"); + Map weights = Map.of("a", 1.0, "b", 1.0, "c", 1.0); + WeightedRouting weightedRouting = new WeightedRouting("zone", weights); + + ClusterPutWeightedRoutingResponse weightedRoutingResponse = client().admin() + .cluster() + .prepareWeightedRouting() + .setWeightedRouting(weightedRouting) + .get(); + assertTrue(weightedRoutingResponse.isAcknowledged()); + + assertBusy(() -> { + DecommissioningFailedException ex = expectThrows( + DecommissioningFailedException.class, + () -> client().execute(DecommissionAction.INSTANCE, decommissionRequest).actionGet() + ); + assertTrue(ex.getMessage().contains("weight for decommissioned attribute is expected to be [0.0] but found [1.0]")); + }); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/WeightedRoutingIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/WeightedRoutingIT.java index 0b4eae81cde86..bba07d878a42c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/WeightedRoutingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/WeightedRoutingIT.java @@ -244,6 +244,48 @@ public void testGetWeightedRouting_WeightsAreSet() throws IOException { assertEquals("3.0", weightedRoutingResponse.getLocalNodeWeight()); } + public void testWeightedRoutingMetadataOnOSProcessRestart() throws Exception { + Settings commonSettings = Settings.builder() + .put("cluster.routing.allocation.awareness.attributes", "zone") + .put("cluster.routing.allocation.awareness.force.zone.values", "a,b,c") + .build(); + + internalCluster().startNodes( + Settings.builder().put(commonSettings).put("node.attr.zone", "a").build(), + Settings.builder().put(commonSettings).put("node.attr.zone", "b").build(), + Settings.builder().put(commonSettings).put("node.attr.zone", "c").build() + ); + + logger.info("--> waiting for nodes to form a cluster"); + ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForNodes("3").execute().actionGet(); + assertThat(health.isTimedOut(), equalTo(false)); + + ensureGreen(); + + logger.info("--> setting shard routing weights for weighted round robin"); + Map weights = Map.of("a", 1.0, "b", 2.0, "c", 3.0); + WeightedRouting weightedRouting = new WeightedRouting("zone", weights); + // put api call to set weights + ClusterPutWeightedRoutingResponse response = client().admin() + .cluster() + .prepareWeightedRouting() + .setWeightedRouting(weightedRouting) + .get(); + assertEquals(response.isAcknowledged(), true); + + ensureStableCluster(3); + 
+ // routing weights are set in cluster metadata + assertNotNull(internalCluster().clusterService().state().metadata().weightedRoutingMetadata()); + + ensureGreen(); + + // Restart a random data node and check that OS process comes healthy + internalCluster().restartRandomDataNode(); + ensureGreen(); + assertNotNull(internalCluster().clusterService().state().metadata().weightedRoutingMetadata()); + } + public void testDeleteWeightedRouting_WeightsNotSet() { Settings commonSettings = Settings.builder() .put("cluster.routing.allocation.awareness.attributes", "zone") diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java index 10e809e2fb5dc..4664648c03ccc 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java @@ -56,7 +56,6 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.ByteSizeUnit; import org.opensearch.common.unit.ByteSizeValue; -import org.opensearch.core.internal.io.IOUtils; import org.opensearch.env.Environment; import org.opensearch.env.NodeEnvironment; import org.opensearch.index.IndexSettings; @@ -217,6 +216,7 @@ public void testRestoreSnapshotAllocationDoesNotExceedWatermark() throws Excepti .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 6) .put(INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), "0ms") + .put(IndexSettings.INDEX_MERGE_ON_FLUSH_ENABLED.getKey(), false) .build() ); final long minShardSize = createReasonableSizedShards(indexName); @@ -485,12 +485,5 @@ TestFileStore getTestFileStore(Path path) { assertThat(path + " not contained in a unique tracked path", containingPaths, hasSize(1)); return trackedPaths.get(containingPaths.iterator().next()); } - - void clearTrackedPaths() throws IOException { - for (Path path : trackedPaths.keySet()) { - IOUtils.rm(path); - } - trackedPaths.clear(); - } } } diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java index a92849a077376..a88d42c07f8d6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java @@ -43,15 +43,28 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.common.Priority; +import org.opensearch.common.network.NetworkModule; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.ByteSizeUnit; +import org.opensearch.core.internal.io.IOUtils; import org.opensearch.indices.ShardLimitValidator; import org.opensearch.snapshots.SnapshotInfo; import org.opensearch.snapshots.SnapshotState; +import org.opensearch.snapshots.mockstore.MockRepository; +import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.MockHttpTransport; +import org.opensearch.test.NodeConfigurationSource; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.transport.MockTransportService; +import 
org.opensearch.transport.nio.MockNioTransportPlugin; +import java.io.IOException; +import java.nio.file.Path; +import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.function.Function; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; @@ -63,12 +76,18 @@ @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST) public class ClusterShardLimitIT extends OpenSearchIntegTestCase { private static final String shardsPerNodeKey = ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(); + private static final String ignoreDotIndexKey = ShardLimitValidator.SETTING_CLUSTER_IGNORE_DOT_INDEXES.getKey(); public void testSettingClusterMaxShards() { int shardsPerNode = between(1, 500_000); setShardsPerNode(shardsPerNode); } + public void testSettingIgnoreDotIndexes() { + boolean ignoreDotIndexes = randomBoolean(); + setIgnoreDotIndex(ignoreDotIndexes); + } + public void testMinimumPerNode() { int negativeShardsPerNode = between(-50_000, 0); try { @@ -100,7 +119,6 @@ public void testIndexCreationOverLimit() { ShardCounts counts = ShardCounts.forDataNodeCount(dataNodes); setShardsPerNode(counts.getShardsPerNode()); - // Create an index that will bring us up to the limit createIndex( "test", @@ -127,6 +145,164 @@ public void testIndexCreationOverLimit() { assertFalse(clusterState.getMetadata().hasIndex("should-fail")); } + /** + * The test checks if the index starting with the dot can be created if the node has + * number of shards equivalent to the cluster.max_shards_per_node and the cluster.ignore_Dot_indexes + * setting is set to true. If the cluster.ignore_Dot_indexes is set to true index creation of + * indexes starting with dot would succeed. + */ + public void testIndexCreationOverLimitForDotIndexesSucceeds() { + int dataNodes = client().admin().cluster().prepareState().get().getState().getNodes().getDataNodes().size(); + + // Setting the cluster.max_shards_per_node setting according to the data node count. + setShardsPerNode(dataNodes); + setIgnoreDotIndex(true); + + /* + Create an index that will bring us up to the limit. It would create index with primary equal to the + dataNodes * dataNodes so that cluster.max_shards_per_node setting is reached. + */ + createIndex( + "test", + Settings.builder() + .put(indexSettings()) + .put(SETTING_NUMBER_OF_SHARDS, dataNodes * dataNodes) + .put(SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ); + + // Getting total active shards in the cluster. 
+ int currentActiveShards = client().admin().cluster().prepareHealth().get().getActiveShards(); + + // Getting cluster.max_shards_per_node setting + ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); + String maxShardsPerNode = clusterState.getMetadata() + .settings() + .get(ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey()); + + // Checking if the total shards created are equivalent to dataNodes * cluster.max_shards_per_node + assertEquals(dataNodes * Integer.parseInt(maxShardsPerNode), currentActiveShards); + + createIndex( + ".test-index", + Settings.builder().put(indexSettings()).put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0).build() + ); + + clusterState = client().admin().cluster().prepareState().get().getState(); + assertTrue(clusterState.getMetadata().hasIndex(".test-index")); + } + + /** + * The test checks if the index starting with the dot should not be created if the node has + * number of shards equivalent to the cluster.max_shards_per_node and the cluster.ignore_Dot_indexes + * setting is set to false. If the cluster.ignore_Dot_indexes is set to false index creation of + * indexes starting with dot would fail as well. + */ + public void testIndexCreationOverLimitForDotIndexesFail() { + int dataNodes = client().admin().cluster().prepareState().get().getState().getNodes().getDataNodes().size(); + int maxAllowedShards = dataNodes * dataNodes; + + // Setting the cluster.max_shards_per_node setting according to the data node count. + setShardsPerNode(dataNodes); + + /* + Create an index that will bring us up to the limit. It would create index with primary equal to the + dataNodes * dataNodes so that cluster.max_shards_per_node setting is reached. + */ + createIndex( + "test", + Settings.builder() + .put(indexSettings()) + .put(SETTING_NUMBER_OF_SHARDS, maxAllowedShards) + .put(SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ); + + // Getting total active shards in the cluster. + int currentActiveShards = client().admin().cluster().prepareHealth().get().getActiveShards(); + + // Getting cluster.max_shards_per_node setting + ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); + String maxShardsPerNode = clusterState.getMetadata() + .settings() + .get(ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey()); + + // Checking if the total shards created are equivalent to dataNodes * cluster.max_shards_per_node + assertEquals(dataNodes * Integer.parseInt(maxShardsPerNode), currentActiveShards); + + int extraShardCount = 1; + try { + createIndex( + ".test-index", + Settings.builder() + .put(indexSettings()) + .put(SETTING_NUMBER_OF_SHARDS, extraShardCount) + .put(SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ); + } catch (IllegalArgumentException e) { + verifyException(maxAllowedShards, currentActiveShards, extraShardCount, e); + } + clusterState = client().admin().cluster().prepareState().get().getState(); + assertFalse(clusterState.getMetadata().hasIndex(".test-index")); + } + + /** + * The test checks if the index starting with the .ds- can be created if the node has + * number of shards equivalent to the cluster.max_shards_per_node and the cluster.ignore_Dot_indexes + * setting is set to true. If the cluster.ignore_Dot_indexes is set to true index creation of + * indexes starting with dot would only succeed and dataStream indexes would still have validation applied. 
+ */ + public void testIndexCreationOverLimitForDataStreamIndexes() { + int dataNodes = client().admin().cluster().prepareState().get().getState().getNodes().getDataNodes().size(); + int maxAllowedShards = dataNodes * dataNodes; + + // Setting the cluster.max_shards_per_node setting according to the data node count. + setShardsPerNode(dataNodes); + setIgnoreDotIndex(true); + + /* + Create an index that will bring us up to the limit. It would create index with primary equal to the + dataNodes * dataNodes so that cluster.max_shards_per_node setting is reached. + */ + createIndex( + "test", + Settings.builder() + .put(indexSettings()) + .put(SETTING_NUMBER_OF_SHARDS, maxAllowedShards) + .put(SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ); + + // Getting total active shards in the cluster. + int currentActiveShards = client().admin().cluster().prepareHealth().get().getActiveShards(); + + // Getting cluster.max_shards_per_node setting + ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); + String maxShardsPerNode = clusterState.getMetadata() + .settings() + .get(ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey()); + + // Checking if the total shards created are equivalent to dataNodes * cluster.max_shards_per_node + assertEquals(dataNodes * Integer.parseInt(maxShardsPerNode), currentActiveShards); + + int extraShardCount = 1; + try { + createIndex( + ".ds-test-index", + Settings.builder() + .put(indexSettings()) + .put(SETTING_NUMBER_OF_SHARDS, extraShardCount) + .put(SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ); + } catch (IllegalArgumentException e) { + verifyException(maxAllowedShards, currentActiveShards, extraShardCount, e); + } + clusterState = client().admin().cluster().prepareState().get().getState(); + assertFalse(clusterState.getMetadata().hasIndex(".ds-test-index")); + } + public void testIndexCreationOverLimitFromTemplate() { int dataNodes = client().admin().cluster().prepareState().get().getState().getNodes().getDataNodes().size(); @@ -414,6 +590,100 @@ public void testOpenIndexOverLimit() { assertFalse(clusterState.getMetadata().hasIndex("snapshot-index")); } + public void testIgnoreDotSettingOnMultipleNodes() throws IOException, InterruptedException { + int maxAllowedShardsPerNode = 10, indexPrimaryShards = 11, indexReplicaShards = 1; + + InternalTestCluster cluster = new InternalTestCluster( + randomLong(), + createTempDir(), + true, + true, + 0, + 0, + "cluster", + new NodeConfigurationSource() { + @Override + public Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(ClusterShardLimitIT.this.nodeSettings(nodeOrdinal)) + .put(NetworkModule.TRANSPORT_TYPE_KEY, getTestTransportType()) + .build(); + } + + @Override + public Path nodeConfigPath(int nodeOrdinal) { + return null; + } + }, + 0, + "cluster-", + Arrays.asList( + TestSeedPlugin.class, + MockHttpTransport.TestPlugin.class, + MockTransportService.TestPlugin.class, + MockNioTransportPlugin.class, + InternalSettingsPlugin.class, + MockRepository.Plugin.class + ), + Function.identity() + ); + cluster.beforeTest(random()); + + // Starting 3 ClusterManagerOnlyNode nodes + cluster.startClusterManagerOnlyNode(Settings.builder().put("cluster.ignore_dot_indexes", true).build()); + cluster.startClusterManagerOnlyNode(Settings.builder().put("cluster.ignore_dot_indexes", false).build()); + cluster.startClusterManagerOnlyNode(Settings.builder().put("cluster.ignore_dot_indexes", false).build()); + + // Starting 2 data nodes + 
cluster.startDataOnlyNode(Settings.builder().put("cluster.ignore_dot_indexes", false).build()); + cluster.startDataOnlyNode(Settings.builder().put("cluster.ignore_dot_indexes", false).build()); + + // Setting max shards per node to be 10 + cluster.client() + .admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put(shardsPerNodeKey, maxAllowedShardsPerNode)) + .get(); + + // Creating an index starting with dot having shards greater thn the desired node limit + cluster.client() + .admin() + .indices() + .prepareCreate(".test-index") + .setSettings( + Settings.builder().put(SETTING_NUMBER_OF_SHARDS, indexPrimaryShards).put(SETTING_NUMBER_OF_REPLICAS, indexReplicaShards) + ) + .get(); + + // As active ClusterManagerNode setting takes precedence killing the active one. + // This would be the first one where cluster.ignore_dot_indexes is true because the above calls are blocking. + cluster.stopCurrentClusterManagerNode(); + + // Waiting for all shards to get assigned + cluster.client().admin().cluster().prepareHealth().setWaitForGreenStatus().get(); + + // Creating an index starting with dot having shards greater thn the desired node limit + try { + cluster.client() + .admin() + .indices() + .prepareCreate(".test-index1") + .setSettings( + Settings.builder().put(SETTING_NUMBER_OF_SHARDS, indexPrimaryShards).put(SETTING_NUMBER_OF_REPLICAS, indexReplicaShards) + ) + .get(); + } catch (IllegalArgumentException e) { + ClusterHealthResponse clusterHealth = cluster.client().admin().cluster().prepareHealth().get(); + int currentActiveShards = clusterHealth.getActiveShards(); + int dataNodeCount = clusterHealth.getNumberOfDataNodes(); + int extraShardCount = indexPrimaryShards * (1 + indexReplicaShards); + verifyException(maxAllowedShardsPerNode * dataNodeCount, currentActiveShards, extraShardCount, e); + } + + IOUtils.close(cluster); + } + private int ensureMultipleDataNodes(int dataNodes) { if (dataNodes == 1) { internalCluster().startNode(dataNode()); @@ -457,6 +727,29 @@ private void setShardsPerNode(int shardsPerNode) { } } + private void setIgnoreDotIndex(boolean ignoreDotIndex) { + try { + ClusterUpdateSettingsResponse response; + if (frequently()) { + response = client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put(ignoreDotIndexKey, ignoreDotIndex).build()) + .get(); + assertEquals(ignoreDotIndex, response.getPersistentSettings().getAsBoolean(ignoreDotIndexKey, true)); + } else { + response = client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(ignoreDotIndexKey, ignoreDotIndex).build()) + .get(); + assertEquals(ignoreDotIndex, response.getTransientSettings().getAsBoolean(ignoreDotIndexKey, true)); + } + } catch (IllegalArgumentException ex) { + fail(ex.getMessage()); + } + } + private void verifyException(int dataNodes, ShardCounts counts, IllegalArgumentException e) { int totalShards = counts.getFailingIndexShards() * (1 + counts.getFailingIndexReplicas()); int currentShards = counts.getFirstIndexShards() * (1 + counts.getFirstIndexReplicas()); @@ -471,4 +764,15 @@ private void verifyException(int dataNodes, ShardCounts counts, IllegalArgumentE assertEquals(expectedError, e.getMessage()); } + private void verifyException(int maxShards, int currentShards, int extraShards, IllegalArgumentException e) { + String expectedError = "Validation Failed: 1: this action would add [" + + extraShards + + "] total shards, but this cluster currently has [" + + 
currentShards + + "]/[" + + maxShards + + "] maximum shards open;"; + assertEquals(expectedError, e.getMessage()); + } + } diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/ReplicaShardAllocatorIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/ReplicaShardAllocatorIT.java index db46fb4424848..6d05ecd0b56b0 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/ReplicaShardAllocatorIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/ReplicaShardAllocatorIT.java @@ -32,7 +32,6 @@ package org.opensearch.gateway; -import org.opensearch.LegacyESVersion; import org.opensearch.action.admin.indices.stats.ShardStats; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; @@ -512,7 +511,7 @@ public void testPeerRecoveryForClosedIndices() throws Exception { } /** - * If the recovery source is on an old node (before
{@link LegacyESVersion#V_7_2_0}) then the recovery target + * If the recovery source is on an old node (before {@code LegacyESVersion#V_7_2_0}
    ) then the recovery target * won't have the safe commit after phase1 because the recovery source does not send the global checkpoint in the clean_files * step. And if the recovery fails and retries, then the recovery stage might not transition properly. This test simulates * this behavior by changing the global checkpoint in phase1 to unassigned. diff --git a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java index 3d8da7eac7690..959b04b7861a3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java @@ -288,7 +288,14 @@ public void testIndexCanChangeCustomDataPath() throws Exception { final Path indexDataPath = sharedDataPath.resolve("start-" + randomAsciiLettersOfLength(10)); logger.info("--> creating index [{}] with data_path [{}]", index, indexDataPath); - createIndex(index, Settings.builder().put(IndexMetadata.SETTING_DATA_PATH, indexDataPath.toAbsolutePath().toString()).build()); + createIndex( + index, + Settings.builder() + .put(IndexMetadata.SETTING_DATA_PATH, indexDataPath.toAbsolutePath().toString()) + .put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.REQUEST) + .put(IndexSettings.INDEX_MERGE_ON_FLUSH_ENABLED.getKey(), false) + .build() + ); client().prepareIndex(index).setId("1").setSource("foo", "bar").setRefreshPolicy(IMMEDIATE).get(); ensureGreen(index); @@ -307,6 +314,16 @@ public void testIndexCanChangeCustomDataPath() throws Exception { logger.info("--> closing the index [{}] before updating data_path", index); assertAcked(client().admin().indices().prepareClose(index).setWaitForActiveShards(ActiveShardCount.DEFAULT)); + // race condition: async flush may cause translog file deletion resulting in an inconsistent stream from + // Files.walk below during copy phase + // temporarily disable refresh to avoid any flushes or syncs that may inadvertently cause the deletion + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings(index) + .setSettings(Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "-1").build()) + ); + final Path newIndexDataPath = sharedDataPath.resolve("end-" + randomAlphaOfLength(10)); IOUtils.rm(newIndexDataPath); @@ -326,11 +343,17 @@ public void testIndexCanChangeCustomDataPath() throws Exception { } logger.info("--> updating data_path to [{}] for index [{}]", newIndexDataPath, index); + // update data path and re-enable refresh assertAcked( client().admin() .indices() .prepareUpdateSettings(index) - .setSettings(Settings.builder().put(IndexMetadata.SETTING_DATA_PATH, newIndexDataPath.toAbsolutePath().toString()).build()) + .setSettings( + Settings.builder() + .put(IndexMetadata.SETTING_DATA_PATH, newIndexDataPath.toAbsolutePath().toString()) + .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), IndexSettings.DEFAULT_REFRESH_INTERVAL.toString()) + .build() + ) .setIndicesOptions(IndicesOptions.fromOptions(true, false, true, true)) ); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesLifecycleListenerIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesLifecycleListenerIT.java index 72f28e94528ba..17a7d4c84b6fe 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesLifecycleListenerIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesLifecycleListenerIT.java @@ -32,9 +32,7 @@ package org.opensearch.indices; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; -import org.opensearch.Version; import org.opensearch.action.admin.cluster.state.ClusterStateResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; @@ -246,13 +244,8 @@ public void testIndexStateShardChanged() throws Throwable { assertThat(stateChangeListenerNode1.afterCloseSettings.getAsInt(SETTING_NUMBER_OF_SHARDS, -1), equalTo(6)); assertThat(stateChangeListenerNode1.afterCloseSettings.getAsInt(SETTING_NUMBER_OF_REPLICAS, -1), equalTo(1)); - if (Version.CURRENT.onOrAfter(LegacyESVersion.V_7_2_0)) { - assertShardStatesMatch(stateChangeListenerNode1, 6, CLOSED, CREATED, RECOVERING, POST_RECOVERY, STARTED); - assertShardStatesMatch(stateChangeListenerNode2, 6, CLOSED, CREATED, RECOVERING, POST_RECOVERY, STARTED); - } else { - assertShardStatesMatch(stateChangeListenerNode1, 6, CLOSED); - assertShardStatesMatch(stateChangeListenerNode2, 6, CLOSED); - } + assertShardStatesMatch(stateChangeListenerNode1, 6, CLOSED, CREATED, RECOVERING, POST_RECOVERY, STARTED); + assertShardStatesMatch(stateChangeListenerNode2, 6, CLOSED, CREATED, RECOVERING, POST_RECOVERY, STARTED); } private static void assertShardStatesMatch( diff --git a/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java b/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java index 969534cbc5787..1da789965ea2d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java @@ -74,28 +74,13 @@ public void testSearchWithWRRShardRouting() throws IOException { assertThat(health.isTimedOut(), equalTo(false)); assertAcked( - prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 1).put("index" + ".number_of_replicas", 2)) + prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 10).put("index.number_of_replicas", 2)) ); - assertAcked( - prepareCreate("test1").setSettings(Settings.builder().put("index.number_of_shards", 1).put("index" + ".number_of_replicas", 2)) - ); - assertAcked( - prepareCreate("test2").setSettings(Settings.builder().put("index.number_of_shards", 1).put("index" + ".number_of_replicas", 2)) - ); - assertAcked( - prepareCreate("test3").setSettings(Settings.builder().put("index.number_of_shards", 1).put("index" + ".number_of_replicas", 2)) - ); - ensureGreen(); logger.info("--> creating indices for test"); - client().prepareIndex("test").setId("" + 0).setSource("field_" + 0, "value_" + 0).get(); - client().prepareIndex("test1").setId("" + 1).setSource("field_" + 1, "value_" + 1).get(); - client().prepareIndex("test2").setId("" + 2).setSource("field_" + 2, "value_" + 2).get(); - client().prepareIndex("test3").setId("" + 3).setSource("field_" + 3, "value_" + 3).get(); - - refresh("test", "test1", "test2", "test3"); - - ClusterState state1 = internalCluster().clusterService().state(); + for (int i = 0; i < 100; i++) { + client().prepareIndex("test_" + i).setId("" + i).setSource("field_" + i, "value_" + i).get(); + } logger.info("--> setting shard routing weights for weighted round robin"); Map weights = Map.of("a", 1.0, "b", 1.0, "c", 0.0); diff --git 
a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MultiTermsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MultiTermsIT.java index 7d7f80c8ac758..d4273aee925f7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MultiTermsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MultiTermsIT.java @@ -161,7 +161,4 @@ public void testMultiValuedFieldWithValueScript() throws Exception { assertThat(bucket.getDocCount(), equalTo(2L)); } - private MultiTermsValuesSourceConfig field(String name) { - return new MultiTermsValuesSourceConfig.Builder().setFieldName(name).build(); - } } diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java index 7b95bf93f8bf4..f659d827448a0 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java @@ -78,14 +78,6 @@ public void testShardClone() throws Exception { final Path repoPath = randomRepoPath(); createRepository(repoName, "fs", repoPath); - final boolean useBwCFormat = randomBoolean(); - if (useBwCFormat) { - initWithSnapshotVersion(repoName, repoPath, SnapshotsService.OLD_SNAPSHOT_FORMAT); - // Re-create repo to clear repository data cache - assertAcked(clusterAdmin().prepareDeleteRepository(repoName).get()); - createRepository(repoName, "fs", repoPath); - } - final String indexName = "test-index"; createIndexWithRandomDocs(indexName, randomIntBetween(5, 10)); final String sourceSnapshot = "source-snapshot"; @@ -101,21 +93,11 @@ public void testShardClone() throws Exception { final SnapshotId targetSnapshotId = new SnapshotId("target-snapshot", UUIDs.randomBase64UUID(random())); - final String currentShardGen; - if (useBwCFormat) { - currentShardGen = null; - } else { - currentShardGen = repositoryData.shardGenerations().getShardGen(indexId, shardId); - } + final String currentShardGen = repositoryData.shardGenerations().getShardGen(indexId, shardId); final String newShardGeneration = PlainActionFuture.get( f -> repository.cloneShardSnapshot(sourceSnapshotInfo.snapshotId(), targetSnapshotId, repositoryShardId, currentShardGen, f) ); - if (useBwCFormat) { - final long gen = Long.parseLong(newShardGeneration); - assertEquals(gen, 1L); // Initial snapshot brought it to 0, clone increments it to 1 - } - final BlobStoreIndexShardSnapshot targetShardSnapshot = readShardSnapshot(repository, repositoryShardId, targetSnapshotId); final BlobStoreIndexShardSnapshot sourceShardSnapshot = readShardSnapshot( repository, diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsIT.java index b6d482cad8860..f483ed7fe6c5d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsIT.java @@ -56,7 +56,6 @@ import org.opensearch.plugins.Plugin; import org.opensearch.repositories.RepositoryData; import org.opensearch.repositories.RepositoryException; -import org.opensearch.repositories.ShardGenerations; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.snapshots.mockstore.MockRepository; import 
org.opensearch.test.OpenSearchIntegTestCase; @@ -1297,43 +1296,6 @@ public void testConcurrentOperationsLimit() throws Exception { } } - public void testConcurrentSnapshotWorksWithOldVersionRepo() throws Exception { - internalCluster().startClusterManagerOnlyNode(); - final String dataNode = internalCluster().startDataOnlyNode(); - final String repoName = "test-repo"; - final Path repoPath = randomRepoPath(); - createRepository( - repoName, - "mock", - Settings.builder().put(BlobStoreRepository.CACHE_REPOSITORY_DATA.getKey(), false).put("location", repoPath) - ); - initWithSnapshotVersion(repoName, repoPath, SnapshotsService.OLD_SNAPSHOT_FORMAT); - - createIndexWithContent("index-slow"); - - final ActionFuture createSlowFuture = startFullSnapshotBlockedOnDataNode( - "slow-snapshot", - repoName, - dataNode - ); - - final String dataNode2 = internalCluster().startDataOnlyNode(); - ensureStableCluster(3); - final String indexFast = "index-fast"; - createIndexWithContent(indexFast, dataNode2, dataNode); - - final ActionFuture createFastSnapshot = startFullSnapshot(repoName, "fast-snapshot"); - - assertThat(createSlowFuture.isDone(), is(false)); - unblockNode(repoName, dataNode); - - assertSuccessful(createFastSnapshot); - assertSuccessful(createSlowFuture); - - final RepositoryData repositoryData = getRepositoryData(repoName); - assertThat(repositoryData.shardGenerations(), is(ShardGenerations.EMPTY)); - } - public void testQueuedDeleteAfterFinalizationFailure() throws Exception { final String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(); final String repoName = "test-repo"; diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/CorruptedBlobStoreRepositoryIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/CorruptedBlobStoreRepositoryIT.java index b806ee3e55a94..a07b245db2b21 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/CorruptedBlobStoreRepositoryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/CorruptedBlobStoreRepositoryIT.java @@ -32,48 +32,33 @@ package org.opensearch.snapshots; -import org.opensearch.Version; -import org.opensearch.action.ActionRunnable; import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.opensearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; import org.opensearch.action.index.IndexRequestBuilder; -import org.opensearch.action.support.PlainActionFuture; import org.opensearch.client.Client; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.metadata.RepositoriesMetadata; -import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.internal.io.IOUtils; import org.opensearch.repositories.IndexId; -import org.opensearch.repositories.IndexMetaDataGenerations; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; import org.opensearch.repositories.RepositoryData; import org.opensearch.repositories.RepositoryException; -import org.opensearch.repositories.ShardGenerations; import org.opensearch.repositories.blobstore.BlobStoreRepository; -import org.opensearch.threadpool.ThreadPool; -import java.io.IOException; import 
java.nio.channels.SeekableByteChannel; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardOpenOption; -import java.util.Collections; import java.util.List; -import java.util.Locale; import java.util.Map; -import java.util.concurrent.ExecutionException; -import java.util.function.Function; -import java.util.stream.Collectors; import java.util.stream.Stream; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFileExists; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertRequestBuilderThrows; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -310,101 +295,6 @@ public void testFindDanglingLatestGeneration() throws Exception { ); } - public void testHandlingMissingRootLevelSnapshotMetadata() throws Exception { - Path repo = randomRepoPath(); - final String repoName = "test-repo"; - createRepository( - repoName, - "fs", - Settings.builder() - .put("location", repo) - .put("compress", false) - // Don't cache repository data because the test manually modifies the repository data - .put(BlobStoreRepository.CACHE_REPOSITORY_DATA.getKey(), false) - .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES) - ); - - final String snapshotPrefix = "test-snap-"; - final int snapshots = randomIntBetween(1, 2); - logger.info("--> creating [{}] snapshots", snapshots); - for (int i = 0; i < snapshots; ++i) { - // Workaround to simulate BwC situation: taking a snapshot without indices here so that we don't create any new version shard - // generations (the existence of which would short-circuit checks for the repo containing old version snapshots) - CreateSnapshotResponse createSnapshotResponse = client().admin() - .cluster() - .prepareCreateSnapshot(repoName, snapshotPrefix + i) - .setIndices() - .setWaitForCompletion(true) - .get(); - assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), is(0)); - assertThat( - createSnapshotResponse.getSnapshotInfo().successfulShards(), - equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()) - ); - } - final RepositoryData repositoryData = getRepositoryData(repoName); - - final SnapshotId snapshotToCorrupt = randomFrom(repositoryData.getSnapshotIds()); - logger.info("--> delete root level snapshot metadata blob for snapshot [{}]", snapshotToCorrupt); - Files.delete(repo.resolve(String.format(Locale.ROOT, BlobStoreRepository.SNAPSHOT_NAME_FORMAT, snapshotToCorrupt.getUUID()))); - - logger.info("--> strip version information from index-N blob"); - final RepositoryData withoutVersions = new RepositoryData( - repositoryData.getGenId(), - repositoryData.getSnapshotIds().stream().collect(Collectors.toMap(SnapshotId::getUUID, Function.identity())), - repositoryData.getSnapshotIds().stream().collect(Collectors.toMap(SnapshotId::getUUID, repositoryData::getSnapshotState)), - Collections.emptyMap(), - Collections.emptyMap(), - ShardGenerations.EMPTY, - IndexMetaDataGenerations.EMPTY - ); - - Files.write( - repo.resolve(BlobStoreRepository.INDEX_FILE_PREFIX + withoutVersions.getGenId()), - BytesReference.toBytes( - BytesReference.bytes(withoutVersions.snapshotsToXContent(XContentFactory.jsonBuilder(), Version.CURRENT)) - ), - StandardOpenOption.TRUNCATE_EXISTING - ); - - logger.info("--> verify that repo is assumed in old metadata format"); - final SnapshotsService snapshotsService = 
internalCluster().getCurrentClusterManagerNodeInstance(SnapshotsService.class); - final ThreadPool threadPool = internalCluster().getCurrentClusterManagerNodeInstance(ThreadPool.class); - assertThat( - PlainActionFuture.get( - f -> threadPool.generic() - .execute( - ActionRunnable.supply( - f, - () -> snapshotsService.minCompatibleVersion(Version.CURRENT, getRepositoryData(repoName), null) - ) - ) - ), - is(SnapshotsService.OLD_SNAPSHOT_FORMAT) - ); - - logger.info("--> verify that snapshot with missing root level metadata can be deleted"); - assertAcked(startDeleteSnapshot(repoName, snapshotToCorrupt.getName()).get()); - - logger.info("--> verify that repository is assumed in new metadata format after removing corrupted snapshot"); - assertThat( - PlainActionFuture.get( - f -> threadPool.generic() - .execute( - ActionRunnable.supply( - f, - () -> snapshotsService.minCompatibleVersion(Version.CURRENT, getRepositoryData(repoName), null) - ) - ) - ), - is(Version.CURRENT) - ); - final RepositoryData finalRepositoryData = getRepositoryData(repoName); - for (SnapshotId snapshotId : finalRepositoryData.getSnapshotIds()) { - assertThat(finalRepositoryData.getVersion(snapshotId), is(Version.CURRENT)); - } - } - public void testMountCorruptedRepositoryData() throws Exception { disableRepoConsistencyCheck("This test intentionally corrupts the repository contents"); Client client = client(); @@ -453,87 +343,6 @@ public void testMountCorruptedRepositoryData() throws Exception { expectThrows(RepositoryException.class, () -> getRepositoryData(otherRepo)); } - public void testHandleSnapshotErrorWithBwCFormat() throws IOException, ExecutionException, InterruptedException { - final String repoName = "test-repo"; - final Path repoPath = randomRepoPath(); - createRepository(repoName, "fs", repoPath); - final String oldVersionSnapshot = initWithSnapshotVersion(repoName, repoPath, SnapshotsService.OLD_SNAPSHOT_FORMAT); - - logger.info("--> recreating repository to clear caches"); - client().admin().cluster().prepareDeleteRepository(repoName).get(); - createRepository(repoName, "fs", repoPath); - - final String indexName = "test-index"; - createIndex(indexName); - - createFullSnapshot(repoName, "snapshot-1"); - - // In the old metadata version the shard level metadata could be moved to the next generation for all sorts of reasons, this should - // not break subsequent repository operations - logger.info("--> move shard level metadata to new generation"); - final IndexId indexId = getRepositoryData(repoName).resolveIndexId(indexName); - final Path shardPath = repoPath.resolve("indices").resolve(indexId.getId()).resolve("0"); - final Path initialShardMetaPath = shardPath.resolve(BlobStoreRepository.INDEX_FILE_PREFIX + "0"); - assertFileExists(initialShardMetaPath); - Files.move(initialShardMetaPath, shardPath.resolve(BlobStoreRepository.INDEX_FILE_PREFIX + "1")); - - startDeleteSnapshot(repoName, oldVersionSnapshot).get(); - - createFullSnapshot(repoName, "snapshot-2"); - } - - public void testRepairBrokenShardGenerations() throws Exception { - final String repoName = "test-repo"; - final Path repoPath = randomRepoPath(); - createRepository(repoName, "fs", repoPath); - final String oldVersionSnapshot = initWithSnapshotVersion(repoName, repoPath, SnapshotsService.OLD_SNAPSHOT_FORMAT); - - logger.info("--> recreating repository to clear caches"); - client().admin().cluster().prepareDeleteRepository(repoName).get(); - createRepository(repoName, "fs", repoPath); - - final String indexName = "test-index"; - 
createIndex(indexName); - - createFullSnapshot(repoName, "snapshot-1"); - - startDeleteSnapshot(repoName, oldVersionSnapshot).get(); - - logger.info("--> move shard level metadata to new generation and make RepositoryData point at an older generation"); - final IndexId indexId = getRepositoryData(repoName).resolveIndexId(indexName); - final Path shardPath = repoPath.resolve("indices").resolve(indexId.getId()).resolve("0"); - final Path initialShardMetaPath = shardPath.resolve(BlobStoreRepository.INDEX_FILE_PREFIX + "0"); - assertFileExists(initialShardMetaPath); - Files.move(initialShardMetaPath, shardPath.resolve(BlobStoreRepository.INDEX_FILE_PREFIX + randomIntBetween(1, 1000))); - - final RepositoryData repositoryData1 = getRepositoryData(repoName); - final Map snapshotIds = repositoryData1.getSnapshotIds() - .stream() - .collect(Collectors.toMap(SnapshotId::getUUID, Function.identity())); - final RepositoryData brokenRepoData = new RepositoryData( - repositoryData1.getGenId(), - snapshotIds, - snapshotIds.values().stream().collect(Collectors.toMap(SnapshotId::getUUID, repositoryData1::getSnapshotState)), - snapshotIds.values().stream().collect(Collectors.toMap(SnapshotId::getUUID, repositoryData1::getVersion)), - repositoryData1.getIndices().values().stream().collect(Collectors.toMap(Function.identity(), repositoryData1::getSnapshots)), - ShardGenerations.builder().putAll(repositoryData1.shardGenerations()).put(indexId, 0, "0").build(), - repositoryData1.indexMetaDataGenerations() - ); - Files.write( - repoPath.resolve(BlobStoreRepository.INDEX_FILE_PREFIX + repositoryData1.getGenId()), - BytesReference.toBytes( - BytesReference.bytes(brokenRepoData.snapshotsToXContent(XContentFactory.jsonBuilder(), Version.CURRENT)) - ), - StandardOpenOption.TRUNCATE_EXISTING - ); - - logger.info("--> recreating repository to clear caches"); - client().admin().cluster().prepareDeleteRepository(repoName).get(); - createRepository(repoName, "fs", repoPath); - - createFullSnapshot(repoName, "snapshot-2"); - } - /** * Tests that a shard snapshot with a corrupted shard index file can still be used for restore and incremental snapshots. 
*/ diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index b4287f201489b..c0fabb8becf6b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -37,7 +37,6 @@ import org.opensearch.Version; import org.opensearch.action.ActionFuture; -import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest; import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; @@ -47,7 +46,6 @@ import org.opensearch.action.admin.indices.stats.ShardStats; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.support.ActiveShardCount; -import org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.Client; import org.opensearch.client.node.NodeClient; @@ -1386,44 +1384,6 @@ public void testPartialSnapshotAllShardsMissing() throws Exception { assertThat(createSnapshotResponse.getSnapshotInfo().state(), is(SnapshotState.PARTIAL)); } - /** - * Tests for the legacy snapshot path that is normally executed if the cluster contains any nodes older than - * {@link SnapshotsService#NO_REPO_INITIALIZE_VERSION}. - * Makes sure that blocking as well as non-blocking snapshot create paths execute cleanly as well as that error handling works out - * correctly by testing a snapshot name collision. 
- */ - public void testCreateSnapshotLegacyPath() throws Exception { - final String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(); - internalCluster().startDataOnlyNode(); - final String repoName = "test-repo"; - createRepository(repoName, "fs"); - createIndex("some-index"); - - final SnapshotsService snapshotsService = internalCluster().getClusterManagerNodeInstance(SnapshotsService.class); - final Snapshot snapshot1 = PlainActionFuture.get( - f -> snapshotsService.createSnapshotLegacy(new CreateSnapshotRequest(repoName, "snap-1"), f) - ); - awaitNoMoreRunningOperations(clusterManagerNode); - - final InvalidSnapshotNameException sne = expectThrows( - InvalidSnapshotNameException.class, - () -> PlainActionFuture.get( - f -> snapshotsService.executeSnapshotLegacy(new CreateSnapshotRequest(repoName, snapshot1.getSnapshotId().getName()), f) - ) - ); - - assertThat(sne.getMessage(), containsString("snapshot with the same name already exists")); - final SnapshotInfo snapshot2 = PlainActionFuture.get( - f -> snapshotsService.executeSnapshotLegacy(new CreateSnapshotRequest(repoName, "snap-2"), f) - ); - assertThat(snapshot2.state(), is(SnapshotState.SUCCESS)); - - final SnapshotInfo snapshot3 = PlainActionFuture.get( - f -> snapshotsService.executeSnapshotLegacy(new CreateSnapshotRequest(repoName, "snap-3").indices("does-not-exist-*"), f) - ); - assertThat(snapshot3.state(), is(SnapshotState.SUCCESS)); - } - public void testSnapshotDeleteRelocatingPrimaryIndex() throws Exception { internalCluster().startClusterManagerOnlyNode(); final List dataNodes = internalCluster().startDataOnlyNodes(2); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java new file mode 100644 index 0000000000000..0e964fdf14109 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java @@ -0,0 +1,227 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ +package org.opensearch.snapshots; + +import com.carrotsearch.randomizedtesting.generators.RandomPicks; +import org.hamcrest.MatcherAssert; +import org.junit.BeforeClass; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; +import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; +import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest; +import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequestBuilder; +import org.opensearch.action.index.IndexRequestBuilder; +import org.opensearch.client.Client; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.routing.GroupShardsIterator; +import org.opensearch.cluster.routing.ShardIterator; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.common.collect.Map; +import org.opensearch.common.io.PathUtils; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.index.Index; +import org.opensearch.index.IndexNotFoundException; +import org.opensearch.monitor.fs.FsInfo; + +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static 
org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.contains; +import static org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest.Metric.FS; +import static org.opensearch.common.util.CollectionUtils.iterableAsArrayList; + +public final class SearchableSnapshotIT extends AbstractSnapshotIntegTestCase { + + @BeforeClass + public static void assumeFeatureFlag() { + assumeTrue( + "Searchable snapshot feature flag is enabled", + Boolean.parseBoolean(System.getProperty(FeatureFlags.SEARCHABLE_SNAPSHOT)) + ); + } + + @Override + protected boolean addMockInternalEngine() { + return false; + } + + @Override + protected Settings.Builder randomRepositorySettings() { + final Settings.Builder settings = Settings.builder(); + settings.put("location", randomRepoPath()).put("compress", randomBoolean()); + return settings; + } + + public void testCreateSearchableSnapshot() throws Exception { + final int numReplicasIndex1 = randomIntBetween(1, 4); + final int numReplicasIndex2 = randomIntBetween(0, 2); + internalCluster().ensureAtLeastNumDataNodes(Math.max(numReplicasIndex1, numReplicasIndex2) + 1); + final Client client = client(); + createRepository("test-repo", "fs"); + createIndex( + "test-idx-1", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, Integer.toString(numReplicasIndex1)) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1") + .build() + ); + createIndex( + "test-idx-2", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, Integer.toString(numReplicasIndex2)) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1") + .build() + ); + ensureGreen(); + indexRandomDocs("test-idx-1", 100); + indexRandomDocs("test-idx-2", 100); + + logger.info("--> snapshot"); + final CreateSnapshotResponse createSnapshotResponse = client.admin() + .cluster() + .prepareCreateSnapshot("test-repo", "test-snap") + .setWaitForCompletion(true) + .setIndices("test-idx-1", "test-idx-2") + .get(); + MatcherAssert.assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); + MatcherAssert.assertThat( + createSnapshotResponse.getSnapshotInfo().successfulShards(), + equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()) + ); + + assertTrue(client.admin().indices().prepareDelete("test-idx-1", "test-idx-2").get().isAcknowledged()); + + internalCluster().ensureAtLeastNumSearchNodes(Math.max(numReplicasIndex1, numReplicasIndex2) + 1); + + logger.info("--> restore indices as 'remote_snapshot'"); + client.admin() + .cluster() + .prepareRestoreSnapshot("test-repo", "test-snap") + .setRenamePattern("(.+)") + .setRenameReplacement("$1-copy") + .setStorageType(RestoreSnapshotRequest.StorageType.REMOTE_SNAPSHOT) + .setWaitForCompletion(true) + .execute() + .actionGet(); + ensureGreen(); + + assertDocCount("test-idx-1-copy", 100L); + assertDocCount("test-idx-2-copy", 100L); + assertIndexDirectoryDoesNotExist("test-idx-1-copy", "test-idx-2-copy"); + } + + public void testSearchableSnapshotIndexIsReadOnly() throws Exception { + final String indexName = "test-index"; + final Client client = client(); + createRepository("test-repo", "fs"); + createIndex( + indexName, + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0").put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1").build() + ); + ensureGreen(); + + logger.info("--> snapshot"); + final CreateSnapshotResponse createSnapshotResponse = client.admin() + .cluster() + .prepareCreateSnapshot("test-repo", "test-snap") + .setWaitForCompletion(true) + .setIndices(indexName) + .get(); + 
MatcherAssert.assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); + MatcherAssert.assertThat( + createSnapshotResponse.getSnapshotInfo().successfulShards(), + equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()) + ); + + assertTrue(client.admin().indices().prepareDelete(indexName).get().isAcknowledged()); + + logger.info("--> restore indices as 'remote_snapshot'"); + client.admin() + .cluster() + .prepareRestoreSnapshot("test-repo", "test-snap") + .setRenamePattern("(.+)") + .setRenameReplacement("$1") + .setStorageType(RestoreSnapshotRequest.StorageType.REMOTE_SNAPSHOT) + .setWaitForCompletion(true) + .execute() + .actionGet(); + ensureGreen(); + + assertIndexingBlocked(indexName); + assertIndexSettingChangeBlocked(indexName); + assertTrue(client.admin().indices().prepareDelete(indexName).get().isAcknowledged()); + assertThrows( + "Expect index to not exist", + IndexNotFoundException.class, + () -> client.admin().indices().prepareGetIndex().setIndices(indexName).execute().actionGet() + ); + } + + private void assertIndexingBlocked(String index) { + try { + final IndexRequestBuilder builder = client().prepareIndex(index); + builder.setSource("foo", "bar"); + builder.execute().actionGet(); + fail("Expected operation to throw an exception"); + } catch (ClusterBlockException e) { + MatcherAssert.assertThat(e.blocks(), contains(IndexMetadata.REMOTE_READ_ONLY_ALLOW_DELETE)); + } + } + + private void assertIndexSettingChangeBlocked(String index) { + try { + final UpdateSettingsRequestBuilder builder = client().admin().indices().prepareUpdateSettings(index); + builder.setSettings(Map.of("index.refresh_interval", 10)); + builder.execute().actionGet(); + fail("Expected operation to throw an exception"); + } catch (ClusterBlockException e) { + MatcherAssert.assertThat(e.blocks(), contains(IndexMetadata.REMOTE_READ_ONLY_ALLOW_DELETE)); + } + } + + /** + * Picks a shard out of the cluster state for each given index and asserts + * that the 'index' directory does not exist in the node's file system. + * This assertion is digging a bit into the implementation details to + * verify that the Lucene segment files are not copied from the snapshot + * repository to the node's local disk for a remote snapshot index. + */ + private void assertIndexDirectoryDoesNotExist(String... 
indexNames) {
+        final ClusterState state = client().admin().cluster().prepareState().get().getState();
+        for (String indexName : indexNames) {
+            final Index index = state.metadata().index(indexName).getIndex();
+            // Get the primary shards for the given index
+            final GroupShardsIterator<ShardIterator> shardIterators = state.getRoutingTable()
+                .activePrimaryShardsGrouped(new String[] { indexName }, false);
+            // Randomly pick one of the shards
+            final List<ShardIterator> iterators = iterableAsArrayList(shardIterators);
+            final ShardIterator shardIterator = RandomPicks.randomFrom(random(), iterators);
+            final ShardRouting shardRouting = shardIterator.nextOrNull();
+            assertNotNull(shardRouting);
+            assertTrue(shardRouting.primary());
+            assertTrue(shardRouting.assignedToNode());
+            // Get the file system stats for the assigned node
+            final String nodeId = shardRouting.currentNodeId();
+            final NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats(nodeId).addMetric(FS.metricName()).get();
+            for (FsInfo.Path info : nodeStats.getNodes().get(0).getFs()) {
+                // Build the expected path for the index data for a "normal"
+                // index and assert it does not exist
+                final String path = info.getPath();
+                final Path file = PathUtils.get(path)
+                    .resolve("indices")
+                    .resolve(index.getUUID())
+                    .resolve(Integer.toString(shardRouting.getId()))
+                    .resolve("index");
+                MatcherAssert.assertThat("Expect file not to exist: " + file, Files.exists(file), is(false));
+            }
+        }
+    }
+}
diff --git a/server/src/main/java/org/opensearch/Build.java b/server/src/main/java/org/opensearch/Build.java
index 364b17ad4aa33..13c951b10cfe3 100644
--- a/server/src/main/java/org/opensearch/Build.java
+++ b/server/src/main/java/org/opensearch/Build.java
@@ -207,58 +207,27 @@ public String date() {
     }

     public static Build readBuild(StreamInput in) throws IOException {
-        final String distribution;
-        final Type type;
         // the following is new for opensearch: we write the distribution to support any "forks"
-        if (in.getVersion().onOrAfter(Version.V_1_0_0)) {
-            distribution = in.readString();
-        } else {
-            distribution = "other";
-        }
-
-        // The following block is kept for existing BWS tests to pass.
-        // TODO - clean up this code when we remove all v6 bwc tests.
- // TODO - clean this up when OSS flavor is removed in all of the code base - // (Integ test zip still write OSS as distribution) - // See issue: https://github.com/opendistro-for-elasticsearch/search/issues/159 - if (in.getVersion().before(Version.V_1_3_0)) { - String flavor = in.readString(); - } + final String distribution = in.readString(); // be lenient when reading on the wire, the enumeration values from other versions might be different than what we know - type = Type.fromDisplayName(in.readString(), false); + final Type type = Type.fromDisplayName(in.readString(), false); String hash = in.readString(); String date = in.readString(); boolean snapshot = in.readBoolean(); - - final String version; - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - version = in.readString(); - } else { - version = in.getVersion().toString(); - } + final String version = in.readString(); return new Build(type, hash, date, snapshot, version, distribution); } public static void writeBuild(Build build, StreamOutput out) throws IOException { // the following is new for opensearch: we write the distribution name to support any "forks" of the code - if (out.getVersion().onOrAfter(Version.V_1_0_0)) { - out.writeString(build.distribution); - } + out.writeString(build.distribution); - // The following block is kept for existing BWS tests to pass. - // TODO - clean up this code when we remove all v6 bwc tests. - // TODO - clean this up when OSS flavor is removed in all of the code base - if (out.getVersion().before(Version.V_1_3_0)) { - out.writeString("oss"); - } final Type buildType = build.type(); out.writeString(buildType.displayName()); out.writeString(build.hash()); out.writeString(build.date()); out.writeBoolean(build.isSnapshot()); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeString(build.getQualifiedVersion()); - } + out.writeString(build.getQualifiedVersion()); } /** diff --git a/server/src/main/java/org/opensearch/LegacyESVersion.java b/server/src/main/java/org/opensearch/LegacyESVersion.java index d4ac3c7d2f8b1..8cd60084ad710 100644 --- a/server/src/main/java/org/opensearch/LegacyESVersion.java +++ b/server/src/main/java/org/opensearch/LegacyESVersion.java @@ -48,36 +48,6 @@ */ public class LegacyESVersion extends Version { - public static final LegacyESVersion V_7_0_0 = new LegacyESVersion(7000099, org.apache.lucene.util.Version.LUCENE_8_0_0); - public static final LegacyESVersion V_7_0_1 = new LegacyESVersion(7000199, org.apache.lucene.util.Version.LUCENE_8_0_0); - public static final LegacyESVersion V_7_1_0 = new LegacyESVersion(7010099, org.apache.lucene.util.Version.LUCENE_8_0_0); - public static final LegacyESVersion V_7_1_1 = new LegacyESVersion(7010199, org.apache.lucene.util.Version.LUCENE_8_0_0); - public static final LegacyESVersion V_7_2_0 = new LegacyESVersion(7020099, org.apache.lucene.util.Version.LUCENE_8_0_0); - public static final LegacyESVersion V_7_2_1 = new LegacyESVersion(7020199, org.apache.lucene.util.Version.LUCENE_8_0_0); - public static final LegacyESVersion V_7_3_0 = new LegacyESVersion(7030099, org.apache.lucene.util.Version.LUCENE_8_1_0); - public static final LegacyESVersion V_7_3_1 = new LegacyESVersion(7030199, org.apache.lucene.util.Version.LUCENE_8_1_0); - public static final LegacyESVersion V_7_3_2 = new LegacyESVersion(7030299, org.apache.lucene.util.Version.LUCENE_8_1_0); - public static final LegacyESVersion V_7_4_0 = new LegacyESVersion(7040099, org.apache.lucene.util.Version.LUCENE_8_2_0); - public static final 
LegacyESVersion V_7_4_1 = new LegacyESVersion(7040199, org.apache.lucene.util.Version.LUCENE_8_2_0);
-    public static final LegacyESVersion V_7_4_2 = new LegacyESVersion(7040299, org.apache.lucene.util.Version.LUCENE_8_2_0);
-    public static final LegacyESVersion V_7_5_0 = new LegacyESVersion(7050099, org.apache.lucene.util.Version.LUCENE_8_3_0);
-    public static final LegacyESVersion V_7_5_1 = new LegacyESVersion(7050199, org.apache.lucene.util.Version.LUCENE_8_3_0);
-    public static final LegacyESVersion V_7_5_2 = new LegacyESVersion(7050299, org.apache.lucene.util.Version.LUCENE_8_3_0);
-    public static final LegacyESVersion V_7_6_0 = new LegacyESVersion(7060099, org.apache.lucene.util.Version.LUCENE_8_4_0);
-    public static final LegacyESVersion V_7_6_1 = new LegacyESVersion(7060199, org.apache.lucene.util.Version.LUCENE_8_4_0);
-    public static final LegacyESVersion V_7_6_2 = new LegacyESVersion(7060299, org.apache.lucene.util.Version.LUCENE_8_4_0);
-    public static final LegacyESVersion V_7_7_0 = new LegacyESVersion(7070099, org.apache.lucene.util.Version.LUCENE_8_5_1);
-    public static final LegacyESVersion V_7_7_1 = new LegacyESVersion(7070199, org.apache.lucene.util.Version.LUCENE_8_5_1);
-    public static final LegacyESVersion V_7_8_0 = new LegacyESVersion(7080099, org.apache.lucene.util.Version.LUCENE_8_5_1);
-    public static final LegacyESVersion V_7_8_1 = new LegacyESVersion(7080199, org.apache.lucene.util.Version.LUCENE_8_5_1);
-    public static final LegacyESVersion V_7_9_0 = new LegacyESVersion(7090099, org.apache.lucene.util.Version.LUCENE_8_6_0);
-    public static final LegacyESVersion V_7_9_1 = new LegacyESVersion(7090199, org.apache.lucene.util.Version.LUCENE_8_6_2);
-    public static final LegacyESVersion V_7_9_2 = new LegacyESVersion(7090299, org.apache.lucene.util.Version.LUCENE_8_6_2);
-    public static final LegacyESVersion V_7_9_3 = new LegacyESVersion(7090399, org.apache.lucene.util.Version.LUCENE_8_6_2);
-    public static final LegacyESVersion V_7_10_0 = new LegacyESVersion(7100099, org.apache.lucene.util.Version.LUCENE_8_7_0);
-    public static final LegacyESVersion V_7_10_1 = new LegacyESVersion(7100199, org.apache.lucene.util.Version.LUCENE_8_7_0);
-    public static final LegacyESVersion V_7_10_2 = new LegacyESVersion(7100299, org.apache.lucene.util.Version.LUCENE_8_7_0);
-
     // todo move back to Version.java if retiring legacy version support
     protected static final ImmutableOpenIntMap<Version> idToVersion;
     protected static final ImmutableOpenMap<String, Version> stringToVersion;
diff --git a/server/src/main/java/org/opensearch/OpenSearchException.java b/server/src/main/java/org/opensearch/OpenSearchException.java
index 4b6ca173ec692..d345253db592b 100644
--- a/server/src/main/java/org/opensearch/OpenSearchException.java
+++ b/server/src/main/java/org/opensearch/OpenSearchException.java
@@ -49,7 +49,6 @@
 import org.opensearch.index.Index;
 import org.opensearch.index.shard.ShardId;
 import org.opensearch.rest.RestStatus;
-import org.opensearch.search.SearchException;
 import org.opensearch.search.aggregations.MultiBucketConsumerService;
 import org.opensearch.transport.TcpTransport;
@@ -115,7 +114,6 @@ public class OpenSearchException extends RuntimeException implements ToXContentF
     private static final Map<Class<? extends OpenSearchException>, OpenSearchExceptionHandle> CLASS_TO_OPENSEARCH_EXCEPTION_HANDLE;
     private static final Pattern OS_METADATA = Pattern.compile("^opensearch\\.");
-    private static final Pattern ES_METADATA = Pattern.compile("^es\\.");
     private final Map<String, List<String>> metadata = new HashMap<>();
     private final Map<String, List<String>> headers = new HashMap<>();
@@
-159,16 +157,7 @@ public OpenSearchException(StreamInput in) throws IOException { super(in.readOptionalString(), in.readException()); readStackTrace(this, in); headers.putAll(in.readMapOfLists(StreamInput::readString, StreamInput::readString)); - metadata.putAll(in.readMapOfLists(OpenSearchException::readAndReplace, StreamInput::readString)); - } - - private static String readAndReplace(StreamInput in) throws IOException { - String str = in.readString(); - return in.getVersion().onOrBefore(LegacyESVersion.V_7_10_2) ? ES_METADATA.matcher(str).replaceFirst("opensearch.") : str; - } - - private static void replaceAndWrite(StreamOutput out, String str) throws IOException { - out.writeString(out.getVersion().onOrBefore(LegacyESVersion.V_7_10_2) ? OS_METADATA.matcher(str).replaceFirst("es.") : str); + metadata.putAll(in.readMapOfLists(StreamInput::readString, StreamInput::readString)); } /** @@ -311,16 +300,12 @@ public void writeTo(StreamOutput out) throws IOException { out.writeException(this.getCause()); writeStackTraces(this, out, StreamOutput::writeException); out.writeMapOfLists(headers, StreamOutput::writeString, StreamOutput::writeString); - out.writeMapOfLists(metadata, OpenSearchException::replaceAndWrite, StreamOutput::writeString); + out.writeMapOfLists(metadata, StreamOutput::writeString, StreamOutput::writeString); } public static OpenSearchException readException(StreamInput input, int id) throws IOException { CheckedFunction opensearchException = ID_TO_SUPPLIER.get(id); if (opensearchException == null) { - if (id == 127 && input.getVersion().before(LegacyESVersion.V_7_5_0)) { - // was SearchContextException - return new SearchException(input); - } throw new IllegalStateException("unknown exception for id: " + id); } return opensearchException.apply(input); @@ -601,7 +586,7 @@ public static void generateFailureXContent(XContentBuilder builder, Params param } t = t.getCause(); } - builder.field(ERROR, ExceptionsHelper.summaryMessage(t)); + builder.field(ERROR, ExceptionsHelper.summaryMessage(e)); return; } @@ -1533,7 +1518,7 @@ private enum OpenSearchExceptionHandle { org.opensearch.cluster.coordination.CoordinationStateRejectedException.class, org.opensearch.cluster.coordination.CoordinationStateRejectedException::new, 150, - LegacyESVersion.V_7_0_0 + UNKNOWN_VERSION_ADDED ), SNAPSHOT_IN_PROGRESS_EXCEPTION( org.opensearch.snapshots.SnapshotInProgressException.class, @@ -1569,31 +1554,31 @@ private enum OpenSearchExceptionHandle { org.opensearch.index.seqno.RetentionLeaseInvalidRetainingSeqNoException.class, org.opensearch.index.seqno.RetentionLeaseInvalidRetainingSeqNoException::new, 156, - LegacyESVersion.V_7_5_0 + UNKNOWN_VERSION_ADDED ), INGEST_PROCESSOR_EXCEPTION( org.opensearch.ingest.IngestProcessorException.class, org.opensearch.ingest.IngestProcessorException::new, 157, - LegacyESVersion.V_7_5_0 + UNKNOWN_VERSION_ADDED ), PEER_RECOVERY_NOT_FOUND_EXCEPTION( org.opensearch.indices.recovery.PeerRecoveryNotFound.class, org.opensearch.indices.recovery.PeerRecoveryNotFound::new, 158, - LegacyESVersion.V_7_9_0 + UNKNOWN_VERSION_ADDED ), NODE_HEALTH_CHECK_FAILURE_EXCEPTION( org.opensearch.cluster.coordination.NodeHealthCheckFailureException.class, org.opensearch.cluster.coordination.NodeHealthCheckFailureException::new, 159, - LegacyESVersion.V_7_9_0 + UNKNOWN_VERSION_ADDED ), NO_SEED_NODE_LEFT_EXCEPTION( org.opensearch.transport.NoSeedNodeLeftException.class, org.opensearch.transport.NoSeedNodeLeftException::new, 160, - LegacyESVersion.V_7_10_0 + UNKNOWN_VERSION_ADDED ), 
REPLICATION_FAILED_EXCEPTION(
     org.opensearch.indices.replication.common.ReplicationFailedException.class,
diff --git a/server/src/main/java/org/opensearch/Version.java b/server/src/main/java/org/opensearch/Version.java
index 1bffe9ec98ec5..823130cd072c2 100644
--- a/server/src/main/java/org/opensearch/Version.java
+++ b/server/src/main/java/org/opensearch/Version.java
@@ -99,8 +99,9 @@ public class Version implements Comparable<Version>, ToXContentFragment {
     public static final Version V_2_2_2 = new Version(2020299, org.apache.lucene.util.Version.LUCENE_9_3_0);
     public static final Version V_2_3_0 = new Version(2030099, org.apache.lucene.util.Version.LUCENE_9_3_0);
     public static final Version V_2_3_1 = new Version(2030199, org.apache.lucene.util.Version.LUCENE_9_3_0);
-    public static final Version V_2_4_0 = new Version(2040099, org.apache.lucene.util.Version.LUCENE_9_4_0);
-    public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_4_0);
+    public static final Version V_2_4_0 = new Version(2040099, org.apache.lucene.util.Version.LUCENE_9_4_1);
+    public static final Version V_2_5_0 = new Version(2050099, org.apache.lucene.util.Version.LUCENE_9_4_1);
+    public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_5_0);
     public static final Version CURRENT = V_3_0_0;

     public static Version readVersion(StreamInput in) throws IOException {
@@ -360,7 +361,7 @@ protected Version computeMinCompatVersion() {
             // we don't have LegacyESVersion.V_6 constants, so set it to its last minor
             return LegacyESVersion.fromId(6080099);
         } else if (major == 2) {
-            return LegacyESVersion.V_7_10_0;
+            return LegacyESVersion.fromId(7100099);
         } else if (major == 6) {
             // force the minimum compatibility for version 6 to 5.6 since we don't reference version 5 anymore
             return LegacyESVersion.fromId(5060099);
@@ -412,7 +413,7 @@ private Version computeMinIndexCompatVersion() {
         } else if (major == 7 || major == 1) {
             return LegacyESVersion.fromId(6000026);
         } else if (major == 2) {
-            return LegacyESVersion.V_7_0_0;
+            return LegacyESVersion.fromId(7000099);
         } else {
             bwcMajor = major - 1;
         }
diff --git a/server/src/main/java/org/opensearch/action/ActionFuture.java b/server/src/main/java/org/opensearch/action/ActionFuture.java
index 77b748f50bfbf..d796180eda021 100644
--- a/server/src/main/java/org/opensearch/action/ActionFuture.java
+++ b/server/src/main/java/org/opensearch/action/ActionFuture.java
@@ -40,7 +40,7 @@
 /**
  * An extension to {@link Future} allowing for simplified "get" operations.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
 public interface ActionFuture<T> extends Future<T> {
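The Version.java hunks above trade named constants for raw ids, which is easier to follow with the id scheme in mind: an OpenSearch version id packs major * 1_000_000 + minor * 10_000 + revision * 100 + build, where build 99 marks a release. So the new V_2_5_0 id 2050099 decodes to 2.5.0, and LegacyESVersion.fromId(7100099) reconstructs legacy 7.10.0 without keeping a constant around. A quick standalone decoder, illustrative only and not the actual Version#fromId implementation:

    final class VersionIdDemo {
        // Decodes the packed integer format used by Version ids.
        static String describe(int id) {
            int major = id / 1_000_000;
            int minor = (id % 1_000_000) / 10_000;
            int revision = (id % 10_000) / 100;
            int build = id % 100; // 99 == release build
            return major + "." + minor + "." + revision + " (build " + build + ")";
        }

        public static void main(String[] args) {
            System.out.println(describe(2050099)); // prints 2.5.0 (build 99)
            System.out.println(describe(7100099)); // prints 7.10.0 (build 99)
        }
    }

diff --git a/server/src/main/java/org/opensearch/action/ActionListener.java b/server/src/main/java/org/opensearch/action/ActionListener.java
index 8f632449c7d91..645ed4deec006 100644
--- a/server/src/main/java/org/opensearch/action/ActionListener.java
+++ b/server/src/main/java/org/opensearch/action/ActionListener.java
@@ -46,7 +46,7 @@
 /**
  * A listener for action responses or failures.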
 *
- * @opensearch.internal
+ * @opensearch.api
 */
 public interface ActionListener<Response> {
     /**
diff --git a/server/src/main/java/org/opensearch/action/ActionListenerResponseHandler.java b/server/src/main/java/org/opensearch/action/ActionListenerResponseHandler.java
index 26e9ba8621c53..af8fde4c9893c 100644
--- a/server/src/main/java/org/opensearch/action/ActionListenerResponseHandler.java
+++ b/server/src/main/java/org/opensearch/action/ActionListenerResponseHandler.java
@@ -46,7 +46,7 @@
  * A simple base class for action response listeners, defaulting to using the SAME executor (as its
  * very common on response handlers).
  *
- * @opensearch.internal
+ * @opensearch.api
  */
 public class ActionListenerResponseHandler<Response extends TransportResponse> implements TransportResponseHandler<Response> {
diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java
index 6b53a7d6a2888..84bc9b395c5dc 100644
--- a/server/src/main/java/org/opensearch/action/ActionModule.java
+++ b/server/src/main/java/org/opensearch/action/ActionModule.java
@@ -42,6 +42,8 @@ import org.opensearch.action.admin.cluster.configuration.TransportClearVotingConfigExclusionsAction;
 import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateAction;
 import org.opensearch.action.admin.cluster.decommission.awareness.get.TransportGetDecommissionStateAction;
+import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateAction;
+import org.opensearch.action.admin.cluster.decommission.awareness.delete.TransportDeleteDecommissionStateAction;
 import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionAction;
 import org.opensearch.action.admin.cluster.decommission.awareness.put.TransportDecommissionAction;
 import org.opensearch.action.admin.cluster.health.ClusterHealthAction;
@@ -313,6 +315,7 @@ import org.opensearch.rest.action.admin.cluster.RestClusterStatsAction;
 import org.opensearch.rest.action.admin.cluster.RestClusterUpdateSettingsAction;
 import org.opensearch.rest.action.admin.cluster.RestCreateSnapshotAction;
+import org.opensearch.rest.action.admin.cluster.RestDeleteDecommissionStateAction;
 import org.opensearch.rest.action.admin.cluster.RestDeleteRepositoryAction;
 import org.opensearch.rest.action.admin.cluster.RestDeleteSnapshotAction;
 import org.opensearch.rest.action.admin.cluster.RestDeleteStoredScriptAction;
@@ -703,6 +706,7 @@ public void reg
         // Decommission actions
         actions.register(DecommissionAction.INSTANCE, TransportDecommissionAction.class);
         actions.register(GetDecommissionStateAction.INSTANCE, TransportGetDecommissionStateAction.class);
+        actions.register(DeleteDecommissionStateAction.INSTANCE, TransportDeleteDecommissionStateAction.class);

         return unmodifiableMap(actions.getRegistry());
     }
@@ -885,6 +889,7 @@ public void initRestHandlers(Supplier<DiscoveryNodes> nodesInCluster) {
         registerHandler.accept(new RestDeletePitAction());
         registerHandler.accept(new RestGetAllPitsAction(nodesInCluster));
         registerHandler.accept(new RestPitSegmentsAction(nodesInCluster));
+        registerHandler.accept(new RestDeleteDecommissionStateAction());

         for (ActionPlugin plugin : actionPlugins) {
             for (RestHandler handler : plugin.getRestHandlers(
diff --git a/server/src/main/java/org/opensearch/action/ActionRequest.java b/server/src/main/java/org/opensearch/action/ActionRequest.java
index c6d8eb9f273d6..a6879dd98691a 100644
--- a/server/src/main/java/org/opensearch/action/ActionRequest.java
+++
b/server/src/main/java/org/opensearch/action/ActionRequest.java @@ -39,9 +39,9 @@ import java.io.IOException; /** - * Base action request + * Base action request implemented by plugins. * - * @opensearch.internal + * @opensearch.api */ public abstract class ActionRequest extends TransportRequest { diff --git a/server/src/main/java/org/opensearch/action/ActionRequestBuilder.java b/server/src/main/java/org/opensearch/action/ActionRequestBuilder.java index d1fddb076b350..27358a0412468 100644 --- a/server/src/main/java/org/opensearch/action/ActionRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/ActionRequestBuilder.java @@ -40,7 +40,7 @@ /** * Base Action Request Builder * - * @opensearch.internal + * @opensearch.api */ public abstract class ActionRequestBuilder { diff --git a/server/src/main/java/org/opensearch/action/ActionRequestValidationException.java b/server/src/main/java/org/opensearch/action/ActionRequestValidationException.java index d7da932c4dfc2..ffba4d2eb50c0 100644 --- a/server/src/main/java/org/opensearch/action/ActionRequestValidationException.java +++ b/server/src/main/java/org/opensearch/action/ActionRequestValidationException.java @@ -35,8 +35,8 @@ import org.opensearch.common.ValidationException; /** - * Base exception for an action request validation + * Base exception for an action request validation extendable by plugins * - * @opensearch.internal + * @opensearch.api */ public class ActionRequestValidationException extends ValidationException {} diff --git a/server/src/main/java/org/opensearch/action/ActionResponse.java b/server/src/main/java/org/opensearch/action/ActionResponse.java index c72fc87ccfaf8..ab0544365c0b9 100644 --- a/server/src/main/java/org/opensearch/action/ActionResponse.java +++ b/server/src/main/java/org/opensearch/action/ActionResponse.java @@ -38,9 +38,9 @@ import java.io.IOException; /** - * Base class for responses to action requests. + * Base class for responses to action requests implemented by plugins. * - * @opensearch.internal + * @opensearch.api */ public abstract class ActionResponse extends TransportResponse { diff --git a/server/src/main/java/org/opensearch/action/ActionRunnable.java b/server/src/main/java/org/opensearch/action/ActionRunnable.java index c718b33bd404a..2c3f70afda75d 100644 --- a/server/src/main/java/org/opensearch/action/ActionRunnable.java +++ b/server/src/main/java/org/opensearch/action/ActionRunnable.java @@ -41,7 +41,7 @@ * Base class for {@link Runnable}s that need to call {@link ActionListener#onFailure(Exception)} in case an uncaught * exception or error is thrown while the actual action is run. * - * @opensearch.internal + * @opensearch.api */ public abstract class ActionRunnable extends AbstractRunnable { diff --git a/server/src/main/java/org/opensearch/action/ActionType.java b/server/src/main/java/org/opensearch/action/ActionType.java index 9c17061990abe..c22cddd6fad71 100644 --- a/server/src/main/java/org/opensearch/action/ActionType.java +++ b/server/src/main/java/org/opensearch/action/ActionType.java @@ -39,7 +39,7 @@ /** * A generic action. Should strive to make it a singleton. 
* - * @opensearch.internal + * @opensearch.api */ public class ActionType { diff --git a/server/src/main/java/org/opensearch/action/DocWriteRequest.java b/server/src/main/java/org/opensearch/action/DocWriteRequest.java index a789189f9227e..ed59b5e95a01f 100644 --- a/server/src/main/java/org/opensearch/action/DocWriteRequest.java +++ b/server/src/main/java/org/opensearch/action/DocWriteRequest.java @@ -231,8 +231,7 @@ public static OpType fromString(String sOpType) { * Read a document write (index/delete/update) request * * @param shardId shard id of the request. {@code null} when reading as part of a {@link org.opensearch.action.bulk.BulkRequest} - * that does not have a unique shard id or when reading from a stream of version older than - * {@link org.opensearch.action.bulk.BulkShardRequest#COMPACT_SHARD_ID_VERSION} + * that does not have a unique shard id */ static DocWriteRequest readDocumentRequest(@Nullable ShardId shardId, StreamInput in) throws IOException { byte type = in.readByte(); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java index a2a77a1316898..4b52f553a88e3 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java @@ -31,7 +31,6 @@ package org.opensearch.action.admin.cluster.configuration; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.cluster.ClusterState; @@ -110,13 +109,8 @@ public AddVotingConfigExclusionsRequest(String[] nodeDescriptions, String[] node public AddVotingConfigExclusionsRequest(StreamInput in) throws IOException { super(in); nodeDescriptions = in.readStringArray(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_8_0)) { - nodeIds = in.readStringArray(); - nodeNames = in.readStringArray(); - } else { - nodeIds = Strings.EMPTY_ARRAY; - nodeNames = Strings.EMPTY_ARRAY; - } + nodeIds = in.readStringArray(); + nodeNames = in.readStringArray(); timeout = in.readTimeValue(); if (nodeDescriptions.length > 0) { @@ -249,10 +243,8 @@ public ActionRequestValidationException validate() { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeStringArray(nodeDescriptions); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_8_0)) { - out.writeStringArray(nodeIds); - out.writeStringArray(nodeNames); - } + out.writeStringArray(nodeIds); + out.writeStringArray(nodeNames); out.writeTimeValue(timeout); } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateAction.java new file mode 100644 index 0000000000000..3aff666d388be --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateAction.java @@ -0,0 +1,25 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.action.admin.cluster.decommission.awareness.delete;
+
+import org.opensearch.action.ActionType;
+
+/**
+ * Delete decommission state action.
+ *
+ * @opensearch.internal
+ */
+public class DeleteDecommissionStateAction extends ActionType<DeleteDecommissionStateResponse> {
+    public static final DeleteDecommissionStateAction INSTANCE = new DeleteDecommissionStateAction();
+    public static final String NAME = "cluster:admin/decommission/awareness/delete";
+
+    private DeleteDecommissionStateAction() {
+        super(NAME, DeleteDecommissionStateResponse::new);
+    }
+}
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequest.java
new file mode 100644
index 0000000000000..205be54a36c33
--- /dev/null
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequest.java
@@ -0,0 +1,40 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.action.admin.cluster.decommission.awareness.delete;
+
+import org.opensearch.action.ActionRequestValidationException;
+import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest;
+import org.opensearch.common.io.stream.StreamInput;
+import org.opensearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * Request for deleting decommission state.
+ *
+ * @opensearch.internal
+ */
+public class DeleteDecommissionStateRequest extends ClusterManagerNodeRequest<DeleteDecommissionStateRequest> {
+
+    public DeleteDecommissionStateRequest() {}
+
+    public DeleteDecommissionStateRequest(StreamInput in) throws IOException {
+        super(in);
+    }
+
+    @Override
+    public ActionRequestValidationException validate() {
+        return null;
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        super.writeTo(out);
+    }
+}
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequestBuilder.java
new file mode 100644
index 0000000000000..08f194c53f18e
--- /dev/null
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequestBuilder.java
@@ -0,0 +1,27 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.action.admin.cluster.decommission.awareness.delete;
+
+import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder;
+import org.opensearch.client.OpenSearchClient;
+
+/**
+ * Builder for Delete decommission request.
+ * + * @opensearch.internal + */ +public class DeleteDecommissionStateRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< + DeleteDecommissionStateRequest, + DeleteDecommissionStateResponse, + DeleteDecommissionStateRequestBuilder> { + + public DeleteDecommissionStateRequestBuilder(OpenSearchClient client, DeleteDecommissionStateAction action) { + super(client, action, new DeleteDecommissionStateRequest()); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateResponse.java new file mode 100644 index 0000000000000..2ff634966586a --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateResponse.java @@ -0,0 +1,36 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.decommission.awareness.delete; + +import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +/** + * Response returned after deletion of decommission request. + * + * @opensearch.internal + */ +public class DeleteDecommissionStateResponse extends AcknowledgedResponse { + + public DeleteDecommissionStateResponse(StreamInput in) throws IOException { + super(in); + } + + public DeleteDecommissionStateResponse(boolean acknowledged) { + super(acknowledged); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/TransportDeleteDecommissionStateAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/TransportDeleteDecommissionStateAction.java new file mode 100644 index 0000000000000..7d8f4bdd8304c --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/TransportDeleteDecommissionStateAction.java @@ -0,0 +1,86 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.action.admin.cluster.decommission.awareness.delete;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opensearch.action.ActionListener;
+import org.opensearch.action.support.ActionFilters;
+import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction;
+import org.opensearch.cluster.ClusterState;
+import org.opensearch.cluster.block.ClusterBlockException;
+import org.opensearch.cluster.block.ClusterBlockLevel;
+import org.opensearch.cluster.decommission.DecommissionService;
+import org.opensearch.cluster.metadata.IndexNameExpressionResolver;
+import org.opensearch.cluster.service.ClusterService;
+import org.opensearch.common.inject.Inject;
+import org.opensearch.common.io.stream.StreamInput;
+import org.opensearch.threadpool.ThreadPool;
+import org.opensearch.transport.TransportService;
+
+import java.io.IOException;
+
+/**
+ * Transport action for deleting the decommission state.
+ *
+ * @opensearch.internal
+ */
+public class TransportDeleteDecommissionStateAction extends TransportClusterManagerNodeAction<
+    DeleteDecommissionStateRequest,
+    DeleteDecommissionStateResponse> {
+
+    private static final Logger logger = LogManager.getLogger(TransportDeleteDecommissionStateAction.class);
+    private final DecommissionService decommissionService;
+
+    @Inject
+    public TransportDeleteDecommissionStateAction(
+        TransportService transportService,
+        ClusterService clusterService,
+        ThreadPool threadPool,
+        ActionFilters actionFilters,
+        IndexNameExpressionResolver indexNameExpressionResolver,
+        DecommissionService decommissionService
+    ) {
+        super(
+            DeleteDecommissionStateAction.NAME,
+            transportService,
+            clusterService,
+            threadPool,
+            actionFilters,
+            DeleteDecommissionStateRequest::new,
+            indexNameExpressionResolver
+        );
+        this.decommissionService = decommissionService;
+    }
+
+    @Override
+    protected String executor() {
+        return ThreadPool.Names.SAME;
+    }
+
+    @Override
+    protected DeleteDecommissionStateResponse read(StreamInput in) throws IOException {
+        return new DeleteDecommissionStateResponse(in);
+    }
+
+    @Override
+    protected ClusterBlockException checkBlock(DeleteDecommissionStateRequest request, ClusterState state) {
+        return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
+    }
+
+    @Override
+    protected void clusterManagerOperation(
+        DeleteDecommissionStateRequest request,
+        ClusterState state,
+        ActionListener<DeleteDecommissionStateResponse> listener
+    ) {
+        logger.info("Received delete decommission request [{}]", request);
+        this.decommissionService.startRecommissionAction(listener);
+    }
+}
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/package-info.java
new file mode 100644
index 0000000000000..c2cfc03baa45e
--- /dev/null
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/package-info.java
@@ -0,0 +1,10 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/** Delete decommission transport handlers.
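+ *
+ * A minimal invocation sketch (illustrative only; assumes a {@code NodeClient client} and an
+ * {@code ActionListener<DeleteDecommissionStateResponse> listener} are available):
+ *
+ * <pre>
+ * client.execute(DeleteDecommissionStateAction.INSTANCE, new DeleteDecommissionStateRequest(), listener);
+ * </pre>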
+ */
+package org.opensearch.action.admin.cluster.decommission.awareness.delete;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequest.java
index 90150c71bf3f2..1f301aa2b5273 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequest.java
@@ -10,11 +10,14 @@
 
 import org.opensearch.action.ActionRequestValidationException;
 import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest;
+import org.opensearch.common.Strings;
 import org.opensearch.common.io.stream.StreamInput;
 import org.opensearch.common.io.stream.StreamOutput;
 
 import java.io.IOException;
 
+import static org.opensearch.action.ValidateActions.addValidationError;
+
 /**
  * Get Decommissioned attribute request
  *
@@ -22,19 +25,56 @@
  */
 public class GetDecommissionStateRequest extends ClusterManagerNodeReadRequest<GetDecommissionStateRequest> {
 
+    private String attributeName;
+
     public GetDecommissionStateRequest() {}
 
+    /**
+     * Constructs a new get decommission state request with given attribute name
+     *
+     * @param attributeName name of the attribute
+     */
+    public GetDecommissionStateRequest(String attributeName) {
+        this.attributeName = attributeName;
+    }
+
     public GetDecommissionStateRequest(StreamInput in) throws IOException {
         super(in);
+        attributeName = in.readString();
     }
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
+        out.writeString(attributeName);
     }
 
     @Override
     public ActionRequestValidationException validate() {
-        return null;
+        ActionRequestValidationException validationException = null;
+        if (attributeName == null || Strings.isEmpty(attributeName)) {
+            validationException = addValidationError("attribute name is missing", validationException);
+        }
+        return validationException;
+    }
+
+    /**
+     * Sets attribute name
+     *
+     * @param attributeName attribute name
+     * @return this request
+     */
+    public GetDecommissionStateRequest attributeName(String attributeName) {
+        this.attributeName = attributeName;
+        return this;
+    }
+
+    /**
+     * Returns attribute name
+     *
+     * @return attributeName name of attribute
+     */
+    public String attributeName() {
+        return this.attributeName;
     }
 }
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequestBuilder.java
index 2b8616d0511cd..e766e9c674ff7 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequestBuilder.java
@@ -27,4 +27,13 @@ public class GetDecommissionStateRequestBuilder extends ClusterManagerNodeReadOp
     public GetDecommissionStateRequestBuilder(OpenSearchClient client, GetDecommissionStateAction action) {
         super(client, action, new GetDecommissionStateRequest());
     }
+
+    /**
+     * @param attributeName name of attribute
+     * @return current object
+     */
+    public GetDecommissionStateRequestBuilder setAttributeName(String attributeName) {
+        request.attributeName(attributeName);
+        return this;
+    }
 }
diff --git
a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateResponse.java index 2034cdb16e40f..ec0bd7cf7e7eb 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateResponse.java @@ -10,7 +10,6 @@ import org.opensearch.OpenSearchParseException; import org.opensearch.action.ActionResponse; -import org.opensearch.cluster.decommission.DecommissionAttribute; import org.opensearch.cluster.decommission.DecommissionStatus; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -31,49 +30,40 @@ */ public class GetDecommissionStateResponse extends ActionResponse implements ToXContentObject { - private DecommissionAttribute decommissionedAttribute; + private String attributeValue; private DecommissionStatus status; GetDecommissionStateResponse() { this(null, null); } - GetDecommissionStateResponse(DecommissionAttribute decommissionedAttribute, DecommissionStatus status) { - this.decommissionedAttribute = decommissionedAttribute; + GetDecommissionStateResponse(String attributeValue, DecommissionStatus status) { + this.attributeValue = attributeValue; this.status = status; } GetDecommissionStateResponse(StreamInput in) throws IOException { // read decommissioned attribute and status only if it is present if (in.readBoolean()) { - this.decommissionedAttribute = new DecommissionAttribute(in); - } - if (in.readBoolean()) { + this.attributeValue = in.readString(); this.status = DecommissionStatus.fromString(in.readString()); } } @Override public void writeTo(StreamOutput out) throws IOException { - // if decommissioned attribute is null, mark absence of decommissioned attribute - if (decommissionedAttribute == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - decommissionedAttribute.writeTo(out); - } - - // if status is null, mark absence of status - if (status == null) { + // if decommissioned attribute value is null or status is null then mark its absence + if (attributeValue == null || status == null) { out.writeBoolean(false); } else { out.writeBoolean(true); + out.writeString(attributeValue); out.writeString(status.status()); } } - public DecommissionAttribute getDecommissionedAttribute() { - return decommissionedAttribute; + public String getAttributeValue() { + return attributeValue; } public DecommissionStatus getDecommissionStatus() { @@ -83,13 +73,8 @@ public DecommissionStatus getDecommissionStatus() { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.startObject("awareness"); - if (decommissionedAttribute != null) { - builder.field(decommissionedAttribute.attributeName(), decommissionedAttribute.attributeValue()); - } - builder.endObject(); - if (status != null) { - builder.field("status", status); + if (attributeValue != null && status != null) { + builder.field(attributeValue, status); } builder.endObject(); return builder; @@ -97,58 +82,25 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws public static GetDecommissionStateResponse fromXContent(XContentParser parser) throws IOException { ensureExpectedToken(XContentParser.Token.START_OBJECT, 
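            // the body parsed here is now a single flat object keyed by the decommissioned
            // attribute value, e.g. {"zone-1": "successful"} (illustrative values), replacing
            // the nested {"awareness": {...}, "status": "..."} shape handled by the removed code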
parser.nextToken(), parser); - String attributeType = "awareness"; XContentParser.Token token; - DecommissionAttribute decommissionAttribute = null; + String attributeValue = null; DecommissionStatus status = null; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { - String currentFieldName = parser.currentName(); - if (attributeType.equals(currentFieldName)) { - if (parser.nextToken() != XContentParser.Token.START_OBJECT) { - throw new OpenSearchParseException( - "failed to parse decommission attribute type [{}], expected object", - attributeType - ); - } - token = parser.nextToken(); - if (token != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - String fieldName = parser.currentName(); - String value; - token = parser.nextToken(); - if (token == XContentParser.Token.VALUE_STRING) { - value = parser.text(); - } else { - throw new OpenSearchParseException( - "failed to parse attribute [{}], expected string for attribute value", - fieldName - ); - } - decommissionAttribute = new DecommissionAttribute(fieldName, value); - parser.nextToken(); - } else { - throw new OpenSearchParseException("failed to parse attribute type [{}], unexpected type", attributeType); - } - } else { - throw new OpenSearchParseException("failed to parse attribute type [{}]", attributeType); - } - } else if ("status".equals(currentFieldName)) { - if (parser.nextToken() != XContentParser.Token.VALUE_STRING) { - throw new OpenSearchParseException( - "failed to parse status of decommissioning, expected string but found unknown type" - ); - } - status = DecommissionStatus.fromString(parser.text().toLowerCase(Locale.ROOT)); - } else { - throw new OpenSearchParseException( - "unknown field found [{}], failed to parse the decommission attribute", - currentFieldName - ); + attributeValue = parser.currentName(); + if (parser.nextToken() != XContentParser.Token.VALUE_STRING) { + throw new OpenSearchParseException("failed to parse status of decommissioning, expected string but found unknown type"); } + status = DecommissionStatus.fromString(parser.text().toLowerCase(Locale.ROOT)); + } else { + throw new OpenSearchParseException( + "failed to parse decommission state, expected [{}] but found [{}]", + XContentParser.Token.FIELD_NAME, + token + ); } } - return new GetDecommissionStateResponse(decommissionAttribute, status); + return new GetDecommissionStateResponse(attributeValue, status); } @Override @@ -156,11 +108,14 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; GetDecommissionStateResponse that = (GetDecommissionStateResponse) o; - return decommissionedAttribute.equals(that.decommissionedAttribute) && status == that.status; + if (!Objects.equals(attributeValue, that.attributeValue)) { + return false; + } + return Objects.equals(status, that.status); } @Override public int hashCode() { - return Objects.hash(decommissionedAttribute, status); + return Objects.hash(attributeValue, status); } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/TransportGetDecommissionStateAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/TransportGetDecommissionStateAction.java index 48ed13c6c0aaf..d811ab8cf6948 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/TransportGetDecommissionStateAction.java +++ 
b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/TransportGetDecommissionStateAction.java
@@ -69,10 +69,11 @@ protected void clusterManagerOperation(
         ActionListener<GetDecommissionStateResponse> listener
     ) throws Exception {
         DecommissionAttributeMetadata decommissionAttributeMetadata = state.metadata().decommissionAttributeMetadata();
-        if (decommissionAttributeMetadata != null) {
+        if (decommissionAttributeMetadata != null
+            && request.attributeName().equals(decommissionAttributeMetadata.decommissionAttribute().attributeName())) {
             listener.onResponse(
                 new GetDecommissionStateResponse(
-                    decommissionAttributeMetadata.decommissionAttribute(),
+                    decommissionAttributeMetadata.decommissionAttribute().attributeValue(),
                     decommissionAttributeMetadata.status()
                 )
             );
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequest.java
index 79a6688dc6049..ae96c8ddb2fde 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequest.java
@@ -14,6 +14,7 @@
 import org.opensearch.common.Strings;
 import org.opensearch.common.io.stream.StreamInput;
 import org.opensearch.common.io.stream.StreamOutput;
+import org.opensearch.common.unit.TimeValue;
 
 import java.io.IOException;
 
@@ -28,8 +29,15 @@
  */
 public class DecommissionRequest extends ClusterManagerNodeRequest<DecommissionRequest> {
 
+    public static final TimeValue DEFAULT_NODE_DRAINING_TIMEOUT = TimeValue.timeValueSeconds(120);
+
     private DecommissionAttribute decommissionAttribute;
 
+    private TimeValue delayTimeout = DEFAULT_NODE_DRAINING_TIMEOUT;
+
+    // holder for the no_delay param; when set, the node-draining timeout is skipped
+    private boolean noDelay = false;
+
     public DecommissionRequest() {}
 
     public DecommissionRequest(DecommissionAttribute decommissionAttribute) {
@@ -39,12 +47,16 @@ public DecommissionRequest(DecommissionAttribute decommissionAttribute) {
     public DecommissionRequest(StreamInput in) throws IOException {
         super(in);
         decommissionAttribute = new DecommissionAttribute(in);
+        this.delayTimeout = in.readTimeValue();
+        this.noDelay = in.readBoolean();
     }
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
         decommissionAttribute.writeTo(out);
+        out.writeTimeValue(delayTimeout);
+        out.writeBoolean(noDelay);
     }
 
     /**
@@ -65,6 +77,25 @@ public DecommissionAttribute getDecommissionAttribute() {
         return this.decommissionAttribute;
     }
 
+    public void setDelayTimeout(TimeValue delayTimeout) {
+        this.delayTimeout = delayTimeout;
+    }
+
+    public TimeValue getDelayTimeout() {
+        return this.delayTimeout;
+    }
+
+    public void setNoDelay(boolean noDelay) {
+        if (noDelay) {
+            this.delayTimeout = TimeValue.ZERO;
+        }
+        this.noDelay = noDelay;
+    }
+
+    public boolean isNoDelay() {
+        return noDelay;
+    }
+
     @Override
     public ActionRequestValidationException validate() {
         ActionRequestValidationException validationException = null;
@@ -74,6 +105,14 @@ public ActionRequestValidationException validate() {
         if (decommissionAttribute.attributeValue() == null || Strings.isEmpty(decommissionAttribute.attributeValue())) {
             validationException = addValidationError("attribute value is missing", validationException);
         }
+        // This validation should not fail, since the delay timeout cannot be set externally;
+        // it is kept as a defensive double check.
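+        // Illustration: setNoDelay(true) forces delayTimeout to TimeValue.ZERO, so with the
+        // setters above this branch can only trip if setDelayTimeout(...) runs after setNoDelay(true).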
+        if (noDelay && delayTimeout.getSeconds() > 0) {
+            final String validationMessage = "Invalid decommission request. no_delay is true and delay_timeout is set to ["
+                + delayTimeout.getSeconds()
+                + "] seconds";
+            validationException = addValidationError(validationMessage, validationException);
+        }
         return validationException;
     }
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequestBuilder.java
index 47af3b952c895..1c7a03fa10e76 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequestBuilder.java
@@ -12,6 +12,7 @@
 import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder;
 import org.opensearch.client.OpenSearchClient;
 import org.opensearch.cluster.decommission.DecommissionAttribute;
+import org.opensearch.common.unit.TimeValue;
 
 /**
  * Register decommission request builder
@@ -35,4 +36,14 @@ public DecommissionRequestBuilder setDecommissionedAttribute(DecommissionAttribu
         request.setDecommissionAttribute(decommissionAttribute);
         return this;
     }
+
+    public DecommissionRequestBuilder setDelayTimeOut(TimeValue delayTimeOut) {
+        request.setDelayTimeout(delayTimeOut);
+        return this;
+    }
+
+    public DecommissionRequestBuilder setNoDelay(boolean noDelay) {
+        request.setNoDelay(noDelay);
+        return this;
+    }
 }
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/TransportDecommissionAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/TransportDecommissionAction.java
index 3a067d2f110b9..6f4e3cf82d2ce 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/TransportDecommissionAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/TransportDecommissionAction.java
@@ -76,6 +76,6 @@ protected ClusterBlockException checkBlock(DecommissionRequest request, ClusterS
     protected void clusterManagerOperation(DecommissionRequest request, ClusterState state, ActionListener<DecommissionResponse> listener)
         throws Exception {
         logger.info("starting awareness attribute [{}] decommissioning", request.getDecommissionAttribute().toString());
-        decommissionService.startDecommissionAction(request.getDecommissionAttribute(), listener);
+        decommissionService.startDecommissionAction(request, listener);
     }
 }
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequest.java
index 1dedf481dec56..84a7616fe6b06 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequest.java
@@ -32,7 +32,6 @@
 
 package org.opensearch.action.admin.cluster.health;
 
-import org.opensearch.LegacyESVersion;
 import org.opensearch.action.ActionRequestValidationException;
 import org.opensearch.action.IndicesRequest;
 import org.opensearch.action.support.ActiveShardCount;
@@ -90,11 +89,7 @@ public ClusterHealthRequest(StreamInput in) throws IOException {
             waitForEvents = Priority.readFrom(in);
         }
         waitForNoInitializingShards = in.readBoolean();
-        if
(in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - indicesOptions = IndicesOptions.readIndicesOptions(in); - } else { - indicesOptions = IndicesOptions.lenientExpandOpen(); - } + indicesOptions = IndicesOptions.readIndicesOptions(in); } @Override @@ -122,9 +117,7 @@ public void writeTo(StreamOutput out) throws IOException { Priority.writeTo(waitForEvents, out); } out.writeBoolean(waitForNoInitializingShards); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - indicesOptions.writeIndicesOptions(out); - } + indicesOptions.writeIndicesOptions(out); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java index e8429580ec887..4c71993251f4f 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java @@ -35,7 +35,6 @@ import org.opensearch.OpenSearchException; import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.TransportNodesAction; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; @@ -43,6 +42,7 @@ import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.monitor.jvm.HotThreads; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import java.io.IOException; @@ -117,7 +117,7 @@ protected NodeHotThreads nodeOperation(NodeRequest request) { * * @opensearch.internal */ - public static class NodeRequest extends BaseNodeRequest { + public static class NodeRequest extends TransportRequest { NodesHotThreadsRequest request; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodeInfo.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodeInfo.java index 192815af1908f..fc54ecf795a1d 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodeInfo.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodeInfo.java @@ -33,7 +33,6 @@ package org.opensearch.action.admin.cluster.node.info; import org.opensearch.Build; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.support.nodes.BaseNodeResponse; import org.opensearch.cluster.node.DiscoveryNode; @@ -99,9 +98,7 @@ public NodeInfo(StreamInput in) throws IOException { addInfoIfNonNull(HttpInfo.class, in.readOptionalWriteable(HttpInfo::new)); addInfoIfNonNull(PluginsAndModules.class, in.readOptionalWriteable(PluginsAndModules::new)); addInfoIfNonNull(IngestInfo.class, in.readOptionalWriteable(IngestInfo::new)); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_10_0)) { - addInfoIfNonNull(AggregationInfo.class, in.readOptionalWriteable(AggregationInfo::new)); - } + addInfoIfNonNull(AggregationInfo.class, in.readOptionalWriteable(AggregationInfo::new)); } public NodeInfo( @@ -198,11 +195,7 @@ private void addInfoIfNonNull(Class clazz, @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().before(Version.V_1_0_0)) { - out.writeVInt(LegacyESVersion.V_7_10_2.id); - } else { - 
out.writeVInt(version.id); - } + out.writeVInt(version.id); Build.writeBuild(build, out); if (totalIndexingBuffer == null) { out.writeBoolean(false); @@ -224,8 +217,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(getInfo(HttpInfo.class)); out.writeOptionalWriteable(getInfo(PluginsAndModules.class)); out.writeOptionalWriteable(getInfo(IngestInfo.class)); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_10_0)) { - out.writeOptionalWriteable(getInfo(AggregationInfo.class)); - } + out.writeOptionalWriteable(getInfo(AggregationInfo.class)); } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequest.java index d51be9bc27ac9..77ffd98513698 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequest.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.cluster.node.info; -import org.opensearch.LegacyESVersion; import org.opensearch.action.support.nodes.BaseNodesRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -63,22 +62,7 @@ public class NodesInfoRequest extends BaseNodesRequest { public NodesInfoRequest(StreamInput in) throws IOException { super(in); requestedMetrics.clear(); - if (in.getVersion().before(LegacyESVersion.V_7_7_0)) { - // prior to version 8.x, a NodesInfoRequest was serialized as a list - // of booleans in a fixed order - optionallyAddMetric(in.readBoolean(), Metric.SETTINGS.metricName()); - optionallyAddMetric(in.readBoolean(), Metric.OS.metricName()); - optionallyAddMetric(in.readBoolean(), Metric.PROCESS.metricName()); - optionallyAddMetric(in.readBoolean(), Metric.JVM.metricName()); - optionallyAddMetric(in.readBoolean(), Metric.THREAD_POOL.metricName()); - optionallyAddMetric(in.readBoolean(), Metric.TRANSPORT.metricName()); - optionallyAddMetric(in.readBoolean(), Metric.HTTP.metricName()); - optionallyAddMetric(in.readBoolean(), Metric.PLUGINS.metricName()); - optionallyAddMetric(in.readBoolean(), Metric.INGEST.metricName()); - optionallyAddMetric(in.readBoolean(), Metric.INDICES.metricName()); - } else { - requestedMetrics.addAll(Arrays.asList(in.readStringArray())); - } + requestedMetrics.addAll(Arrays.asList(in.readStringArray())); } /** @@ -165,22 +149,7 @@ private void optionallyAddMetric(boolean addMetric, String metricName) { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().before(LegacyESVersion.V_7_7_0)) { - // prior to version 8.x, a NodesInfoRequest was serialized as a list - // of booleans in a fixed order - out.writeBoolean(Metric.SETTINGS.containedIn(requestedMetrics)); - out.writeBoolean(Metric.OS.containedIn(requestedMetrics)); - out.writeBoolean(Metric.PROCESS.containedIn(requestedMetrics)); - out.writeBoolean(Metric.JVM.containedIn(requestedMetrics)); - out.writeBoolean(Metric.THREAD_POOL.containedIn(requestedMetrics)); - out.writeBoolean(Metric.TRANSPORT.containedIn(requestedMetrics)); - out.writeBoolean(Metric.HTTP.containedIn(requestedMetrics)); - out.writeBoolean(Metric.PLUGINS.containedIn(requestedMetrics)); - out.writeBoolean(Metric.INGEST.containedIn(requestedMetrics)); - out.writeBoolean(Metric.INDICES.containedIn(requestedMetrics)); - } else { - out.writeStringArray(requestedMetrics.toArray(new String[0])); - 
} + out.writeStringArray(requestedMetrics.toArray(new String[0])); } /** diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/TransportNodesInfoAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/TransportNodesInfoAction.java index 7bcf83ba28111..ee7b287b878e7 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/TransportNodesInfoAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/TransportNodesInfoAction.java @@ -34,7 +34,6 @@ import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.TransportNodesAction; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; @@ -42,6 +41,7 @@ import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.node.NodeService; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import java.io.IOException; @@ -126,7 +126,7 @@ protected NodeInfo nodeOperation(NodeInfoRequest nodeRequest) { * * @opensearch.internal */ - public static class NodeInfoRequest extends BaseNodeRequest { + public static class NodeInfoRequest extends TransportRequest { NodesInfoRequest request; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java index e31f5f304c836..b5298b5f5eefb 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.cluster.node.reload; -import org.opensearch.LegacyESVersion; import org.opensearch.action.support.nodes.BaseNodesRequest; import org.opensearch.common.io.stream.StreamInput; @@ -68,18 +67,16 @@ public NodesReloadSecureSettingsRequest() { public NodesReloadSecureSettingsRequest(StreamInput in) throws IOException { super(in); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_7_0)) { - final BytesReference bytesRef = in.readOptionalBytesReference(); - if (bytesRef != null) { - byte[] bytes = BytesReference.toBytes(bytesRef); - try { - this.secureSettingsPassword = new SecureString(CharArrays.utf8BytesToChars(bytes)); - } finally { - Arrays.fill(bytes, (byte) 0); - } - } else { - this.secureSettingsPassword = null; + final BytesReference bytesRef = in.readOptionalBytesReference(); + if (bytesRef != null) { + byte[] bytes = BytesReference.toBytes(bytesRef); + try { + this.secureSettingsPassword = new SecureString(CharArrays.utf8BytesToChars(bytes)); + } finally { + Arrays.fill(bytes, (byte) 0); } + } else { + this.secureSettingsPassword = null; } } @@ -114,16 +111,14 @@ boolean hasPassword() { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - if (this.secureSettingsPassword == null) { - out.writeOptionalBytesReference(null); - } else { - final byte[] passwordBytes = CharArrays.toUtf8Bytes(this.secureSettingsPassword.getChars()); - try { - out.writeOptionalBytesReference(new BytesArray(passwordBytes)); - } finally { - Arrays.fill(passwordBytes, (byte) 0); 
- } + if (this.secureSettingsPassword == null) { + out.writeOptionalBytesReference(null); + } else { + final byte[] passwordBytes = CharArrays.toUtf8Bytes(this.secureSettingsPassword.getChars()); + try { + out.writeOptionalBytesReference(new BytesArray(passwordBytes)); + } finally { + Arrays.fill(passwordBytes, (byte) 0); } } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java index d7ad4357fa046..920c66bc5c543 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java @@ -39,7 +39,6 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.TransportNodesAction; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; @@ -54,6 +53,7 @@ import org.opensearch.plugins.ReloadablePlugin; import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import java.io.IOException; @@ -188,7 +188,7 @@ protected NodesReloadSecureSettingsResponse.NodeResponse nodeOperation(NodeReque * * @opensearch.internal */ - public static class NodeRequest extends BaseNodeRequest { + public static class NodeRequest extends TransportRequest { NodesReloadSecureSettingsRequest request; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java index 7f0ac615cc449..aa8849934f6ee 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.cluster.node.stats; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.support.nodes.BaseNodeResponse; import org.opensearch.cluster.node.DiscoveryNode; @@ -138,18 +137,10 @@ public NodeStats(StreamInput in) throws IOException { ingestStats = in.readOptionalWriteable(IngestStats::new); adaptiveSelectionStats = in.readOptionalWriteable(AdaptiveSelectionStats::new); scriptCacheStats = null; - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_8_0)) { - if (in.getVersion().before(LegacyESVersion.V_7_9_0)) { - scriptCacheStats = in.readOptionalWriteable(ScriptCacheStats::new); - } else if (scriptStats != null) { - scriptCacheStats = scriptStats.toScriptCacheStats(); - } - } - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { - indexingPressureStats = in.readOptionalWriteable(IndexingPressureStats::new); - } else { - indexingPressureStats = null; + if (scriptStats != null) { + scriptCacheStats = scriptStats.toScriptCacheStats(); } + indexingPressureStats = in.readOptionalWriteable(IndexingPressureStats::new); if (in.getVersion().onOrAfter(Version.V_1_2_0)) { shardIndexingPressureStats = in.readOptionalWriteable(ShardIndexingPressureStats::new); } else { @@ -327,12 +318,7 @@ public void writeTo(StreamOutput 
out) throws IOException { out.writeOptionalWriteable(discoveryStats); out.writeOptionalWriteable(ingestStats); out.writeOptionalWriteable(adaptiveSelectionStats); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_8_0) && out.getVersion().before(LegacyESVersion.V_7_9_0)) { - out.writeOptionalWriteable(scriptCacheStats); - } - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { - out.writeOptionalWriteable(indexingPressureStats); - } + out.writeOptionalWriteable(indexingPressureStats); if (out.getVersion().onOrAfter(Version.V_1_2_0)) { out.writeOptionalWriteable(shardIndexingPressureStats); } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java index babec0b7c119f..f6fb788289a5a 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.cluster.node.stats; -import org.opensearch.LegacyESVersion; import org.opensearch.action.admin.indices.stats.CommonStatsFlags; import org.opensearch.action.support.nodes.BaseNodesRequest; import org.opensearch.common.io.stream.StreamInput; @@ -64,22 +63,7 @@ public NodesStatsRequest(StreamInput in) throws IOException { indices = new CommonStatsFlags(in); requestedMetrics.clear(); - if (in.getVersion().before(LegacyESVersion.V_7_7_0)) { - optionallyAddMetric(in.readBoolean(), Metric.OS.metricName()); - optionallyAddMetric(in.readBoolean(), Metric.PROCESS.metricName()); - optionallyAddMetric(in.readBoolean(), Metric.JVM.metricName()); - optionallyAddMetric(in.readBoolean(), Metric.THREAD_POOL.metricName()); - optionallyAddMetric(in.readBoolean(), Metric.FS.metricName()); - optionallyAddMetric(in.readBoolean(), Metric.TRANSPORT.metricName()); - optionallyAddMetric(in.readBoolean(), Metric.HTTP.metricName()); - optionallyAddMetric(in.readBoolean(), Metric.BREAKER.metricName()); - optionallyAddMetric(in.readBoolean(), Metric.SCRIPT.metricName()); - optionallyAddMetric(in.readBoolean(), Metric.DISCOVERY.metricName()); - optionallyAddMetric(in.readBoolean(), Metric.INGEST.metricName()); - optionallyAddMetric(in.readBoolean(), Metric.ADAPTIVE_SELECTION.metricName()); - } else { - requestedMetrics.addAll(in.readStringList()); - } + requestedMetrics.addAll(in.readStringList()); } /** @@ -200,22 +184,7 @@ private void optionallyAddMetric(boolean includeMetric, String metricName) { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); indices.writeTo(out); - if (out.getVersion().before(LegacyESVersion.V_7_7_0)) { - out.writeBoolean(Metric.OS.containedIn(requestedMetrics)); - out.writeBoolean(Metric.PROCESS.containedIn(requestedMetrics)); - out.writeBoolean(Metric.JVM.containedIn(requestedMetrics)); - out.writeBoolean(Metric.THREAD_POOL.containedIn(requestedMetrics)); - out.writeBoolean(Metric.FS.containedIn(requestedMetrics)); - out.writeBoolean(Metric.TRANSPORT.containedIn(requestedMetrics)); - out.writeBoolean(Metric.HTTP.containedIn(requestedMetrics)); - out.writeBoolean(Metric.BREAKER.containedIn(requestedMetrics)); - out.writeBoolean(Metric.SCRIPT.containedIn(requestedMetrics)); - out.writeBoolean(Metric.DISCOVERY.containedIn(requestedMetrics)); - out.writeBoolean(Metric.INGEST.containedIn(requestedMetrics)); - out.writeBoolean(Metric.ADAPTIVE_SELECTION.containedIn(requestedMetrics)); - } else { - 
out.writeStringArray(requestedMetrics.toArray(new String[0])); - } + out.writeStringArray(requestedMetrics.toArray(new String[0])); } /** diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java index 644c7f02d45f0..5d5d54c8fe7ed 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java @@ -34,7 +34,6 @@ import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.TransportNodesAction; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; @@ -42,6 +41,7 @@ import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.node.NodeService; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import java.io.IOException; @@ -127,7 +127,7 @@ protected NodeStats nodeOperation(NodeStatsRequest nodeStatsRequest) { * * @opensearch.internal */ - public static class NodeStatsRequest extends BaseNodeRequest { + public static class NodeStatsRequest extends TransportRequest { NodesStatsRequest request; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequest.java index a4b24a7a91f1f..794c942a4e7d2 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequest.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.cluster.node.tasks.cancel; -import org.opensearch.LegacyESVersion; import org.opensearch.action.support.tasks.BaseTasksRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -60,18 +59,14 @@ public CancelTasksRequest() {} public CancelTasksRequest(StreamInput in) throws IOException { super(in); this.reason = in.readString(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_8_0)) { - waitForCompletion = in.readBoolean(); - } + waitForCompletion = in.readBoolean(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(reason); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_8_0)) { - out.writeBoolean(waitForCompletion); - } + out.writeBoolean(waitForCompletion); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodeUsage.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodeUsage.java index 58e43ca9f3568..be2a1141b8a02 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodeUsage.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodeUsage.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.cluster.node.usage; -import org.opensearch.LegacyESVersion; import org.opensearch.action.support.nodes.BaseNodeResponse; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.io.stream.StreamInput; @@ -61,11 +60,7 @@ public 
NodeUsage(StreamInput in) throws IOException { timestamp = in.readLong(); sinceTime = in.readLong(); restUsage = (Map) in.readGenericValue(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_8_0)) { - aggregationUsage = (Map) in.readGenericValue(); - } else { - aggregationUsage = null; - } + aggregationUsage = (Map) in.readGenericValue(); } /** @@ -144,9 +139,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(timestamp); out.writeLong(sinceTime); out.writeGenericValue(restUsage); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_8_0)) { - out.writeGenericValue(aggregationUsage); - } + out.writeGenericValue(aggregationUsage); } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageRequest.java index 01f66bd843642..1badfa6d02f15 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageRequest.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.cluster.node.usage; -import org.opensearch.LegacyESVersion; import org.opensearch.action.support.nodes.BaseNodesRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -52,9 +51,7 @@ public class NodesUsageRequest extends BaseNodesRequest { public NodesUsageRequest(StreamInput in) throws IOException { super(in); this.restActions = in.readBoolean(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_8_0)) { - this.aggregations = in.readBoolean(); - } + this.aggregations = in.readBoolean(); } /** @@ -116,8 +113,6 @@ public NodesUsageRequest aggregations(boolean aggregations) { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeBoolean(restActions); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_8_0)) { - out.writeBoolean(aggregations); - } + out.writeBoolean(aggregations); } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/TransportNodesUsageAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/TransportNodesUsageAction.java index c7612f7e15838..dbd3673149efe 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/TransportNodesUsageAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/TransportNodesUsageAction.java @@ -34,7 +34,6 @@ import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.TransportNodesAction; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; @@ -42,6 +41,7 @@ import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.search.aggregations.support.AggregationUsageService; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import org.opensearch.usage.UsageService; @@ -117,7 +117,7 @@ protected NodeUsage nodeOperation(NodeUsageRequest nodeUsageRequest) { * * @opensearch.internal */ - public static class NodeUsageRequest extends BaseNodeRequest { + public static class NodeUsageRequest extends TransportRequest { NodesUsageRequest request; diff --git 
a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java index a3804db687a2d..07b918e427784 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java @@ -34,8 +34,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.LegacyESVersion; -import org.opensearch.Version; import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; import org.opensearch.action.StepListener; @@ -91,8 +89,6 @@ public final class TransportCleanupRepositoryAction extends TransportClusterMana private static final Logger logger = LogManager.getLogger(TransportCleanupRepositoryAction.class); - private static final Version MIN_VERSION = LegacyESVersion.V_7_4_0; - private final RepositoriesService repositoriesService; private final SnapshotsService snapshotsService; @@ -179,17 +175,7 @@ protected void clusterManagerOperation( ClusterState state, ActionListener listener ) { - if (state.nodes().getMinNodeVersion().onOrAfter(MIN_VERSION)) { - cleanupRepo(request.name(), ActionListener.map(listener, CleanupRepositoryResponse::new)); - } else { - throw new IllegalArgumentException( - "Repository cleanup is only supported from version [" - + MIN_VERSION - + "] but the oldest node version in the cluster is [" - + state.nodes().getMinNodeVersion() - + ']' - ); - } + cleanupRepo(request.name(), ActionListener.map(listener, CleanupRepositoryResponse::new)); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java index d78a4c95246b4..cb64718ed5843 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java @@ -60,7 +60,6 @@ import static org.opensearch.common.settings.Settings.readSettingsFromStream; import static org.opensearch.common.settings.Settings.writeSettingsToStream; import static org.opensearch.common.xcontent.support.XContentMapValues.nodeBooleanValue; -import static org.opensearch.snapshots.SnapshotInfo.METADATA_FIELD_INTRODUCED; /** * Create snapshot request @@ -124,9 +123,7 @@ public CreateSnapshotRequest(StreamInput in) throws IOException { includeGlobalState = in.readBoolean(); waitForCompletion = in.readBoolean(); partial = in.readBoolean(); - if (in.getVersion().onOrAfter(METADATA_FIELD_INTRODUCED)) { - userMetadata = in.readMap(); - } + userMetadata = in.readMap(); } @Override @@ -140,9 +137,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(includeGlobalState); out.writeBoolean(waitForCompletion); out.writeBoolean(partial); - if (out.getVersion().onOrAfter(METADATA_FIELD_INTRODUCED)) { - out.writeMap(userMetadata); - } + out.writeMap(userMetadata); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java 
b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java index ed4af6d915792..f604a30121797 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java @@ -103,18 +103,10 @@ protected void clusterManagerOperation( ClusterState state, final ActionListener listener ) { - if (state.nodes().getMinNodeVersion().before(SnapshotsService.NO_REPO_INITIALIZE_VERSION)) { - if (request.waitForCompletion()) { - snapshotsService.executeSnapshotLegacy(request, ActionListener.map(listener, CreateSnapshotResponse::new)); - } else { - snapshotsService.createSnapshotLegacy(request, ActionListener.map(listener, snapshot -> new CreateSnapshotResponse())); - } + if (request.waitForCompletion()) { + snapshotsService.executeSnapshot(request, ActionListener.map(listener, CreateSnapshotResponse::new)); } else { - if (request.waitForCompletion()) { - snapshotsService.executeSnapshot(request, ActionListener.map(listener, CreateSnapshotResponse::new)); - } else { - snapshotsService.createSnapshot(request, ActionListener.map(listener, snapshot -> new CreateSnapshotResponse())); - } + snapshotsService.createSnapshot(request, ActionListener.map(listener, snapshot -> new CreateSnapshotResponse())); } } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java index 61c4fdb9d5c14..832b37050ffe6 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java @@ -36,7 +36,6 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; -import org.opensearch.snapshots.SnapshotsService; import java.io.IOException; @@ -84,27 +83,14 @@ public DeleteSnapshotRequest(String repository) { public DeleteSnapshotRequest(StreamInput in) throws IOException { super(in); repository = in.readString(); - if (in.getVersion().onOrAfter(SnapshotsService.MULTI_DELETE_VERSION)) { - snapshots = in.readStringArray(); - } else { - snapshots = new String[] { in.readString() }; - } + snapshots = in.readStringArray(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(repository); - if (out.getVersion().onOrAfter(SnapshotsService.MULTI_DELETE_VERSION)) { - out.writeStringArray(snapshots); - } else { - if (snapshots.length != 1) { - throw new IllegalArgumentException( - "Can't write snapshot delete with more than one snapshot to version [" + out.getVersion() + "]" - ); - } - out.writeString(snapshots[0]); - } + out.writeStringArray(snapshots); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java index 1b673217a248b..4f998b3484642 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java @@ -32,7 
+32,7 @@ package org.opensearch.action.admin.cluster.snapshots.restore; -import org.opensearch.LegacyESVersion; +import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; @@ -42,6 +42,7 @@ import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.ToXContentObject; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentType; @@ -68,6 +69,38 @@ public class RestoreSnapshotRequest extends ClusterManagerNodeRequest source) { } else { throw new IllegalArgumentException("malformed ignore_index_settings section, should be an array of strings"); } + } else if (name.equals("storage_type")) { + if (FeatureFlags.isEnabled(FeatureFlags.SEARCHABLE_SNAPSHOT)) { + if (entry.getValue() instanceof String) { + storageType(StorageType.fromString((String) entry.getValue())); + } else { + throw new IllegalArgumentException("malformed storage_type"); + } + } else { + throw new IllegalArgumentException( + "Unsupported parameter " + name + ". Feature flag is not enabled for this experimental feature" + ); + } } else { if (IndicesOptions.isIndicesOptions(name) == false) { throw new IllegalArgumentException("Unknown parameter " + name); @@ -579,6 +633,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.value(ignoreIndexSetting); } builder.endArray(); + if (FeatureFlags.isEnabled(FeatureFlags.SEARCHABLE_SNAPSHOT) && storageType != null) { + storageType.toXContent(builder); + } builder.endObject(); return builder; } @@ -605,7 +662,8 @@ public boolean equals(Object o) { && Objects.equals(renameReplacement, that.renameReplacement) && Objects.equals(indexSettings, that.indexSettings) && Arrays.equals(ignoreIndexSettings, that.ignoreIndexSettings) - && Objects.equals(snapshotUuid, that.snapshotUuid); + && Objects.equals(snapshotUuid, that.snapshotUuid) + && Objects.equals(storageType, that.storageType); } @Override @@ -621,7 +679,8 @@ public int hashCode() { partial, includeAliases, indexSettings, - snapshotUuid + snapshotUuid, + storageType ); result = 31 * result + Arrays.hashCode(indices); result = 31 * result + Arrays.hashCode(ignoreIndexSettings); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java index 68397851699fb..0104637a00035 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java @@ -248,4 +248,12 @@ public RestoreSnapshotRequestBuilder setIgnoreIndexSettings(List ignoreI request.ignoreIndexSettings(ignoreIndexSettings); return this; } + + /** + * Sets the storage type + */ + public RestoreSnapshotRequestBuilder setStorageType(RestoreSnapshotRequest.StorageType storageType) { + request.storageType(storageType); + return this; + } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatus.java 
b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatus.java index 8fd1ed22a0d14..5fa908a039887 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatus.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatus.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.cluster.snapshots.status; -import org.opensearch.LegacyESVersion; import org.opensearch.cluster.SnapshotsInProgress; import org.opensearch.cluster.SnapshotsInProgress.State; import org.opensearch.common.Nullable; @@ -92,15 +91,8 @@ public class SnapshotStatus implements ToXContentObject, Writeable { state = State.fromValue(in.readByte()); shards = Collections.unmodifiableList(in.readList(SnapshotIndexShardStatus::new)); includeGlobalState = in.readOptionalBoolean(); - final long startTime; - final long time; - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - startTime = in.readLong(); - time = in.readLong(); - } else { - startTime = 0L; - time = 0L; - } + final long startTime = in.readLong(); + final long time = in.readLong(); updateShardStats(startTime, time); } @@ -207,10 +199,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeByte(state.value()); out.writeList(shards); out.writeOptionalBoolean(includeGlobalState); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - out.writeLong(stats.getStartTime()); - out.writeLong(stats.getTime()); - } + out.writeLong(stats.getStartTime()); + out.writeLong(stats.getTime()); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java index 86d0499a23f9e..e9bf564afaf32 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java @@ -36,7 +36,6 @@ import org.opensearch.action.ActionType; import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.BaseNodeResponse; import org.opensearch.action.support.nodes.BaseNodesRequest; import org.opensearch.action.support.nodes.BaseNodesResponse; @@ -51,6 +50,7 @@ import org.opensearch.snapshots.Snapshot; import org.opensearch.snapshots.SnapshotShardsService; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import java.io.IOException; @@ -207,7 +207,7 @@ protected void writeNodesTo(StreamOutput out, List nodes) th * * @opensearch.internal */ - public static class NodeRequest extends BaseNodeRequest { + public static class NodeRequest extends TransportRequest { private final List snapshots; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java index 89cd112d30c79..d2d7d843e19db 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java @@ -32,14 +32,12 @@ package org.opensearch.action.admin.cluster.state; -import 
org.opensearch.LegacyESVersion; import org.opensearch.action.ActionResponse; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; -import org.opensearch.common.unit.ByteSizeValue; import java.io.IOException; import java.util.Objects; @@ -59,9 +57,6 @@ public ClusterStateResponse(StreamInput in) throws IOException { super(in); clusterName = new ClusterName(in); clusterState = in.readOptionalWriteable(innerIn -> ClusterState.readFrom(innerIn, null)); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - new ByteSizeValue(in); - } waitForTimedOut = in.readBoolean(); } @@ -98,9 +93,6 @@ public boolean isWaitForTimedOut() { public void writeTo(StreamOutput out) throws IOException { clusterName.writeTo(out); out.writeOptionalWriteable(clusterState); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - ByteSizeValue.ZERO.writeTo(out); - } out.writeBoolean(waitForTimedOut); } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsResponse.java index c6519d6669ea8..8c5d741251501 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsResponse.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.cluster.stats; -import org.opensearch.LegacyESVersion; import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.nodes.BaseNodesResponse; import org.opensearch.cluster.ClusterName; @@ -71,11 +70,9 @@ public ClusterStatsResponse(StreamInput in) throws IOException { String clusterUUID = null; MappingStats mappingStats = null; AnalysisStats analysisStats = null; - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_7_0)) { - clusterUUID = in.readOptionalString(); - mappingStats = in.readOptionalWriteable(MappingStats::new); - analysisStats = in.readOptionalWriteable(AnalysisStats::new); - } + clusterUUID = in.readOptionalString(); + mappingStats = in.readOptionalWriteable(MappingStats::new); + analysisStats = in.readOptionalWriteable(AnalysisStats::new); this.clusterUUID = clusterUUID; // built from nodes rather than from the stream directly @@ -132,11 +129,9 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeVLong(timestamp); out.writeOptionalWriteable(status); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_7_0)) { - out.writeOptionalString(clusterUUID); - out.writeOptionalWriteable(indicesStats.getMappings()); - out.writeOptionalWriteable(indicesStats.getAnalysis()); - } + out.writeOptionalString(clusterUUID); + out.writeOptionalWriteable(indicesStats.getMappings()); + out.writeOptionalWriteable(indicesStats.getAnalysis()); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java index a13932e137ab0..401813a6174fa 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -40,7 +40,6 @@ import org.opensearch.action.admin.indices.stats.CommonStatsFlags; import 
org.opensearch.action.admin.indices.stats.ShardStats; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.TransportNodesAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.health.ClusterHealthStatus; @@ -57,6 +56,7 @@ import org.opensearch.indices.IndicesService; import org.opensearch.node.NodeService; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import org.opensearch.transport.Transports; @@ -216,7 +216,7 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq * * @opensearch.internal */ - public static class ClusterStatsNodeRequest extends BaseNodeRequest { + public static class ClusterStatsNodeRequest extends TransportRequest { ClusterStatsRequest request; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/Alias.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/Alias.java index f9a785d1759d8..3f78d99d49423 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/Alias.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/Alias.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.indices.alias; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchGenerationException; import org.opensearch.common.Nullable; import org.opensearch.common.ParseField; @@ -90,11 +89,7 @@ public Alias(StreamInput in) throws IOException { indexRouting = in.readOptionalString(); searchRouting = in.readOptionalString(); writeIndex = in.readOptionalBoolean(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_7_0)) { - isHidden = in.readOptionalBoolean(); - } else { - isHidden = null; - } + isHidden = in.readOptionalBoolean(); } public Alias(String name) { @@ -236,9 +231,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(indexRouting); out.writeOptionalString(searchRouting); out.writeOptionalBoolean(writeIndex); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_7_0)) { - out.writeOptionalBoolean(isHidden); - } + out.writeOptionalBoolean(isHidden); } /** diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java index 62f51aa3f3bff..97f9911d8227e 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.indices.alias; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchGenerationException; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.AliasesRequest; @@ -88,11 +87,7 @@ public class IndicesAliasesRequest extends AcknowledgedRequest tokens, DetailAnalyzeResponse detail) { } public Response(StreamInput in) throws IOException { - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - AnalyzeToken[] tokenArray = in.readOptionalArray(AnalyzeToken::new, AnalyzeToken[]::new); - tokens = tokenArray != null ? 
Arrays.asList(tokenArray) : null; - } else { - int size = in.readVInt(); - if (size > 0) { - tokens = new ArrayList<>(size); - for (int i = 0; i < size; i++) { - tokens.add(new AnalyzeToken(in)); - } - } else { - tokens = null; - } - } + AnalyzeToken[] tokenArray = in.readOptionalArray(AnalyzeToken::new, AnalyzeToken[]::new); + tokens = tokenArray != null ? Arrays.asList(tokenArray) : null; detail = in.readOptionalWriteable(DetailAnalyzeResponse::new); } @@ -371,22 +358,11 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - AnalyzeToken[] tokenArray = null; - if (tokens != null) { - tokenArray = tokens.toArray(new AnalyzeToken[0]); - } - out.writeOptionalArray(tokenArray); - } else { - if (tokens != null) { - out.writeVInt(tokens.size()); - for (AnalyzeToken token : tokens) { - token.writeTo(out); - } - } else { - out.writeVInt(0); - } + AnalyzeToken[] tokenArray = null; + if (tokens != null) { + tokenArray = tokens.toArray(new AnalyzeToken[0]); } + out.writeOptionalArray(tokenArray); out.writeOptionalWriteable(detail); } @@ -766,19 +742,7 @@ public AnalyzeTokenList(String name, AnalyzeToken[] tokens) { AnalyzeTokenList(StreamInput in) throws IOException { name = in.readString(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - tokens = in.readOptionalArray(AnalyzeToken::new, AnalyzeToken[]::new); - } else { - int size = in.readVInt(); - if (size > 0) { - tokens = new AnalyzeToken[size]; - for (int i = 0; i < size; i++) { - tokens[i] = new AnalyzeToken(in); - } - } else { - tokens = null; - } - } + tokens = in.readOptionalArray(AnalyzeToken::new, AnalyzeToken[]::new); } public String getName() { @@ -811,18 +775,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(name); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - out.writeOptionalArray(tokens); - } else { - if (tokens != null) { - out.writeVInt(tokens.length); - for (AnalyzeToken token : tokens) { - token.writeTo(out); - } - } else { - out.writeVInt(0); - } - } + out.writeOptionalArray(tokens); } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java index b16cabfda4d67..1095cec447442 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.indices.close; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.ActiveShardCount; @@ -61,11 +60,7 @@ public CloseIndexRequest(StreamInput in) throws IOException { super(in); indices = in.readStringArray(); indicesOptions = IndicesOptions.readIndicesOptions(in); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - waitForActiveShards = ActiveShardCount.readFrom(in); - } else { - waitForActiveShards = ActiveShardCount.NONE; - } + waitForActiveShards = ActiveShardCount.readFrom(in); } public CloseIndexRequest() {} @@ -143,8 +138,6 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); 
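[Editorial aside, not part of the patch: the hunks above all repeat one pattern. Each `Writeable` reads its fields in the `StreamInput` constructor and writes them in `writeTo(StreamOutput)`, so when a version gate is deleted from one side it must be deleted from the other at the same stream position. A minimal hedged sketch with a hypothetical `ExampleRequest` class, using only the OpenSearch stream API the diff itself relies on:]

```java
// Hedged sketch, not code from this PR: a minimal Writeable showing the
// read/write symmetry the cleanup depends on. Every version-gated field
// removed from writeTo must also be removed from the StreamInput
// constructor, at the same position in the stream.
import org.opensearch.common.io.stream.BytesStreamOutput;
import org.opensearch.common.io.stream.StreamInput;
import org.opensearch.common.io.stream.StreamOutput;
import org.opensearch.common.io.stream.Writeable;

import java.io.IOException;

public final class ExampleRequest implements Writeable {
    private final String routing;  // optional on the wire
    private final boolean refresh;

    public ExampleRequest(String routing, boolean refresh) {
        this.routing = routing;
        this.refresh = refresh;
    }

    // Read side: field order mirrors writeTo exactly.
    public ExampleRequest(StreamInput in) throws IOException {
        routing = in.readOptionalString();
        refresh = in.readBoolean();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeOptionalString(routing);
        out.writeBoolean(refresh);
    }

    // Round-trip check of the kind the wire-serialization unit tests automate.
    public static void main(String[] args) throws IOException {
        ExampleRequest original = new ExampleRequest(null, true);
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            original.writeTo(out);
            try (StreamInput in = out.bytes().streamInput()) {
                ExampleRequest copy = new ExampleRequest(in);
                assert copy.refresh == original.refresh;
            }
        }
    }
}
```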
out.writeStringArray(indices); indicesOptions.writeIndicesOptions(out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - waitForActiveShards.writeTo(out); - } + waitForActiveShards.writeTo(out); } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java index 1fc9017359a8c..0388ea47bfc69 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java @@ -31,7 +31,6 @@ package org.opensearch.action.admin.indices.close; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.master.ShardsAcknowledgedResponse; @@ -49,7 +48,6 @@ import java.util.List; import java.util.Objects; -import static java.util.Collections.emptyList; import static java.util.Collections.unmodifiableList; /** @@ -62,12 +60,8 @@ public class CloseIndexResponse extends ShardsAcknowledgedResponse { private final List indices; CloseIndexResponse(StreamInput in) throws IOException { - super(in, in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - indices = unmodifiableList(in.readList(IndexResult::new)); - } else { - indices = unmodifiableList(emptyList()); - } + super(in, true); + indices = unmodifiableList(in.readList(IndexResult::new)); } public CloseIndexResponse(final boolean acknowledged, final boolean shardsAcknowledged, final List indices) { @@ -82,12 +76,8 @@ public List getIndices() { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - writeShardsAcknowledged(out); - } - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - out.writeList(indices); - } + writeShardsAcknowledged(out); + out.writeList(indices); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java b/server/src/main/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java index fe39e2a254301..691b2c7c95730 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java @@ -33,7 +33,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.action.support.ActionFilters; @@ -205,11 +204,7 @@ public static class ShardRequest extends ReplicationRequest { ShardRequest(StreamInput in) throws IOException { super(in); clusterBlock = new ClusterBlock(in); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - phase1 = in.readBoolean(); - } else { - phase1 = false; - } + phase1 = in.readBoolean(); } public ShardRequest(final ShardId shardId, final ClusterBlock clusterBlock, final boolean phase1, final TaskId parentTaskId) { @@ -228,9 +223,7 @@ public String toString() { public void writeTo(final StreamOutput out) throws IOException { super.writeTo(out); clusterBlock.writeTo(out); - if 
(out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - out.writeBoolean(phase1); - } + out.writeBoolean(phase1); } public ClusterBlock clusterBlock() { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java index 95837d82be7ac..302c2aad64bb4 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.indices.create; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchGenerationException; import org.opensearch.OpenSearchParseException; import org.opensearch.Version; @@ -126,9 +125,6 @@ public CreateIndexRequest(StreamInput in) throws IOException { for (int i = 0; i < aliasesSize; i++) { aliases.add(new Alias(in)); } - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - in.readBoolean(); // updateAllTypes - } waitForActiveShards = ActiveShardCount.readFrom(in); } @@ -505,9 +501,6 @@ public void writeTo(StreamOutput out) throws IOException { for (Alias alias : aliases) { alias.writeTo(out); } - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeBoolean(true); // updateAllTypes - } waitForActiveShards.writeTo(out); } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/NodeFindDanglingIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/NodeFindDanglingIndexRequest.java index 6026dd10c607b..6885de74e4479 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/NodeFindDanglingIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/NodeFindDanglingIndexRequest.java @@ -34,16 +34,16 @@ import java.io.IOException; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.transport.TransportRequest; /** * Used when querying every node in the cluster for a specific dangling index. * * @opensearch.internal */ -public class NodeFindDanglingIndexRequest extends BaseNodeRequest { +public class NodeFindDanglingIndexRequest extends TransportRequest { private final String indexUUID; public NodeFindDanglingIndexRequest(String indexUUID) { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/NodeListDanglingIndicesRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/NodeListDanglingIndicesRequest.java index 9b737fff8316e..696daf75942fb 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/NodeListDanglingIndicesRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/NodeListDanglingIndicesRequest.java @@ -32,9 +32,9 @@ package org.opensearch.action.admin.indices.dangling.list; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.transport.TransportRequest; import java.io.IOException; @@ -43,7 +43,7 @@ * * @opensearch.internal */ -public class NodeListDanglingIndicesRequest extends BaseNodeRequest { +public class NodeListDanglingIndicesRequest extends TransportRequest { /** * Filter the response by index UUID. 
Leave as null to find all indices. */ diff --git a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequest.java index b428c3523b666..77007fba539ec 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequest.java @@ -32,13 +32,12 @@ package org.opensearch.action.admin.indices.forcemerge; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.support.broadcast.BroadcastRequest; -import org.opensearch.common.Nullable; import org.opensearch.common.UUIDs; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.index.engine.Engine; import java.io.IOException; import java.util.Arrays; @@ -74,13 +73,12 @@ public static final class Defaults { private boolean onlyExpungeDeletes = Defaults.ONLY_EXPUNGE_DELETES; private boolean flush = Defaults.FLUSH; - private static final Version FORCE_MERGE_UUID_VERSION = LegacyESVersion.V_7_7_0; + private static final Version FORCE_MERGE_UUID_VERSION = Version.V_3_0_0; /** * Force merge UUID to store in the live commit data of a shard under * {@link org.opensearch.index.engine.Engine#FORCE_MERGE_UUID_KEY} after force merging it. */ - @Nullable private final String forceMergeUUID; /** @@ -99,9 +97,11 @@ public ForceMergeRequest(StreamInput in) throws IOException { onlyExpungeDeletes = in.readBoolean(); flush = in.readBoolean(); if (in.getVersion().onOrAfter(FORCE_MERGE_UUID_VERSION)) { - forceMergeUUID = in.readOptionalString(); - } else { - forceMergeUUID = null; + forceMergeUUID = in.readString(); + } else if ((forceMergeUUID = in.readOptionalString()) == null) { + throw new IllegalStateException( + "As of legacy version 7.7 [" + Engine.FORCE_MERGE_UUID_KEY + "] is no longer optional in force merge requests." + ); } } @@ -143,7 +143,6 @@ public ForceMergeRequest onlyExpungeDeletes(boolean onlyExpungeDeletes) { * Force merge UUID to use when force merging or {@code null} if not using one in a mixed version cluster containing nodes older than * {@link #FORCE_MERGE_UUID_VERSION}. 
*/ - @Nullable public String forceMergeUUID() { return forceMergeUUID; } @@ -183,6 +182,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(onlyExpungeDeletes); out.writeBoolean(flush); if (out.getVersion().onOrAfter(FORCE_MERGE_UUID_VERSION)) { + out.writeString(forceMergeUUID); + } else { out.writeOptionalString(forceMergeUUID); } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexResponse.java index e93fbe86e4ece..9de7dbadaa7df 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexResponse.java @@ -33,7 +33,6 @@ package org.opensearch.action.admin.indices.get; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.ActionResponse; import org.opensearch.cluster.metadata.AliasMetadata; @@ -152,14 +151,12 @@ public GetIndexResponse( } defaultSettings = defaultSettingsMapBuilder.build(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_8_0)) { - ImmutableOpenMap.Builder<String, String> dataStreamsMapBuilder = ImmutableOpenMap.builder(); - int dataStreamsSize = in.readVInt(); - for (int i = 0; i < dataStreamsSize; i++) { - dataStreamsMapBuilder.put(in.readString(), in.readOptionalString()); - } - dataStreams = dataStreamsMapBuilder.build(); + ImmutableOpenMap.Builder<String, String> dataStreamsMapBuilder = ImmutableOpenMap.builder(); + int dataStreamsSize = in.readVInt(); + for (int i = 0; i < dataStreamsSize; i++) { + dataStreamsMapBuilder.put(in.readString(), in.readOptionalString()); } + dataStreams = dataStreamsMapBuilder.build(); } public String[] indices() { @@ -272,12 +269,10 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(indexEntry.key); Settings.writeSettingsToStream(indexEntry.value, out); } - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_8_0)) { - out.writeVInt(dataStreams.size()); - for (ObjectObjectCursor<String, String> indexEntry : dataStreams) { - out.writeString(indexEntry.key); - out.writeOptionalString(indexEntry.value); - } + out.writeVInt(dataStreams.size()); + for (ObjectObjectCursor<String, String> indexEntry : dataStreams) { + out.writeString(indexEntry.key); + out.writeOptionalString(indexEntry.value); } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java index 64f76db5e1549..6d238a385231f 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java @@ -33,7 +33,6 @@ package org.opensearch.action.admin.indices.mapping.get; import org.opensearch.OpenSearchException; -import org.opensearch.Version; import org.opensearch.action.admin.indices.mapping.get.GetFieldMappingsResponse.FieldMappingMetadata; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.single.shard.TransportSingleShardAction; @@ -120,8 +119,7 @@ protected ShardsIterator shards(ClusterState state, InternalRequest request) { protected GetFieldMappingsResponse shardOperation(final GetFieldMappingsIndexRequest request, ShardId shardId) { assert shardId != null; IndexService
indexService = indicesService.indexServiceSafe(shardId.getIndex()); - Version indexCreatedVersion = indexService.mapperService().getIndexSettings().getIndexVersionCreated(); - Predicate metadataFieldPredicate = (f) -> indicesService.isMetadataField(indexCreatedVersion, f); + Predicate metadataFieldPredicate = (f) -> indicesService.isMetadataField(f); Predicate fieldPredicate = metadataFieldPredicate.or(indicesService.getFieldFilter().apply(shardId.getIndexName())); DocumentMapper documentMapper = indexService.mapperService().documentMapper(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java index 85fd74f0762a5..df660e027ea3d 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java @@ -33,7 +33,6 @@ package org.opensearch.action.admin.indices.mapping.put; import com.carrotsearch.hppc.ObjectHashSet; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchGenerationException; import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; @@ -117,14 +116,9 @@ public PutMappingRequest(StreamInput in) throws IOException { } } source = in.readString(); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - in.readBoolean(); // updateAllTypes - } concreteIndex = in.readOptionalWriteable(Index::new); origin = in.readOptionalString(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { - writeIndexOnly = in.readBoolean(); - } + writeIndexOnly = in.readBoolean(); } public PutMappingRequest() {} @@ -349,14 +343,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(MapperService.SINGLE_MAPPING_NAME); } out.writeString(source); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeBoolean(true); // updateAllTypes - } out.writeOptionalWriteable(concreteIndex); out.writeOptionalString(origin); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { - out.writeBoolean(writeIndexOnly); - } + out.writeBoolean(writeIndexOnly); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentResponse.java index c3992f8d42967..e21616657502a 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentResponse.java @@ -36,18 +36,15 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.search.SortedNumericSortField; import org.apache.lucene.search.SortedSetSortField; -import org.apache.lucene.util.Accountable; import org.opensearch.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.broadcast.BroadcastResponse; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.index.engine.Segment; import java.io.IOException; import java.util.ArrayList; -import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -197,21 +194,6 @@ private static void toXContent(XContentBuilder builder, 
Sort sort) throws IOExce builder.endArray(); } - private static void toXContent(XContentBuilder builder, Accountable tree) throws IOException { - builder.startObject(); - builder.field(Fields.DESCRIPTION, tree.toString()); - builder.humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, new ByteSizeValue(tree.ramBytesUsed())); - Collection children = tree.getChildResources(); - if (children.isEmpty() == false) { - builder.startArray(Fields.CHILDREN); - for (Accountable child : children) { - toXContent(builder, child); - } - builder.endArray(); - } - builder.endObject(); - } - /** * Fields for parsing and toXContent * diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresResponse.java index bd5d9c651af7a..484bc93496fc8 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresResponse.java @@ -34,7 +34,6 @@ import com.carrotsearch.hppc.cursors.IntObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionResponse; import org.opensearch.action.support.DefaultShardOperationFailedException; @@ -247,13 +246,8 @@ public Failure(String nodeId, String index, int shardId, Throwable reason) { } private Failure(StreamInput in) throws IOException { - if (in.getVersion().before(LegacyESVersion.V_7_4_0)) { - nodeId = in.readString(); - } readFrom(in, this); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - nodeId = in.readString(); - } + nodeId = in.readString(); } public String nodeId() { @@ -266,13 +260,8 @@ static Failure readFailure(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().before(LegacyESVersion.V_7_4_0)) { - out.writeString(nodeId); - } super.writeTo(out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - out.writeString(nodeId); - } + out.writeString(nodeId); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java index 50784e60a3f19..f5d9528422b58 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java @@ -31,7 +31,6 @@ package org.opensearch.action.admin.indices.shrink; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.IndicesRequest; import org.opensearch.action.admin.indices.alias.Alias; @@ -122,9 +121,6 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); targetIndexRequest.writeTo(out); out.writeString(sourceIndex); - if (type == ResizeType.CLONE && out.getVersion().before(LegacyESVersion.V_7_4_0)) { - throw new IllegalArgumentException("can't send clone request to a node that's older than " + LegacyESVersion.V_7_4_0); - } out.writeEnum(type); out.writeOptionalBoolean(copySettings); } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java index 9a24d8a42dc9d..fd3d6daa9c393 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.indices.stats; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.common.Strings; import org.opensearch.common.io.stream.StreamInput; @@ -87,9 +86,7 @@ public CommonStatsFlags(StreamInput in) throws IOException { fieldDataFields = in.readStringArray(); completionDataFields = in.readStringArray(); includeSegmentFileSizes = in.readBoolean(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - includeUnloadedSegments = in.readBoolean(); - } + includeUnloadedSegments = in.readBoolean(); if (in.getVersion().onOrAfter(Version.V_1_2_0)) { includeAllShardIndexingPressureTrackers = in.readBoolean(); includeOnlyTopIndexingPressureMetrics = in.readBoolean(); @@ -111,9 +108,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeStringArrayNullable(fieldDataFields); out.writeStringArrayNullable(completionDataFields); out.writeBoolean(includeSegmentFileSizes); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - out.writeBoolean(includeUnloadedSegments); - } + out.writeBoolean(includeUnloadedSegments); if (out.getVersion().onOrAfter(Version.V_1_2_0)) { out.writeBoolean(includeAllShardIndexingPressureTrackers); out.writeBoolean(includeOnlyTopIndexingPressureMetrics); diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkItemRequest.java b/server/src/main/java/org/opensearch/action/bulk/BulkItemRequest.java index f39ba589a3019..9845413fe8af8 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkItemRequest.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkItemRequest.java @@ -59,8 +59,7 @@ public class BulkItemRequest implements Writeable, Accountable { private volatile BulkItemResponse primaryResponse; /** - * @param shardId {@code null} if reading from a stream before {@link BulkShardRequest#COMPACT_SHARD_ID_VERSION} to force BwC read - * that includes shard id + * @param shardId the shard id */ BulkItemRequest(@Nullable ShardId shardId, StreamInput in) throws IOException { id = in.readVInt(); diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkItemResponse.java b/server/src/main/java/org/opensearch/action/bulk/BulkItemResponse.java index 346e394bbb35e..364952dff82f1 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkItemResponse.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkItemResponse.java @@ -33,7 +33,6 @@ package org.opensearch.action.bulk; import org.opensearch.ExceptionsHelper; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.Version; import org.opensearch.action.DocWriteRequest.OpType; @@ -270,11 +269,7 @@ public Failure(StreamInput in) throws IOException { cause = in.readException(); status = ExceptionsHelper.status(cause); seqNo = in.readZLong(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_6_0)) { - term = in.readVLong(); - } else { - term = SequenceNumbers.UNASSIGNED_PRIMARY_TERM; - } + term = in.readVLong(); aborted = in.readBoolean(); } @@ -287,9 +282,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(id); out.writeException(cause); out.writeZLong(seqNo); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_6_0)) { - out.writeVLong(term); - } + out.writeVLong(term); out.writeBoolean(aborted); } diff --git 
a/server/src/main/java/org/opensearch/action/bulk/BulkShardRequest.java b/server/src/main/java/org/opensearch/action/bulk/BulkShardRequest.java index a7d0de98981ba..484f57abb8b07 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkShardRequest.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkShardRequest.java @@ -34,8 +34,6 @@ import org.apache.lucene.util.Accountable; import org.apache.lucene.util.RamUsageEstimator; -import org.opensearch.LegacyESVersion; -import org.opensearch.Version; import org.opensearch.action.support.replication.ReplicatedWriteRequest; import org.opensearch.action.support.replication.ReplicationRequest; import org.opensearch.common.io.stream.StreamInput; @@ -54,14 +52,13 @@ */ public class BulkShardRequest extends ReplicatedWriteRequest implements Accountable { - public static final Version COMPACT_SHARD_ID_VERSION = LegacyESVersion.V_7_9_0; private static final long SHALLOW_SIZE = RamUsageEstimator.shallowSizeOfInstance(BulkShardRequest.class); private final BulkItemRequest[] items; public BulkShardRequest(StreamInput in) throws IOException { super(in); - final ShardId itemShardId = in.getVersion().onOrAfter(COMPACT_SHARD_ID_VERSION) ? shardId : null; + final ShardId itemShardId = shardId; items = in.readArray(i -> i.readOptionalWriteable(inpt -> new BulkItemRequest(itemShardId, inpt)), BulkItemRequest[]::new); } @@ -95,14 +92,14 @@ public String[] indices() { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeArray(out.getVersion().onOrAfter(COMPACT_SHARD_ID_VERSION) ? (o, item) -> { + out.writeArray((o, item) -> { if (item != null) { o.writeBoolean(true); item.writeThin(o); } else { o.writeBoolean(false); } - } : StreamOutput::writeOptionalWriteable, items); + }, items); } @Override diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkShardResponse.java b/server/src/main/java/org/opensearch/action/bulk/BulkShardResponse.java index b96ebcb3a3c0a..cfedcde92194c 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkShardResponse.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkShardResponse.java @@ -32,8 +32,6 @@ package org.opensearch.action.bulk; -import org.opensearch.LegacyESVersion; -import org.opensearch.Version; import org.opensearch.action.DocWriteResponse; import org.opensearch.action.support.WriteResponse; import org.opensearch.action.support.replication.ReplicationResponse; @@ -50,18 +48,13 @@ */ public class BulkShardResponse extends ReplicationResponse implements WriteResponse { - private static final Version COMPACT_SHARD_ID_VERSION = LegacyESVersion.V_7_9_0; - private final ShardId shardId; private final BulkItemResponse[] responses; BulkShardResponse(StreamInput in) throws IOException { super(in); shardId = new ShardId(in); - responses = in.readArray( - in.getVersion().onOrAfter(COMPACT_SHARD_ID_VERSION) ? i -> new BulkItemResponse(shardId, i) : BulkItemResponse::new, - BulkItemResponse[]::new - ); + responses = in.readArray(i -> new BulkItemResponse(shardId, i), BulkItemResponse[]::new); } // NOTE: public for testing only @@ -96,9 +89,6 @@ public void setForcedRefresh(boolean forcedRefresh) { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); shardId.writeTo(out); - out.writeArray( - out.getVersion().onOrAfter(COMPACT_SHARD_ID_VERSION) ? 
(o, item) -> item.writeThin(out) : (o, item) -> item.writeTo(o), - responses - ); + out.writeArray((o, item) -> item.writeThin(out), responses); } } diff --git a/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java index de285983b846b..9e23213c02ab6 100644 --- a/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java @@ -36,7 +36,6 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SparseFixedBitSet; import org.opensearch.Assertions; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchParseException; import org.opensearch.ExceptionsHelper; import org.opensearch.ResourceAlreadyExistsException; @@ -455,11 +454,7 @@ void createIndex(String index, TimeValue timeout, Version minNodeVersion, Action createIndexRequest.index(index); createIndexRequest.cause("auto(bulk api)"); createIndexRequest.clusterManagerNodeTimeout(timeout); - if (minNodeVersion.onOrAfter(LegacyESVersion.V_7_8_0)) { - client.execute(AutoCreateAction.INSTANCE, createIndexRequest, listener); - } else { - client.admin().indices().create(createIndexRequest, listener); - } + client.execute(AutoCreateAction.INSTANCE, createIndexRequest, listener); } private boolean setResponseFailureIfIndexMatches( diff --git a/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java b/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java index ce723df0c383a..86880c0211c1d 100644 --- a/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java +++ b/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java @@ -33,7 +33,6 @@ package org.opensearch.action.delete; import org.apache.lucene.util.RamUsageEstimator; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.CompositeIndicesRequest; @@ -96,9 +95,6 @@ public DeleteRequest(@Nullable ShardId shardId, StreamInput in) throws IOExcepti } id = in.readString(); routing = in.readOptionalString(); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - in.readOptionalString(); // _parent - } version = in.readLong(); versionType = VersionType.fromValue(in.readByte()); ifSeqNo = in.readZLong(); @@ -280,9 +276,6 @@ private void writeBody(StreamOutput out) throws IOException { } out.writeString(id); out.writeOptionalString(routing()); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeOptionalString(null); // _parent - } out.writeLong(version); out.writeByte(versionType.getValue()); out.writeZLong(ifSeqNo); diff --git a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilities.java b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilities.java index 4fdf8d551af61..4afce67f98297 100644 --- a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilities.java +++ b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilities.java @@ -32,7 +32,6 @@ package org.opensearch.action.fieldcaps; -import org.opensearch.LegacyESVersion; import org.opensearch.common.ParseField; import org.opensearch.common.Strings; import org.opensearch.common.io.stream.StreamInput; @@ -125,11 +124,7 @@ public FieldCapabilities( this.indices = in.readOptionalStringArray(); this.nonSearchableIndices = in.readOptionalStringArray(); this.nonAggregatableIndices = in.readOptionalStringArray(); 
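[Editorial aside, not part of the patch: the `BulkShardRequest`/`BulkShardResponse` hunks just above also retire the `COMPACT_SHARD_ID_VERSION` switch, so the "thin" encoding is now used unconditionally: per-item shard ids are omitted because the enclosing shard-level message already carries one. A hedged sketch with a hypothetical `Item` type (the real classes are `BulkItemRequest`/`BulkItemResponse`):]

```java
// Hedged sketch (hypothetical Item type, not the real BulkItemRequest): the
// "thin" encoding omits the per-item ShardId because the enclosing shard-level
// message has already written it once for all items.
import org.opensearch.common.io.stream.StreamInput;
import org.opensearch.common.io.stream.StreamOutput;
import org.opensearch.common.io.stream.Writeable;
import org.opensearch.index.shard.ShardId; // package location in this 2.x tree

import java.io.IOException;

final class Item implements Writeable {
    final ShardId shardId; // shared by every item in the shard-level message
    final int slot;

    // Thin read: the caller passes the ShardId it already decoded.
    Item(ShardId shardId, StreamInput in) throws IOException {
        this.shardId = shardId;
        this.slot = in.readVInt();
    }

    // Thin write: deliberately does not serialize shardId.
    void writeThin(StreamOutput out) throws IOException {
        out.writeVInt(slot);
    }

    // Fat write: only needed if the item ever travels outside its shard
    // message; a matching fat read would decode the ShardId first.
    @Override
    public void writeTo(StreamOutput out) throws IOException {
        shardId.writeTo(out);
        writeThin(out);
    }
}
```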
- if (in.getVersion().onOrAfter(LegacyESVersion.V_7_6_0)) { - meta = in.readMap(StreamInput::readString, i -> i.readSet(StreamInput::readString)); - } else { - meta = Collections.emptyMap(); - } + this.meta = in.readMap(StreamInput::readString, i -> i.readSet(StreamInput::readString)); } @Override @@ -141,9 +136,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalStringArray(indices); out.writeOptionalStringArray(nonSearchableIndices); out.writeOptionalStringArray(nonAggregatableIndices); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_6_0)) { - out.writeMap(meta, StreamOutput::writeString, (o, set) -> o.writeCollection(set, StreamOutput::writeString)); - } + out.writeMap(meta, StreamOutput::writeString, (o, set) -> o.writeCollection(set, StreamOutput::writeString)); } @Override diff --git a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesIndexRequest.java b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesIndexRequest.java index 3cfd21087582d..2b6a89c66ef4b 100644 --- a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesIndexRequest.java @@ -32,7 +32,6 @@ package org.opensearch.action.fieldcaps; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionRequest; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.IndicesRequest; @@ -70,8 +69,8 @@ public class FieldCapabilitiesIndexRequest extends ActionRequest implements Indi index = in.readOptionalString(); fields = in.readStringArray(); originalIndices = OriginalIndices.readOriginalIndices(in); - indexFilter = in.getVersion().onOrAfter(LegacyESVersion.V_7_9_0) ? in.readOptionalNamedWriteable(QueryBuilder.class) : null; - nowInMillis = in.getVersion().onOrAfter(LegacyESVersion.V_7_9_0) ? in.readLong() : 0L; + indexFilter = in.readOptionalNamedWriteable(QueryBuilder.class); + nowInMillis = in.readLong(); } FieldCapabilitiesIndexRequest( @@ -133,10 +132,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(index); out.writeStringArray(fields); OriginalIndices.writeOriginalIndices(originalIndices, out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { - out.writeOptionalNamedWriteable(indexFilter); - out.writeLong(nowInMillis); - } + out.writeOptionalNamedWriteable(indexFilter); + out.writeLong(nowInMillis); } @Override diff --git a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java index 9225e0cdc6571..4da5d04fe9d7a 100644 --- a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java +++ b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java @@ -32,7 +32,6 @@ package org.opensearch.action.fieldcaps; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionResponse; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -62,7 +61,7 @@ public class FieldCapabilitiesIndexResponse extends ActionResponse implements Wr super(in); this.indexName = in.readString(); this.responseMap = in.readMap(StreamInput::readString, IndexFieldCapabilities::new); - this.canMatch = in.getVersion().onOrAfter(LegacyESVersion.V_7_9_0) ? 
in.readBoolean() : true; + this.canMatch = in.readBoolean(); } /** @@ -95,9 +94,7 @@ public IndexFieldCapabilities getField(String field) { public void writeTo(StreamOutput out) throws IOException { out.writeString(indexName); out.writeMap(responseMap, StreamOutput::writeString, (valueOut, fc) -> fc.writeTo(valueOut)); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { - out.writeBoolean(canMatch); - } + out.writeBoolean(canMatch); } @Override diff --git a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequest.java b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequest.java index 688568ba9a6d6..2e00c55c31dc6 100644 --- a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequest.java +++ b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequest.java @@ -32,7 +32,6 @@ package org.opensearch.action.fieldcaps; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionRequest; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.IndicesRequest; @@ -72,13 +71,9 @@ public FieldCapabilitiesRequest(StreamInput in) throws IOException { indices = in.readStringArray(); indicesOptions = IndicesOptions.readIndicesOptions(in); mergeResults = in.readBoolean(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - includeUnmapped = in.readBoolean(); - } else { - includeUnmapped = false; - } - indexFilter = in.getVersion().onOrAfter(LegacyESVersion.V_7_9_0) ? in.readOptionalNamedWriteable(QueryBuilder.class) : null; - nowInMillis = in.getVersion().onOrAfter(LegacyESVersion.V_7_9_0) ? in.readOptionalLong() : null; + includeUnmapped = in.readBoolean(); + indexFilter = in.readOptionalNamedWriteable(QueryBuilder.class); + nowInMillis = in.readOptionalLong(); } public FieldCapabilitiesRequest() {} @@ -109,13 +104,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeStringArray(indices); indicesOptions.writeIndicesOptions(out); out.writeBoolean(mergeResults); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - out.writeBoolean(includeUnmapped); - } - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { - out.writeOptionalNamedWriteable(indexFilter); - out.writeOptionalLong(nowInMillis); - } + out.writeBoolean(includeUnmapped); + out.writeOptionalNamedWriteable(indexFilter); + out.writeOptionalLong(nowInMillis); } @Override diff --git a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesResponse.java b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesResponse.java index e5f644987182c..847cca25ceb35 100644 --- a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesResponse.java +++ b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesResponse.java @@ -32,7 +32,6 @@ package org.opensearch.action.fieldcaps; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionResponse; import org.opensearch.common.ParseField; import org.opensearch.common.Strings; @@ -87,11 +86,7 @@ private FieldCapabilitiesResponse( public FieldCapabilitiesResponse(StreamInput in) throws IOException { super(in); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - indices = in.readStringArray(); - } else { - indices = Strings.EMPTY_ARRAY; - } + indices = in.readStringArray(); this.responseMap = in.readMap(StreamInput::readString, FieldCapabilitiesResponse::readField); indexResponses = in.readList(FieldCapabilitiesIndexResponse::new); } @@ -138,9 
+133,7 @@ private static Map readField(StreamInput in) throws I @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - out.writeStringArray(indices); - } + out.writeStringArray(indices); out.writeMap(responseMap, StreamOutput::writeString, FieldCapabilitiesResponse::writeField); out.writeList(indexResponses); } diff --git a/server/src/main/java/org/opensearch/action/fieldcaps/IndexFieldCapabilities.java b/server/src/main/java/org/opensearch/action/fieldcaps/IndexFieldCapabilities.java index 062e5bc1af7e5..73025c2eac3f0 100644 --- a/server/src/main/java/org/opensearch/action/fieldcaps/IndexFieldCapabilities.java +++ b/server/src/main/java/org/opensearch/action/fieldcaps/IndexFieldCapabilities.java @@ -32,17 +32,13 @@ package org.opensearch.action.fieldcaps; -import org.opensearch.LegacyESVersion; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.io.stream.Writeable; import java.io.IOException; -import java.util.Collections; import java.util.Map; import java.util.Objects; -import java.util.Set; -import java.util.stream.Collectors; /** * Describes the capabilities of a field in a single index. @@ -74,42 +70,20 @@ public class IndexFieldCapabilities implements Writeable { } IndexFieldCapabilities(StreamInput in) throws IOException { - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_7_0)) { - this.name = in.readString(); - this.type = in.readString(); - this.isSearchable = in.readBoolean(); - this.isAggregatable = in.readBoolean(); - this.meta = in.readMap(StreamInput::readString, StreamInput::readString); - } else { - // Previously we reused the FieldCapabilities class to represent index field capabilities. - FieldCapabilities fieldCaps = new FieldCapabilities(in); - this.name = fieldCaps.getName(); - this.type = fieldCaps.getType(); - this.isSearchable = fieldCaps.isSearchable(); - this.isAggregatable = fieldCaps.isAggregatable(); - this.meta = fieldCaps.meta() - .entrySet() - .stream() - .collect(Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().iterator().next())); - } + this.name = in.readString(); + this.type = in.readString(); + this.isSearchable = in.readBoolean(); + this.isAggregatable = in.readBoolean(); + this.meta = in.readMap(StreamInput::readString, StreamInput::readString); } @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_7_0)) { - out.writeString(name); - out.writeString(type); - out.writeBoolean(isSearchable); - out.writeBoolean(isAggregatable); - out.writeMap(meta, StreamOutput::writeString, StreamOutput::writeString); - } else { - // Previously we reused the FieldCapabilities class to represent index field capabilities. 
- Map<String, Set<String>> wrappedMeta = meta.entrySet() - .stream() - .collect(Collectors.toMap(Map.Entry::getKey, entry -> Collections.singleton(entry.getValue()))); - FieldCapabilities fieldCaps = new FieldCapabilities(name, type, isSearchable, isAggregatable, null, null, null, wrappedMeta); - fieldCaps.writeTo(out); - } + out.writeString(name); + out.writeString(type); + out.writeBoolean(isSearchable); + out.writeBoolean(isAggregatable); + out.writeMap(meta, StreamOutput::writeString, StreamOutput::writeString); } public String getName() { diff --git a/server/src/main/java/org/opensearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java b/server/src/main/java/org/opensearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java index 99962741299ca..7d9ab4ff93f59 100644 --- a/server/src/main/java/org/opensearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java +++ b/server/src/main/java/org/opensearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java @@ -153,8 +153,7 @@ private FieldCapabilitiesIndexResponse shardOperation(final FieldCapabilitiesInd for (String field : fieldNames) { MappedFieldType ft = mapperService.fieldType(field); if (ft != null) { - if (indicesService.isMetadataField(mapperService.getIndexSettings().getIndexVersionCreated(), field) - || fieldPredicate.test(ft.name())) { + if (indicesService.isMetadataField(field) || fieldPredicate.test(ft.name())) { IndexFieldCapabilities fieldCap = new IndexFieldCapabilities( field, ft.familyTypeName(), diff --git a/server/src/main/java/org/opensearch/action/get/GetRequest.java b/server/src/main/java/org/opensearch/action/get/GetRequest.java index 5f740ba789bb2..64148f070cc16 100644 --- a/server/src/main/java/org/opensearch/action/get/GetRequest.java +++ b/server/src/main/java/org/opensearch/action/get/GetRequest.java @@ -32,7 +32,6 @@ package org.opensearch.action.get; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.RealtimeRequest; @@ -89,9 +88,6 @@ public GetRequest() {} } id = in.readString(); routing = in.readOptionalString(); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - in.readOptionalString(); - } preference = in.readOptionalString(); refresh = in.readBoolean(); storedFields = in.readOptionalStringArray(); @@ -260,9 +256,6 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeString(id); out.writeOptionalString(routing); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeOptionalString(null); - } out.writeOptionalString(preference); out.writeBoolean(refresh); diff --git a/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java b/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java index 00df8657736ae..91f506dafafe1 100644 --- a/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java +++ b/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java @@ -32,7 +32,6 @@ package org.opensearch.action.get; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchParseException; import org.opensearch.Version; import org.opensearch.action.ActionRequest; @@ -114,9 +113,6 @@ public Item(StreamInput in) throws IOException { } id = in.readString(); routing = in.readOptionalString(); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - in.readOptionalString(); // _parent - } storedFields = in.readOptionalStringArray(); version = in.readLong(); versionType =
VersionType.fromValue(in.readByte()); @@ -211,9 +207,6 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeString(id); out.writeOptionalString(routing); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeOptionalString(null); // _parent - } out.writeOptionalStringArray(storedFields); out.writeLong(version); out.writeByte(versionType.getValue()); diff --git a/server/src/main/java/org/opensearch/action/get/MultiGetResponse.java b/server/src/main/java/org/opensearch/action/get/MultiGetResponse.java index b3664935b9489..a763564ddf855 100644 --- a/server/src/main/java/org/opensearch/action/get/MultiGetResponse.java +++ b/server/src/main/java/org/opensearch/action/get/MultiGetResponse.java @@ -63,6 +63,10 @@ public class MultiGetResponse extends ActionResponse implements Iterable(in.readString(), in.readBoolean()); } else { - this.description = null; - } - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_10_0)) { - this.type = in.readString(); - boolean hasConditional = in.readBoolean(); - if (hasConditional) { - this.conditionalWithResult = new Tuple<>(in.readString(), in.readBoolean()); - } else { - this.conditionalWithResult = null; // no condition exists - } - } else { - this.conditionalWithResult = null; - this.type = null; + this.conditionalWithResult = null; // no condition exists } } @@ -216,16 +206,12 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(processorTag); out.writeOptionalWriteable(ingestDocument); out.writeException(failure); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { - out.writeOptionalString(description); - } - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_10_0)) { - out.writeString(type); - out.writeBoolean(conditionalWithResult != null); - if (conditionalWithResult != null) { - out.writeString(conditionalWithResult.v1()); - out.writeBoolean(conditionalWithResult.v2()); - } + out.writeOptionalString(description); + out.writeString(type); + out.writeBoolean(conditionalWithResult != null); + if (conditionalWithResult != null) { + out.writeString(conditionalWithResult.v1()); + out.writeBoolean(conditionalWithResult.v2()); } } diff --git a/server/src/main/java/org/opensearch/action/main/MainResponse.java b/server/src/main/java/org/opensearch/action/main/MainResponse.java index 691bbda512275..0fbfdab9ba294 100644 --- a/server/src/main/java/org/opensearch/action/main/MainResponse.java +++ b/server/src/main/java/org/opensearch/action/main/MainResponse.java @@ -33,7 +33,6 @@ package org.opensearch.action.main; import org.opensearch.Build; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.ActionResponse; import org.opensearch.cluster.ClusterName; @@ -71,9 +70,6 @@ public class MainResponse extends ActionResponse implements ToXContentObject { clusterName = new ClusterName(in); clusterUuid = in.readString(); build = Build.readBuild(in); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - in.readBoolean(); - } } public MainResponse(String nodeName, Version version, ClusterName clusterName, String clusterUuid, Build build) { @@ -111,9 +107,6 @@ public void writeTo(StreamOutput out) throws IOException { clusterName.writeTo(out); out.writeString(clusterUuid); Build.writeBuild(build, out); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeBoolean(true); - } } @Override diff --git a/server/src/main/java/org/opensearch/action/search/GetAllPitNodeRequest.java 
index c90f75e3c0aed..de0c0dd9bbfc3 100644
--- a/server/src/main/java/org/opensearch/action/search/GetAllPitNodeRequest.java
+++ b/server/src/main/java/org/opensearch/action/search/GetAllPitNodeRequest.java
@@ -9,16 +9,16 @@
 
 package org.opensearch.action.search;
 
-import org.opensearch.action.support.nodes.BaseNodeRequest;
 import org.opensearch.common.io.stream.StreamInput;
 import org.opensearch.common.io.stream.StreamOutput;
+import org.opensearch.transport.TransportRequest;
 
 import java.io.IOException;
 
 /**
  * Inner node get all pits request
  */
-public class GetAllPitNodeRequest extends BaseNodeRequest {
+public class GetAllPitNodeRequest extends TransportRequest {
 
     public GetAllPitNodeRequest() {
         super();
diff --git a/server/src/main/java/org/opensearch/action/search/MultiSearchResponse.java b/server/src/main/java/org/opensearch/action/search/MultiSearchResponse.java
index 6c25a16a65c75..c4ba3becbc151 100644
--- a/server/src/main/java/org/opensearch/action/search/MultiSearchResponse.java
+++ b/server/src/main/java/org/opensearch/action/search/MultiSearchResponse.java
@@ -32,7 +32,6 @@
 
 package org.opensearch.action.search;
 
-import org.opensearch.LegacyESVersion;
 import org.opensearch.OpenSearchException;
 import org.opensearch.ExceptionsHelper;
 import org.opensearch.action.ActionResponse;
@@ -147,11 +146,7 @@ public MultiSearchResponse(StreamInput in) throws IOException {
         for (int i = 0; i < items.length; i++) {
             items[i] = new Item(in);
         }
-        if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) {
-            tookInMillis = in.readVLong();
-        } else {
-            tookInMillis = 0L;
-        }
+        tookInMillis = in.readVLong();
     }
 
     public MultiSearchResponse(Item[] items, long tookInMillis) {
@@ -184,9 +179,7 @@ public void writeTo(StreamOutput out) throws IOException {
         for (Item item : items) {
             item.writeTo(out);
         }
-        if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) {
-            out.writeVLong(tookInMillis);
-        }
+        out.writeVLong(tookInMillis);
     }
 
     @Override
diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequest.java b/server/src/main/java/org/opensearch/action/search/SearchRequest.java
index da34dab6383d9..ceb2a150bf352 100644
--- a/server/src/main/java/org/opensearch/action/search/SearchRequest.java
+++ b/server/src/main/java/org/opensearch/action/search/SearchRequest.java
@@ -32,7 +32,6 @@
 
 package org.opensearch.action.search;
 
-import org.opensearch.LegacyESVersion;
 import org.opensearch.Version;
 import org.opensearch.action.ActionRequest;
 import org.opensearch.action.ActionRequestValidationException;
@@ -237,11 +236,7 @@ public SearchRequest(StreamInput in) throws IOException {
         requestCache = in.readOptionalBoolean();
         batchedReduceSize = in.readVInt();
         maxConcurrentShardRequests = in.readVInt();
-        if (in.getVersion().onOrAfter(LegacyESVersion.V_7_7_0)) {
-            preFilterShardSize = in.readOptionalVInt();
-        } else {
-            preFilterShardSize = in.readVInt();
-        }
+        preFilterShardSize = in.readOptionalVInt();
         allowPartialSearchResults = in.readOptionalBoolean();
         localClusterAlias = in.readOptionalString();
         if (localClusterAlias != null) {
@@ -251,9 +246,7 @@
             absoluteStartMillis = DEFAULT_ABSOLUTE_START_MILLIS;
             finalReduce = true;
         }
-        if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) {
-            ccsMinimizeRoundtrips = in.readBoolean();
-        }
+        ccsMinimizeRoundtrips = in.readBoolean();
         if (in.getVersion().onOrAfter(Version.V_1_1_0)) {
             cancelAfterTimeInterval = in.readOptionalTimeValue();
         }
@@ -277,20 +270,14 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeOptionalBoolean(requestCache);
         out.writeVInt(batchedReduceSize);
         out.writeVInt(maxConcurrentShardRequests);
-        if (out.getVersion().onOrAfter(LegacyESVersion.V_7_7_0)) {
-            out.writeOptionalVInt(preFilterShardSize);
-        } else {
-            out.writeVInt(preFilterShardSize == null ? DEFAULT_BATCHED_REDUCE_SIZE : preFilterShardSize);
-        }
+        out.writeOptionalVInt(preFilterShardSize);
         out.writeOptionalBoolean(allowPartialSearchResults);
         out.writeOptionalString(localClusterAlias);
         if (localClusterAlias != null) {
             out.writeVLong(absoluteStartMillis);
             out.writeBoolean(finalReduce);
         }
-        if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) {
-            out.writeBoolean(ccsMinimizeRoundtrips);
-        }
+        out.writeBoolean(ccsMinimizeRoundtrips);
         if (out.getVersion().onOrAfter(Version.V_1_1_0)) {
             out.writeOptionalTimeValue(cancelAfterTimeInterval);
         }
diff --git a/server/src/main/java/org/opensearch/action/search/SearchResponse.java b/server/src/main/java/org/opensearch/action/search/SearchResponse.java
index 06ac642a0547f..3f01b92263b17 100644
--- a/server/src/main/java/org/opensearch/action/search/SearchResponse.java
+++ b/server/src/main/java/org/opensearch/action/search/SearchResponse.java
@@ -33,7 +33,6 @@
 package org.opensearch.action.search;
 
 import org.apache.lucene.search.TotalHits;
-import org.opensearch.LegacyESVersion;
 import org.opensearch.action.ActionResponse;
 import org.opensearch.common.Nullable;
 import org.opensearch.common.ParseField;
@@ -109,11 +108,7 @@ public SearchResponse(StreamInput in) throws IOException {
         scrollId = in.readOptionalString();
         tookInMillis = in.readVLong();
         skippedShards = in.readVInt();
-        if (in.getVersion().onOrAfter(LegacyESVersion.V_7_10_0)) {
-            pointInTimeId = in.readOptionalString();
-        } else {
-            pointInTimeId = null;
-        }
+        pointInTimeId = in.readOptionalString();
     }
 
     public SearchResponse(
@@ -462,9 +457,7 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeOptionalString(scrollId);
         out.writeVLong(tookInMillis);
         out.writeVInt(skippedShards);
-        if (out.getVersion().onOrAfter(LegacyESVersion.V_7_10_0)) {
-            out.writeOptionalString(pointInTimeId);
-        }
+        out.writeOptionalString(pointInTimeId);
     }
 
     @Override
diff --git a/server/src/main/java/org/opensearch/action/search/SearchTransportService.java b/server/src/main/java/org/opensearch/action/search/SearchTransportService.java
index 241b3de72a258..8cf92ae5a23d8 100644
--- a/server/src/main/java/org/opensearch/action/search/SearchTransportService.java
+++ b/server/src/main/java/org/opensearch/action/search/SearchTransportService.java
@@ -32,8 +32,6 @@
 
 package org.opensearch.action.search;
 
-import org.opensearch.LegacyESVersion;
-import org.opensearch.Version;
 import org.opensearch.action.ActionListener;
 import org.opensearch.action.ActionListenerResponseHandler;
 import org.opensearch.action.IndicesRequest;
@@ -494,10 +492,6 @@ public void writeTo(StreamOutput out) throws IOException {
         }
     }
 
-    static boolean keepStatesInContext(Version version) {
-        return version.before(LegacyESVersion.V_7_10_0);
-    }
-
     public static void registerRequestHandler(TransportService transportService, SearchService searchService) {
         transportService.registerRequestHandler(
             FREE_CONTEXT_SCROLL_ACTION_NAME,
@@ -549,7 +543,7 @@ public static void registerRequestHandler(TransportService transportService, Sea
             ShardSearchRequest::new,
             (request, channel, task) -> searchService.executeDfsPhase(
                 request,
-                keepStatesInContext(channel.getVersion()),
+                false,
                 (SearchShardTask) task,
                 new ChannelActionListener<>(channel, DFS_ACTION_NAME, request)
             )
@@ -564,7 +558,7 @@ public static void registerRequestHandler(TransportService transportService, Sea
             (request, channel, task) -> {
                 searchService.executeQueryPhase(
                     request,
-                    keepStatesInContext(channel.getVersion()),
+                    false,
                     (SearchShardTask) task,
                     new ChannelActionListener<>(channel, QUERY_ACTION_NAME, request)
                 );
diff --git a/server/src/main/java/org/opensearch/action/search/TransportSearchHelper.java b/server/src/main/java/org/opensearch/action/search/TransportSearchHelper.java
index 3e5c76aa1f66f..da432a73c8a1d 100644
--- a/server/src/main/java/org/opensearch/action/search/TransportSearchHelper.java
+++ b/server/src/main/java/org/opensearch/action/search/TransportSearchHelper.java
@@ -32,7 +32,6 @@
 
 package org.opensearch.action.search;
 
-import org.opensearch.LegacyESVersion;
 import org.opensearch.Version;
 import org.opensearch.common.bytes.BytesReference;
 import org.opensearch.common.io.stream.BytesStreamInput;
@@ -62,18 +61,13 @@ static InternalScrollSearchRequest internalScrollSearchRequest(ShardSearchContex
     }
 
     static String buildScrollId(AtomicArray<? extends SearchPhaseResult> searchPhaseResults, Version version) {
-        boolean includeContextUUID = version.onOrAfter(LegacyESVersion.V_7_7_0);
         try {
             BytesStreamOutput out = new BytesStreamOutput();
-            if (includeContextUUID) {
-                out.writeString(INCLUDE_CONTEXT_UUID);
-            }
+            out.writeString(INCLUDE_CONTEXT_UUID);
             out.writeString(searchPhaseResults.length() == 1 ? ParsedScrollId.QUERY_AND_FETCH_TYPE : ParsedScrollId.QUERY_THEN_FETCH_TYPE);
             out.writeVInt(searchPhaseResults.asList().size());
             for (SearchPhaseResult searchPhaseResult : searchPhaseResults.asList()) {
-                if (includeContextUUID) {
-                    out.writeString(searchPhaseResult.getContextId().getSessionId());
-                }
+                out.writeString(searchPhaseResult.getContextId().getSessionId());
                 out.writeLong(searchPhaseResult.getContextId().getId());
                 SearchShardTarget searchShardTarget = searchPhaseResult.getSearchShardTarget();
                 if (searchShardTarget.getClusterAlias() != null) {
diff --git a/server/src/main/java/org/opensearch/action/support/IndicesOptions.java b/server/src/main/java/org/opensearch/action/support/IndicesOptions.java
index 3fb572d0ecbbf..2cf211fa6da3c 100644
--- a/server/src/main/java/org/opensearch/action/support/IndicesOptions.java
+++ b/server/src/main/java/org/opensearch/action/support/IndicesOptions.java
@@ -31,7 +31,6 @@
 
 package org.opensearch.action.support;
 
-import org.opensearch.LegacyESVersion;
 import org.opensearch.OpenSearchParseException;
 import org.opensearch.common.ParseField;
 import org.opensearch.common.io.stream.StreamInput;
@@ -276,21 +275,12 @@ public EnumSet<WildcardStates> getExpandWildcards() {
 
     public void writeIndicesOptions(StreamOutput out) throws IOException {
         EnumSet