diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 1ba3ee562317a..914426eebe35e 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -49,4 +49,5 @@ BWC_VERSION: - "2.1.1" - "2.2.0" - "2.2.1" + - "2.2.2" - "2.3.0" diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 9682461d9e110..07755ef69c6a3 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -4,869 +4,1391 @@ updates: package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /benchmarks/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/reaper/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/distribution/archives/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/distribution/archives/darwin-tar/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/distribution/archives/oss-darwin-tar/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/distribution/bwc/bugfix/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/distribution/bwc/minor/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/testKit/opensearch-build-resources/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/testKit/opensearch.build/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/testKit/reaper/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/testKit/symbolic-link-preserving-tar/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/testKit/testingConventions/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/testKit/thirdPartyAudit/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: 
/buildSrc/src/testKit/thirdPartyAudit/sample_jars/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /client/benchmark/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /client/client-benchmark-noop-api-plugin/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /client/rest/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /client/rest-high-level/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /client/sniffer/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /client/test/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/archives/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/archives/darwin-tar/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/archives/integ-test-zip/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/archives/linux-arm64-tar/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/archives/linux-tar/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/archives/no-jdk-darwin-tar/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/archives/no-jdk-linux-tar/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/archives/no-jdk-windows-zip/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/archives/windows-zip/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/bwc/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/bwc/bugfix/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/bwc/maintenance/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/bwc/minor/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - 
"dependencies" - directory: /distribution/bwc/staged/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/docker/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/docker/docker-arm64-export/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/docker/docker-build-context/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/docker/docker-export/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/packages/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/packages/arm64-deb/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/packages/arm64-rpm/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/packages/deb/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/packages/no-jdk-deb/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/packages/no-jdk-rpm/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/packages/rpm/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/tools/java-version-checker/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/tools/keystore-cli/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/tools/launchers/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/tools/plugin-cli/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/tools/upgrade-cli/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /doc-tools/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /doc-tools/missing-doclet/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /libs/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /libs/cli/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + 
- "dependencies" - directory: /libs/core/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /libs/dissect/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /libs/geo/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /libs/grok/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /libs/nio/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /libs/plugin-classloader/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /libs/secure-sm/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /libs/ssl-config/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /libs/x-content/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/aggs-matrix-stats/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/analysis-common/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/geo/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/ingest-common/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/ingest-geoip/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/ingest-user-agent/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/lang-expression/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/lang-mustache/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/lang-painless/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/lang-painless/spi/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/mapper-extras/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/opensearch-dashboards/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - 
directory: /modules/parent-join/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/percolator/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/rank-eval/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/reindex/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/repository-url/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/systemd/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/transport-netty4/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/analysis-icu/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/analysis-kuromoji/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/analysis-nori/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/analysis-phonetic/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/analysis-smartcn/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/analysis-stempel/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/analysis-ukrainian/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/discovery-azure-classic/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/discovery-ec2/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/discovery-ec2/qa/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/discovery-ec2/qa/amazon-ec2/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/discovery-gce/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/discovery-gce/qa/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/discovery-gce/qa/gce/ open-pull-requests-limit: 1 package-ecosystem: 
gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/examples/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/examples/custom-settings/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/examples/custom-significance-heuristic/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/examples/custom-suggester/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/examples/painless-allowlist/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/examples/rescore/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/examples/rest-handler/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/examples/script-expert-scoring/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/ingest-attachment/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/mapper-annotated-text/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/mapper-murmur3/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/mapper-size/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/repository-azure/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/repository-gcs/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/repository-hdfs/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/repository-s3/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/store-smb/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/transport-nio/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/ccs-unavailable-clusters/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/die-with-dignity/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: 
weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/evil-tests/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/full-cluster-restart/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/logging-config/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/mixed-cluster/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/multi-cluster-search/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/no-bootstrap-tests/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/centos-6/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/centos-7/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/debian-8/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/debian-9/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/fedora-28/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/fedora-29/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/oel-6/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/oel-7/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/sles-12/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/ubuntu-1604/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/ubuntu-1804/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/windows-2012r2/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/windows-2016/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/remote-clusters/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/repository-multi-version/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" 
- directory: /qa/rolling-upgrade/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/smoke-test-http/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/smoke-test-ingest-disabled/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/smoke-test-ingest-with-all-dependencies/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/smoke-test-multinode/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/smoke-test-plugins/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/translog-policy/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/unconfigured-node-name/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/verify-version-constants/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/wildfly/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /rest-api-spec/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /sandbox/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /sandbox/libs/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /sandbox/modules/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /sandbox/plugins/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /server/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /test/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /test/external-modules/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /test/external-modules/delayed-aggs/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /test/fixtures/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /test/fixtures/azure-fixture/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /test/fixtures/gcs-fixture/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - 
"dependencies" - directory: /test/fixtures/hdfs-fixture/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /test/fixtures/krb5kdc-fixture/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /test/fixtures/minio-fixture/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /test/fixtures/old-elasticsearch/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /test/fixtures/s3-fixture/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /test/framework/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /test/logger-usage/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" version: 2 diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index c76e27d6dfc7d..4537cadf71074 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -10,7 +10,7 @@ - [ ] New functionality has been documented. - [ ] New functionality has javadoc added - [ ] Commits are signed per the DCO using --signoff -- [ ] Commit changes are listed out in CHANGELOG.md file (See: [Changelog](../CONTRIBUTING.md#changelog)) +- [ ] Commit changes are listed out in CHANGELOG.md file (See: [Changelog](../blob/main/CONTRIBUTING.md#changelog)) By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license. For more information on following Developer Certificate of Origin and signing off your commits, please check [here](https://github.com/opensearch-project/OpenSearch/blob/main/CONTRIBUTING.md#developer-certificate-of-origin). 
diff --git a/.github/workflows/changelog_verifier.yml b/.github/workflows/changelog_verifier.yml index 505b02426f22c..cda5dde462068 100644 --- a/.github/workflows/changelog_verifier.yml +++ b/.github/workflows/changelog_verifier.yml @@ -11,12 +11,6 @@ jobs: - uses: actions/checkout@v3 with: token: ${{ secrets.GITHUB_TOKEN }} - ref: ${{ github.event.pull_request.head.ref }} - - - uses: dangoslen/dependabot-changelog-helper@v1 - - - uses: stefanzweifel/git-auto-commit-action@v4 - with: - commit_message: "Update changelog" + ref: ${{ github.event.pull_request.head.sha }} - uses: dangoslen/changelog-enforcer@v3 diff --git a/.github/workflows/dependabot_pr.yml b/.github/workflows/dependabot_pr.yml index 2ac904bf4ccf7..ed98bae8978ed 100644 --- a/.github/workflows/dependabot_pr.yml +++ b/.github/workflows/dependabot_pr.yml @@ -47,3 +47,17 @@ jobs: commit_user_name: dependabot[bot] commit_user_email: support@github.com commit_options: '--signoff' + + - name: Update the changelog + uses: dangoslen/dependabot-changelog-helper@v1 + with: + version: 'Unreleased' + + - name: Commit the changes + uses: stefanzweifel/git-auto-commit-action@v4 + with: + commit_message: "Update changelog" + branch: ${{ github.head_ref }} + commit_user_name: dependabot[bot] + commit_user_email: support@github.com + commit_options: '--signoff' diff --git a/.linelint.yml b/.linelint.yml index 6240c8b3d7a96..ec947019f8ab6 100644 --- a/.linelint.yml +++ b/.linelint.yml @@ -7,6 +7,7 @@ ignore: - .idea/ - '*.sha1' - '*.txt' + - 'CHANGELOG.md' - '.github/CODEOWNERS' - 'buildSrc/src/testKit/opensearch.build/LICENSE' - 'buildSrc/src/testKit/opensearch.build/NOTICE' diff --git a/CHANGELOG.md b/CHANGELOG.md index 2a02bfdaf0320..76f134f10c29e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,16 +4,60 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ## [Unreleased] ### Added - Github workflow for changelog verification ([#4085](https://github.com/opensearch-project/OpenSearch/pull/4085)) +- Point in time rest layer changes for create and delete PIT API ([#4064](https://github.com/opensearch-project/OpenSearch/pull/4064)) +- Added @dreamer-89 as an Opensearch maintainer ([#4342](https://github.com/opensearch-project/OpenSearch/pull/4342)) +- Added release notes for 1.3.5 ([#4343](https://github.com/opensearch-project/OpenSearch/pull/4343)) +- Added release notes for 2.2.1 ([#4344](https://github.com/opensearch-project/OpenSearch/pull/4344)) +- Label configuration for dependabot PRs ([#4348](https://github.com/opensearch-project/OpenSearch/pull/4348)) +- Support for HTTP/2 (server-side) ([#3847](https://github.com/opensearch-project/OpenSearch/pull/3847)) +- BWC version 2.2.2 ([#4383](https://github.com/opensearch-project/OpenSearch/pull/4383)) + +### Dependencies +- Bumps `com.diffplug.spotless` from 6.9.1 to 6.10.0 +- Bumps `xmlbeans` from 5.1.0 to 5.1.1 ### Changed +- Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308)) +- Use RemoteSegmentStoreDirectory instead of RemoteDirectory ([#4240](https://github.com/opensearch-project/OpenSearch/pull/4240)) +- Plugin ZIP publication groupId value is configurable ([#4156](https://github.com/opensearch-project/OpenSearch/pull/4156)) +- Update to Netty 4.1.80.Final ([#4359](https://github.com/opensearch-project/OpenSearch/pull/4359)) ### Deprecated ### Removed ### Fixed +- `opensearch-service.bat start` and `opensearch-service.bat manager` failing to run 
([#4289](https://github.com/opensearch-project/OpenSearch/pull/4289)) +- PR reference to checkout code for changelog verifier ([#4296](https://github.com/opensearch-project/OpenSearch/pull/4296)) +- `opensearch.bat` and `opensearch-service.bat install` failing to run, missing logs directory ([#4305](https://github.com/opensearch-project/OpenSearch/pull/4305)) +- Restore using the class ClusterInfoRequest and ClusterInfoRequestBuilder from package 'org.opensearch.action.support.master.info' for subclasses ([#4307](https://github.com/opensearch-project/OpenSearch/pull/4307)) +- Do not fail replica shard due to primary closure ([#4133](https://github.com/opensearch-project/OpenSearch/pull/4133)) +- Add timeout on Mockito.verify to reduce flakyness in testReplicationOnDone test([#4314](https://github.com/opensearch-project/OpenSearch/pull/4314)) +- Commit workflow for dependabot changelog helper ([#4331](https://github.com/opensearch-project/OpenSearch/pull/4331)) +- Fixed cancellation of segment replication events ([#4225](https://github.com/opensearch-project/OpenSearch/pull/4225)) +- Bugs for dependabot changelog verifier workflow ([#4364](https://github.com/opensearch-project/OpenSearch/pull/4364)) +- Fix flaky random test `NRTReplicationEngineTests.testUpdateSegments` ([#4352](https://github.com/opensearch-project/OpenSearch/pull/4352)) + +### Security +- CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341)) + +## [2.x] +### Added +- Github workflow for changelog verification ([#4085](https://github.com/opensearch-project/OpenSearch/pull/4085)) +- Label configuration for dependabot PRs ([#4348](https://github.com/opensearch-project/OpenSearch/pull/4348)) + +### Changed + +### Deprecated + +### Removed + +### Fixed +- PR reference to checkout code for changelog verifier ([#4296](https://github.com/opensearch-project/OpenSearch/pull/4296)) +- Commit workflow for dependabot changelog helper ([#4331](https://github.com/opensearch-project/OpenSearch/pull/4331)) ### Security [Unreleased]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...HEAD +[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 16821b1915032..fc02d52f0bc3b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -131,8 +131,6 @@ As a contributor, you must ensure that every pull request has the changes listed Adding in the change is two step process - 1. Add your changes to the corresponding section within the CHANGELOG file with dummy pull request information, publish the PR - `Your change here ([#PR_NUMBER](PR_URL))` - 2. Update the entry for your change in [`CHANGELOG.md`](CHANGELOG.md) and make sure that you reference the pull request there. 
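Likewise, the two steps appended to .github/workflows/dependabot_pr.yml further up would read roughly as follows once applied; the step names, actions, and values are copied from that hunk, while the indentation is a reconstruction and may differ slightly from the committed file.

      - name: Update the changelog
        uses: dangoslen/dependabot-changelog-helper@v1
        with:
          version: 'Unreleased'

      - name: Commit the changes
        uses: stefanzweifel/git-auto-commit-action@v4
        with:
          commit_message: "Update changelog"
          branch: ${{ github.head_ref }}
          commit_user_name: dependabot[bot]
          commit_user_email: support@github.com
          commit_options: '--signoff'
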
diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 94e649a634c7f..2f54656b2ab59 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -23,6 +23,7 @@ | Ryan Bogan | [ryanbogan](https://github.com/ryanbogan) | Amazon | | Sarat Vemulapalli | [saratvemulapalli](https://github.com/saratvemulapalli) | Amazon | | Shweta Thareja |[shwetathareja](https://github.com/shwetathareja) | Amazon | +| Suraj Singh |[dreamer-89](https://github.com/dreamer-89) | Amazon | | Tianli Feng | [tlfeng](https://github.com/tlfeng) | Amazon | | Vacha Shah | [VachaShah](https://github.com/VachaShah) | Amazon | | Xue Zhou | [xuezhou25](https://github.com/xuezhou25) | Amazon | diff --git a/build.gradle b/build.gradle index ce5ea6cdd7e11..a1f4f2d04883a 100644 --- a/build.gradle +++ b/build.gradle @@ -55,7 +55,7 @@ plugins { id 'lifecycle-base' id 'opensearch.docker-support' id 'opensearch.global-build-info' - id "com.diffplug.spotless" version "6.9.1" apply false + id "com.diffplug.spotless" version "6.10.0" apply false id "org.gradle.test-retry" version "1.4.0" apply false id "test-report-aggregation" id 'jacoco-report-aggregation' diff --git a/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java b/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java index d83384ec7d172..70c3737ba3674 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java @@ -9,7 +9,8 @@ import org.gradle.api.Plugin; import org.gradle.api.Project; -import org.gradle.api.publish.Publication; +import org.gradle.api.logging.Logger; +import org.gradle.api.logging.Logging; import org.gradle.api.publish.PublishingExtension; import org.gradle.api.publish.maven.MavenPublication; import org.gradle.api.publish.maven.plugins.MavenPublishPlugin; @@ -18,6 +19,9 @@ import org.gradle.api.Task; public class Publish implements Plugin { + + private static final Logger LOGGER = Logging.getLogger(Publish.class); + public final static String EXTENSION_NAME = "zipmavensettings"; public final static String PUBLICATION_NAME = "pluginZip"; public final static String STAGING_REPO = "zipStaging"; @@ -37,27 +41,25 @@ public static void configMaven(Project project) { }); }); publishing.publications(publications -> { - final Publication publication = publications.findByName(PUBLICATION_NAME); - if (publication == null) { - publications.create(PUBLICATION_NAME, MavenPublication.class, mavenZip -> { - String zipGroup = "org.opensearch.plugin"; - String zipArtifact = project.getName(); - String zipVersion = getProperty("version", project); - mavenZip.artifact(project.getTasks().named("bundlePlugin")); - mavenZip.setGroupId(zipGroup); - mavenZip.setArtifactId(zipArtifact); - mavenZip.setVersion(zipVersion); - }); - } else { - final MavenPublication mavenZip = (MavenPublication) publication; - String zipGroup = "org.opensearch.plugin"; - String zipArtifact = project.getName(); - String zipVersion = getProperty("version", project); - mavenZip.artifact(project.getTasks().named("bundlePlugin")); - mavenZip.setGroupId(zipGroup); - mavenZip.setArtifactId(zipArtifact); - mavenZip.setVersion(zipVersion); + MavenPublication mavenZip = (MavenPublication) publications.findByName(PUBLICATION_NAME); + + if (mavenZip == null) { + mavenZip = publications.create(PUBLICATION_NAME, MavenPublication.class); } + + String groupId = mavenZip.getGroupId(); + if (groupId == null) { + // The groupId is not customized thus we get the value from "project.group". 
+ // See https://docs.gradle.org/current/userguide/publishing_maven.html#sec:identity_values_in_the_generated_pom + groupId = getProperty("group", project); + } + + String artifactId = project.getName(); + String pluginVersion = getProperty("version", project); + mavenZip.artifact(project.getTasks().named("bundlePlugin")); + mavenZip.setGroupId(groupId); + mavenZip.setArtifactId(artifactId); + mavenZip.setVersion(pluginVersion); }); }); } diff --git a/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java b/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java index 8c1314c4b4394..06632e2dfa476 100644 --- a/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java +++ b/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java @@ -10,19 +10,21 @@ import org.gradle.testkit.runner.BuildResult; import org.gradle.testkit.runner.GradleRunner; -import org.gradle.testfixtures.ProjectBuilder; -import org.gradle.api.Project; +import org.gradle.testkit.runner.UnexpectedBuildFailure; import org.opensearch.gradle.test.GradleUnitTestCase; import org.junit.After; import org.junit.Before; import org.junit.Test; import org.junit.rules.TemporaryFolder; -import java.io.IOException; -import org.gradle.api.publish.maven.tasks.PublishToMavenRepository; import java.io.File; +import java.io.FileReader; import java.io.FileWriter; +import java.io.IOException; import java.io.Writer; +import java.net.URISyntaxException; +import java.net.URL; +import java.nio.charset.StandardCharsets; import java.nio.file.Files; import static org.gradle.testkit.runner.TaskOutcome.SUCCESS; @@ -30,14 +32,16 @@ import org.apache.maven.model.Model; import org.apache.maven.model.io.xpp3.MavenXpp3Reader; import org.codehaus.plexus.util.xml.pull.XmlPullParserException; -import java.io.FileReader; -import org.gradle.api.tasks.bundling.Zip; +import java.nio.file.Path; +import java.nio.file.Paths; import java.util.List; -import java.util.ArrayList; public class PublishTests extends GradleUnitTestCase { private TemporaryFolder projectDir; + private static final String TEMPLATE_RESOURCE_FOLDER = "pluginzip"; + private final String PROJECT_NAME = "sample-plugin"; + private final String ZIP_PUBLISH_TASK = "publishPluginZipPublicationToZipStagingRepository"; @Before public void setUp() throws IOException { @@ -51,155 +55,200 @@ public void tearDown() { } @Test - public void testZipPublish() throws IOException, XmlPullParserException { - String zipPublishTask = "publishPluginZipPublicationToZipStagingRepository"; - prepareProjectForPublishTask(zipPublishTask); - - // Generate the build.gradle file - String buildFileContent = "apply plugin: 'maven-publish' \n" - + "apply plugin: 'java' \n" - + "publishing {\n" - + " repositories {\n" - + " maven {\n" - + " url = 'local-staging-repo/'\n" - + " name = 'zipStaging'\n" - + " }\n" - + " }\n" - + " publications {\n" - + " pluginZip(MavenPublication) {\n" - + " groupId = 'org.opensearch.plugin' \n" - + " artifactId = 'sample-plugin' \n" - + " version = '2.0.0.0' \n" - + " artifact('sample-plugin.zip') \n" - + " }\n" - + " }\n" - + "}"; - writeString(projectDir.newFile("build.gradle"), buildFileContent); - // Execute the task publishPluginZipPublicationToZipStagingRepository - List allArguments = new ArrayList(); - allArguments.add("build"); - allArguments.add(zipPublishTask); - GradleRunner runner = GradleRunner.create(); - runner.forwardOutput(); - runner.withPluginClasspath(); - runner.withArguments(allArguments); - 
runner.withProjectDir(projectDir.getRoot()); + public void missingGroupValue() throws IOException, URISyntaxException, XmlPullParserException { + GradleRunner runner = prepareGradleRunnerFromTemplate("missingGroupValue.gradle"); + Exception e = assertThrows(UnexpectedBuildFailure.class, runner::build); + assertTrue(e.getMessage().contains("Invalid publication 'pluginZip': groupId cannot be empty.")); + } + + /** + * This would be the most common use case where user declares Maven publication entity with basic info + * and the resulting POM file will use groupId and version values from the Gradle project object. + */ + @Test + public void groupAndVersionValue() throws IOException, URISyntaxException, XmlPullParserException { + GradleRunner runner = prepareGradleRunnerFromTemplate("groupAndVersionValue.gradle"); BuildResult result = runner.build(); - // Check if task publishMavenzipPublicationToZipstagingRepository has ran well - assertEquals(SUCCESS, result.task(":" + zipPublishTask).getOutcome()); - // check if the zip has been published to local staging repo + + /** Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */ + assertEquals(SUCCESS, result.task(":" + "build").getOutcome()); + assertEquals(SUCCESS, result.task(":" + ZIP_PUBLISH_TASK).getOutcome()); + + // check if both the zip and pom files have been published to local staging repo assertTrue( - new File(projectDir.getRoot(), "local-staging-repo/org/opensearch/plugin/sample-plugin/2.0.0.0/sample-plugin-2.0.0.0.zip") - .exists() + new File( + projectDir.getRoot(), + String.join( + File.separator, + "build", + "local-staging-repo", + "org", + "custom", + "group", + PROJECT_NAME, + "2.0.0.0", + PROJECT_NAME + "-2.0.0.0.pom" + ) + ).exists() ); - assertEquals(SUCCESS, result.task(":" + "build").getOutcome()); - // Parse the maven file and validate the groupID to org.opensearch.plugin + assertTrue( + new File( + projectDir.getRoot(), + String.join( + File.separator, + "build", + "local-staging-repo", + "org", + "custom", + "group", + PROJECT_NAME, + "2.0.0.0", + PROJECT_NAME + "-2.0.0.0.zip" + ) + ).exists() + ); + + // Parse the maven file and validate the groupID MavenXpp3Reader reader = new MavenXpp3Reader(); Model model = reader.read( new FileReader( - new File(projectDir.getRoot(), "local-staging-repo/org/opensearch/plugin/sample-plugin/2.0.0.0/sample-plugin-2.0.0.0.pom") + new File( + projectDir.getRoot(), + String.join( + File.separator, + "build", + "local-staging-repo", + "org", + "custom", + "group", + PROJECT_NAME, + "2.0.0.0", + PROJECT_NAME + "-2.0.0.0.pom" + ) + ) ) ); - assertEquals(model.getGroupId(), "org.opensearch.plugin"); + assertEquals(model.getVersion(), "2.0.0.0"); + assertEquals(model.getGroupId(), "org.custom.group"); + assertEquals(model.getUrl(), "https://github.com/doe/sample-plugin"); } + /** + * In this case the Publication entity is completely missing but still the POM file is generated using the default + * values including the groupId and version values obtained from the Gradle project object. 
+ */ @Test - public void testZipPublishWithPom() throws IOException, XmlPullParserException { - String zipPublishTask = "publishPluginZipPublicationToZipStagingRepository"; - Project project = prepareProjectForPublishTask(zipPublishTask); - - // Generate the build.gradle file - String buildFileContent = "apply plugin: 'maven-publish' \n" - + "apply plugin: 'java' \n" - + "publishing {\n" - + " repositories {\n" - + " maven {\n" - + " url = 'local-staging-repo/'\n" - + " name = 'zipStaging'\n" - + " }\n" - + " }\n" - + " publications {\n" - + " pluginZip(MavenPublication) {\n" - + " groupId = 'org.opensearch.plugin' \n" - + " artifactId = 'sample-plugin' \n" - + " version = '2.0.0.0' \n" - + " artifact('sample-plugin.zip') \n" - + " pom {\n" - + " name = 'sample-plugin'\n" - + " description = 'sample-description'\n" - + " licenses {\n" - + " license {\n" - + " name = \"The Apache License, Version 2.0\"\n" - + " url = \"http://www.apache.org/licenses/LICENSE-2.0.txt\"\n" - + " }\n" - + " }\n" - + " developers {\n" - + " developer {\n" - + " name = 'opensearch'\n" - + " url = 'https://github.com/opensearch-project/OpenSearch'\n" - + " }\n" - + " }\n" - + " url = 'https://github.com/opensearch-project/OpenSearch'\n" - + " scm {\n" - + " url = 'https://github.com/opensearch-project/OpenSearch'\n" - + " }\n" - + " }" - + " }\n" - + " }\n" - + "}"; - writeString(projectDir.newFile("build.gradle"), buildFileContent); - // Execute the task publishPluginZipPublicationToZipStagingRepository - List allArguments = new ArrayList(); - allArguments.add("build"); - allArguments.add(zipPublishTask); - GradleRunner runner = GradleRunner.create(); - runner.forwardOutput(); - runner.withPluginClasspath(); - runner.withArguments(allArguments); - runner.withProjectDir(projectDir.getRoot()); + public void missingPOMEntity() throws IOException, URISyntaxException, XmlPullParserException { + GradleRunner runner = prepareGradleRunnerFromTemplate("missingPOMEntity.gradle"); BuildResult result = runner.build(); - // Check if task publishMavenzipPublicationToZipstagingRepository has ran well - assertEquals(SUCCESS, result.task(":" + zipPublishTask).getOutcome()); - // check if the zip has been published to local staging repo - assertTrue( - new File(projectDir.getRoot(), "local-staging-repo/org/opensearch/plugin/sample-plugin/2.0.0.0/sample-plugin-2.0.0.0.zip") - .exists() + + /** Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */ + assertEquals(SUCCESS, result.task(":" + "build").getOutcome()); + assertEquals(SUCCESS, result.task(":" + ZIP_PUBLISH_TASK).getOutcome()); + + // Parse the maven file and validate it + MavenXpp3Reader reader = new MavenXpp3Reader(); + Model model = reader.read( + new FileReader( + new File( + projectDir.getRoot(), + String.join( + File.separator, + "build", + "local-staging-repo", + "org", + "custom", + "group", + PROJECT_NAME, + "2.0.0.0", + PROJECT_NAME + "-2.0.0.0.pom" + ) + ) + ) ); + + assertEquals(model.getArtifactId(), PROJECT_NAME); + assertEquals(model.getGroupId(), "org.custom.group"); + assertEquals(model.getVersion(), "2.0.0.0"); + assertEquals(model.getPackaging(), "zip"); + + assertNull(model.getName()); + assertNull(model.getDescription()); + + assertEquals(0, model.getDevelopers().size()); + assertEquals(0, model.getContributors().size()); + assertEquals(0, model.getLicenses().size()); + } + + /** + * In some cases we need the POM groupId value to be different from the Gradle "project.group" value hence we + * allow for groupId customization (it will 
override whatever the Gradle "project.group" value is). + */ + @Test + public void customizedGroupValue() throws IOException, URISyntaxException, XmlPullParserException { + GradleRunner runner = prepareGradleRunnerFromTemplate("customizedGroupValue.gradle"); + BuildResult result = runner.build(); + + /** Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */ assertEquals(SUCCESS, result.task(":" + "build").getOutcome()); - // Parse the maven file and validate the groupID to org.opensearch.plugin + assertEquals(SUCCESS, result.task(":" + ZIP_PUBLISH_TASK).getOutcome()); + + // Parse the maven file and validate the groupID MavenXpp3Reader reader = new MavenXpp3Reader(); Model model = reader.read( new FileReader( - new File(projectDir.getRoot(), "local-staging-repo/org/opensearch/plugin/sample-plugin/2.0.0.0/sample-plugin-2.0.0.0.pom") + new File( + projectDir.getRoot(), + String.join( + File.separator, + "build", + "local-staging-repo", + "I", + "am", + "customized", + PROJECT_NAME, + "2.0.0.0", + PROJECT_NAME + "-2.0.0.0.pom" + ) + ) ) ); - assertEquals(model.getGroupId(), "org.opensearch.plugin"); - assertEquals(model.getUrl(), "https://github.com/opensearch-project/OpenSearch"); + + assertEquals(model.getGroupId(), "I.am.customized"); } - protected Project prepareProjectForPublishTask(String zipPublishTask) throws IOException { - Project project = ProjectBuilder.builder().build(); - - // Apply the opensearch.pluginzip plugin - project.getPluginManager().apply("opensearch.pluginzip"); - // Check if the plugin has been applied to the project - assertTrue(project.getPluginManager().hasPlugin("opensearch.pluginzip")); - // Check if the project has the task from class PublishToMavenRepository after plugin apply - assertNotNull(project.getTasks().withType(PublishToMavenRepository.class)); - // Create a mock bundlePlugin task - Zip task = project.getTasks().create("bundlePlugin", Zip.class); - Publish.configMaven(project); - // Check if the main task publishPluginZipPublicationToZipStagingRepository exists after plugin apply - assertTrue(project.getTasks().getNames().contains(zipPublishTask)); - assertNotNull("Task to generate: ", project.getTasks().getByName(zipPublishTask)); - // Run Gradle functional tests, but calling a build.gradle file, that resembles the plugin publish behavior - - // Create a sample plugin zip file - File sampleZip = new File(projectDir.getRoot(), "sample-plugin.zip"); - Files.createFile(sampleZip.toPath()); - writeString(projectDir.newFile("settings.gradle"), ""); - - return project; + /** + * If the customized groupId value is invalid (from the Maven POM perspective) then we need to be sure it is + * caught and reported properly. 
+ */ + @Test + public void customizedInvalidGroupValue() throws IOException, URISyntaxException { + GradleRunner runner = prepareGradleRunnerFromTemplate("customizedInvalidGroupValue.gradle"); + Exception e = assertThrows(UnexpectedBuildFailure.class, runner::build); + assertTrue( + e.getMessage().contains("Invalid publication 'pluginZip': groupId ( ) is not a valid Maven identifier ([A-Za-z0-9_\\-.]+).") + ); + } + + private GradleRunner prepareGradleRunnerFromTemplate(String templateName) throws IOException, URISyntaxException { + useTemplateFile(projectDir.newFile("build.gradle"), templateName); + prepareGradleFilesAndSources(); + + GradleRunner runner = GradleRunner.create() + .forwardOutput() + .withPluginClasspath() + .withArguments("build", ZIP_PUBLISH_TASK) + .withProjectDir(projectDir.getRoot()); + + return runner; + } + + private void prepareGradleFilesAndSources() throws IOException { + // A dummy "source" file that is processed with bundlePlugin and put into a ZIP artifact file + File bundleFile = new File(projectDir.getRoot(), PROJECT_NAME + "-source.txt"); + Path zipFile = Files.createFile(bundleFile.toPath()); + // Setting a project name via settings.gradle file + writeString(projectDir.newFile("settings.gradle"), "rootProject.name = '" + PROJECT_NAME + "'"); } private void writeString(File file, String string) throws IOException { @@ -208,4 +257,24 @@ private void writeString(File file, String string) throws IOException { } } + /** + * Write the content of the "template" file into the target file. + * The template file must be located in the {@value TEMPLATE_RESOURCE_FOLDER} folder. + * @param targetFile A target file + * @param templateFile A name of the template file located under {@value TEMPLATE_RESOURCE_FOLDER} folder + */ + private void useTemplateFile(File targetFile, String templateFile) throws IOException, URISyntaxException { + + URL resource = getClass().getClassLoader().getResource(String.join(File.separator, TEMPLATE_RESOURCE_FOLDER, templateFile)); + Path resPath = Paths.get(resource.toURI()).toAbsolutePath(); + List lines = Files.readAllLines(resPath, StandardCharsets.UTF_8); + + try (Writer writer = new FileWriter(targetFile)) { + for (String line : lines) { + writer.write(line); + writer.write(System.lineSeparator()); + } + } + } + } diff --git a/buildSrc/src/test/resources/pluginzip/customizedGroupValue.gradle b/buildSrc/src/test/resources/pluginzip/customizedGroupValue.gradle new file mode 100644 index 0000000000000..1bde3edda2d91 --- /dev/null +++ b/buildSrc/src/test/resources/pluginzip/customizedGroupValue.gradle @@ -0,0 +1,45 @@ +plugins { + id 'java-gradle-plugin' + id 'nebula.maven-base-publish' + id 'opensearch.pluginzip' +} + +group="org.custom.group" +version='2.0.0.0' + +// A bundlePlugin task mockup +tasks.register('bundlePlugin', Zip.class) { + archiveFileName = "sample-plugin-${version}.zip" + destinationDirectory = layout.buildDirectory.dir('distributions') + from layout.projectDirectory.file('sample-plugin-source.txt') +} + +publishing { + publications { + pluginZip(MavenPublication) { + groupId = "I.am.customized" + pom { + name = "sample-plugin" + description = "pluginDescription" + licenses { + license { + name = "The Apache License, Version 2.0" + url = "http://www.apache.org/licenses/LICENSE-2.0.txt" + } + } + developers { + developer { + name = "John Doe" + url = "https://github.com/john-doe/" + organization = "Doe.inc" + organizationUrl = "https://doe.inc/" + } + } + url = "https://github.com/doe/sample-plugin" + scm { + url = 
"https://github.com/doe/sample-plugin" + } + } + } + } +} diff --git a/buildSrc/src/test/resources/pluginzip/customizedInvalidGroupValue.gradle b/buildSrc/src/test/resources/pluginzip/customizedInvalidGroupValue.gradle new file mode 100644 index 0000000000000..b6deeeb12ca6a --- /dev/null +++ b/buildSrc/src/test/resources/pluginzip/customizedInvalidGroupValue.gradle @@ -0,0 +1,45 @@ +plugins { + id 'java-gradle-plugin' + id 'nebula.maven-base-publish' + id 'opensearch.pluginzip' +} + +group="org.custom.group" +version='2.0.0.0' + +// A bundlePlugin task mockup +tasks.register('bundlePlugin', Zip.class) { + archiveFileName = "sample-plugin-${version}.zip" + destinationDirectory = layout.buildDirectory.dir('distributions') + from layout.projectDirectory.file('sample-plugin-source.txt') +} + +publishing { + publications { + pluginZip(MavenPublication) { + groupId = " " // <-- User provides invalid value + pom { + name = "sample-plugin" + description = "pluginDescription" + licenses { + license { + name = "The Apache License, Version 2.0" + url = "http://www.apache.org/licenses/LICENSE-2.0.txt" + } + } + developers { + developer { + name = "John Doe" + url = "https://github.com/john-doe/" + organization = "Doe.inc" + organizationUrl = "https://doe.inc/" + } + } + url = "https://github.com/doe/sample-plugin" + scm { + url = "https://github.com/doe/sample-plugin" + } + } + } + } +} diff --git a/buildSrc/src/test/resources/pluginzip/groupAndVersionValue.gradle b/buildSrc/src/test/resources/pluginzip/groupAndVersionValue.gradle new file mode 100644 index 0000000000000..bdab385f6082c --- /dev/null +++ b/buildSrc/src/test/resources/pluginzip/groupAndVersionValue.gradle @@ -0,0 +1,44 @@ +plugins { + id 'java-gradle-plugin' + id 'nebula.maven-base-publish' + id 'opensearch.pluginzip' +} + +group="org.custom.group" +version='2.0.0.0' + +// A bundlePlugin task mockup +tasks.register('bundlePlugin', Zip.class) { + archiveFileName = "sample-plugin-${version}.zip" + destinationDirectory = layout.buildDirectory.dir('distributions') + from layout.projectDirectory.file('sample-plugin-source.txt') +} + +publishing { + publications { + pluginZip(MavenPublication) { + pom { + name = "sample-plugin" + description = "pluginDescription" + licenses { + license { + name = "The Apache License, Version 2.0" + url = "http://www.apache.org/licenses/LICENSE-2.0.txt" + } + } + developers { + developer { + name = "John Doe" + url = "https://github.com/john-doe/" + organization = "Doe.inc" + organizationUrl = "https://doe.inc/" + } + } + url = "https://github.com/doe/sample-plugin" + scm { + url = "https://github.com/doe/sample-plugin" + } + } + } + } +} diff --git a/buildSrc/src/test/resources/pluginzip/missingGroupValue.gradle b/buildSrc/src/test/resources/pluginzip/missingGroupValue.gradle new file mode 100644 index 0000000000000..602c178ea1a5b --- /dev/null +++ b/buildSrc/src/test/resources/pluginzip/missingGroupValue.gradle @@ -0,0 +1,22 @@ +plugins { + id 'java-gradle-plugin' + id 'nebula.maven-base-publish' + id 'opensearch.pluginzip' +} + +//group="org.custom.group" +version='2.0.0.0' + +// A bundlePlugin task mockup +tasks.register('bundlePlugin', Zip.class) { + archiveFileName = "sample-plugin-${version}.zip" + destinationDirectory = layout.buildDirectory.dir('distributions') + from layout.projectDirectory.file('sample-plugin-source.txt') +} + +publishing { + publications { + pluginZip(MavenPublication) { + } + } +} diff --git a/buildSrc/src/test/resources/pluginzip/missingPOMEntity.gradle 
b/buildSrc/src/test/resources/pluginzip/missingPOMEntity.gradle new file mode 100644 index 0000000000000..2cc67c2e98954 --- /dev/null +++ b/buildSrc/src/test/resources/pluginzip/missingPOMEntity.gradle @@ -0,0 +1,22 @@ +plugins { + id 'java-gradle-plugin' + id 'nebula.maven-base-publish' + id 'opensearch.pluginzip' +} + +group="org.custom.group" +version='2.0.0.0' + +// A bundlePlugin task mockup +tasks.register('bundlePlugin', Zip.class) { + archiveFileName = "sample-plugin-${version}.zip" + destinationDirectory = layout.buildDirectory.dir('distributions') + from layout.projectDirectory.file('sample-plugin-source.txt') +} + +publishing { + publications { + pluginZip(MavenPublication) { + } + } +} diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 4af1acfed0ab2..6cc24a3f09244 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -11,25 +11,25 @@ spatial4j = 0.7 jts = 1.15.0 jackson = 2.13.3 jackson_databind = 2.13.3 -snakeyaml = 1.26 +snakeyaml = 1.31 icu4j = 70.1 supercsv = 2.4.0 log4j = 2.17.1 -slf4j = 1.6.2 +slf4j = 1.7.36 asm = 9.3 # when updating the JNA version, also update the version in buildSrc/build.gradle jna = 5.5.0 -netty = 4.1.79.Final +netty = 4.1.80.Final joda = 2.10.13 # client dependencies httpclient = 4.5.13 -httpcore = 4.4.12 -httpasyncclient = 4.1.4 +httpcore = 4.4.15 +httpasyncclient = 4.1.5 commonslogging = 1.2 -commonscodec = 1.13 +commonscodec = 1.15 # plugin dependencies aws = 1.12.270 @@ -42,7 +42,7 @@ bouncycastle=1.70 randomizedrunner = 2.7.1 junit = 4.13.2 hamcrest = 2.1 -mockito = 4.6.1 +mockito = 4.7.0 objenesis = 3.2 bytebuddy = 1.12.12 diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java index 6fa57295f48e4..eedc27d1d2ea7 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java @@ -54,6 +54,8 @@ import org.opensearch.action.get.MultiGetRequest; import org.opensearch.action.index.IndexRequest; import org.opensearch.action.search.ClearScrollRequest; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.DeletePitRequest; import org.opensearch.action.search.MultiSearchRequest; import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchScrollRequest; @@ -92,6 +94,7 @@ import org.opensearch.index.reindex.ReindexRequest; import org.opensearch.index.reindex.UpdateByQueryRequest; import org.opensearch.index.seqno.SequenceNumbers; +import org.opensearch.rest.action.search.RestCreatePitAction; import org.opensearch.rest.action.search.RestSearchAction; import org.opensearch.script.mustache.MultiSearchTemplateRequest; import org.opensearch.script.mustache.SearchTemplateRequest; @@ -433,9 +436,19 @@ static void addSearchRequestParams(Params params, SearchRequest searchRequest) { params.putParam(RestSearchAction.TYPED_KEYS_PARAM, "true"); params.withRouting(searchRequest.routing()); params.withPreference(searchRequest.preference()); - params.withIndicesOptions(searchRequest.indicesOptions()); + if (searchRequest.pointInTimeBuilder() == null) { + params.withIndicesOptions(searchRequest.indicesOptions()); + } params.withSearchType(searchRequest.searchType().name().toLowerCase(Locale.ROOT)); - params.putParam("ccs_minimize_roundtrips", Boolean.toString(searchRequest.isCcsMinimizeRoundtrips())); + /** + * 
Merging search responses as part of CCS flow to reduce roundtrips is not supported for point in time - + * refer to org.opensearch.action.search.SearchResponseMerger + */ + if (searchRequest.pointInTimeBuilder() != null) { + params.putParam("ccs_minimize_roundtrips", "false"); + } else { + params.putParam("ccs_minimize_roundtrips", Boolean.toString(searchRequest.isCcsMinimizeRoundtrips())); + } if (searchRequest.getPreFilterShardSize() != null) { params.putParam("pre_filter_shard_size", Integer.toString(searchRequest.getPreFilterShardSize())); } @@ -464,6 +477,27 @@ static Request clearScroll(ClearScrollRequest clearScrollRequest) throws IOExcep return request; } + static Request createPit(CreatePitRequest createPitRequest) throws IOException { + Params params = new Params(); + params.putParam(RestCreatePitAction.ALLOW_PARTIAL_PIT_CREATION, Boolean.toString(createPitRequest.shouldAllowPartialPitCreation())); + params.putParam(RestCreatePitAction.KEEP_ALIVE, createPitRequest.getKeepAlive()); + params.withIndicesOptions(createPitRequest.indicesOptions()); + Request request = new Request(HttpPost.METHOD_NAME, endpoint(createPitRequest.indices(), "_search/point_in_time")); + request.addParameters(params.asMap()); + request.setEntity(createEntity(createPitRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + + static Request deletePit(DeletePitRequest deletePitRequest) throws IOException { + Request request = new Request(HttpDelete.METHOD_NAME, "/_search/point_in_time"); + request.setEntity(createEntity(deletePitRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + + static Request deleteAllPits() { + return new Request(HttpDelete.METHOD_NAME, "/_search/point_in_time/_all"); + } + static Request multiSearch(MultiSearchRequest multiSearchRequest) throws IOException { Request request = new Request(HttpPost.METHOD_NAME, "/_msearch"); diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java index 28a441bdf7f7f..0c73c65f6175f 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java @@ -59,6 +59,10 @@ import org.opensearch.action.index.IndexResponse; import org.opensearch.action.search.ClearScrollRequest; import org.opensearch.action.search.ClearScrollResponse; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.DeletePitResponse; import org.opensearch.action.search.MultiSearchRequest; import org.opensearch.action.search.MultiSearchResponse; import org.opensearch.action.search.SearchRequest; @@ -1250,6 +1254,120 @@ public final Cancellable scrollAsync( ); } + /** + * Create PIT context using create PIT API + * + * @param createPitRequest the request + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + */ + public final CreatePitResponse createPit(CreatePitRequest createPitRequest, RequestOptions options) throws IOException { + return performRequestAndParseEntity( + createPitRequest, + RequestConverters::createPit, + options, + CreatePitResponse::fromXContent, + emptySet() + ); + } + + /** + * Asynchronously create a PIT context using the create PIT API + * + * @param createPitRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request + */ + public final Cancellable createPitAsync( + CreatePitRequest createPitRequest, + RequestOptions options, + ActionListener<CreatePitResponse> listener + ) { + return performRequestAsyncAndParseEntity( + createPitRequest, + RequestConverters::createPit, + options, + CreatePitResponse::fromXContent, + listener, + emptySet() + ); + } + + /** + * Delete point in time searches using the delete PIT API + * + * @param deletePitRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + */ + public final DeletePitResponse deletePit(DeletePitRequest deletePitRequest, RequestOptions options) throws IOException { + return performRequestAndParseEntity( + deletePitRequest, + RequestConverters::deletePit, + options, + DeletePitResponse::fromXContent, + emptySet() + ); + } + + /** + * Asynchronously delete point in time searches using the delete PIT API + * + * @param deletePitRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request + */ + public final Cancellable deletePitAsync( + DeletePitRequest deletePitRequest, + RequestOptions options, + ActionListener<DeletePitResponse> listener + ) { + return performRequestAsyncAndParseEntity( + deletePitRequest, + RequestConverters::deletePit, + options, + DeletePitResponse::fromXContent, + listener, + emptySet() + ); + } + + /** + * Delete all point in time searches using the delete all PITs API + * + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + */ + public final DeletePitResponse deleteAllPits(RequestOptions options) throws IOException { + return performRequestAndParseEntity( + new MainRequest(), + (request) -> RequestConverters.deleteAllPits(), + options, + DeletePitResponse::fromXContent, + emptySet() + ); + } + + /** + * Asynchronously delete all point in time searches using the delete all PITs API + * + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request + */ + public final Cancellable deleteAllPitsAsync(RequestOptions options, ActionListener<DeletePitResponse> listener) { + return performRequestAsyncAndParseEntity( + new MainRequest(), + (request) -> RequestConverters.deleteAllPits(), + options, + DeletePitResponse::fromXContent, + listener, + emptySet() + ); + } + /** * Clears one or more scroll ids using the Clear Scroll API. 
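As a usage illustration only (not part of the change above): a minimal sketch of how the new point-in-time (PIT) methods might be called from application code. The RestHighLevelClient instance, the "my-index" index name, the 10-minute keep-alive, and the page size of 50 are assumptions for the example; under the hood these calls map to the endpoints wired up in RequestConverters (POST {indices}/_search/point_in_time with the keep_alive and allow_partial_pit_creation parameters, DELETE /_search/point_in_time, and DELETE /_search/point_in_time/_all).

import java.io.IOException;
import java.util.Collections;
import java.util.concurrent.TimeUnit;

import org.opensearch.action.search.CreatePitRequest;
import org.opensearch.action.search.CreatePitResponse;
import org.opensearch.action.search.DeletePitRequest;
import org.opensearch.action.search.DeletePitResponse;
import org.opensearch.action.search.SearchRequest;
import org.opensearch.action.search.SearchResponse;
import org.opensearch.client.RequestOptions;
import org.opensearch.client.RestHighLevelClient;
import org.opensearch.common.unit.TimeValue;
import org.opensearch.search.builder.PointInTimeBuilder;
import org.opensearch.search.builder.SearchSourceBuilder;

public class PitUsageSketch {
    // Create a PIT on "my-index", run one search against it, and always clean it up afterwards.
    public SearchResponse searchWithPit(RestHighLevelClient client) throws IOException {
        CreatePitRequest createRequest = new CreatePitRequest(new TimeValue(10, TimeUnit.MINUTES), true, "my-index");
        CreatePitResponse pit = client.createPit(createRequest, RequestOptions.DEFAULT);
        try {
            // The PIT id pins the search to the reader context captured at creation time,
            // so no indices are set on the SearchRequest itself.
            SearchSourceBuilder source = new SearchSourceBuilder().size(50)
                .pointInTimeBuilder(new PointInTimeBuilder(pit.getId()));
            return client.search(new SearchRequest().source(source), RequestOptions.DEFAULT);
        } finally {
            DeletePitRequest deleteRequest = new DeletePitRequest(Collections.singletonList(pit.getId()));
            DeletePitResponse deleted = client.deletePit(deleteRequest, RequestOptions.DEFAULT);
            assert deleted.getDeletePitResults().get(0).isSuccessful();
        }
    }
}

The asynchronous variants (createPitAsync, deletePitAsync, deleteAllPitsAsync) take an ActionListener for the corresponding response type and return a Cancellable instead of blocking.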
* diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java new file mode 100644 index 0000000000000..395ec6e46a7b3 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java @@ -0,0 +1,102 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.client; + +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.methods.HttpPut; +import org.junit.Before; +import org.opensearch.OpenSearchStatusException; +import org.opensearch.action.ActionListener; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.action.search.DeletePitInfo; +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.DeletePitResponse; +import org.opensearch.common.unit.TimeValue; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeUnit; + +/** + * Tests point in time API with rest high level client + */ +public class PitIT extends OpenSearchRestHighLevelClientTestCase { + + @Before + public void indexDocuments() throws IOException { + Request doc1 = new Request(HttpPut.METHOD_NAME, "/index/_doc/1"); + doc1.setJsonEntity("{\"type\":\"type1\", \"id\":1, \"num\":10, \"num2\":50}"); + client().performRequest(doc1); + Request doc2 = new Request(HttpPut.METHOD_NAME, "/index/_doc/2"); + doc2.setJsonEntity("{\"type\":\"type1\", \"id\":2, \"num\":20, \"num2\":40}"); + client().performRequest(doc2); + Request doc3 = new Request(HttpPut.METHOD_NAME, "/index/_doc/3"); + doc3.setJsonEntity("{\"type\":\"type1\", \"id\":3, \"num\":50, \"num2\":35}"); + client().performRequest(doc3); + Request doc4 = new Request(HttpPut.METHOD_NAME, "/index/_doc/4"); + doc4.setJsonEntity("{\"type\":\"type2\", \"id\":4, \"num\":100, \"num2\":10}"); + client().performRequest(doc4); + Request doc5 = new Request(HttpPut.METHOD_NAME, "/index/_doc/5"); + doc5.setJsonEntity("{\"type\":\"type2\", \"id\":5, \"num\":100, \"num2\":10}"); + client().performRequest(doc5); + client().performRequest(new Request(HttpPost.METHOD_NAME, "/_refresh")); + } + + public void testCreateAndDeletePit() throws IOException { + CreatePitRequest pitRequest = new CreatePitRequest(new TimeValue(1, TimeUnit.DAYS), true, "index"); + CreatePitResponse pitResponse = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); + assertTrue(pitResponse.getId() != null); + assertEquals(1, pitResponse.getTotalShards()); + assertEquals(1, pitResponse.getSuccessfulShards()); + assertEquals(0, pitResponse.getFailedShards()); + assertEquals(0, pitResponse.getSkippedShards()); + List pitIds = new ArrayList<>(); + pitIds.add(pitResponse.getId()); + DeletePitRequest deletePitRequest = new DeletePitRequest(pitIds); + DeletePitResponse deletePitResponse = execute(deletePitRequest, highLevelClient()::deletePit, highLevelClient()::deletePitAsync); + assertTrue(deletePitResponse.getDeletePitResults().get(0).isSuccessful()); + assertTrue(deletePitResponse.getDeletePitResults().get(0).getPitId().equals(pitResponse.getId())); + } + + public void testDeleteAllPits() throws IOException { + CreatePitRequest pitRequest = new CreatePitRequest(new TimeValue(1, TimeUnit.DAYS), true, 
"index"); + CreatePitResponse pitResponse = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); + CreatePitResponse pitResponse1 = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); + assertTrue(pitResponse.getId() != null); + assertTrue(pitResponse1.getId() != null); + DeletePitResponse deletePitResponse = highLevelClient().deleteAllPits(RequestOptions.DEFAULT); + for (DeletePitInfo deletePitInfo : deletePitResponse.getDeletePitResults()) { + assertTrue(deletePitInfo.isSuccessful()); + } + pitResponse = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); + pitResponse1 = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); + assertTrue(pitResponse.getId() != null); + assertTrue(pitResponse1.getId() != null); + ActionListener deletePitListener = new ActionListener<>() { + @Override + public void onResponse(DeletePitResponse response) { + for (DeletePitInfo deletePitInfo : response.getDeletePitResults()) { + assertTrue(deletePitInfo.isSuccessful()); + } + } + + @Override + public void onFailure(Exception e) { + if (!(e instanceof OpenSearchStatusException)) { + throw new AssertionError("Delete all failed"); + } + } + }; + highLevelClient().deleteAllPitsAsync(RequestOptions.DEFAULT, deletePitListener); + // validate no pits case + highLevelClient().deleteAllPitsAsync(RequestOptions.DEFAULT, deletePitListener); + } +} diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java index 97c0f2f475826..ee5795deb165d 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java @@ -53,6 +53,8 @@ import org.opensearch.action.get.MultiGetRequest; import org.opensearch.action.index.IndexRequest; import org.opensearch.action.search.ClearScrollRequest; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.DeletePitRequest; import org.opensearch.action.search.MultiSearchRequest; import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchScrollRequest; @@ -131,6 +133,7 @@ import java.util.Locale; import java.util.Map; import java.util.StringJoiner; +import java.util.concurrent.TimeUnit; import java.util.function.BiFunction; import java.util.function.Consumer; import java.util.function.Function; @@ -1303,6 +1306,47 @@ public void testClearScroll() throws IOException { assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); } + public void testCreatePit() throws IOException { + String[] indices = randomIndicesNames(0, 5); + Map expectedParams = new HashMap<>(); + expectedParams.put("keep_alive", "1d"); + expectedParams.put("allow_partial_pit_creation", "true"); + CreatePitRequest createPitRequest = new CreatePitRequest(new TimeValue(1, TimeUnit.DAYS), true, indices); + setRandomIndicesOptions(createPitRequest::indicesOptions, createPitRequest::indicesOptions, expectedParams); + Request request = RequestConverters.createPit(createPitRequest); + StringJoiner endpoint = new StringJoiner("/", "/", ""); + String index = String.join(",", indices); + if (Strings.hasLength(index)) { + endpoint.add(index); + } + endpoint.add("_search/point_in_time"); + assertEquals(HttpPost.METHOD_NAME, 
request.getMethod()); + assertEquals(endpoint.toString(), request.getEndpoint()); + assertEquals(expectedParams, request.getParameters()); + assertToXContentBody(createPitRequest, request.getEntity()); + assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); + } + + public void testDeletePit() throws IOException { + List pitIdsList = new ArrayList<>(); + pitIdsList.add("pitId1"); + pitIdsList.add("pitId2"); + DeletePitRequest deletePitRequest = new DeletePitRequest(pitIdsList); + Request request = RequestConverters.deletePit(deletePitRequest); + String endpoint = "/_search/point_in_time"; + assertEquals(HttpDelete.METHOD_NAME, request.getMethod()); + assertEquals(endpoint, request.getEndpoint()); + assertToXContentBody(deletePitRequest, request.getEntity()); + assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); + } + + public void testDeleteAllPits() { + Request request = RequestConverters.deleteAllPits(); + String endpoint = "/_search/point_in_time/_all"; + assertEquals(HttpDelete.METHOD_NAME, request.getMethod()); + assertEquals(endpoint, request.getEndpoint()); + } + public void testSearchTemplate() throws Exception { // Create a random request. String[] indices = randomIndicesNames(0, 5); diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java index 3da0f81023f72..cdd63743f2644 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java @@ -134,6 +134,7 @@ public class RestHighLevelClientTests extends OpenSearchTestCase { // core "ping", "info", + "delete_all_pits", // security "security.get_ssl_certificates", "security.authenticate", diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java index 19e287fb91be5..8b509e5d19e92 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java @@ -43,6 +43,10 @@ import org.opensearch.action.fieldcaps.FieldCapabilitiesResponse; import org.opensearch.action.search.ClearScrollRequest; import org.opensearch.action.search.ClearScrollResponse; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.DeletePitResponse; import org.opensearch.action.search.MultiSearchRequest; import org.opensearch.action.search.MultiSearchResponse; import org.opensearch.action.search.SearchRequest; @@ -89,6 +93,7 @@ import org.opensearch.search.aggregations.metrics.WeightedAvgAggregationBuilder; import org.opensearch.search.aggregations.support.MultiValuesSourceFieldConfig; import org.opensearch.search.aggregations.support.ValueType; +import org.opensearch.search.builder.PointInTimeBuilder; import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.search.fetch.subphase.FetchSourceContext; import org.opensearch.search.fetch.subphase.highlight.HighlightBuilder; @@ -100,11 +105,13 @@ import org.junit.Before; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; import 
java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.concurrent.TimeUnit; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertToXContentEquivalent; @@ -762,6 +769,46 @@ public void testSearchScroll() throws Exception { } } + public void testSearchWithPit() throws Exception { + for (int i = 0; i < 100; i++) { + XContentBuilder builder = jsonBuilder().startObject().field("field", i).endObject(); + Request doc = new Request(HttpPut.METHOD_NAME, "/test/_doc/" + Integer.toString(i)); + doc.setJsonEntity(Strings.toString(builder)); + client().performRequest(doc); + } + client().performRequest(new Request(HttpPost.METHOD_NAME, "/test/_refresh")); + + CreatePitRequest pitRequest = new CreatePitRequest(new TimeValue(1, TimeUnit.DAYS), true, "test"); + CreatePitResponse pitResponse = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); + + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder().size(35) + .sort("field", SortOrder.ASC) + .pointInTimeBuilder(new PointInTimeBuilder(pitResponse.getId())); + SearchRequest searchRequest = new SearchRequest().source(searchSourceBuilder); + SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + + try { + long counter = 0; + assertSearchHeader(searchResponse); + assertThat(searchResponse.getHits().getTotalHits().value, equalTo(100L)); + assertThat(searchResponse.getHits().getHits().length, equalTo(35)); + for (SearchHit hit : searchResponse.getHits()) { + assertThat(((Number) hit.getSortValues()[0]).longValue(), equalTo(counter++)); + } + } finally { + List pitIds = new ArrayList<>(); + pitIds.add(pitResponse.getId()); + DeletePitRequest deletePitRequest = new DeletePitRequest(pitIds); + DeletePitResponse deletePitResponse = execute( + deletePitRequest, + highLevelClient()::deletePit, + highLevelClient()::deletePitAsync + ); + assertTrue(deletePitResponse.getDeletePitResults().get(0).isSuccessful()); + assertTrue(deletePitResponse.getDeletePitResults().get(0).getPitId().equals(pitResponse.getId())); + } + } + public void testMultiSearch() throws Exception { MultiSearchRequest multiSearchRequest = new MultiSearchRequest(); SearchRequest searchRequest1 = new SearchRequest("index1"); diff --git a/client/rest/licenses/commons-codec-1.13.jar.sha1 b/client/rest/licenses/commons-codec-1.13.jar.sha1 deleted file mode 100644 index 66b72c414d63a..0000000000000 --- a/client/rest/licenses/commons-codec-1.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3f18e1aa31031d89db6f01ba05d501258ce69d2c \ No newline at end of file diff --git a/client/rest/licenses/commons-codec-1.15.jar.sha1 b/client/rest/licenses/commons-codec-1.15.jar.sha1 new file mode 100644 index 0000000000000..62d99837b87e1 --- /dev/null +++ b/client/rest/licenses/commons-codec-1.15.jar.sha1 @@ -0,0 +1 @@ +49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git a/client/rest/licenses/httpasyncclient-4.1.4.jar.sha1 b/client/rest/licenses/httpasyncclient-4.1.4.jar.sha1 deleted file mode 100644 index 8360ab45c7ab3..0000000000000 --- a/client/rest/licenses/httpasyncclient-4.1.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f3a3240681faae3fa46b573a4c7e50cec9db0d86 \ No newline at end of file diff --git a/client/rest/licenses/httpasyncclient-4.1.5.jar.sha1 b/client/rest/licenses/httpasyncclient-4.1.5.jar.sha1 new file mode 100644 index 
0000000000000..366a9e31069a6 --- /dev/null +++ b/client/rest/licenses/httpasyncclient-4.1.5.jar.sha1 @@ -0,0 +1 @@ +cd18227f1eb8e9a263286c1d7362ceb24f6f9b32 \ No newline at end of file diff --git a/client/rest/licenses/httpcore-4.4.12.jar.sha1 b/client/rest/licenses/httpcore-4.4.12.jar.sha1 deleted file mode 100644 index 3c046171b30da..0000000000000 --- a/client/rest/licenses/httpcore-4.4.12.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -21ebaf6d532bc350ba95bd81938fa5f0e511c132 \ No newline at end of file diff --git a/client/rest/licenses/httpcore-4.4.15.jar.sha1 b/client/rest/licenses/httpcore-4.4.15.jar.sha1 new file mode 100644 index 0000000000000..42a03b5d7a376 --- /dev/null +++ b/client/rest/licenses/httpcore-4.4.15.jar.sha1 @@ -0,0 +1 @@ +7f2e0c573eaa7a74bac2e89b359e1f73d92a0a1d \ No newline at end of file diff --git a/client/rest/licenses/httpcore-nio-4.4.12.jar.sha1 b/client/rest/licenses/httpcore-nio-4.4.12.jar.sha1 deleted file mode 100644 index 4de932dc5aca0..0000000000000 --- a/client/rest/licenses/httpcore-nio-4.4.12.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -84cd29eca842f31db02987cfedea245af020198b \ No newline at end of file diff --git a/client/rest/licenses/httpcore-nio-4.4.15.jar.sha1 b/client/rest/licenses/httpcore-nio-4.4.15.jar.sha1 new file mode 100644 index 0000000000000..251b35ab6a1a5 --- /dev/null +++ b/client/rest/licenses/httpcore-nio-4.4.15.jar.sha1 @@ -0,0 +1 @@ +85d2b6825d42db909a1474f0ffbd6328429b7a32 \ No newline at end of file diff --git a/client/sniffer/licenses/commons-codec-1.13.jar.sha1 b/client/sniffer/licenses/commons-codec-1.13.jar.sha1 deleted file mode 100644 index 66b72c414d63a..0000000000000 --- a/client/sniffer/licenses/commons-codec-1.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3f18e1aa31031d89db6f01ba05d501258ce69d2c \ No newline at end of file diff --git a/client/sniffer/licenses/commons-codec-1.15.jar.sha1 b/client/sniffer/licenses/commons-codec-1.15.jar.sha1 new file mode 100644 index 0000000000000..62d99837b87e1 --- /dev/null +++ b/client/sniffer/licenses/commons-codec-1.15.jar.sha1 @@ -0,0 +1 @@ +49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git a/client/sniffer/licenses/httpcore-4.4.12.jar.sha1 b/client/sniffer/licenses/httpcore-4.4.12.jar.sha1 deleted file mode 100644 index 3c046171b30da..0000000000000 --- a/client/sniffer/licenses/httpcore-4.4.12.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -21ebaf6d532bc350ba95bd81938fa5f0e511c132 \ No newline at end of file diff --git a/client/sniffer/licenses/httpcore-4.4.15.jar.sha1 b/client/sniffer/licenses/httpcore-4.4.15.jar.sha1 new file mode 100644 index 0000000000000..42a03b5d7a376 --- /dev/null +++ b/client/sniffer/licenses/httpcore-4.4.15.jar.sha1 @@ -0,0 +1 @@ +7f2e0c573eaa7a74bac2e89b359e1f73d92a0a1d \ No newline at end of file diff --git a/distribution/src/bin/opensearch-service.bat b/distribution/src/bin/opensearch-service.bat index 4dd8356340d10..a11dc8316e8b1 100644 --- a/distribution/src/bin/opensearch-service.bat +++ b/distribution/src/bin/opensearch-service.bat @@ -8,6 +8,10 @@ if /i "%1" == "install" set NOJAVA= call "%~dp0opensearch-env.bat" %NOJAVA% || exit /b 1 +rem opensearch-service-x64.exe is based off of the Apache Commons Daemon procrun service application. +rem Run "opensearch-service-x64.exe version" for version information. +rem Run "opensearch-service-x64.exe help" for command options. +rem See https://commons.apache.org/proper/commons-daemon/procrun.html for more information. 
set EXECUTABLE=%OPENSEARCH_HOME%\bin\opensearch-service-x64.exe if "%SERVICE_ID%" == "" set SERVICE_ID=opensearch-service-x64 set ARCH=64-bit @@ -20,6 +24,10 @@ exit /B 1 set OPENSEARCH_VERSION=${project.version} if "%SERVICE_LOG_DIR%" == "" set SERVICE_LOG_DIR=%OPENSEARCH_HOME%\logs +rem The logs directory must exist for the service to start. +if not exist "%SERVICE_LOG_DIR%" ( + mkdir "%SERVICE_LOG_DIR%" +) if "x%1x" == "xx" goto displayUsage set SERVICE_CMD=%1 @@ -45,7 +53,8 @@ echo Usage: opensearch-service.bat install^|remove^|start^|stop^|manager [SERVIC goto:eof :doStart -"%EXECUTABLE%" //OPENSEARCH//%SERVICE_ID% %LOG_OPTS% +rem //ES == Execute Service +"%EXECUTABLE%" //ES//%SERVICE_ID% %LOG_OPTS% if not errorlevel 1 goto started echo Failed starting '%SERVICE_ID%' service exit /B 1 @@ -55,6 +64,7 @@ echo The service '%SERVICE_ID%' has been started goto:eof :doStop +rem //SS == Stop Service "%EXECUTABLE%" //SS//%SERVICE_ID% %LOG_OPTS% if not errorlevel 1 goto stopped echo Failed stopping '%SERVICE_ID%' service @@ -65,8 +75,11 @@ echo The service '%SERVICE_ID%' has been stopped goto:eof :doManagment +rem opensearch-service-mgr.exe is based off of the Apache Commons Daemon procrun monitor application. +rem See https://commons.apache.org/proper/commons-daemon/procrun.html for more information. set EXECUTABLE_MGR=%OPENSEARCH_HOME%\bin\opensearch-service-mgr -"%EXECUTABLE_MGR%" //OPENSEARCH//%SERVICE_ID% +rem //ES == Edit Service +"%EXECUTABLE_MGR%" //ES//%SERVICE_ID% if not errorlevel 1 goto managed echo Failed starting service manager for '%SERVICE_ID%' exit /B 1 @@ -77,6 +90,7 @@ goto:eof :doRemove rem Remove the service +rem //DS == Delete Service "%EXECUTABLE%" //DS//%SERVICE_ID% %LOG_OPTS% if not errorlevel 1 goto removed echo Failed removing '%SERVICE_ID%' service @@ -207,6 +221,7 @@ if not "%SERVICE_USERNAME%" == "" ( set SERVICE_PARAMS=%SERVICE_PARAMS% --ServiceUser "%SERVICE_USERNAME%" --ServicePassword "%SERVICE_PASSWORD%" ) ) +rem //IS == Install Service "%EXECUTABLE%" //IS//%SERVICE_ID% --Startup %OPENSEARCH_START_TYPE% --StopTimeout %OPENSEARCH_STOP_TIMEOUT% --StartClass org.opensearch.bootstrap.OpenSearch --StartMethod main ++StartParams --quiet --StopClass org.opensearch.bootstrap.OpenSearch --StopMethod close --Classpath "%OPENSEARCH_CLASSPATH%" --JvmMs %JVM_MS% --JvmMx %JVM_MX% --JvmSs %JVM_SS% --JvmOptions %OTHER_JAVA_OPTS% ++JvmOptions %OPENSEARCH_PARAMS% %LOG_OPTS% --PidFile "%SERVICE_ID%.pid" --DisplayName "%SERVICE_DISPLAY_NAME%" --Description "%SERVICE_DESCRIPTION%" --Jvm "%JAVA_HOME%%JVM_DLL%" --StartMode jvm --StopMode jvm --StartPath "%OPENSEARCH_HOME%" %SERVICE_PARAMS% ++Environment HOSTNAME="%%COMPUTERNAME%%" if not errorlevel 1 goto installed diff --git a/distribution/src/bin/opensearch.bat b/distribution/src/bin/opensearch.bat index 49a12aa5c968d..dda15124e1654 100644 --- a/distribution/src/bin/opensearch.bat +++ b/distribution/src/bin/opensearch.bat @@ -56,6 +56,12 @@ IF ERRORLEVEL 1 ( EXIT /B %ERRORLEVEL% ) +if "%SERVICE_LOG_DIR%" == "" set SERVICE_LOG_DIR=%OPENSEARCH_HOME%\logs +rem The logs directory must exist for the service to start. 
+if not exist "%SERVICE_LOG_DIR%" ( + mkdir "%SERVICE_LOG_DIR%" +) + SET KEYSTORE_PASSWORD= IF "%checkpassword%"=="Y" ( CALL "%~dp0opensearch-keystore.bat" has-passwd --silent diff --git a/libs/x-content/licenses/snakeyaml-1.26.jar.sha1 b/libs/x-content/licenses/snakeyaml-1.26.jar.sha1 deleted file mode 100644 index fde3aba8edad0..0000000000000 --- a/libs/x-content/licenses/snakeyaml-1.26.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a78a8747147d2c5807683e76ec2b633e95c14fe9 \ No newline at end of file diff --git a/libs/x-content/licenses/snakeyaml-1.31.jar.sha1 b/libs/x-content/licenses/snakeyaml-1.31.jar.sha1 new file mode 100644 index 0000000000000..1ac9b78b88687 --- /dev/null +++ b/libs/x-content/licenses/snakeyaml-1.31.jar.sha1 @@ -0,0 +1 @@ +cf26b7b05fef01e7bec00cb88ab4feeeba743e12 \ No newline at end of file diff --git a/modules/transport-netty4/build.gradle b/modules/transport-netty4/build.gradle index b72cb6d868d79..8bbe0bf2ef65f 100644 --- a/modules/transport-netty4/build.gradle +++ b/modules/transport-netty4/build.gradle @@ -58,6 +58,7 @@ dependencies { api "io.netty:netty-buffer:${versions.netty}" api "io.netty:netty-codec:${versions.netty}" api "io.netty:netty-codec-http:${versions.netty}" + api "io.netty:netty-codec-http2:${versions.netty}" api "io.netty:netty-common:${versions.netty}" api "io.netty:netty-handler:${versions.netty}" api "io.netty:netty-resolver:${versions.netty}" @@ -143,6 +144,14 @@ thirdPartyAudit { 'org.apache.log4j.Level', 'org.apache.log4j.Logger', + // from io.netty.handler.ssl.OpenSslEngine (netty) + 'org.bouncycastle.openssl.PEMEncryptedKeyPair', + 'org.bouncycastle.openssl.PEMParser', + 'org.bouncycastle.openssl.jcajce.JcaPEMKeyConverter', + 'org.bouncycastle.openssl.jcajce.JceOpenSSLPKCS8DecryptorProviderBuilder', + 'org.bouncycastle.openssl.jcajce.JcePEMDecryptorProviderBuilder', + 'org.bouncycastle.pkcs.PKCS8EncryptedPrivateKeyInfo', + // from io.netty.handler.ssl.OpenSslEngine (netty) 'io.netty.internal.tcnative.Buffer', 'io.netty.internal.tcnative.CertificateCompressionAlgo', diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.79.Final.jar.sha1 deleted file mode 100644 index 8e9e4d0b7f754..0000000000000 --- a/modules/transport-netty4/licenses/netty-buffer-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6c014412b599489b1db27c6bc08d8a46da94e397 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.80.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.80.Final.jar.sha1 new file mode 100644 index 0000000000000..471fe8b211df2 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-buffer-4.1.80.Final.jar.sha1 @@ -0,0 +1 @@ +a087321a63d9991e25f7b7d24ef53edcbcb954ff \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.79.Final.jar.sha1 deleted file mode 100644 index c0920231d79a8..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -18f5b02af7ca611978bc28f2cb58cbb3b9b0f0ef \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.80.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.80.Final.jar.sha1 new file mode 100644 index 0000000000000..0f8e3bebe1532 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-4.1.80.Final.jar.sha1 @@ -0,0 +1 @@ +4941821a158d16311665d8606aefa610ecf0f64c \ No 
newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.79.Final.jar.sha1 deleted file mode 100644 index a3f650da5abbd..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -882c70bc0a30a98bf3ce477f043e967ac026044c \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.80.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.80.Final.jar.sha1 new file mode 100644 index 0000000000000..d18720d164335 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.80.Final.jar.sha1 @@ -0,0 +1 @@ +efb23f9d5187d2f733595ef7930137f0cb2cec48 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.80.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.80.Final.jar.sha1 new file mode 100644 index 0000000000000..d96a286b98493 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http2-4.1.80.Final.jar.sha1 @@ -0,0 +1 @@ +bf7b66834188ef1a6f6095291c6b81a1880798ba \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.79.Final.jar.sha1 deleted file mode 100644 index faa7b099406a3..0000000000000 --- a/modules/transport-netty4/licenses/netty-common-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2814bd465731355323aba0fdd22163bfce638a75 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.80.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.80.Final.jar.sha1 new file mode 100644 index 0000000000000..d256e77b7024c --- /dev/null +++ b/modules/transport-netty4/licenses/netty-common-4.1.80.Final.jar.sha1 @@ -0,0 +1 @@ +3d43ce22863bc590e4e33fbdabbb58dc05f4c43d \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.79.Final.jar.sha1 deleted file mode 100644 index 8e314f164da69..0000000000000 --- a/modules/transport-netty4/licenses/netty-handler-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2dc22423c8ed19906615fb936a5fcb7db14a4e6c \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.80.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.80.Final.jar.sha1 new file mode 100644 index 0000000000000..022ad6bc93dba --- /dev/null +++ b/modules/transport-netty4/licenses/netty-handler-4.1.80.Final.jar.sha1 @@ -0,0 +1 @@ +cf7029d2f9bc4eeae8ff15af7a528d06b518a017 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.79.Final.jar.sha1 deleted file mode 100644 index af550935bb911..0000000000000 --- a/modules/transport-netty4/licenses/netty-resolver-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -55ecb1ff4464b56564a90824a741c3911264aaa4 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.80.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.80.Final.jar.sha1 new file mode 100644 index 0000000000000..ad0f71b569377 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-resolver-4.1.80.Final.jar.sha1 @@ -0,0 +1 @@ +3bbb0d4bfbbab867e5b757b97a6e5e0d1348d94c \ No newline at end of file diff --git 
a/modules/transport-netty4/licenses/netty-transport-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.79.Final.jar.sha1 deleted file mode 100644 index c6e18efb3ad3d..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6cc2b49749b4fbcc39c687027e04e65e857552a9 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.80.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.80.Final.jar.sha1 new file mode 100644 index 0000000000000..2bfb4f377d89b --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-4.1.80.Final.jar.sha1 @@ -0,0 +1 @@ +57fcace7a1b8567aa39921c915d1b1ba78fd4d2d \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1 deleted file mode 100644 index 7f984663dfa85..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -731937caec938b77b39df932a8da8aaca8d5ec05 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.80.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.80.Final.jar.sha1 new file mode 100644 index 0000000000000..998e6e8560724 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.80.Final.jar.sha1 @@ -0,0 +1 @@ +da3d7da1a8d317ae2c82b400fd255fe610c43ebe \ No newline at end of file diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4Http2IT.java b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4Http2IT.java new file mode 100644 index 0000000000000..1424b392af8e7 --- /dev/null +++ b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4Http2IT.java @@ -0,0 +1,62 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.http.netty4; + +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.util.ReferenceCounted; +import org.opensearch.OpenSearchNetty4IntegTestCase; +import org.opensearch.common.transport.TransportAddress; +import org.opensearch.http.HttpServerTransport; +import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; +import org.opensearch.test.OpenSearchIntegTestCase.Scope; + +import java.util.Collection; +import java.util.Locale; +import java.util.stream.IntStream; + +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.hasSize; + +@ClusterScope(scope = Scope.TEST, supportsDedicatedMasters = false, numDataNodes = 1) +public class Netty4Http2IT extends OpenSearchNetty4IntegTestCase { + + @Override + protected boolean addMockHttpTransport() { + return false; // enable http + } + + public void testThatNettyHttpServerSupportsHttp2() throws Exception { + String[] requests = new String[] { "/", "/_nodes/stats", "/", "/_cluster/state", "/" }; + + HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class); + TransportAddress[] boundAddresses = httpServerTransport.boundAddress().boundAddresses(); + TransportAddress transportAddress = randomFrom(boundAddresses); + + try (Netty4HttpClient nettyHttpClient = Netty4HttpClient.http2()) { + Collection responses = nettyHttpClient.get(transportAddress.address(), requests); + try { + assertThat(responses, hasSize(5)); + + Collection opaqueIds = Netty4HttpClient.returnOpaqueIds(responses); + assertOpaqueIdsInAnyOrder(opaqueIds); + } finally { + responses.forEach(ReferenceCounted::release); + } + } + } + + private void assertOpaqueIdsInAnyOrder(Collection opaqueIds) { + // check if opaque ids are present in any order, since for HTTP/2 we use streaming (no head of line blocking) + // and responses may come back at any order + int i = 0; + String msg = String.format(Locale.ROOT, "Expected list of opaque ids to be in any order, got [%s]", opaqueIds); + assertThat(msg, opaqueIds, containsInAnyOrder(IntStream.range(0, 5).mapToObj(Integer::toString).toArray())); + } + +} diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HttpRequestSizeLimitIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HttpRequestSizeLimitIT.java index 08df9259d475f..db76c0b145840 100644 --- a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HttpRequestSizeLimitIT.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HttpRequestSizeLimitIT.java @@ -100,7 +100,7 @@ public void testLimitsInFlightRequests() throws Exception { HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class); TransportAddress transportAddress = randomFrom(httpServerTransport.boundAddress().boundAddresses()); - try (Netty4HttpClient nettyHttpClient = new Netty4HttpClient()) { + try (Netty4HttpClient nettyHttpClient = Netty4HttpClient.http()) { Collection singleResponse = nettyHttpClient.post(transportAddress.address(), requests.subList(0, 1)); try { assertThat(singleResponse, hasSize(1)); @@ -130,7 +130,7 @@ public void testDoesNotLimitExcludedRequests() throws Exception { HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class); TransportAddress transportAddress = randomFrom(httpServerTransport.boundAddress().boundAddresses()); - try 
(Netty4HttpClient nettyHttpClient = new Netty4HttpClient()) { + try (Netty4HttpClient nettyHttpClient = Netty4HttpClient.http()) { Collection responses = nettyHttpClient.put(transportAddress.address(), requestUris); try { assertThat(responses, hasSize(requestUris.size())); diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4PipeliningIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4PipeliningIT.java index 2bd1fa07f8afc..96193b0ecb954 100644 --- a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4PipeliningIT.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4PipeliningIT.java @@ -61,7 +61,7 @@ public void testThatNettyHttpServerSupportsPipelining() throws Exception { TransportAddress[] boundAddresses = httpServerTransport.boundAddress().boundAddresses(); TransportAddress transportAddress = randomFrom(boundAddresses); - try (Netty4HttpClient nettyHttpClient = new Netty4HttpClient()) { + try (Netty4HttpClient nettyHttpClient = Netty4HttpClient.http()) { Collection responses = nettyHttpClient.get(transportAddress.address(), requests); try { assertThat(responses, hasSize(5)); diff --git a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpChannel.java b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpChannel.java index 66d60032d11a8..2dd7aaf41986f 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpChannel.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpChannel.java @@ -33,7 +33,10 @@ package org.opensearch.http.netty4; import io.netty.channel.Channel; +import io.netty.channel.ChannelPipeline; + import org.opensearch.action.ActionListener; +import org.opensearch.common.Nullable; import org.opensearch.common.concurrent.CompletableContext; import org.opensearch.http.HttpChannel; import org.opensearch.http.HttpResponse; @@ -45,9 +48,15 @@ public class Netty4HttpChannel implements HttpChannel { private final Channel channel; private final CompletableContext closeContext = new CompletableContext<>(); + private final ChannelPipeline inboundPipeline; Netty4HttpChannel(Channel channel) { + this(channel, null); + } + + Netty4HttpChannel(Channel channel, ChannelPipeline inboundPipeline) { this.channel = channel; + this.inboundPipeline = inboundPipeline; Netty4TcpChannel.addListener(this.channel.closeFuture(), closeContext); } @@ -81,6 +90,10 @@ public void close() { channel.close(); } + public @Nullable ChannelPipeline inboundPipeline() { + return inboundPipeline; + } + public Channel getNettyChannel() { return channel; } diff --git a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java index decab45ffca38..1e0a4d89f2fd5 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java @@ -40,18 +40,36 @@ import io.netty.channel.ChannelInboundHandlerAdapter; import io.netty.channel.ChannelInitializer; import io.netty.channel.ChannelOption; +import io.netty.channel.ChannelPipeline; import io.netty.channel.FixedRecvByteBufAllocator; import io.netty.channel.RecvByteBufAllocator; +import 
io.netty.channel.SimpleChannelInboundHandler; import io.netty.channel.socket.nio.NioChannelOption; import io.netty.handler.codec.ByteToMessageDecoder; import io.netty.handler.codec.http.HttpContentCompressor; import io.netty.handler.codec.http.HttpContentDecompressor; +import io.netty.handler.codec.http.HttpMessage; import io.netty.handler.codec.http.HttpObjectAggregator; import io.netty.handler.codec.http.HttpRequestDecoder; import io.netty.handler.codec.http.HttpResponseEncoder; +import io.netty.handler.codec.http.HttpServerCodec; +import io.netty.handler.codec.http.HttpServerUpgradeHandler; +import io.netty.handler.codec.http.HttpServerUpgradeHandler.UpgradeCodec; +import io.netty.handler.codec.http.HttpServerUpgradeHandler.UpgradeCodecFactory; +import io.netty.handler.codec.http2.CleartextHttp2ServerUpgradeHandler; +import io.netty.handler.codec.http2.Http2CodecUtil; +import io.netty.handler.codec.http2.Http2FrameCodecBuilder; +import io.netty.handler.codec.http2.Http2MultiplexHandler; +import io.netty.handler.codec.http2.Http2ServerUpgradeCodec; +import io.netty.handler.codec.http2.Http2StreamFrameToHttpObjectCodec; +import io.netty.handler.logging.LogLevel; +import io.netty.handler.logging.LoggingHandler; import io.netty.handler.timeout.ReadTimeoutException; import io.netty.handler.timeout.ReadTimeoutHandler; +import io.netty.util.AsciiString; import io.netty.util.AttributeKey; +import io.netty.util.ReferenceCountUtil; + import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.ExceptionsHelper; @@ -335,38 +353,152 @@ protected HttpChannelHandler(final Netty4HttpServerTransport transport, final Ht this.responseCreator = new Netty4HttpResponseCreator(); } + public ChannelHandler getRequestHandler() { + return requestHandler; + } + @Override protected void initChannel(Channel ch) throws Exception { Netty4HttpChannel nettyHttpChannel = new Netty4HttpChannel(ch); ch.attr(HTTP_CHANNEL_KEY).set(nettyHttpChannel); ch.pipeline().addLast("byte_buf_sizer", byteBufSizer); ch.pipeline().addLast("read_timeout", new ReadTimeoutHandler(transport.readTimeoutMillis, TimeUnit.MILLISECONDS)); + + configurePipeline(ch); + transport.serverAcceptedChannel(nettyHttpChannel); + } + + @Override + public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { + ExceptionsHelper.maybeDieOnAnotherThread(cause); + super.exceptionCaught(ctx, cause); + } + + protected void configurePipeline(Channel ch) { + final UpgradeCodecFactory upgradeCodecFactory = new UpgradeCodecFactory() { + @Override + public UpgradeCodec newUpgradeCodec(CharSequence protocol) { + if (AsciiString.contentEquals(Http2CodecUtil.HTTP_UPGRADE_PROTOCOL_NAME, protocol)) { + return new Http2ServerUpgradeCodec( + Http2FrameCodecBuilder.forServer().build(), + new Http2MultiplexHandler(createHttp2ChannelInitializer(ch.pipeline())) + ); + } else { + return null; + } + } + }; + + final HttpServerCodec sourceCodec = new HttpServerCodec( + handlingSettings.getMaxInitialLineLength(), + handlingSettings.getMaxHeaderSize(), + handlingSettings.getMaxChunkSize() + ); + + final HttpServerUpgradeHandler upgradeHandler = new HttpServerUpgradeHandler(sourceCodec, upgradeCodecFactory); + final CleartextHttp2ServerUpgradeHandler cleartextUpgradeHandler = new CleartextHttp2ServerUpgradeHandler( + sourceCodec, + upgradeHandler, + createHttp2ChannelInitializerPriorKnowledge() + ); + + ch.pipeline().addLast(cleartextUpgradeHandler).addLast(new SimpleChannelInboundHandler() { + @Override + 
protected void channelRead0(ChannelHandlerContext ctx, HttpMessage msg) throws Exception { + final HttpObjectAggregator aggregator = new HttpObjectAggregator(handlingSettings.getMaxContentLength()); + aggregator.setMaxCumulationBufferComponents(transport.maxCompositeBufferComponents); + + // If this handler is hit then no upgrade has been attempted and the client is just talking HTTP + final ChannelPipeline pipeline = ctx.pipeline(); + pipeline.addAfter(ctx.name(), "handler", getRequestHandler()); + pipeline.replace(this, "aggregator", aggregator); + + ch.pipeline().addLast("decoder_compress", new HttpContentDecompressor()); + ch.pipeline().addLast("encoder", new HttpResponseEncoder()); + if (handlingSettings.isCompression()) { + ch.pipeline() + .addAfter("aggregator", "encoder_compress", new HttpContentCompressor(handlingSettings.getCompressionLevel())); + } + ch.pipeline().addBefore("handler", "request_creator", requestCreator); + ch.pipeline().addBefore("handler", "response_creator", responseCreator); + ch.pipeline() + .addBefore("handler", "pipelining", new Netty4HttpPipeliningHandler(logger, transport.pipeliningMaxEvents)); + + ctx.fireChannelRead(ReferenceCountUtil.retain(msg)); + } + }); + } + + protected void configureDefaultHttpPipeline(ChannelPipeline pipeline) { final HttpRequestDecoder decoder = new HttpRequestDecoder( handlingSettings.getMaxInitialLineLength(), handlingSettings.getMaxHeaderSize(), handlingSettings.getMaxChunkSize() ); decoder.setCumulator(ByteToMessageDecoder.COMPOSITE_CUMULATOR); - ch.pipeline().addLast("decoder", decoder); - ch.pipeline().addLast("decoder_compress", new HttpContentDecompressor()); - ch.pipeline().addLast("encoder", new HttpResponseEncoder()); + pipeline.addLast("decoder", decoder); + pipeline.addLast("decoder_compress", new HttpContentDecompressor()); + pipeline.addLast("encoder", new HttpResponseEncoder()); final HttpObjectAggregator aggregator = new HttpObjectAggregator(handlingSettings.getMaxContentLength()); aggregator.setMaxCumulationBufferComponents(transport.maxCompositeBufferComponents); - ch.pipeline().addLast("aggregator", aggregator); + pipeline.addLast("aggregator", aggregator); if (handlingSettings.isCompression()) { - ch.pipeline().addLast("encoder_compress", new HttpContentCompressor(handlingSettings.getCompressionLevel())); + pipeline.addLast("encoder_compress", new HttpContentCompressor(handlingSettings.getCompressionLevel())); } - ch.pipeline().addLast("request_creator", requestCreator); - ch.pipeline().addLast("response_creator", responseCreator); - ch.pipeline().addLast("pipelining", new Netty4HttpPipeliningHandler(logger, transport.pipeliningMaxEvents)); - ch.pipeline().addLast("handler", requestHandler); - transport.serverAcceptedChannel(nettyHttpChannel); + pipeline.addLast("request_creator", requestCreator); + pipeline.addLast("response_creator", responseCreator); + pipeline.addLast("pipelining", new Netty4HttpPipeliningHandler(logger, transport.pipeliningMaxEvents)); + pipeline.addLast("handler", requestHandler); } - @Override - public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { - ExceptionsHelper.maybeDieOnAnotherThread(cause); - super.exceptionCaught(ctx, cause); + protected void configureDefaultHttp2Pipeline(ChannelPipeline pipeline) { + pipeline.addLast(Http2FrameCodecBuilder.forServer().build()) + .addLast(new Http2MultiplexHandler(createHttp2ChannelInitializer(pipeline))); + } + + private ChannelInitializer createHttp2ChannelInitializerPriorKnowledge() { + return new 
ChannelInitializer<Channel>() { + @Override + protected void initChannel(Channel childChannel) throws Exception { + configureDefaultHttp2Pipeline(childChannel.pipeline()); + } + }; + } + + /** + * Http2MultiplexHandler creates a new pipeline; we preserve the old one in case some handlers need to be + * accessed (for example, the opensearch-security plugin accesses the SSL handlers). + */ + private ChannelInitializer<Channel> createHttp2ChannelInitializer(ChannelPipeline inboundPipeline) { + return new ChannelInitializer<Channel>() { + @Override + protected void initChannel(Channel childChannel) throws Exception { + final Netty4HttpChannel nettyHttpChannel = new Netty4HttpChannel(childChannel, inboundPipeline); + childChannel.attr(HTTP_CHANNEL_KEY).set(nettyHttpChannel); + + final HttpObjectAggregator aggregator = new HttpObjectAggregator(handlingSettings.getMaxContentLength()); + aggregator.setMaxCumulationBufferComponents(transport.maxCompositeBufferComponents); + + childChannel.pipeline() + .addLast(new LoggingHandler(LogLevel.DEBUG)) + .addLast(new Http2StreamFrameToHttpObjectCodec(true)) + .addLast("byte_buf_sizer", byteBufSizer) + .addLast("read_timeout", new ReadTimeoutHandler(transport.readTimeoutMillis, TimeUnit.MILLISECONDS)) + .addLast("decoder_decompress", new HttpContentDecompressor()); + + if (handlingSettings.isCompression()) { + childChannel.pipeline() + .addLast("encoder_compress", new HttpContentCompressor(handlingSettings.getCompressionLevel())); + } + + childChannel.pipeline() + .addLast("aggregator", aggregator) + .addLast("request_creator", requestCreator) + .addLast("response_creator", responseCreator) + .addLast("pipelining", new Netty4HttpPipeliningHandler(logger, transport.pipeliningMaxEvents)) + .addLast("handler", getRequestHandler()); + } + }; } } diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4BadRequestTests.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4BadRequestTests.java index a0100930c7dcb..c18fe6efc4736 100644 --- a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4BadRequestTests.java +++ b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4BadRequestTests.java @@ -117,7 +117,7 @@ public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext, httpServerTransport.start(); final TransportAddress transportAddress = randomFrom(httpServerTransport.boundAddress().boundAddresses()); - try (Netty4HttpClient nettyHttpClient = new Netty4HttpClient()) { + try (Netty4HttpClient nettyHttpClient = Netty4HttpClient.http()) { final Collection responses = nettyHttpClient.get( transportAddress.address(), "/_cluster/settings?pretty=%" diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpClient.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpClient.java index 57f95a022a33f..6fdd698c117f2 100644 --- a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpClient.java +++ b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpClient.java @@ -37,14 +37,19 @@ import io.netty.buffer.Unpooled; import io.netty.channel.ChannelFuture; import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInboundHandler; +import io.netty.channel.ChannelInboundHandlerAdapter; import io.netty.channel.ChannelInitializer; import io.netty.channel.ChannelOption; +import io.netty.channel.ChannelPromise; import io.netty.channel.SimpleChannelInboundHandler; import 
io.netty.channel.nio.NioEventLoopGroup; import io.netty.channel.socket.SocketChannel; import io.netty.handler.codec.http.DefaultFullHttpRequest; import io.netty.handler.codec.http.FullHttpRequest; import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpClientCodec; +import io.netty.handler.codec.http.HttpClientUpgradeHandler; import io.netty.handler.codec.http.HttpContentDecompressor; import io.netty.handler.codec.http.HttpHeaderNames; import io.netty.handler.codec.http.HttpMethod; @@ -55,6 +60,17 @@ import io.netty.handler.codec.http.HttpResponse; import io.netty.handler.codec.http.HttpResponseDecoder; import io.netty.handler.codec.http.HttpVersion; +import io.netty.handler.codec.http2.DefaultHttp2Connection; +import io.netty.handler.codec.http2.DelegatingDecompressorFrameListener; +import io.netty.handler.codec.http2.Http2ClientUpgradeCodec; +import io.netty.handler.codec.http2.Http2Connection; +import io.netty.handler.codec.http2.Http2Settings; +import io.netty.handler.codec.http2.HttpConversionUtil; +import io.netty.handler.codec.http2.HttpToHttp2ConnectionHandler; +import io.netty.handler.codec.http2.HttpToHttp2ConnectionHandlerBuilder; +import io.netty.handler.codec.http2.InboundHttp2ToHttpAdapterBuilder; +import io.netty.util.AttributeKey; + import org.opensearch.common.collect.Tuple; import org.opensearch.common.unit.ByteSizeUnit; import org.opensearch.common.unit.ByteSizeValue; @@ -70,6 +86,7 @@ import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.function.BiFunction; import static io.netty.handler.codec.http.HttpHeaderNames.HOST; import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1; @@ -97,11 +114,32 @@ static Collection returnOpaqueIds(Collection responses } private final Bootstrap clientBootstrap; + private final BiFunction, AwaitableChannelInitializer> handlerFactory; + + Netty4HttpClient( + Bootstrap clientBootstrap, + BiFunction, AwaitableChannelInitializer> handlerFactory + ) { + this.clientBootstrap = clientBootstrap; + this.handlerFactory = handlerFactory; + } + + static Netty4HttpClient http() { + return new Netty4HttpClient( + new Bootstrap().channel(NettyAllocator.getChannelType()) + .option(ChannelOption.ALLOCATOR, NettyAllocator.getAllocator()) + .group(new NioEventLoopGroup(1)), + CountDownLatchHandlerHttp::new + ); + } - Netty4HttpClient() { - clientBootstrap = new Bootstrap().channel(NettyAllocator.getChannelType()) - .option(ChannelOption.ALLOCATOR, NettyAllocator.getAllocator()) - .group(new NioEventLoopGroup(1)); + static Netty4HttpClient http2() { + return new Netty4HttpClient( + new Bootstrap().channel(NettyAllocator.getChannelType()) + .option(ChannelOption.ALLOCATOR, NettyAllocator.getAllocator()) + .group(new NioEventLoopGroup(1)), + CountDownLatchHandlerHttp2::new + ); } public List get(SocketAddress remoteAddress, String... uris) throws InterruptedException { @@ -110,6 +148,7 @@ public List get(SocketAddress remoteAddress, String... 
uris) t final HttpRequest httpRequest = new DefaultFullHttpRequest(HTTP_1_1, HttpMethod.GET, uris[i]); httpRequest.headers().add(HOST, "localhost"); httpRequest.headers().add("X-Opaque-ID", String.valueOf(i)); + httpRequest.headers().add(HttpConversionUtil.ExtensionHeaderNames.SCHEME.text(), "http"); requests.add(httpRequest); } return sendRequests(remoteAddress, requests); @@ -143,6 +182,7 @@ private List processRequestsWithBody( request.headers().add(HttpHeaderNames.HOST, "localhost"); request.headers().add(HttpHeaderNames.CONTENT_LENGTH, content.readableBytes()); request.headers().add(HttpHeaderNames.CONTENT_TYPE, "application/json"); + request.headers().add(HttpConversionUtil.ExtensionHeaderNames.SCHEME.text(), "http"); requests.add(request); } return sendRequests(remoteAddress, requests); @@ -153,12 +193,14 @@ private synchronized List sendRequests(final SocketAddress rem final CountDownLatch latch = new CountDownLatch(requests.size()); final List content = Collections.synchronizedList(new ArrayList<>(requests.size())); - clientBootstrap.handler(new CountDownLatchHandler(latch, content)); + final AwaitableChannelInitializer handler = handlerFactory.apply(latch, content); + clientBootstrap.handler(handler); ChannelFuture channelFuture = null; try { channelFuture = clientBootstrap.connect(remoteAddress); channelFuture.sync(); + handler.await(); for (HttpRequest request : requests) { channelFuture.channel().writeAndFlush(request); @@ -184,12 +226,12 @@ public void close() { /** * helper factory which adds returned data to a list and uses a count down latch to decide when done */ - private static class CountDownLatchHandler extends ChannelInitializer { + private static class CountDownLatchHandlerHttp extends AwaitableChannelInitializer { private final CountDownLatch latch; private final Collection content; - CountDownLatchHandler(final CountDownLatch latch, final Collection content) { + CountDownLatchHandlerHttp(final CountDownLatch latch, final Collection content) { this.latch = latch; this.content = content; } @@ -222,4 +264,145 @@ public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws E } + /** + * The channel initializer with the ability to await for initialization to be completed + * + */ + private static abstract class AwaitableChannelInitializer extends ChannelInitializer { + void await() { + // do nothing + } + } + + /** + * helper factory which adds returned data to a list and uses a count down latch to decide when done + */ + private static class CountDownLatchHandlerHttp2 extends AwaitableChannelInitializer { + + private final CountDownLatch latch; + private final Collection content; + private Http2SettingsHandler settingsHandler; + + CountDownLatchHandlerHttp2(final CountDownLatch latch, final Collection content) { + this.latch = latch; + this.content = content; + } + + @Override + protected void initChannel(SocketChannel ch) { + final int maxContentLength = new ByteSizeValue(100, ByteSizeUnit.MB).bytesAsInt(); + final Http2Connection connection = new DefaultHttp2Connection(false); + settingsHandler = new Http2SettingsHandler(ch.newPromise()); + + final ChannelInboundHandler responseHandler = new SimpleChannelInboundHandler() { + @Override + protected void channelRead0(ChannelHandlerContext ctx, HttpObject msg) { + final FullHttpResponse response = (FullHttpResponse) msg; + + // this is upgrade request, skipping it over + if (Boolean.TRUE.equals(ctx.channel().attr(AttributeKey.valueOf("upgrade")).getAndRemove())) { + return; + } + + // We copy the 
buffer manually to avoid a huge allocation on a pooled allocator. We have + // a test that tracks huge allocations, so we want to avoid them in this test code. + ByteBuf newContent = Unpooled.copiedBuffer(((FullHttpResponse) msg).content()); + content.add(response.replace(newContent)); + latch.countDown(); + } + + @Override + public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { + super.exceptionCaught(ctx, cause); + latch.countDown(); + } + }; + + final HttpToHttp2ConnectionHandler connectionHandler = new HttpToHttp2ConnectionHandlerBuilder().connection(connection) + .frameListener( + new DelegatingDecompressorFrameListener( + connection, + new InboundHttp2ToHttpAdapterBuilder(connection).maxContentLength(maxContentLength).propagateSettings(true).build() + ) + ) + .build(); + + final HttpClientCodec sourceCodec = new HttpClientCodec(); + final Http2ClientUpgradeCodec upgradeCodec = new Http2ClientUpgradeCodec(connectionHandler); + final HttpClientUpgradeHandler upgradeHandler = new HttpClientUpgradeHandler(sourceCodec, upgradeCodec, maxContentLength); + + ch.pipeline().addLast(sourceCodec); + ch.pipeline().addLast(upgradeHandler); + ch.pipeline().addLast(new HttpContentDecompressor()); + ch.pipeline().addLast(new UpgradeRequestHandler(settingsHandler, responseHandler)); + } + + @Override + void await() { + try { + // Await for HTTP/2 settings being sent over before moving on to sending the requests + settingsHandler.awaitSettings(5, TimeUnit.SECONDS); + } catch (final Exception ex) { + throw new RuntimeException(ex); + } + } + } + + /** + * A handler that triggers the cleartext upgrade to HTTP/2 (h2c) by sending an + * initial HTTP request. + */ + private static class UpgradeRequestHandler extends ChannelInboundHandlerAdapter { + private final ChannelInboundHandler settingsHandler; + private final ChannelInboundHandler responseHandler; + + UpgradeRequestHandler(final ChannelInboundHandler settingsHandler, final ChannelInboundHandler responseHandler) { + this.settingsHandler = settingsHandler; + this.responseHandler = responseHandler; + } + + @Override + public void channelActive(ChannelHandlerContext ctx) throws Exception { + // The first request is HTTP/2 protocol upgrade (since we support only h2c there) + final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); + request.headers().add(HttpHeaderNames.HOST, "localhost"); + request.headers().add(HttpConversionUtil.ExtensionHeaderNames.SCHEME.text(), "http"); + + ctx.channel().attr(AttributeKey.newInstance("upgrade")).set(true); + ctx.writeAndFlush(request); + ctx.fireChannelActive(); + + ctx.pipeline().remove(this); + ctx.pipeline().addLast(settingsHandler); + ctx.pipeline().addLast(responseHandler); + } + } + + private static class Http2SettingsHandler extends SimpleChannelInboundHandler { + private ChannelPromise promise; + + Http2SettingsHandler(ChannelPromise promise) { + this.promise = promise; + } + + /** + * Wait for this handler to be added after the upgrade to HTTP/2, and for initial preface + * handshake to complete. 
+ */ + void awaitSettings(long timeout, TimeUnit unit) throws Exception { + if (!promise.awaitUninterruptibly(timeout, unit)) { + throw new IllegalStateException("Timed out waiting for HTTP/2 settings"); + } + if (!promise.isSuccess()) { + throw new RuntimeException(promise.cause()); + } + } + + @Override + protected void channelRead0(ChannelHandlerContext ctx, Http2Settings msg) throws Exception { + promise.setSuccess(); + ctx.pipeline().remove(this); + } + } + } diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerPipeliningTests.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerPipeliningTests.java index 029aed1f3cc89..cda66b8d828fa 100644 --- a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerPipeliningTests.java +++ b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerPipeliningTests.java @@ -109,7 +109,7 @@ public void testThatHttpPipeliningWorks() throws Exception { } } - try (Netty4HttpClient nettyHttpClient = new Netty4HttpClient()) { + try (Netty4HttpClient nettyHttpClient = Netty4HttpClient.http()) { Collection responses = nettyHttpClient.get(transportAddress.address(), requests.toArray(new String[] {})); try { Collection responseBodies = Netty4HttpClient.returnHttpResponseBodies(responses); @@ -163,9 +163,12 @@ private class CustomHttpChannelHandler extends Netty4HttpServerTransport.HttpCha @Override protected void initChannel(Channel ch) throws Exception { super.initChannel(ch); - ch.pipeline().replace("handler", "handler", new PossiblySlowUpstreamHandler(executorService)); } + @Override + public ChannelHandler getRequestHandler() { + return new PossiblySlowUpstreamHandler(executorService); + } } class PossiblySlowUpstreamHandler extends SimpleChannelInboundHandler { diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerTransportTests.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerTransportTests.java index ec879e538fe20..eb96f14f10c70 100644 --- a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerTransportTests.java +++ b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerTransportTests.java @@ -202,7 +202,7 @@ public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext, ) { transport.start(); final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); - try (Netty4HttpClient client = new Netty4HttpClient()) { + try (Netty4HttpClient client = Netty4HttpClient.http()) { final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/"); request.headers().set(HttpHeaderNames.EXPECT, expectation); HttpUtil.setContentLength(request, contentLength); @@ -322,7 +322,7 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th transport.start(); final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); - try (Netty4HttpClient client = new Netty4HttpClient()) { + try (Netty4HttpClient client = Netty4HttpClient.http()) { final String url = "/" + new String(new byte[maxInitialLineLength], Charset.forName("UTF-8")); final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, url); @@ -384,7 +384,7 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th transport.start(); final 
TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); - try (Netty4HttpClient client = new Netty4HttpClient()) { + try (Netty4HttpClient client = Netty4HttpClient.http()) { DefaultFullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, url); request.headers().add(HttpHeaderNames.ACCEPT_ENCODING, randomFrom("deflate", "gzip")); long numOfHugeAllocations = getHugeAllocationCount(); @@ -454,7 +454,7 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); // Test pre-flight request - try (Netty4HttpClient client = new Netty4HttpClient()) { + try (Netty4HttpClient client = Netty4HttpClient.http()) { final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.OPTIONS, "/"); request.headers().add(CorsHandler.ORIGIN, "test-cors.org"); request.headers().add(CorsHandler.ACCESS_CONTROL_REQUEST_METHOD, "POST"); @@ -471,7 +471,7 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th } // Test short-circuited request - try (Netty4HttpClient client = new Netty4HttpClient()) { + try (Netty4HttpClient client = Netty4HttpClient.http()) { final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); request.headers().add(CorsHandler.ORIGIN, "google.com"); diff --git a/plugins/analysis-phonetic/licenses/commons-codec-1.13.jar.sha1 b/plugins/analysis-phonetic/licenses/commons-codec-1.13.jar.sha1 deleted file mode 100644 index 66b72c414d63a..0000000000000 --- a/plugins/analysis-phonetic/licenses/commons-codec-1.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3f18e1aa31031d89db6f01ba05d501258ce69d2c \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/commons-codec-1.15.jar.sha1 b/plugins/analysis-phonetic/licenses/commons-codec-1.15.jar.sha1 new file mode 100644 index 0000000000000..62d99837b87e1 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/commons-codec-1.15.jar.sha1 @@ -0,0 +1 @@ +49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/commons-codec-1.13.jar.sha1 b/plugins/discovery-azure-classic/licenses/commons-codec-1.13.jar.sha1 deleted file mode 100644 index 66b72c414d63a..0000000000000 --- a/plugins/discovery-azure-classic/licenses/commons-codec-1.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3f18e1aa31031d89db6f01ba05d501258ce69d2c \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/commons-codec-1.15.jar.sha1 b/plugins/discovery-azure-classic/licenses/commons-codec-1.15.jar.sha1 new file mode 100644 index 0000000000000..62d99837b87e1 --- /dev/null +++ b/plugins/discovery-azure-classic/licenses/commons-codec-1.15.jar.sha1 @@ -0,0 +1 @@ +49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/httpcore-4.4.12.jar.sha1 b/plugins/discovery-azure-classic/licenses/httpcore-4.4.12.jar.sha1 deleted file mode 100644 index 3c046171b30da..0000000000000 --- a/plugins/discovery-azure-classic/licenses/httpcore-4.4.12.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -21ebaf6d532bc350ba95bd81938fa5f0e511c132 \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/httpcore-4.4.15.jar.sha1 b/plugins/discovery-azure-classic/licenses/httpcore-4.4.15.jar.sha1 new file mode 100644 index 0000000000000..42a03b5d7a376 --- /dev/null +++ 
b/plugins/discovery-azure-classic/licenses/httpcore-4.4.15.jar.sha1 @@ -0,0 +1 @@ +7f2e0c573eaa7a74bac2e89b359e1f73d92a0a1d \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/commons-codec-1.13.jar.sha1 b/plugins/discovery-ec2/licenses/commons-codec-1.13.jar.sha1 deleted file mode 100644 index 66b72c414d63a..0000000000000 --- a/plugins/discovery-ec2/licenses/commons-codec-1.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3f18e1aa31031d89db6f01ba05d501258ce69d2c \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/commons-codec-1.15.jar.sha1 b/plugins/discovery-ec2/licenses/commons-codec-1.15.jar.sha1 new file mode 100644 index 0000000000000..62d99837b87e1 --- /dev/null +++ b/plugins/discovery-ec2/licenses/commons-codec-1.15.jar.sha1 @@ -0,0 +1 @@ +49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/httpcore-4.4.12.jar.sha1 b/plugins/discovery-ec2/licenses/httpcore-4.4.12.jar.sha1 deleted file mode 100644 index 3c046171b30da..0000000000000 --- a/plugins/discovery-ec2/licenses/httpcore-4.4.12.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -21ebaf6d532bc350ba95bd81938fa5f0e511c132 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/httpcore-4.4.15.jar.sha1 b/plugins/discovery-ec2/licenses/httpcore-4.4.15.jar.sha1 new file mode 100644 index 0000000000000..42a03b5d7a376 --- /dev/null +++ b/plugins/discovery-ec2/licenses/httpcore-4.4.15.jar.sha1 @@ -0,0 +1 @@ +7f2e0c573eaa7a74bac2e89b359e1f73d92a0a1d \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/commons-codec-1.13.jar.sha1 b/plugins/discovery-gce/licenses/commons-codec-1.13.jar.sha1 deleted file mode 100644 index 66b72c414d63a..0000000000000 --- a/plugins/discovery-gce/licenses/commons-codec-1.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3f18e1aa31031d89db6f01ba05d501258ce69d2c \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/commons-codec-1.15.jar.sha1 b/plugins/discovery-gce/licenses/commons-codec-1.15.jar.sha1 new file mode 100644 index 0000000000000..62d99837b87e1 --- /dev/null +++ b/plugins/discovery-gce/licenses/commons-codec-1.15.jar.sha1 @@ -0,0 +1 @@ +49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/httpcore-4.4.12.jar.sha1 b/plugins/discovery-gce/licenses/httpcore-4.4.12.jar.sha1 deleted file mode 100644 index 3c046171b30da..0000000000000 --- a/plugins/discovery-gce/licenses/httpcore-4.4.12.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -21ebaf6d532bc350ba95bd81938fa5f0e511c132 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/httpcore-4.4.15.jar.sha1 b/plugins/discovery-gce/licenses/httpcore-4.4.15.jar.sha1 new file mode 100644 index 0000000000000..42a03b5d7a376 --- /dev/null +++ b/plugins/discovery-gce/licenses/httpcore-4.4.15.jar.sha1 @@ -0,0 +1 @@ +7f2e0c573eaa7a74bac2e89b359e1f73d92a0a1d \ No newline at end of file diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index 86694b9bc9da7..af9485c991f0c 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -79,7 +79,7 @@ dependencies { api "org.apache.poi:poi:${versions.poi}" api "org.apache.poi:poi-ooxml-lite:${versions.poi}" api "commons-codec:commons-codec:${versions.commonscodec}" - api 'org.apache.xmlbeans:xmlbeans:5.1.0' + api 'org.apache.xmlbeans:xmlbeans:5.1.1' api 'org.apache.commons:commons-collections4:4.4' // MS Office api 
"org.apache.poi:poi-scratchpad:${versions.poi}" diff --git a/plugins/ingest-attachment/licenses/commons-codec-1.13.jar.sha1 b/plugins/ingest-attachment/licenses/commons-codec-1.13.jar.sha1 deleted file mode 100644 index 66b72c414d63a..0000000000000 --- a/plugins/ingest-attachment/licenses/commons-codec-1.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3f18e1aa31031d89db6f01ba05d501258ce69d2c \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/commons-codec-1.15.jar.sha1 b/plugins/ingest-attachment/licenses/commons-codec-1.15.jar.sha1 new file mode 100644 index 0000000000000..62d99837b87e1 --- /dev/null +++ b/plugins/ingest-attachment/licenses/commons-codec-1.15.jar.sha1 @@ -0,0 +1 @@ +49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/slf4j-api-1.6.2.jar.sha1 b/plugins/ingest-attachment/licenses/slf4j-api-1.6.2.jar.sha1 deleted file mode 100644 index a2f93ea55802b..0000000000000 --- a/plugins/ingest-attachment/licenses/slf4j-api-1.6.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8619e95939167fb37245b5670135e4feb0ec7d50 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/slf4j-api-1.7.36.jar.sha1 b/plugins/ingest-attachment/licenses/slf4j-api-1.7.36.jar.sha1 new file mode 100644 index 0000000000000..77b9917528382 --- /dev/null +++ b/plugins/ingest-attachment/licenses/slf4j-api-1.7.36.jar.sha1 @@ -0,0 +1 @@ +6c62681a2f655b49963a5983b8b0950a6120ae14 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/xmlbeans-5.1.0.jar.sha1 b/plugins/ingest-attachment/licenses/xmlbeans-5.1.0.jar.sha1 deleted file mode 100644 index 85f757b61048c..0000000000000 --- a/plugins/ingest-attachment/licenses/xmlbeans-5.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3534ab896663e6f6d8a2cf46882d7407641d7a31 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/xmlbeans-5.1.1.jar.sha1 b/plugins/ingest-attachment/licenses/xmlbeans-5.1.1.jar.sha1 new file mode 100644 index 0000000000000..4d1d2ad0807e7 --- /dev/null +++ b/plugins/ingest-attachment/licenses/xmlbeans-5.1.1.jar.sha1 @@ -0,0 +1 @@ +48a369df0eccb509d46203104e4df9cb00f0f68b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.79.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.79.Final.jar.sha1 deleted file mode 100644 index a1753b194ea31..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-dns-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6c19c46f9529791964f636c93cfaca0556f0d5d0 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.80.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.80.Final.jar.sha1 new file mode 100644 index 0000000000000..2dab7f40b02b7 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-dns-4.1.80.Final.jar.sha1 @@ -0,0 +1 @@ +6926d2ea779f41071ecb1948d880dfbb3a6ee126 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.79.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.79.Final.jar.sha1 deleted file mode 100644 index f2989024cfce1..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-http2-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0eeffab0cd5efb699d5e4ab9b694d32fef6694b3 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.80.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.80.Final.jar.sha1 
new file mode 100644 index 0000000000000..d96a286b98493 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-http2-4.1.80.Final.jar.sha1 @@ -0,0 +1 @@ +bf7b66834188ef1a6f6095291c6b81a1880798ba \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.79.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.79.Final.jar.sha1 deleted file mode 100644 index 913f0e7685c86..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-socks-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -794a5937cdb1871c4ae350610752dec2929dc1d6 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.80.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.80.Final.jar.sha1 new file mode 100644 index 0000000000000..625344e6cfb0a --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-socks-4.1.80.Final.jar.sha1 @@ -0,0 +1 @@ +00025b767be3425f3b31a34ee095c85619169f17 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.79.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.79.Final.jar.sha1 deleted file mode 100644 index dbb072f3f665f..0000000000000 --- a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -054aace8683de7893cf28d4aab72cd60f49b5700 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.80.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.80.Final.jar.sha1 new file mode 100644 index 0000000000000..c3184ec5ff7d3 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.80.Final.jar.sha1 @@ -0,0 +1 @@ +9b3b42ff805723fb98120f5ab2019c53e71da91b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.79.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.79.Final.jar.sha1 deleted file mode 100644 index a5d1be00d9c29..0000000000000 --- a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8eb9be9b6a66a03f5f4df67fe559cb676493d167 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.80.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.80.Final.jar.sha1 new file mode 100644 index 0000000000000..bb6a3502a729f --- /dev/null +++ b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.80.Final.jar.sha1 @@ -0,0 +1 @@ +6b1602f80b6235b0b7d53bc5e9c1a6cd11c1b804 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1 deleted file mode 100644 index 7f984663dfa85..0000000000000 --- a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -731937caec938b77b39df932a8da8aaca8d5ec05 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.80.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.80.Final.jar.sha1 new file mode 100644 index 0000000000000..998e6e8560724 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.80.Final.jar.sha1 @@ -0,0 +1 @@ +da3d7da1a8d317ae2c82b400fd255fe610c43ebe \ No newline at end of file diff 
--git a/plugins/repository-azure/licenses/slf4j-api-1.6.2.jar.sha1 b/plugins/repository-azure/licenses/slf4j-api-1.6.2.jar.sha1 deleted file mode 100644 index a2f93ea55802b..0000000000000 --- a/plugins/repository-azure/licenses/slf4j-api-1.6.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8619e95939167fb37245b5670135e4feb0ec7d50 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/slf4j-api-1.7.36.jar.sha1 b/plugins/repository-azure/licenses/slf4j-api-1.7.36.jar.sha1 new file mode 100644 index 0000000000000..77b9917528382 --- /dev/null +++ b/plugins/repository-azure/licenses/slf4j-api-1.7.36.jar.sha1 @@ -0,0 +1 @@ +6c62681a2f655b49963a5983b8b0950a6120ae14 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/commons-codec-1.13.jar.sha1 b/plugins/repository-gcs/licenses/commons-codec-1.13.jar.sha1 deleted file mode 100644 index 66b72c414d63a..0000000000000 --- a/plugins/repository-gcs/licenses/commons-codec-1.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3f18e1aa31031d89db6f01ba05d501258ce69d2c \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/commons-codec-1.15.jar.sha1 b/plugins/repository-gcs/licenses/commons-codec-1.15.jar.sha1 new file mode 100644 index 0000000000000..62d99837b87e1 --- /dev/null +++ b/plugins/repository-gcs/licenses/commons-codec-1.15.jar.sha1 @@ -0,0 +1 @@ +49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-codec-1.13.jar.sha1 b/plugins/repository-hdfs/licenses/commons-codec-1.13.jar.sha1 deleted file mode 100644 index 66b72c414d63a..0000000000000 --- a/plugins/repository-hdfs/licenses/commons-codec-1.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3f18e1aa31031d89db6f01ba05d501258ce69d2c \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-codec-1.15.jar.sha1 b/plugins/repository-hdfs/licenses/commons-codec-1.15.jar.sha1 new file mode 100644 index 0000000000000..62d99837b87e1 --- /dev/null +++ b/plugins/repository-hdfs/licenses/commons-codec-1.15.jar.sha1 @@ -0,0 +1 @@ +49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.79.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.79.Final.jar.sha1 deleted file mode 100644 index 724950db96f09..0000000000000 --- a/plugins/repository-hdfs/licenses/netty-all-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1c53cffaa14d61de523b167377843e35807292a7 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.80.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.80.Final.jar.sha1 new file mode 100644 index 0000000000000..ae6eb1d85f1ea --- /dev/null +++ b/plugins/repository-hdfs/licenses/netty-all-4.1.80.Final.jar.sha1 @@ -0,0 +1 @@ +39e73b76a3ec65df731b371179e15f2c3e4e7575 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/slf4j-api-1.6.2.jar.sha1 b/plugins/repository-hdfs/licenses/slf4j-api-1.6.2.jar.sha1 deleted file mode 100644 index a2f93ea55802b..0000000000000 --- a/plugins/repository-hdfs/licenses/slf4j-api-1.6.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8619e95939167fb37245b5670135e4feb0ec7d50 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/slf4j-api-1.7.36.jar.sha1 b/plugins/repository-hdfs/licenses/slf4j-api-1.7.36.jar.sha1 new file mode 100644 index 0000000000000..77b9917528382 --- /dev/null +++ b/plugins/repository-hdfs/licenses/slf4j-api-1.7.36.jar.sha1 @@ -0,0 +1 @@ 
+6c62681a2f655b49963a5983b8b0950a6120ae14 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/commons-codec-1.13.jar.sha1 b/plugins/repository-s3/licenses/commons-codec-1.13.jar.sha1 deleted file mode 100644 index 66b72c414d63a..0000000000000 --- a/plugins/repository-s3/licenses/commons-codec-1.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3f18e1aa31031d89db6f01ba05d501258ce69d2c \ No newline at end of file diff --git a/plugins/repository-s3/licenses/commons-codec-1.15.jar.sha1 b/plugins/repository-s3/licenses/commons-codec-1.15.jar.sha1 new file mode 100644 index 0000000000000..62d99837b87e1 --- /dev/null +++ b/plugins/repository-s3/licenses/commons-codec-1.15.jar.sha1 @@ -0,0 +1 @@ +49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git a/plugins/repository-s3/licenses/httpcore-4.4.12.jar.sha1 b/plugins/repository-s3/licenses/httpcore-4.4.12.jar.sha1 deleted file mode 100644 index 3c046171b30da..0000000000000 --- a/plugins/repository-s3/licenses/httpcore-4.4.12.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -21ebaf6d532bc350ba95bd81938fa5f0e511c132 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/httpcore-4.4.15.jar.sha1 b/plugins/repository-s3/licenses/httpcore-4.4.15.jar.sha1 new file mode 100644 index 0000000000000..42a03b5d7a376 --- /dev/null +++ b/plugins/repository-s3/licenses/httpcore-4.4.15.jar.sha1 @@ -0,0 +1 @@ +7f2e0c573eaa7a74bac2e89b359e1f73d92a0a1d \ No newline at end of file diff --git a/plugins/transport-nio/build.gradle b/plugins/transport-nio/build.gradle index a7e8c42a4e2d3..c5b401de60c8c 100644 --- a/plugins/transport-nio/build.gradle +++ b/plugins/transport-nio/build.gradle @@ -83,6 +83,12 @@ thirdPartyAudit { 'org.bouncycastle.cert.X509v3CertificateBuilder', 'org.bouncycastle.cert.jcajce.JcaX509CertificateConverter', 'org.bouncycastle.operator.jcajce.JcaContentSignerBuilder', + 'org.bouncycastle.openssl.PEMEncryptedKeyPair', + 'org.bouncycastle.openssl.PEMParser', + 'org.bouncycastle.openssl.jcajce.JcaPEMKeyConverter', + 'org.bouncycastle.openssl.jcajce.JceOpenSSLPKCS8DecryptorProviderBuilder', + 'org.bouncycastle.openssl.jcajce.JcePEMDecryptorProviderBuilder', + 'org.bouncycastle.pkcs.PKCS8EncryptedPrivateKeyInfo', // from io.netty.handler.ssl.JettyNpnSslEngine (netty) 'org.eclipse.jetty.npn.NextProtoNego$ClientProvider', diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.79.Final.jar.sha1 deleted file mode 100644 index 8e9e4d0b7f754..0000000000000 --- a/plugins/transport-nio/licenses/netty-buffer-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6c014412b599489b1db27c6bc08d8a46da94e397 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.80.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.80.Final.jar.sha1 new file mode 100644 index 0000000000000..471fe8b211df2 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-buffer-4.1.80.Final.jar.sha1 @@ -0,0 +1 @@ +a087321a63d9991e25f7b7d24ef53edcbcb954ff \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.79.Final.jar.sha1 deleted file mode 100644 index c0920231d79a8..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -18f5b02af7ca611978bc28f2cb58cbb3b9b0f0ef \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.80.Final.jar.sha1 
b/plugins/transport-nio/licenses/netty-codec-4.1.80.Final.jar.sha1 new file mode 100644 index 0000000000000..0f8e3bebe1532 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-4.1.80.Final.jar.sha1 @@ -0,0 +1 @@ +4941821a158d16311665d8606aefa610ecf0f64c \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.79.Final.jar.sha1 deleted file mode 100644 index a3f650da5abbd..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-http-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -882c70bc0a30a98bf3ce477f043e967ac026044c \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.80.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.80.Final.jar.sha1 new file mode 100644 index 0000000000000..d18720d164335 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-http-4.1.80.Final.jar.sha1 @@ -0,0 +1 @@ +efb23f9d5187d2f733595ef7930137f0cb2cec48 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.79.Final.jar.sha1 deleted file mode 100644 index faa7b099406a3..0000000000000 --- a/plugins/transport-nio/licenses/netty-common-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2814bd465731355323aba0fdd22163bfce638a75 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.80.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.80.Final.jar.sha1 new file mode 100644 index 0000000000000..d256e77b7024c --- /dev/null +++ b/plugins/transport-nio/licenses/netty-common-4.1.80.Final.jar.sha1 @@ -0,0 +1 @@ +3d43ce22863bc590e4e33fbdabbb58dc05f4c43d \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.79.Final.jar.sha1 deleted file mode 100644 index 8e314f164da69..0000000000000 --- a/plugins/transport-nio/licenses/netty-handler-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2dc22423c8ed19906615fb936a5fcb7db14a4e6c \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.80.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.80.Final.jar.sha1 new file mode 100644 index 0000000000000..022ad6bc93dba --- /dev/null +++ b/plugins/transport-nio/licenses/netty-handler-4.1.80.Final.jar.sha1 @@ -0,0 +1 @@ +cf7029d2f9bc4eeae8ff15af7a528d06b518a017 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.79.Final.jar.sha1 deleted file mode 100644 index af550935bb911..0000000000000 --- a/plugins/transport-nio/licenses/netty-resolver-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -55ecb1ff4464b56564a90824a741c3911264aaa4 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.80.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.80.Final.jar.sha1 new file mode 100644 index 0000000000000..ad0f71b569377 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-resolver-4.1.80.Final.jar.sha1 @@ -0,0 +1 @@ +3bbb0d4bfbbab867e5b757b97a6e5e0d1348d94c \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.79.Final.jar.sha1 deleted file mode 100644 index c6e18efb3ad3d..0000000000000 --- 
a/plugins/transport-nio/licenses/netty-transport-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6cc2b49749b4fbcc39c687027e04e65e857552a9 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.80.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.80.Final.jar.sha1 new file mode 100644 index 0000000000000..2bfb4f377d89b --- /dev/null +++ b/plugins/transport-nio/licenses/netty-transport-4.1.80.Final.jar.sha1 @@ -0,0 +1 @@ +57fcace7a1b8567aa39921c915d1b1ba78fd4d2d \ No newline at end of file diff --git a/release-notes/opensearch.release-notes-1.3.5.md b/release-notes/opensearch.release-notes-1.3.5.md new file mode 100644 index 0000000000000..fbf866bb6e112 --- /dev/null +++ b/release-notes/opensearch.release-notes-1.3.5.md @@ -0,0 +1,9 @@ +## 2022-08-30 Version 1.3.5 Release Notes + +### Upgrades +* OpenJDK Update (July 2022 Patch releases) ([#4097](https://github.com/opensearch-project/OpenSearch/pull/4097)) +* Update Netty to 4.1.79.Final ([#3868](https://github.com/opensearch-project/OpenSearch/pull/3868)) + +### Bug Fixes +* OpenSearch crashes on closed client connection before search reply when total ops higher compared to expected ([#4143](https://github.com/opensearch-project/OpenSearch/pull/4143)) +* gradle check failing with java heap OutOfMemoryError ([#4150](https://github.com/opensearch-project/OpenSearch/pull/4150)) diff --git a/release-notes/opensearch.release-notes-2.2.1.md b/release-notes/opensearch.release-notes-2.2.1.md new file mode 100644 index 0000000000000..974ff8e09a426 --- /dev/null +++ b/release-notes/opensearch.release-notes-2.2.1.md @@ -0,0 +1,7 @@ +## 2022-08-30 Version 2.2.1 Release Notes + +### Upgrades +* Update Gradle to 7.5.1 ([#4211](https://github.com/opensearch-project/OpenSearch/pull/4211)) + +### Bug Fixes +* gradle check failing with java heap OutOfMemoryError ([#4150](https://github.com/opensearch-project/OpenSearch/pull/4150)) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/create_pit.json b/rest-api-spec/src/main/resources/rest-api-spec/api/create_pit.json new file mode 100644 index 0000000000000..d3a2104c01bc0 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/create_pit.json @@ -0,0 +1,44 @@ + +{ + "create_pit":{ + "documentation":{ + "url":"https://opensearch.org/docs/latest/opensearch/rest-api/point_in_time/", + "description":"Creates point in time context." 
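As a quick illustration of the create PIT endpoint this spec introduces (its path and parameter definitions continue directly below), here is a minimal client-side sketch using the JDK HttpClient. The host, port and index name are placeholders and the query parameters are the ones declared in the spec's params section; this is an assumption-laden sketch, not part of any OpenSearch client API.

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class CreatePitSketch {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        // POST /{index}/_search/point_in_time, per the spec; "my-index", host and port are hypothetical.
        HttpRequest createPit = HttpRequest.newBuilder()
            .uri(URI.create("http://localhost:9200/my-index/_search/point_in_time?keep_alive=1h&allow_partial_pit_creation=true"))
            .POST(HttpRequest.BodyPublishers.noBody())
            .build();
        HttpResponse<String> response = client.send(createPit, HttpResponse.BodyHandlers.ofString());
        // The response body carries the "pit_id" used by subsequent searches and by delete_pit.
        System.out.println(response.statusCode() + " " + response.body());
    }
}
```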
+ }, + "stability":"stable", + "url":{ + "paths":[ + { + "path":"/{index}/_search/point_in_time", + "methods":[ + "POST" + ], + "parts":{ + "index":{ + "type":"list", + "description":"A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices" + } + } + } + ] + }, + "params":{ + "allow_partial_pit_creation":{ + "type":"boolean", + "description":"Allow if point in time can be created with partial failures" + }, + "keep_alive":{ + "type":"string", + "description":"Specify the keep alive for point in time" + }, + "preference":{ + "type":"string", + "description":"Specify the node or shard the operation should be performed on (default: random)" + }, + "routing":{ + "type":"list", + "description":"A comma-separated list of specific routing values" + } + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_all_pits.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_all_pits.json new file mode 100644 index 0000000000000..5ff01aa746df9 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_all_pits.json @@ -0,0 +1,19 @@ +{ + "delete_all_pits":{ + "documentation":{ + "url":"https://opensearch.org/docs/latest/opensearch/rest-api/point_in_time/", + "description":"Deletes all active point in time searches." + }, + "stability":"stable", + "url":{ + "paths":[ + { + "path":"/_search/point_in_time/_all", + "methods":[ + "DELETE" + ] + } + ] + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_pit.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_pit.json new file mode 100644 index 0000000000000..b54d9f76204f4 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_pit.json @@ -0,0 +1,23 @@ +{ + "delete_pit":{ + "documentation":{ + "url":"https://opensearch.org/docs/latest/opensearch/rest-api/point_in_time/", + "description":"Deletes one or more point in time searches based on the IDs passed." 
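The delete_pit endpoint whose description appears just above (its path and body definitions continue below) can be exercised the same way. A short sketch, reusing the hypothetical client and host from the previous example; the PIT ID value is a placeholder for the ID returned by create_pit.

```java
// DELETE /_search/point_in_time with a JSON body listing the PIT IDs to clear.
// The "pit_id" field name matches the request body used by the YAML test further below.
HttpRequest deletePit = HttpRequest.newBuilder()
    .uri(URI.create("http://localhost:9200/_search/point_in_time"))
    .header("Content-Type", "application/json")
    .method("DELETE", HttpRequest.BodyPublishers.ofString("{\"pit_id\": [\"<pit id from create_pit>\"]}"))
    .build();
HttpResponse<String> deleted = client.send(deletePit, HttpResponse.BodyHandlers.ofString());
// Expect a "pits" array whose entries carry "pit_id" and a "successful" flag.
System.out.println(deleted.body());
```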
+ }, + "stability":"stable", + "url":{ + "paths":[ + { + "path":"/_search/point_in_time", + "methods":[ + "DELETE" + ] + } + ] + }, + "body":{ + "description":"A comma-separated list of pit IDs to clear", + "required":true + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/pit/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/pit/10_basic.yml new file mode 100644 index 0000000000000..2023bcc8f5c87 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/pit/10_basic.yml @@ -0,0 +1,130 @@ +"Create PIT, Search with PIT ID and Delete": + - skip: + version: " - 2.9.99" + reason: "mode to be introduced later than 3.0" + - do: + indices.create: + index: test_pit + - do: + index: + index: test_pit + id: 42 + body: { foo: 1 } + + - do: + index: + index: test_pit + id: 43 + body: { foo: 2 } + + - do: + indices.refresh: {} + + - do: + create_pit: + allow_partial_pit_creation: true + index: test_pit + keep_alive: 23h + + - set: {pit_id: pit_id} + - match: { _shards.failed: 0} + - do: + search: + rest_total_hits_as_int: true + size: 1 + sort: foo + body: + query: + match_all: {} + pit: {"id": "$pit_id"} + + - match: {hits.total: 2 } + - length: {hits.hits: 1 } + - match: {hits.hits.0._id: "42" } + + - do: + index: + index: test_pit + id: 44 + body: { foo: 3 } + + - do: + indices.refresh: {} + + - do: + search: + rest_total_hits_as_int: true + size: 1 + sort: foo + body: + query: + match_all: {} + pit: {"id": "$pit_id", "keep_alive":"10m"} + + - match: {hits.total: 2 } + - length: {hits.hits: 1 } + - match: {hits.hits.0._id: "42" } + + + - do: + search: + rest_total_hits_as_int: true + index: test_pit + size: 1 + sort: foo + body: + query: + match_all: {} + + - match: {hits.total: 3 } + - length: {hits.hits: 1 } + + - do: + delete_pit: + body: + "pit_id": [$pit_id] + + - match: {pits.0.pit_id: $pit_id} + - match: {pits.0.successful: true } + +--- +"Delete all": + - skip: + version: " - 2.9.99" + reason: "mode to be introduced later than 3.0" + - do: + indices.create: + index: test_pit + - do: + index: + index: test_pit + id: 42 + body: { foo: 1 } + + - do: + index: + index: test_pit + id: 43 + body: { foo: 2 } + + - do: + indices.refresh: {} + + - do: + create_pit: + allow_partial_pit_creation: true + index: test_pit + keep_alive: 23h + + - set: {pit_id: pit_id} + - match: { _shards.failed: 0} + + - do: + delete_all_pits: {} + + - match: {pits.0.pit_id: $pit_id} + - match: {pits.0.successful: true } + + - do: + catch: missing + delete_all_pits: { } diff --git a/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java b/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java index ee2067c591cef..960e17b76acb5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java @@ -40,6 +40,7 @@ import org.apache.lucene.store.FSDirectory; import org.apache.lucene.util.BytesRef; +import org.hamcrest.MatcherAssert; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.cluster.node.stats.NodeStats; import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; @@ -48,6 +49,7 @@ import org.opensearch.action.admin.indices.shards.IndicesShardStoresResponse; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; +import 
org.opensearch.action.support.replication.TransportReplicationAction; import org.opensearch.client.Requests; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.health.ClusterHealthStatus; @@ -108,6 +110,7 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; import static org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest.Metric.FS; import static org.opensearch.common.util.CollectionUtils.iterableAsArrayList; @@ -698,6 +701,104 @@ public void testReplicaCorruption() throws Exception { ensureGreen(TimeValue.timeValueSeconds(60)); } + public void testPrimaryCorruptionDuringReplicationDoesNotFailReplicaShard() throws Exception { + internalCluster().ensureAtLeastNumDataNodes(2); + final NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats().get(); + final List dataNodeStats = nodeStats.getNodes() + .stream() + .filter(stat -> stat.getNode().isDataNode()) + .collect(Collectors.toUnmodifiableList()); + MatcherAssert.assertThat(dataNodeStats.size(), greaterThanOrEqualTo(2)); + + final NodeStats primaryNode = dataNodeStats.get(0); + final NodeStats replicaNode = dataNodeStats.get(1); + assertAcked( + prepareCreate("test").setSettings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0") + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put("index.routing.allocation.include._name", primaryNode.getNode().getName()) + .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE) + .put("index.allocation.max_retries", Integer.MAX_VALUE) // keep on retrying + + ) + ); + ensureGreen(); + + // Add custom send behavior between primary and replica that will + // count down a latch to indicate that a replication operation is + // currently in flight, and then block on a second latch that will + // be released once the primary shard has been corrupted. + final CountDownLatch indexingInFlight = new CountDownLatch(1); + final CountDownLatch corruptionHasHappened = new CountDownLatch(1); + final MockTransportService mockTransportService = ((MockTransportService) internalCluster().getInstance( + TransportService.class, + primaryNode.getNode().getName() + )); + mockTransportService.addSendBehavior( + internalCluster().getInstance(TransportService.class, replicaNode.getNode().getName()), + (connection, requestId, action, request, options) -> { + if (request instanceof TransportReplicationAction.ConcreteShardRequest) { + indexingInFlight.countDown(); + try { + corruptionHasHappened.await(); + } catch (InterruptedException e) { + logger.info("Interrupted while waiting for corruption"); + } + } + connection.sendRequest(requestId, action, request, options); + } + ); + + // Configure the modified data node as a replica + final Settings build = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "1") + .put("index.routing.allocation.include._name", primaryNode.getNode().getName() + "," + replicaNode.getNode().getName()) + .build(); + client().admin().indices().prepareUpdateSettings("test").setSettings(build).get(); + client().admin().cluster().prepareReroute().get(); + ensureGreen(); + + // Create a snapshot repository. This repo is used to take a snapshot after + // corrupting a file, which causes the node to notice the corrupt data and + // close the shard. 
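The new test hinges on a two-latch handshake: the mocked send behavior counts down indexingInFlight to signal that a replication request is in flight, then blocks on corruptionHasHappened until the test has corrupted the primary (the snapshot-repository call the comment above refers to follows right after this aside). Below is a stripped-down, standalone sketch of that coordination pattern using plain JDK classes and hypothetical thread roles; it is an illustration, not OpenSearch code.

```java
import java.util.concurrent.CountDownLatch;

public class TwoLatchHandshakeSketch {
    public static void main(String[] args) throws InterruptedException {
        CountDownLatch indexingInFlight = new CountDownLatch(1);
        CountDownLatch corruptionHasHappened = new CountDownLatch(1);

        // Stands in for the intercepted primary-to-replica send behavior.
        Thread replicationRequest = new Thread(() -> {
            indexingInFlight.countDown();      // signal: a replication request is now in flight
            try {
                corruptionHasHappened.await(); // hold the request until corruption has happened
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
            System.out.println("request forwarded to replica");
        });
        replicationRequest.start();

        // Stands in for the test thread.
        indexingInFlight.await();              // wait until the request is blocked in flight
        System.out.println("corrupting the primary while replication is paused");
        corruptionHasHappened.countDown();     // release the blocked request
        replicationRequest.join();
    }
}
```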
+ assertAcked( + client().admin() + .cluster() + .preparePutRepository("test-repo") + .setType("fs") + .setSettings( + Settings.builder() + .put("location", randomRepoPath().toAbsolutePath()) + .put("compress", randomBoolean()) + .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + ) + ); + + client().prepareIndex("test").setSource("field", "value").execute(); + indexingInFlight.await(); + + // Corrupt a file on the primary then take a snapshot. Snapshot should + // finish in the PARTIAL state since the corrupted file will cause a checksum + // validation failure. + final ShardRouting corruptedShardRouting = corruptRandomPrimaryFile(); + logger.info("--> {} corrupted", corruptedShardRouting); + final CreateSnapshotResponse createSnapshotResponse = client().admin() + .cluster() + .prepareCreateSnapshot("test-repo", "test-snap") + .setWaitForCompletion(true) + .setIndices("test") + .get(); + final SnapshotState snapshotState = createSnapshotResponse.getSnapshotInfo().state(); + MatcherAssert.assertThat("Expect file corruption to cause PARTIAL snapshot state", snapshotState, equalTo(SnapshotState.PARTIAL)); + + // Unblock the blocked indexing thread now that corruption on the primary has been confirmed + corruptionHasHappened.countDown(); + + // Assert the cluster returns to green status because the replica will be promoted to primary + ensureGreen(); + } + private int numShards(String... index) { ClusterState state = client().admin().cluster().prepareState().get().getState(); GroupShardsIterator shardIterators = state.getRoutingTable().activePrimaryShardsGrouped(index, false); diff --git a/server/src/main/java/org/opensearch/OpenSearchException.java b/server/src/main/java/org/opensearch/OpenSearchException.java index 97eeb3b3a68b0..a311e4a0d9c14 100644 --- a/server/src/main/java/org/opensearch/OpenSearchException.java +++ b/server/src/main/java/org/opensearch/OpenSearchException.java @@ -68,6 +68,7 @@ import static java.util.Collections.singletonMap; import static java.util.Collections.unmodifiableMap; import static org.opensearch.Version.V_2_1_0; +import static org.opensearch.Version.V_3_0_0; import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_UUID_NA_VALUE; import static org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken; import static org.opensearch.common.xcontent.XContentParserUtils.ensureFieldName; @@ -1602,13 +1603,19 @@ private enum OpenSearchExceptionHandle { 161, V_2_1_0 ), + PRIMARY_SHARD_CLOSED_EXCEPTION( + org.opensearch.index.shard.PrimaryShardClosedException.class, + org.opensearch.index.shard.PrimaryShardClosedException::new, + 162, + V_3_0_0 + ), /** * TODO: Change the version number of check as per version in which this change will be merged. 
*/ MASTER_TASK_THROTTLED_EXCEPTION( org.opensearch.cluster.service.MasterTaskThrottlingException.class, org.opensearch.cluster.service.MasterTaskThrottlingException::new, - 162, + 163, Version.V_3_0_0 ); diff --git a/server/src/main/java/org/opensearch/Version.java b/server/src/main/java/org/opensearch/Version.java index ba512d3fbcdd9..10e5f16419a7a 100644 --- a/server/src/main/java/org/opensearch/Version.java +++ b/server/src/main/java/org/opensearch/Version.java @@ -96,6 +96,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_2_1_1 = new Version(2010199, org.apache.lucene.util.Version.LUCENE_9_2_0); public static final Version V_2_2_0 = new Version(2020099, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version V_2_2_1 = new Version(2020199, org.apache.lucene.util.Version.LUCENE_9_3_0); + public static final Version V_2_2_2 = new Version(2020299, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version V_2_3_0 = new Version(2030099, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_4_0); public static final Version CURRENT = V_3_0_0; diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index 797c5c38fada6..74be544123d9f 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -240,12 +240,14 @@ import org.opensearch.action.search.DeletePitAction; import org.opensearch.action.search.GetAllPitsAction; import org.opensearch.action.search.MultiSearchAction; +import org.opensearch.action.search.NodesGetAllPitsAction; import org.opensearch.action.search.SearchAction; import org.opensearch.action.search.SearchScrollAction; import org.opensearch.action.search.TransportClearScrollAction; import org.opensearch.action.search.TransportCreatePitAction; import org.opensearch.action.search.TransportDeletePitAction; import org.opensearch.action.search.TransportGetAllPitsAction; +import org.opensearch.action.search.TransportNodesGetAllPitsAction; import org.opensearch.action.search.TransportMultiSearchAction; import org.opensearch.action.search.TransportSearchAction; import org.opensearch.action.search.TransportSearchScrollAction; @@ -408,6 +410,8 @@ import org.opensearch.rest.action.ingest.RestSimulatePipelineAction; import org.opensearch.rest.action.search.RestClearScrollAction; import org.opensearch.rest.action.search.RestCountAction; +import org.opensearch.rest.action.search.RestCreatePitAction; +import org.opensearch.rest.action.search.RestDeletePitAction; import org.opensearch.rest.action.search.RestExplainAction; import org.opensearch.rest.action.search.RestMultiSearchAction; import org.opensearch.rest.action.search.RestSearchAction; @@ -674,6 +678,7 @@ public void reg actions.register(GetAllPitsAction.INSTANCE, TransportGetAllPitsAction.class); actions.register(DeletePitAction.INSTANCE, TransportDeletePitAction.class); actions.register(PitSegmentsAction.INSTANCE, TransportPitSegmentsAction.class); + actions.register(NodesGetAllPitsAction.INSTANCE, TransportNodesGetAllPitsAction.class); // Remote Store actions.register(RestoreRemoteStoreAction.INSTANCE, TransportRestoreRemoteStoreAction.class); @@ -849,6 +854,11 @@ public void initRestHandlers(Supplier nodesInCluster) { registerHandler.accept(new RestRepositoriesAction()); registerHandler.accept(new 
RestSnapshotAction()); registerHandler.accept(new RestTemplatesAction()); + + // Point in time API + registerHandler.accept(new RestCreatePitAction()); + registerHandler.accept(new RestDeletePitAction()); + for (ActionPlugin plugin : actionPlugins) { for (RestHandler handler : plugin.getRestHandlers( settings, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequest.java index ee0b204c77aa3..9a7fae9f84a98 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequest.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.indices.get; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.clustermanager.info.ClusterInfoRequest; +import org.opensearch.action.support.master.info.ClusterInfoRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.util.ArrayUtils; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequestBuilder.java index ed106c44ea36a..3019191e5570e 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequestBuilder.java @@ -32,7 +32,7 @@ package org.opensearch.action.admin.indices.get; -import org.opensearch.action.support.clustermanager.info.ClusterInfoRequestBuilder; +import org.opensearch.action.support.master.info.ClusterInfoRequestBuilder; import org.opensearch.client.OpenSearchClient; /** diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequest.java index 1fd9323edd2f8..2c9bec8398b66 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequest.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.indices.mapping.get; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.clustermanager.info.ClusterInfoRequest; +import org.opensearch.action.support.master.info.ClusterInfoRequest; import org.opensearch.common.io.stream.StreamInput; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java index 0a6d7cac79133..85bf8c2ffd9c6 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java @@ -32,7 +32,7 @@ package org.opensearch.action.admin.indices.mapping.get; -import org.opensearch.action.support.clustermanager.info.ClusterInfoRequestBuilder; +import org.opensearch.action.support.master.info.ClusterInfoRequestBuilder; import org.opensearch.client.OpenSearchClient; /** diff --git a/server/src/main/java/org/opensearch/action/search/DeletePitInfo.java 
b/server/src/main/java/org/opensearch/action/search/DeletePitInfo.java index 943199812771a..5a167c5a6f160 100644 --- a/server/src/main/java/org/opensearch/action/search/DeletePitInfo.java +++ b/server/src/main/java/org/opensearch/action/search/DeletePitInfo.java @@ -65,11 +65,11 @@ public void writeTo(StreamOutput out) throws IOException { static { PARSER.declareBoolean(constructorArg(), new ParseField("successful")); - PARSER.declareString(constructorArg(), new ParseField("pitId")); + PARSER.declareString(constructorArg(), new ParseField("pit_id")); } private static final ParseField SUCCESSFUL = new ParseField("successful"); - private static final ParseField PIT_ID = new ParseField("pitId"); + private static final ParseField PIT_ID = new ParseField("pit_id"); @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { diff --git a/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java b/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java index 945fcfd17eb6c..926e9c19a33f5 100644 --- a/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java +++ b/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java @@ -48,6 +48,11 @@ public DeletePitRequest(List pitIds) { this.pitIds.addAll(pitIds); } + public void clearAndSetPitIds(List pitIds) { + this.pitIds.clear(); + this.pitIds.addAll(pitIds); + } + public DeletePitRequest() {} public List getPitIds() { diff --git a/server/src/main/java/org/opensearch/action/search/GetAllPitNodesRequest.java b/server/src/main/java/org/opensearch/action/search/GetAllPitNodesRequest.java index b4ad2f6641087..340f9b842adbf 100644 --- a/server/src/main/java/org/opensearch/action/search/GetAllPitNodesRequest.java +++ b/server/src/main/java/org/opensearch/action/search/GetAllPitNodesRequest.java @@ -21,11 +21,22 @@ */ public class GetAllPitNodesRequest extends BaseNodesRequest { + // Security plugin intercepts and sets the response with permitted PIT contexts + private GetAllPitNodesResponse getAllPitNodesResponse; + @Inject public GetAllPitNodesRequest(DiscoveryNode... 
concreteNodes) { super(concreteNodes); } + public void setGetAllPitNodesResponse(GetAllPitNodesResponse getAllPitNodesResponse) { + this.getAllPitNodesResponse = getAllPitNodesResponse; + } + + public GetAllPitNodesResponse getGetAllPitNodesResponse() { + return getAllPitNodesResponse; + } + public GetAllPitNodesRequest(StreamInput in) throws IOException { super(in); } diff --git a/server/src/main/java/org/opensearch/action/search/GetAllPitNodesResponse.java b/server/src/main/java/org/opensearch/action/search/GetAllPitNodesResponse.java index 4a454e7145eff..091447798cf5f 100644 --- a/server/src/main/java/org/opensearch/action/search/GetAllPitNodesResponse.java +++ b/server/src/main/java/org/opensearch/action/search/GetAllPitNodesResponse.java @@ -52,6 +52,14 @@ public GetAllPitNodesResponse( ); } + /** + * Copy constructor that explicitly sets the list pit infos + */ + public GetAllPitNodesResponse(List listPitInfos, GetAllPitNodesResponse response) { + super(response.getClusterName(), response.getNodes(), response.failures()); + pitInfos.addAll(listPitInfos); + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); diff --git a/server/src/main/java/org/opensearch/action/search/NodesGetAllPitsAction.java b/server/src/main/java/org/opensearch/action/search/NodesGetAllPitsAction.java new file mode 100644 index 0000000000000..af41f7d49551c --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/NodesGetAllPitsAction.java @@ -0,0 +1,23 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.action.ActionType; + +/** + * Action type for retrieving all PIT reader contexts from nodes + */ +public class NodesGetAllPitsAction extends ActionType { + public static final NodesGetAllPitsAction INSTANCE = new NodesGetAllPitsAction(); + public static final String NAME = "cluster:admin/point_in_time/read_from_nodes"; + + private NodesGetAllPitsAction() { + super(NAME, GetAllPitNodesResponse::new); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/PitService.java b/server/src/main/java/org/opensearch/action/search/PitService.java index 0b79b77fd6014..ff068397ad94e 100644 --- a/server/src/main/java/org/opensearch/action/search/PitService.java +++ b/server/src/main/java/org/opensearch/action/search/PitService.java @@ -15,6 +15,7 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.StepListener; import org.opensearch.action.support.GroupedActionListener; +import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Strings; @@ -47,12 +48,19 @@ public class PitService { private final ClusterService clusterService; private final SearchTransportService searchTransportService; private final TransportService transportService; + private final NodeClient nodeClient; @Inject - public PitService(ClusterService clusterService, SearchTransportService searchTransportService, TransportService transportService) { + public PitService( + ClusterService clusterService, + SearchTransportService searchTransportService, + TransportService transportService, + NodeClient nodeClient + ) { this.clusterService = clusterService; this.searchTransportService = 
searchTransportService; this.transportService = transportService; + this.nodeClient = nodeClient; } /** @@ -144,6 +152,17 @@ public void onFailure(final Exception e) { }, size); } + /** + * This method returns the indices associated with each PIT + */ + public Map getIndicesForPits(List pitIds) { + Map pitToIndicesMap = new HashMap<>(); + for (String pitId : pitIds) { + pitToIndicesMap.put(pitId, SearchContextId.decode(nodeClient.getNamedWriteableRegistry(), pitId).getActualIndices()); + } + return pitToIndicesMap; + } + /** * Get all active point in time contexts */ @@ -156,7 +175,7 @@ public void getAllPits(ActionListener getAllPitsListener DiscoveryNode[] disNodesArr = nodes.toArray(new DiscoveryNode[nodes.size()]); transportService.sendRequest( transportService.getLocalNode(), - GetAllPitsAction.NAME, + NodesGetAllPitsAction.NAME, new GetAllPitNodesRequest(disNodesArr), new TransportResponseHandler() { diff --git a/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java b/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java index f9e36c479dd54..19abe2361290d 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java @@ -57,7 +57,11 @@ public TransportDeletePitAction( @Override protected void doExecute(Task task, DeletePitRequest request, ActionListener listener) { List pitIds = request.getPitIds(); - if (pitIds.size() == 1 && "_all".equals(pitIds.get(0))) { + // when the security plugin intercepts the request, the PIT IDs in the request will be empty if no PITs exist in the cluster; + // in that case, return an empty response + if (pitIds.isEmpty()) { + listener.onResponse(new DeletePitResponse(new ArrayList<>())); + } else if (pitIds.size() == 1 && "_all".equals(pitIds.get(0))) { deleteAllPits(listener); } else { deletePits(listener, request); diff --git a/server/src/main/java/org/opensearch/action/search/TransportGetAllPitsAction.java b/server/src/main/java/org/opensearch/action/search/TransportGetAllPitsAction.java index 21a64e388fa7b..c8529c5b02bd4 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportGetAllPitsAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportGetAllPitsAction.java @@ -8,79 +8,31 @@ package org.opensearch.action.search; -import org.opensearch.action.FailedNodeException; +import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.nodes.TransportNodesAction; -import org.opensearch.cluster.service.ClusterService; +import org.opensearch.action.support.HandledTransportAction; import org.opensearch.common.inject.Inject; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.search.SearchService; -import org.opensearch.threadpool.ThreadPool; +import org.opensearch.tasks.Task; import org.opensearch.transport.TransportService; -import java.io.IOException; -import java.util.List; - /** - * Transport action to get all active PIT contexts across all nodes + * Transport action to get all active PIT contexts across the cluster */ -public class TransportGetAllPitsAction extends TransportNodesAction< - GetAllPitNodesRequest, - GetAllPitNodesResponse, - GetAllPitNodeRequest, - GetAllPitNodeResponse> { - private final SearchService searchService; +public class TransportGetAllPitsAction extends HandledTransportAction { + private final PitService pitService; @Inject - public 
TransportGetAllPitsAction( - ThreadPool threadPool, - ClusterService clusterService, - TransportService transportService, - ActionFilters actionFilters, - SearchService searchService - ) { - super( - GetAllPitsAction.NAME, - threadPool, - clusterService, - transportService, - actionFilters, - GetAllPitNodesRequest::new, - GetAllPitNodeRequest::new, - ThreadPool.Names.SAME, - GetAllPitNodeResponse.class - ); - this.searchService = searchService; - } - - @Override - protected GetAllPitNodesResponse newResponse( - GetAllPitNodesRequest request, - List getAllPitNodeRespons, - List failures - ) { - return new GetAllPitNodesResponse(clusterService.getClusterName(), getAllPitNodeRespons, failures); - } - - @Override - protected GetAllPitNodeRequest newNodeRequest(GetAllPitNodesRequest request) { - return new GetAllPitNodeRequest(); - } - - @Override - protected GetAllPitNodeResponse newNodeResponse(StreamInput in) throws IOException { - return new GetAllPitNodeResponse(in); + public TransportGetAllPitsAction(ActionFilters actionFilters, TransportService transportService, PitService pitService) { + super(GetAllPitsAction.NAME, transportService, actionFilters, in -> new GetAllPitNodesRequest(in)); + this.pitService = pitService; } - /** - * This retrieves all active PITs in the node - */ - @Override - protected GetAllPitNodeResponse nodeOperation(GetAllPitNodeRequest request) { - GetAllPitNodeResponse nodeResponse = new GetAllPitNodeResponse( - transportService.getLocalNode(), - searchService.getAllPITReaderContexts() - ); - return nodeResponse; + protected void doExecute(Task task, GetAllPitNodesRequest request, ActionListener listener) { + // If security plugin intercepts the request, it'll replace all PIT IDs with permitted PIT IDs + if (request.getGetAllPitNodesResponse() != null) { + listener.onResponse(request.getGetAllPitNodesResponse()); + } else { + pitService.getAllPits(listener); + } } } diff --git a/server/src/main/java/org/opensearch/action/search/TransportNodesGetAllPitsAction.java b/server/src/main/java/org/opensearch/action/search/TransportNodesGetAllPitsAction.java new file mode 100644 index 0000000000000..520830cd293f0 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/TransportNodesGetAllPitsAction.java @@ -0,0 +1,86 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.search; + +import org.opensearch.action.FailedNodeException; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.nodes.TransportNodesAction; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.search.SearchService; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.List; + +/** + * Transport action to get all active PIT contexts across all nodes + */ +public class TransportNodesGetAllPitsAction extends TransportNodesAction< + GetAllPitNodesRequest, + GetAllPitNodesResponse, + GetAllPitNodeRequest, + GetAllPitNodeResponse> { + private final SearchService searchService; + + @Inject + public TransportNodesGetAllPitsAction( + ThreadPool threadPool, + ClusterService clusterService, + TransportService transportService, + ActionFilters actionFilters, + SearchService searchService + ) { + super( + NodesGetAllPitsAction.NAME, + threadPool, + clusterService, + transportService, + actionFilters, + GetAllPitNodesRequest::new, + GetAllPitNodeRequest::new, + ThreadPool.Names.SAME, + GetAllPitNodeResponse.class + ); + this.searchService = searchService; + } + + @Override + protected GetAllPitNodesResponse newResponse( + GetAllPitNodesRequest request, + List getAllPitNodeRespons, + List failures + ) { + return new GetAllPitNodesResponse(clusterService.getClusterName(), getAllPitNodeRespons, failures); + } + + @Override + protected GetAllPitNodeRequest newNodeRequest(GetAllPitNodesRequest request) { + return new GetAllPitNodeRequest(); + } + + @Override + protected GetAllPitNodeResponse newNodeResponse(StreamInput in) throws IOException { + return new GetAllPitNodeResponse(in); + } + + /** + * This retrieves all active PITs in the node + */ + @Override + protected GetAllPitNodeResponse nodeOperation(GetAllPitNodeRequest request) { + GetAllPitNodeResponse nodeResponse = new GetAllPitNodeResponse( + transportService.getLocalNode(), + searchService.getAllPITReaderContexts() + ); + return nodeResponse; + } +} diff --git a/server/src/main/java/org/opensearch/action/support/replication/PendingReplicationActions.java b/server/src/main/java/org/opensearch/action/support/replication/PendingReplicationActions.java index b305c4c8c83a7..7087b64758888 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/PendingReplicationActions.java +++ b/server/src/main/java/org/opensearch/action/support/replication/PendingReplicationActions.java @@ -35,6 +35,7 @@ import org.opensearch.action.support.RetryableAction; import org.opensearch.common.lease.Releasable; import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.index.shard.PrimaryShardClosedException; import org.opensearch.index.shard.IndexShardClosedException; import org.opensearch.index.shard.ReplicationGroup; import org.opensearch.index.shard.ShardId; @@ -45,6 +46,7 @@ import java.util.Map; import java.util.Set; import java.util.function.Consumer; +import java.util.function.Supplier; /** * Pending Replication Actions @@ -121,7 +123,7 @@ synchronized void acceptNewTrackedAllocationIds(Set trackedAllocationIds } } - cancelActions(toCancel, "Replica left ReplicationGroup"); + cancelActions(toCancel, () -> new IndexShardClosedException(shardId, "Replica left ReplicationGroup")); } @Override @@ -129,15 +131,11 @@ public 
synchronized void close() { ArrayList>> toCancel = new ArrayList<>(onGoingReplicationActions.values()); onGoingReplicationActions.clear(); - cancelActions(toCancel, "Primary closed."); + cancelActions(toCancel, () -> new PrimaryShardClosedException(shardId)); } - private void cancelActions(ArrayList>> toCancel, String message) { + private void cancelActions(ArrayList>> toCancel, Supplier exceptionSupplier) { threadPool.executor(ThreadPool.Names.GENERIC) - .execute( - () -> toCancel.stream() - .flatMap(Collection::stream) - .forEach(action -> action.cancel(new IndexShardClosedException(shardId, message))) - ); + .execute(() -> toCancel.stream().flatMap(Collection::stream).forEach(action -> action.cancel(exceptionSupplier.get()))); } } diff --git a/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java b/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java index 39fb89bc48568..7fc810808f560 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java +++ b/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java @@ -52,6 +52,7 @@ import org.opensearch.index.IndexingPressureService; import org.opensearch.index.engine.Engine; import org.opensearch.index.mapper.MapperParsingException; +import org.opensearch.index.shard.PrimaryShardClosedException; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.ShardId; import org.opensearch.index.translog.Translog; @@ -514,15 +515,20 @@ public void failShardIfNeeded( if (TransportActions.isShardNotAvailableException(exception) == false) { logger.warn(new ParameterizedMessage("[{}] {}", replica.shardId(), message), exception); } - shardStateAction.remoteShardFailed( - replica.shardId(), - replica.allocationId().getId(), - primaryTerm, - true, - message, - exception, - listener - ); + // If a write action fails due to the closure of the primary shard + // then the replicas should not be marked as failed since they are + // still up-to-date with the (now closed) primary shard + if (exception instanceof PrimaryShardClosedException == false) { + shardStateAction.remoteShardFailed( + replica.shardId(), + replica.allocationId().getId(), + primaryTerm, + true, + message, + exception, + listener + ); + } } @Override diff --git a/server/src/main/java/org/opensearch/index/IndexModule.java b/server/src/main/java/org/opensearch/index/IndexModule.java index f8604caeab414..e52a2ba39ed52 100644 --- a/server/src/main/java/org/opensearch/index/IndexModule.java +++ b/server/src/main/java/org/opensearch/index/IndexModule.java @@ -70,7 +70,6 @@ import org.opensearch.index.shard.SearchOperationListener; import org.opensearch.index.similarity.SimilarityService; import org.opensearch.index.store.FsDirectoryFactory; -import org.opensearch.index.store.RemoteDirectoryFactory; import org.opensearch.indices.IndicesQueryCache; import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; @@ -487,7 +486,7 @@ public IndexService newIndexService( NamedWriteableRegistry namedWriteableRegistry, BooleanSupplier idFieldDataEnabled, ValuesSourceRegistry valuesSourceRegistry, - RemoteDirectoryFactory remoteDirectoryFactory + IndexStorePlugin.RemoteDirectoryFactory remoteDirectoryFactory ) throws IOException { final IndexEventListener eventListener = freeze(); Function> readerWrapperFactory = indexReaderWrapper diff --git 
a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 67a8e691fda0d..670af1f1c6fd9 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -48,8 +48,6 @@ import org.apache.lucene.search.Sort; import org.apache.lucene.search.UsageTrackingQueryCachingPolicy; import org.apache.lucene.store.AlreadyClosedException; -import org.apache.lucene.store.Directory; -import org.apache.lucene.store.FilterDirectory; import org.apache.lucene.util.SetOnce; import org.apache.lucene.util.ThreadInterruptedException; import org.opensearch.Assertions; @@ -3228,8 +3226,7 @@ private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) thro final List internalRefreshListener = new ArrayList<>(); internalRefreshListener.add(new RefreshMetricUpdater(refreshMetric)); if (isRemoteStoreEnabled()) { - Directory remoteDirectory = ((FilterDirectory) ((FilterDirectory) remoteStore.directory()).getDelegate()).getDelegate(); - internalRefreshListener.add(new RemoteStoreRefreshListener(store.directory(), remoteDirectory)); + internalRefreshListener.add(new RemoteStoreRefreshListener(this)); } if (this.checkpointPublisher != null && indexSettings.isSegRepEnabled() && shardRouting.primary()) { internalRefreshListener.add(new CheckpointRefreshListener(this, this.checkpointPublisher)); diff --git a/server/src/main/java/org/opensearch/index/shard/PrimaryShardClosedException.java b/server/src/main/java/org/opensearch/index/shard/PrimaryShardClosedException.java new file mode 100644 index 0000000000000..d1b2bf9079289 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/shard/PrimaryShardClosedException.java @@ -0,0 +1,26 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.index.shard; + +import java.io.IOException; + +import org.opensearch.common.io.stream.StreamInput; + +/** + * Exception to indicate failures are caused due to the closure of the primary + * shard. 
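 + * TransportWriteAction#failShardIfNeeded deliberately skips failing replicas when it sees this exception, since they remain in sync with the (now closed) primary.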
+ * + * @opensearch.internal + */ +public class PrimaryShardClosedException extends IndexShardClosedException { + public PrimaryShardClosedException(ShardId shardId) { + super(shardId, "Primary closed"); + } + + public PrimaryShardClosedException(StreamInput in) throws IOException { + super(in); + } +} diff --git a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java index 4b549ec485c0e..0d32e8d56e4d2 100644 --- a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java +++ b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java @@ -11,32 +11,54 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.lucene.codecs.CodecUtil; +import org.apache.lucene.index.IndexFileNames; +import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.search.ReferenceManager; import org.apache.lucene.store.Directory; +import org.apache.lucene.store.FilterDirectory; import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; +import org.opensearch.common.concurrent.GatedCloseable; +import org.opensearch.index.engine.EngineException; +import org.opensearch.index.store.RemoteSegmentStoreDirectory; import java.io.IOException; -import java.nio.file.NoSuchFileException; -import java.util.Arrays; -import java.util.HashSet; +import java.util.Collection; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; /** * RefreshListener implementation to upload newly created segment files to the remote store + * + * @opensearch.internal */ -public class RemoteStoreRefreshListener implements ReferenceManager.RefreshListener { +public final class RemoteStoreRefreshListener implements ReferenceManager.RefreshListener { + // Visible for testing + static final Set EXCLUDE_FILES = Set.of("write.lock"); + // Visible for testing + static final int LAST_N_METADATA_FILES_TO_KEEP = 10; + private final IndexShard indexShard; private final Directory storeDirectory; - private final Directory remoteDirectory; - // ToDo: This can be a map with metadata of the uploaded file as value of the map (GitHub #3398) - private final Set filesUploadedToRemoteStore; + private final RemoteSegmentStoreDirectory remoteDirectory; + private final Map localSegmentChecksumMap; + private long primaryTerm; private static final Logger logger = LogManager.getLogger(RemoteStoreRefreshListener.class); - public RemoteStoreRefreshListener(Directory storeDirectory, Directory remoteDirectory) throws IOException { - this.storeDirectory = storeDirectory; - this.remoteDirectory = remoteDirectory; - // ToDo: Handle failures in reading list of files (GitHub #3397) - this.filesUploadedToRemoteStore = new HashSet<>(Arrays.asList(remoteDirectory.listAll())); + public RemoteStoreRefreshListener(IndexShard indexShard) { + this.indexShard = indexShard; + this.storeDirectory = indexShard.store().directory(); + this.remoteDirectory = (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) indexShard.remoteStore().directory()) + .getDelegate()).getDelegate(); + this.primaryTerm = indexShard.getOperationPrimaryTerm(); + localSegmentChecksumMap = new HashMap<>(); } @Override @@ -46,42 
+68,112 @@ public void beforeRefresh() throws IOException { /** * Upload new segment files created as part of the last refresh to the remote segment store. - * The method also deletes segment files from remote store which are not part of local filesystem. + * This method also uploads remote_segments_metadata file which contains metadata of each segment file uploaded. * @param didRefresh true if the refresh opened a new reference - * @throws IOException in case of I/O error in reading list of local files */ @Override - public void afterRefresh(boolean didRefresh) throws IOException { - if (didRefresh) { - Set localFiles = Set.of(storeDirectory.listAll()); - localFiles.stream().filter(file -> !filesUploadedToRemoteStore.contains(file)).forEach(file -> { - try { - remoteDirectory.copyFrom(storeDirectory, file, file, IOContext.DEFAULT); - filesUploadedToRemoteStore.add(file); - } catch (NoSuchFileException e) { - logger.info( - () -> new ParameterizedMessage("The file {} does not exist anymore. It can happen in case of temp files", file), - e - ); - } catch (IOException e) { - // ToDO: Handle transient and permanent un-availability of the remote store (GitHub #3397) - logger.warn(() -> new ParameterizedMessage("Exception while uploading file {} to the remote segment store", file), e); - } - }); + public void afterRefresh(boolean didRefresh) { + synchronized (this) { + try { + if (indexShard.shardRouting.primary()) { + if (this.primaryTerm != indexShard.getOperationPrimaryTerm()) { + this.primaryTerm = indexShard.getOperationPrimaryTerm(); + this.remoteDirectory.init(); + } + try { + String lastCommittedLocalSegmentFileName = SegmentInfos.getLastCommitSegmentsFileName(storeDirectory); + if (!remoteDirectory.containsFile( + lastCommittedLocalSegmentFileName, + getChecksumOfLocalFile(lastCommittedLocalSegmentFileName) + )) { + deleteStaleCommits(); + } + try (GatedCloseable segmentInfosGatedCloseable = indexShard.getSegmentInfosSnapshot()) { + SegmentInfos segmentInfos = segmentInfosGatedCloseable.get(); + Collection refreshedLocalFiles = segmentInfos.files(true); + + List segmentInfosFiles = refreshedLocalFiles.stream() + .filter(file -> file.startsWith(IndexFileNames.SEGMENTS)) + .collect(Collectors.toList()); + Optional latestSegmentInfos = segmentInfosFiles.stream() + .max(Comparator.comparingLong(IndexFileNames::parseGeneration)); - Set remoteFilesToBeDeleted = new HashSet<>(); - // ToDo: Instead of deleting files in sync, mark them and delete in async/periodic flow (GitHub #3142) - filesUploadedToRemoteStore.stream().filter(file -> !localFiles.contains(file)).forEach(file -> { - try { - remoteDirectory.deleteFile(file); - remoteFilesToBeDeleted.add(file); - } catch (IOException e) { - // ToDO: Handle transient and permanent un-availability of the remote store (GitHub #3397) - logger.warn(() -> new ParameterizedMessage("Exception while deleting file {} from the remote segment store", file), e); + if (latestSegmentInfos.isPresent()) { + refreshedLocalFiles.addAll(SegmentInfos.readCommit(storeDirectory, latestSegmentInfos.get()).files(true)); + segmentInfosFiles.stream() + .filter(file -> !file.equals(latestSegmentInfos.get())) + .forEach(refreshedLocalFiles::remove); + + boolean uploadStatus = uploadNewSegments(refreshedLocalFiles); + if (uploadStatus) { + remoteDirectory.uploadMetadata( + refreshedLocalFiles, + storeDirectory, + indexShard.getOperationPrimaryTerm(), + segmentInfos.getGeneration() + ); + localSegmentChecksumMap.keySet() + .stream() + .filter(file -> 
!refreshedLocalFiles.contains(file)) + .collect(Collectors.toSet()) + .forEach(localSegmentChecksumMap::remove); + } + } + } catch (EngineException e) { + logger.warn("Exception while reading SegmentInfosSnapshot", e); + } + } catch (IOException e) { + // We don't want to fail refresh if upload of new segments fails. The missed segments will be re-tried + // in the next refresh. This should not affect durability of the indexed data after remote trans-log integration. + logger.warn("Exception while uploading new segments to the remote segment store", e); + } } - }); + } catch (Throwable t) { + logger.error("Exception in RemoteStoreRefreshListener.afterRefresh()", t); + } + } + } + + // Visible for testing + boolean uploadNewSegments(Collection localFiles) throws IOException { + AtomicBoolean uploadSuccess = new AtomicBoolean(true); + localFiles.stream().filter(file -> !EXCLUDE_FILES.contains(file)).filter(file -> { + try { + return !remoteDirectory.containsFile(file, getChecksumOfLocalFile(file)); + } catch (IOException e) { + logger.info( + "Exception while reading checksum of local segment file: {}, ignoring the exception and re-uploading the file", + file + ); + return true; + } + }).forEach(file -> { + try { + remoteDirectory.copyFrom(storeDirectory, file, file, IOContext.DEFAULT); + } catch (IOException e) { + uploadSuccess.set(false); + // ToDO: Handle transient and permanent un-availability of the remote store (GitHub #3397) + logger.warn(() -> new ParameterizedMessage("Exception while uploading file {} to the remote segment store", file), e); + } + }); + return uploadSuccess.get(); + } + + private String getChecksumOfLocalFile(String file) throws IOException { + if (!localSegmentChecksumMap.containsKey(file)) { + try (IndexInput indexInput = storeDirectory.openInput(file, IOContext.DEFAULT)) { + String checksum = Long.toString(CodecUtil.retrieveChecksum(indexInput)); + localSegmentChecksumMap.put(file, checksum); + } + } + return localSegmentChecksumMap.get(file); + } - remoteFilesToBeDeleted.forEach(filesUploadedToRemoteStore::remove); + private void deleteStaleCommits() { + try { + remoteDirectory.deleteStaleSegments(LAST_N_METADATA_FILES_TO_KEEP); + } catch (IOException e) { + logger.info("Exception while deleting stale commits from remote segment store, will retry delete post next commit", e); } } } diff --git a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java index 1190e8e6ab3d2..06916c4cc87fe 100644 --- a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java @@ -449,7 +449,12 @@ private void recoverFromRemoteStore(IndexShard indexShard) throws IndexShardReco } indexShard.preRecovery(); indexShard.prepareForIndexRecovery(); - final Directory remoteDirectory = remoteStore.directory(); + assert remoteStore.directory() instanceof FilterDirectory : "Store.directory is not an instance of FilterDirectory"; + FilterDirectory remoteStoreDirectory = (FilterDirectory) remoteStore.directory(); + assert remoteStoreDirectory.getDelegate() instanceof FilterDirectory + : "Store.directory is not enclosing an instance of FilterDirectory"; + FilterDirectory byteSizeCachingStoreDirectory = (FilterDirectory) remoteStoreDirectory.getDelegate(); + final Directory remoteDirectory = byteSizeCachingStoreDirectory.getDelegate(); final Store store = indexShard.store(); final Directory storeDirectory = store.directory(); 
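 // Note: the two getDelegate() calls above unwrap the Store's FilterDirectory and the byte-size-caching FilterDirectory so that the underlying remote segment store directory is used for recovery.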
store.incRef(); diff --git a/server/src/main/java/org/opensearch/index/store/RemoteIndexInput.java b/server/src/main/java/org/opensearch/index/store/RemoteIndexInput.java index 8f8d5dd5418ae..2c809563ca961 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteIndexInput.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteIndexInput.java @@ -27,27 +27,37 @@ public class RemoteIndexInput extends IndexInput { private final InputStream inputStream; private final long size; + private long filePointer; public RemoteIndexInput(String name, InputStream inputStream, long size) { super(name); this.inputStream = inputStream; this.size = size; + this.filePointer = 0; } @Override public byte readByte() throws IOException { byte[] buffer = new byte[1]; - inputStream.read(buffer); + int numberOfBytesRead = inputStream.read(buffer); + if (numberOfBytesRead != -1) { + filePointer += numberOfBytesRead; + } return buffer[0]; } @Override public void readBytes(byte[] b, int offset, int len) throws IOException { int bytesRead = inputStream.read(b, offset, len); - while (bytesRead > 0 && bytesRead < len) { - len -= bytesRead; - offset += bytesRead; - bytesRead = inputStream.read(b, offset, len); + if (bytesRead == len) { + filePointer += bytesRead; + } else { + while (bytesRead > 0 && bytesRead < len) { + filePointer += bytesRead; + len -= bytesRead; + offset += bytesRead; + bytesRead = inputStream.read(b, offset, len); + } } } @@ -61,11 +71,6 @@ public long length() { return size; } - @Override - public void seek(long pos) throws IOException { - inputStream.skip(pos); - } - /** * Guaranteed to throw an exception and leave the RemoteIndexInput unmodified. * This method is not implemented as it is not used for the file transfer to/from the remote store. @@ -73,10 +78,18 @@ public void seek(long pos) throws IOException { * @throws UnsupportedOperationException always */ @Override - public long getFilePointer() { + public void seek(long pos) throws IOException { throw new UnsupportedOperationException(); } + /** + * Returns the current position in this file in terms of number of bytes read so far. + */ + @Override + public long getFilePointer() { + return filePointer; + } + /** * Guaranteed to throw an exception and leave the RemoteIndexInput unmodified. * This method is not implemented as it is not used for the file transfer to/from the remote store. diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java index d7d6b29d08bfc..505ad6fafd550 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java @@ -24,9 +24,13 @@ import java.util.Collections; import java.util.Comparator; import java.util.HashMap; +import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.Set; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; /** @@ -132,8 +136,9 @@ private Map readMetadataFile(String metadataFil /** * Metadata of a segment that is uploaded to remote segment store. 
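 * Each instance tracks the segment's original local filename, the filename it was uploaded under, and its checksum.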
*/ - static class UploadedSegmentMetadata { - private static final String SEPARATOR = "::"; + public static class UploadedSegmentMetadata { + // Visible for testing + static final String SEPARATOR = "::"; private final String originalFilename; private final String uploadedFilename; private final String checksum; @@ -366,7 +371,69 @@ private String getLocalSegmentFilename(String remoteFilename) { } // Visible for testing - Map getSegmentsUploadedToRemoteStore() { - return this.segmentsUploadedToRemoteStore; + public Map getSegmentsUploadedToRemoteStore() { + return Collections.unmodifiableMap(this.segmentsUploadedToRemoteStore); + } + + /** + * Delete stale segment and metadata files + * One metadata file is kept per commit (refresh updates the same file). To read segments uploaded to remote store, + * we just need to read the latest metadata file. All the stale metadata files can be safely deleted. + * @param lastNMetadataFilesToKeep number of metadata files to keep + * @throws IOException in case of I/O error while reading from / writing to remote segment store + */ + public void deleteStaleSegments(int lastNMetadataFilesToKeep) throws IOException { + Collection metadataFiles = remoteMetadataDirectory.listFilesByPrefix(MetadataFilenameUtils.METADATA_PREFIX); + List sortedMetadataFileList = metadataFiles.stream().sorted(METADATA_FILENAME_COMPARATOR).collect(Collectors.toList()); + if (sortedMetadataFileList.size() <= lastNMetadataFilesToKeep) { + logger.info( + "Number of commits in remote segment store={}, lastNMetadataFilesToKeep={}", + sortedMetadataFileList.size(), + lastNMetadataFilesToKeep + ); + return; + } + List latestNMetadataFiles = sortedMetadataFileList.subList( + sortedMetadataFileList.size() - lastNMetadataFilesToKeep, + sortedMetadataFileList.size() + ); + Map activeSegmentFilesMetadataMap = new HashMap<>(); + Set activeSegmentRemoteFilenames = new HashSet<>(); + for (String metadataFile : latestNMetadataFiles) { + Map segmentMetadataMap = readMetadataFile(metadataFile); + activeSegmentFilesMetadataMap.putAll(segmentMetadataMap); + activeSegmentRemoteFilenames.addAll( + segmentMetadataMap.values().stream().map(metadata -> metadata.uploadedFilename).collect(Collectors.toSet()) + ); + } + for (String metadataFile : sortedMetadataFileList.subList(0, sortedMetadataFileList.size() - lastNMetadataFilesToKeep)) { + Map staleSegmentFilesMetadataMap = readMetadataFile(metadataFile); + Set staleSegmentRemoteFilenames = staleSegmentFilesMetadataMap.values() + .stream() + .map(metadata -> metadata.uploadedFilename) + .collect(Collectors.toSet()); + AtomicBoolean deletionSuccessful = new AtomicBoolean(true); + staleSegmentRemoteFilenames.stream().filter(file -> !activeSegmentRemoteFilenames.contains(file)).forEach(file -> { + try { + remoteDataDirectory.deleteFile(file); + if (!activeSegmentFilesMetadataMap.containsKey(getLocalSegmentFilename(file))) { + segmentsUploadedToRemoteStore.remove(getLocalSegmentFilename(file)); + } + } catch (NoSuchFileException e) { + logger.info("Segment file {} corresponding to metadata file {} does not exist in remote", file, metadataFile); + } catch (IOException e) { + deletionSuccessful.set(false); + logger.info( + "Exception while deleting segment file {} corresponding to metadata file {}. 
Deletion will be re-tried", + file, + metadataFile + ); + } + }); + if (deletionSuccessful.get()) { + logger.info("Deleting stale metadata file {} from remote segment store", metadataFile); + remoteMetadataDirectory.deleteFile(metadataFile); + } + } } } diff --git a/server/src/main/java/org/opensearch/index/store/RemoteDirectoryFactory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java similarity index 58% rename from server/src/main/java/org/opensearch/index/store/RemoteDirectoryFactory.java rename to server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java index 62f398cdad207..e77eb52bd3891 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteDirectoryFactory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java @@ -27,11 +27,11 @@ * * @opensearch.internal */ -public class RemoteDirectoryFactory implements IndexStorePlugin.RemoteDirectoryFactory { +public class RemoteSegmentStoreDirectoryFactory implements IndexStorePlugin.RemoteDirectoryFactory { private final Supplier repositoriesService; - public RemoteDirectoryFactory(Supplier repositoriesService) { + public RemoteSegmentStoreDirectoryFactory(Supplier repositoriesService) { this.repositoriesService = repositoriesService; } @@ -39,13 +39,23 @@ public RemoteDirectoryFactory(Supplier repositoriesService) public Directory newDirectory(String repositoryName, IndexSettings indexSettings, ShardPath path) throws IOException { try (Repository repository = repositoriesService.get().repository(repositoryName)) { assert repository instanceof BlobStoreRepository : "repository should be instance of BlobStoreRepository"; - BlobPath blobPath = new BlobPath(); - blobPath = blobPath.add(indexSettings.getIndex().getName()).add(String.valueOf(path.getShardId().getId())); - BlobContainer blobContainer = ((BlobStoreRepository) repository).blobStore().blobContainer(blobPath); - return new RemoteDirectory(blobContainer); + BlobPath commonBlobPath = ((BlobStoreRepository) repository).basePath(); + commonBlobPath = commonBlobPath.add(indexSettings.getIndex().getUUID()) + .add(String.valueOf(path.getShardId().getId())) + .add("segments"); + + RemoteDirectory dataDirectory = createRemoteDirectory(repository, commonBlobPath, "data"); + RemoteDirectory metadataDirectory = createRemoteDirectory(repository, commonBlobPath, "metadata"); + + return new RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory); } catch (RepositoryMissingException e) { throw new IllegalArgumentException("Repository should be created before creating index with remote_store enabled setting", e); } } + private RemoteDirectory createRemoteDirectory(Repository repository, BlobPath commonBlobPath, String extention) { + BlobPath extendedPath = commonBlobPath.add(extention); + BlobContainer dataBlobContainer = ((BlobStoreRepository) repository).blobStore().blobContainer(extendedPath); + return new RemoteDirectory(dataBlobContainer); + } } diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index fdb609ba7bbff..6808803ee0988 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -132,7 +132,6 @@ import org.opensearch.index.shard.IndexingOperationListener; import org.opensearch.index.shard.IndexingStats; import org.opensearch.index.shard.ShardId; -import 
org.opensearch.index.store.RemoteDirectoryFactory; import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.indices.cluster.IndicesClusterStateService; import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; @@ -266,7 +265,7 @@ public class IndicesService extends AbstractLifecycleComponent private final Set danglingIndicesToWrite = Sets.newConcurrentHashSet(); private final boolean nodeWriteDanglingIndicesInfo; private final ValuesSourceRegistry valuesSourceRegistry; - private final RemoteDirectoryFactory remoteDirectoryFactory; + private final IndexStorePlugin.RemoteDirectoryFactory remoteDirectoryFactory; @Override protected void doStart() { @@ -295,7 +294,7 @@ public IndicesService( Map directoryFactories, ValuesSourceRegistry valuesSourceRegistry, Map recoveryStateFactories, - RemoteDirectoryFactory remoteDirectoryFactory + IndexStorePlugin.RemoteDirectoryFactory remoteDirectoryFactory ) { this.settings = settings; this.threadPool = threadPool; diff --git a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java index 8884ef2cddd0a..15a9bf9e4c492 100644 --- a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java +++ b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java @@ -81,6 +81,7 @@ import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoveryListener; import org.opensearch.indices.recovery.RecoveryState; +import org.opensearch.indices.replication.SegmentReplicationSourceService; import org.opensearch.indices.replication.SegmentReplicationTargetService; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.replication.common.ReplicationState; @@ -152,6 +153,7 @@ public IndicesClusterStateService( final ThreadPool threadPool, final PeerRecoveryTargetService recoveryTargetService, final SegmentReplicationTargetService segmentReplicationTargetService, + final SegmentReplicationSourceService segmentReplicationSourceService, final ShardStateAction shardStateAction, final NodeMappingRefreshAction nodeMappingRefreshAction, final RepositoriesService repositoriesService, @@ -170,6 +172,7 @@ public IndicesClusterStateService( threadPool, checkpointPublisher, segmentReplicationTargetService, + segmentReplicationSourceService, recoveryTargetService, shardStateAction, nodeMappingRefreshAction, @@ -191,6 +194,7 @@ public IndicesClusterStateService( final ThreadPool threadPool, final SegmentReplicationCheckpointPublisher checkpointPublisher, final SegmentReplicationTargetService segmentReplicationTargetService, + final SegmentReplicationSourceService segmentReplicationSourceService, final PeerRecoveryTargetService recoveryTargetService, final ShardStateAction shardStateAction, final NodeMappingRefreshAction nodeMappingRefreshAction, @@ -211,6 +215,7 @@ public IndicesClusterStateService( // if segrep feature flag is not enabled, don't wire the target serivce as an IndexEventListener. 
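 // The source service is registered as well so that a primary can cancel its ongoing segment copies when the shard is closed or promoted (see SegmentReplicationSourceService below).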
if (FeatureFlags.isEnabled(FeatureFlags.REPLICATION_TYPE)) { indexEventListeners.add(segmentReplicationTargetService); + indexEventListeners.add(segmentReplicationSourceService); } this.builtInIndexListener = Collections.unmodifiableList(indexEventListeners); this.indicesService = indicesService; diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java index 652f3c9a55f53..7acc6b8b54fdd 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java @@ -177,51 +177,6 @@ public boolean reset(CancellableThreads newTargetCancellableThreads) throws IOEx return false; } - /** - * cancel the recovery. calling this method will clean temporary files and release the store - * unless this object is in use (in which case it will be cleaned once all ongoing users call - * {@link #decRef()} - *

- * if {@link #cancellableThreads()} was used, the threads will be interrupted. - */ - public void cancel(String reason) { - if (finished.compareAndSet(false, true)) { - try { - logger.debug("recovery canceled (reason: [{}])", reason); - cancellableThreads.cancel(reason); - } finally { - // release the initial reference. recovery files will be cleaned as soon as ref count goes to zero, potentially now - decRef(); - } - } - } - - /** - * fail the recovery and call listener - * - * @param e exception that encapsulating the failure - * @param sendShardFailure indicates whether to notify the cluster-manager of the shard failure - */ - public void fail(RecoveryFailedException e, boolean sendShardFailure) { - super.fail(e, sendShardFailure); - } - - /** mark the current recovery as done */ - public void markAsDone() { - if (finished.compareAndSet(false, true)) { - assert multiFileWriter.tempFileNames.isEmpty() : "not all temporary files are renamed"; - try { - // this might still throw an exception ie. if the shard is CLOSED due to some other event. - // it's safer to decrement the reference in a try finally here. - indexShard.postRecovery("peer recovery done"); - } finally { - // release the initial reference. recovery files will be cleaned as soon as ref count goes to zero, potentially now - decRef(); - } - listener.onDone(state()); - } - } - @Override protected void closeInternal() { try { @@ -246,8 +201,6 @@ protected String getPrefix() { @Override protected void onDone() { assert multiFileWriter.tempFileNames.isEmpty() : "not all temporary files are renamed"; - // this might still throw an exception ie. if the shard is CLOSED due to some other event. - // it's safer to decrement the reference in a try finally here. indexShard.postRecovery("peer recovery done"); } diff --git a/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java b/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java index dfebe5f7cabf2..828aa29192fe3 100644 --- a/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java +++ b/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java @@ -37,7 +37,6 @@ * @opensearch.internal */ class OngoingSegmentReplications { - private final RecoverySettings recoverySettings; private final IndicesService indicesService; private final Map copyStateMap; @@ -161,6 +160,20 @@ synchronized void cancel(IndexShard shard, String reason) { cancelHandlers(handler -> handler.getCopyState().getShard().shardId().equals(shard.shardId()), reason); } + /** + * Cancel all Replication events for the given allocation ID, intended to be called when a primary is shutting down. + * + * @param allocationId {@link String} - Allocation ID. 
+ * @param reason {@link String} - Reason for the cancel + */ + synchronized void cancel(String allocationId, String reason) { + final SegmentReplicationSourceHandler handler = allocationIdToHandlers.remove(allocationId); + if (handler != null) { + handler.cancel(reason); + removeCopyState(handler.getCopyState()); + } + } + /** * Cancel any ongoing replications for a given {@link DiscoveryNode} * @@ -168,7 +181,6 @@ synchronized void cancel(IndexShard shard, String reason) { */ void cancelReplication(DiscoveryNode node) { cancelHandlers(handler -> handler.getTargetNode().equals(node), "Node left"); - } /** @@ -243,11 +255,7 @@ private void cancelHandlers(Predicate p .map(SegmentReplicationSourceHandler::getAllocationId) .collect(Collectors.toList()); for (String allocationId : allocationIds) { - final SegmentReplicationSourceHandler handler = allocationIdToHandlers.remove(allocationId); - if (handler != null) { - handler.cancel(reason); - removeCopyState(handler.getCopyState()); - } + cancel(allocationId, reason); } } } diff --git a/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java b/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java index 08dc0b97b31d5..aa0b5416dd0ff 100644 --- a/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java +++ b/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java @@ -87,4 +87,10 @@ public void getSegmentFiles( ); transportClient.executeRetryableAction(GET_SEGMENT_FILES, request, responseListener, reader); } + + @Override + public void cancel() { + transportClient.cancel(); + } + } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSource.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSource.java index 8628a266ea7d0..b2e7487fff4b2 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSource.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSource.java @@ -9,6 +9,7 @@ package org.opensearch.indices.replication; import org.opensearch.action.ActionListener; +import org.opensearch.common.util.CancellableThreads.ExecutionCancelledException; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; @@ -47,4 +48,9 @@ void getSegmentFiles( Store store, ActionListener listener ); + + /** + * Cancel any ongoing requests, should resolve any ongoing listeners with onFailure with a {@link ExecutionCancelledException}. 
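 + * The default implementation is a no-op, for sources that have nothing in flight to cancel.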
+ */ + default void cancel() {} } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java index 2d21653c1924c..022d90b41d8ee 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java @@ -113,6 +113,16 @@ public synchronized void sendFiles(GetSegmentFilesRequest request, ActionListene final Closeable releaseResources = () -> IOUtils.close(resources); try { timer.start(); + cancellableThreads.setOnCancel((reason, beforeCancelEx) -> { + final RuntimeException e = new CancellableThreads.ExecutionCancelledException( + "replication was canceled reason [" + reason + "]" + ); + if (beforeCancelEx != null) { + e.addSuppressed(beforeCancelEx); + } + IOUtils.closeWhileHandlingException(releaseResources, () -> future.onFailure(e)); + throw e; + }); final Consumer onFailure = e -> { assert Transports.assertNotTransportThread(SegmentReplicationSourceHandler.this + "[onFailure]"); IOUtils.closeWhileHandlingException(releaseResources, () -> future.onFailure(e)); @@ -153,6 +163,7 @@ public synchronized void sendFiles(GetSegmentFilesRequest request, ActionListene final MultiChunkTransfer transfer = segmentFileTransferHandler .createTransfer(shard.store(), storeFileMetadata, () -> 0, sendFileStep); resources.add(transfer); + cancellableThreads.checkForCancel(); transfer.start(); sendFileStep.whenComplete(r -> { diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java index 0cee731fde2cb..db3f87201b774 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java @@ -15,6 +15,7 @@ import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterStateListener; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; import org.opensearch.common.component.AbstractLifecycleComponent; @@ -42,7 +43,25 @@ * * @opensearch.internal */ -public final class SegmentReplicationSourceService extends AbstractLifecycleComponent implements ClusterStateListener, IndexEventListener { +public class SegmentReplicationSourceService extends AbstractLifecycleComponent implements ClusterStateListener, IndexEventListener { + + // Empty Implementation, only required while Segment Replication is under feature flag. 
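 + // All of its listener callbacks below are deliberate no-ops.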
+ public static final SegmentReplicationSourceService NO_OP = new SegmentReplicationSourceService() { + @Override + public void clusterChanged(ClusterChangedEvent event) { + // NoOp; + } + + @Override + public void beforeIndexShardClosed(ShardId shardId, IndexShard indexShard, Settings indexSettings) { + // NoOp; + } + + @Override + public void shardRoutingChanged(IndexShard indexShard, @Nullable ShardRouting oldRouting, ShardRouting newRouting) { + // NoOp; + } + }; private static final Logger logger = LogManager.getLogger(SegmentReplicationSourceService.class); private final RecoverySettings recoverySettings; @@ -62,6 +81,14 @@ public static class Actions { private final OngoingSegmentReplications ongoingSegmentReplications; + // Used only for empty implementation. + private SegmentReplicationSourceService() { + recoverySettings = null; + ongoingSegmentReplications = null; + transportService = null; + indicesService = null; + } + public SegmentReplicationSourceService( IndicesService indicesService, TransportService transportService, @@ -163,10 +190,25 @@ protected void doClose() throws IOException { } + /** + * + * Cancels any replications on this node to a replica shard that is about to be closed. + */ @Override public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexShard, Settings indexSettings) { if (indexShard != null) { ongoingSegmentReplications.cancel(indexShard, "shard is closed"); } } + + /** + * Cancels any replications on this node to a replica that has been promoted as primary. + */ + @Override + public void shardRoutingChanged(IndexShard indexShard, @Nullable ShardRouting oldRouting, ShardRouting newRouting) { + if (indexShard != null && oldRouting.primary() == false && newRouting.primary()) { + ongoingSegmentReplications.cancel(indexShard.routingEntry().allocationId().getId(), "Relocating primary shard."); + } + } + } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationState.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationState.java index f865ba1332186..2e2e6df007c5c 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationState.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationState.java @@ -35,7 +35,8 @@ public enum Stage { GET_CHECKPOINT_INFO((byte) 3), FILE_DIFF((byte) 4), GET_FILES((byte) 5), - FINALIZE_REPLICATION((byte) 6); + FINALIZE_REPLICATION((byte) 6), + CANCELLED((byte) 7); private static final Stage[] STAGES = new Stage[Stage.values().length]; @@ -118,6 +119,10 @@ protected void validateAndSetStage(Stage expected, Stage next) { "can't move replication to stage [" + next + "]. 
current stage: [" + stage + "] (expected [" + expected + "])" ); } + stopTimersAndSetStage(next); + } + + private void stopTimersAndSetStage(Stage next) { // save the timing data for the current step stageTimer.stop(); timingData.add(new Tuple<>(stage.name(), stageTimer.time())); @@ -155,6 +160,14 @@ public void setStage(Stage stage) { overallTimer.stop(); timingData.add(new Tuple<>("OVERALL", overallTimer.time())); break; + case CANCELLED: + if (this.stage == Stage.DONE) { + throw new IllegalStateException("can't move replication to Cancelled state from Done."); + } + stopTimersAndSetStage(Stage.CANCELLED); + overallTimer.stop(); + timingData.add(new Tuple<>("OVERALL", overallTimer.time())); + break; default: throw new IllegalArgumentException("unknown SegmentReplicationState.Stage [" + stage + "]"); } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java index a658ffc09d590..d1d6104a416ca 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java @@ -17,6 +17,7 @@ import org.apache.lucene.store.ByteBuffersDataInput; import org.apache.lucene.store.ByteBuffersIndexInput; import org.apache.lucene.store.ChecksumIndexInput; +import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.action.StepListener; @@ -103,7 +104,15 @@ public String description() { @Override public void notifyListener(OpenSearchException e, boolean sendShardFailure) { - listener.onFailure(state(), e, sendShardFailure); + // Cancellations are still passed to our SegmentReplicationListener as failures; if we failed because of cancellation, + // update the stage. + final Throwable cancelledException = ExceptionsHelper.unwrap(e, CancellableThreads.ExecutionCancelledException.class); + if (cancelledException != null) { + state.setStage(SegmentReplicationState.Stage.CANCELLED); + listener.onFailure(state(), (CancellableThreads.ExecutionCancelledException) cancelledException, sendShardFailure); + } else { + listener.onFailure(state(), e, sendShardFailure); + } } @Override @@ -134,6 +143,14 @@ public void writeFileChunk( * @param listener {@link ActionListener} listener. */ public void startReplication(ActionListener listener) { + cancellableThreads.setOnCancel((reason, beforeCancelEx) -> { + // This method only executes when cancellation is triggered by this node and caught by a call to checkForCancel; + // SegmentReplicationSource does not share CancellableThreads. + final CancellableThreads.ExecutionCancelledException executionCancelledException = + new CancellableThreads.ExecutionCancelledException("replication was canceled reason [" + reason + "]"); + notifyListener(executionCancelledException, false); + throw executionCancelledException; + }); state.setStage(SegmentReplicationState.Stage.REPLICATING); final StepListener checkpointInfoListener = new StepListener<>(); final StepListener getFilesListener = new StepListener<>(); @@ -141,6 +158,7 @@ public void startReplication(ActionListener listener) { logger.trace("[shardId {}] Replica starting replication [id {}]", shardId().getId(), getId()); // Get list of files to copy from this checkpoint. 
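 + // Abort early if this replication was already cancelled before making the first remote call.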
+ cancellableThreads.checkForCancel(); state.setStage(SegmentReplicationState.Stage.GET_CHECKPOINT_INFO); source.getCheckpointMetadata(getId(), checkpoint, checkpointInfoListener); @@ -154,6 +172,7 @@ public void startReplication(ActionListener listener) { private void getFiles(CheckpointInfoResponse checkpointInfo, StepListener getFilesListener) throws IOException { + cancellableThreads.checkForCancel(); state.setStage(SegmentReplicationState.Stage.FILE_DIFF); final Store.MetadataSnapshot snapshot = checkpointInfo.getSnapshot(); Store.MetadataSnapshot localMetadata = getMetadataSnapshot(); @@ -188,12 +207,14 @@ private void getFiles(CheckpointInfoResponse checkpointInfo, StepListener listener) { - state.setStage(SegmentReplicationState.Stage.FINALIZE_REPLICATION); ActionListener.completeWith(listener, () -> { + cancellableThreads.checkForCancel(); + state.setStage(SegmentReplicationState.Stage.FINALIZE_REPLICATION); multiFileWriter.renameAllTempFiles(); final Store store = store(); store.incRef(); @@ -261,4 +282,10 @@ Store.MetadataSnapshot getMetadataSnapshot() throws IOException { } return store.getMetadata(indexShard.getSegmentInfosSnapshot().get()); } + + @Override + protected void onCancel(String reason) { + cancellableThreads.cancel(reason); + source.cancel(); + } } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java index a79ce195ad83b..9e6b66dc4d7d6 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java @@ -11,10 +11,13 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; +import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.Nullable; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.CancellableThreads; import org.opensearch.index.shard.IndexEventListener; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.ShardId; @@ -64,6 +67,11 @@ public void beforeIndexShardClosed(ShardId shardId, IndexShard indexShard, Setti public synchronized void onNewCheckpoint(ReplicationCheckpoint receivedCheckpoint, IndexShard replicaShard) { // noOp; } + + @Override + public void shardRoutingChanged(IndexShard indexShard, @Nullable ShardRouting oldRouting, ShardRouting newRouting) { + // noOp; + } }; // Used only for empty implementation. @@ -74,6 +82,10 @@ private SegmentReplicationTargetService() { sourceFactory = null; } + public ReplicationRef get(long replicationId) { + return onGoingReplications.get(replicationId); + } + /** * The internal actions * @@ -102,6 +114,9 @@ public SegmentReplicationTargetService( ); } + /** + * Cancel any replications on this node for a replica that is about to be closed. + */ @Override public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexShard, Settings indexSettings) { if (indexShard != null) { @@ -109,11 +124,22 @@ public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexSh } } + /** + * Cancel any replications on this node for a replica that has just been promoted as the new primary. 
+ */ + @Override + public void shardRoutingChanged(IndexShard indexShard, @Nullable ShardRouting oldRouting, ShardRouting newRouting) { + if (oldRouting != null && oldRouting.primary() == false && newRouting.primary()) { + onGoingReplications.cancelForShard(indexShard.shardId(), "shard has been promoted to primary"); + } + } + /** * Invoked when a new checkpoint is received from a primary shard. * It checks if a new checkpoint should be processed or not and starts replication if needed. - * @param receivedCheckpoint received checkpoint that is checked for processing - * @param replicaShard replica shard on which checkpoint is received + * + * @param receivedCheckpoint received checkpoint that is checked for processing + * @param replicaShard replica shard on which checkpoint is received */ public synchronized void onNewCheckpoint(final ReplicationCheckpoint receivedCheckpoint, final IndexShard replicaShard) { logger.trace(() -> new ParameterizedMessage("Replica received new replication checkpoint from primary [{}]", receivedCheckpoint)); @@ -180,12 +206,19 @@ public void onReplicationFailure(SegmentReplicationState state, OpenSearchExcept } } - public void startReplication( + public SegmentReplicationTarget startReplication( final ReplicationCheckpoint checkpoint, final IndexShard indexShard, final SegmentReplicationListener listener ) { - startReplication(new SegmentReplicationTarget(checkpoint, indexShard, sourceFactory.get(indexShard), listener)); + final SegmentReplicationTarget target = new SegmentReplicationTarget( + checkpoint, + indexShard, + sourceFactory.get(indexShard), + listener + ); + startReplication(target); + return target; } // pkg-private for integration tests @@ -248,7 +281,17 @@ public void onResponse(Void o) { @Override public void onFailure(Exception e) { - onGoingReplications.fail(replicationId, new OpenSearchException("Segment Replication failed", e), true); + Throwable cause = ExceptionsHelper.unwrapCause(e); + if (cause instanceof CancellableThreads.ExecutionCancelledException) { + if (onGoingReplications.getTarget(replicationId) != null) { + // if the target still exists in our collection, the primary initiated the cancellation, fail the replication + // but do not fail the shard. Cancellations initiated by this node from Index events will be removed with + // onGoingReplications.cancel and not appear in the collection when this listener resolves. + onGoingReplications.fail(replicationId, (CancellableThreads.ExecutionCancelledException) cause, false); + } + } else { + onGoingReplications.fail(replicationId, new OpenSearchException("Segment Replication failed", e), true); + } } }); } diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java index 501ff46eeb2ff..42f4572fef3e4 100644 --- a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java @@ -155,7 +155,7 @@ public void markAsDone() { public void cancel(String reason) { if (finished.compareAndSet(false, true)) { try { - logger.debug("replication cancelled (reason: [{}])", reason); + logger.debug("replication/recovery cancelled (reason: [{}])", reason); onCancel(reason); } finally { // release the initial reference. 
replication files will be cleaned as soon as ref count goes to zero, potentially now diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index d3f0912cab638..92e9815313fa0 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -39,12 +39,12 @@ import org.opensearch.common.util.FeatureFlags; import org.opensearch.cluster.routing.allocation.AwarenessReplicaBalance; import org.opensearch.index.IndexingPressureService; -import org.opensearch.index.store.RemoteDirectoryFactory; import org.opensearch.indices.replication.SegmentReplicationSourceFactory; import org.opensearch.indices.replication.SegmentReplicationTargetService; import org.opensearch.indices.replication.SegmentReplicationSourceService; import org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.threadpool.RunnableTaskExecutionListener; +import org.opensearch.index.store.RemoteSegmentStoreDirectoryFactory; import org.opensearch.watcher.ResourceWatcherService; import org.opensearch.Assertions; import org.opensearch.Build; @@ -629,7 +629,9 @@ protected Node( rerouteServiceReference.set(rerouteService); clusterService.setRerouteService(rerouteService); - final RemoteDirectoryFactory remoteDirectoryFactory = new RemoteDirectoryFactory(repositoriesServiceReference::get); + final IndexStorePlugin.RemoteDirectoryFactory remoteDirectoryFactory = new RemoteSegmentStoreDirectoryFactory( + repositoriesServiceReference::get + ); final IndicesService indicesService = new IndicesService( settings, @@ -967,6 +969,7 @@ protected Node( .toInstance(new SegmentReplicationSourceService(indicesService, transportService, recoverySettings)); } else { b.bind(SegmentReplicationTargetService.class).toInstance(SegmentReplicationTargetService.NO_OP); + b.bind(SegmentReplicationSourceService.class).toInstance(SegmentReplicationSourceService.NO_OP); } } b.bind(HttpServerTransport.class).toInstance(httpServerTransport); @@ -1110,6 +1113,9 @@ public Node start() throws NodeValidationException { assert transportService.getLocalNode().equals(localNodeFactory.getNode()) : "transportService has a different local node than the factory provided"; injector.getInstance(PeerRecoverySourceService.class).start(); + if (FeatureFlags.isEnabled(REPLICATION_TYPE)) { + injector.getInstance(SegmentReplicationSourceService.class).start(); + } // Load (and maybe upgrade) the metadata stored on disk final GatewayMetaState gatewayMetaState = injector.getInstance(GatewayMetaState.class); @@ -1285,6 +1291,9 @@ public synchronized void close() throws IOException { // close filter/fielddata caches after indices toClose.add(injector.getInstance(IndicesStore.class)); toClose.add(injector.getInstance(PeerRecoverySourceService.class)); + if (FeatureFlags.isEnabled(REPLICATION_TYPE)) { + toClose.add(injector.getInstance(SegmentReplicationSourceService.class)); + } toClose.add(() -> stopWatch.stop().start("cluster")); toClose.add(injector.getInstance(ClusterService.class)); toClose.add(() -> stopWatch.stop().start("node_connections_service")); diff --git a/server/src/main/java/org/opensearch/rest/action/search/RestCreatePitAction.java b/server/src/main/java/org/opensearch/rest/action/search/RestCreatePitAction.java new file mode 100644 index 0000000000000..9439670880015 --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/action/search/RestCreatePitAction.java @@ -0,0 +1,57 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The 
OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.rest.action.search; + +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.support.IndicesOptions; +import org.opensearch.client.node.NodeClient; +import org.opensearch.common.Strings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.RestStatusToXContentListener; + +import java.io.IOException; +import java.util.List; + +import static java.util.Arrays.asList; +import static java.util.Collections.unmodifiableList; +import static org.opensearch.rest.RestRequest.Method.POST; + +/** + * Rest action for creating PIT context + */ +public class RestCreatePitAction extends BaseRestHandler { + public static String ALLOW_PARTIAL_PIT_CREATION = "allow_partial_pit_creation"; + public static String KEEP_ALIVE = "keep_alive"; + + @Override + public String getName() { + return "create_pit_action"; + } + + @Override + public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + boolean allowPartialPitCreation = request.paramAsBoolean(ALLOW_PARTIAL_PIT_CREATION, true); + String[] indices = Strings.splitStringByCommaToArray(request.param("index")); + TimeValue keepAlive = request.paramAsTime(KEEP_ALIVE, null); + CreatePitRequest createPitRequest = new CreatePitRequest(keepAlive, allowPartialPitCreation, indices); + createPitRequest.setIndicesOptions(IndicesOptions.fromRequest(request, createPitRequest.indicesOptions())); + createPitRequest.setPreference(request.param("preference")); + createPitRequest.setRouting(request.param("routing")); + + return channel -> client.createPit(createPitRequest, new RestStatusToXContentListener<>(channel)); + } + + @Override + public List routes() { + return unmodifiableList(asList(new Route(POST, "/{index}/_search/point_in_time"))); + } + +} diff --git a/server/src/main/java/org/opensearch/rest/action/search/RestDeletePitAction.java b/server/src/main/java/org/opensearch/rest/action/search/RestDeletePitAction.java new file mode 100644 index 0000000000000..452e66f8f5018 --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/action/search/RestDeletePitAction.java @@ -0,0 +1,60 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.rest.action.search; + +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.DeletePitResponse; +import org.opensearch.client.node.NodeClient; +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.RestStatusToXContentListener; + +import java.io.IOException; +import java.util.List; + +import static java.util.Arrays.asList; +import static java.util.Collections.unmodifiableList; +import static org.opensearch.rest.RestRequest.Method.DELETE; + +/** + * Rest action for deleting PIT contexts + */ +public class RestDeletePitAction extends BaseRestHandler { + + @Override + public String getName() { + return "delete_pit_action"; + } + + @Override + public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + String allPitIdsQualifier = "_all"; + final DeletePitRequest deletePITRequest; + if (request.path().contains(allPitIdsQualifier)) { + deletePITRequest = new DeletePitRequest(asList(allPitIdsQualifier)); + } else { + deletePITRequest = new DeletePitRequest(); + request.withContentOrSourceParamParserOrNull((xContentParser -> { + if (xContentParser != null) { + try { + deletePITRequest.fromXContent(xContentParser); + } catch (IOException e) { + throw new IllegalArgumentException("Failed to parse request body", e); + } + } + })); + } + return channel -> client.deletePits(deletePITRequest, new RestStatusToXContentListener(channel)); + } + + @Override + public List routes() { + return unmodifiableList(asList(new Route(DELETE, "/_search/point_in_time"), new Route(DELETE, "/_search/point_in_time/_all"))); + } +} diff --git a/server/src/main/java/org/opensearch/search/SearchService.java b/server/src/main/java/org/opensearch/search/SearchService.java index 4bd95da193668..04fab85c163a9 100644 --- a/server/src/main/java/org/opensearch/search/SearchService.java +++ b/server/src/main/java/org/opensearch/search/SearchService.java @@ -881,6 +881,7 @@ public void createPitReaderContext(ShardId shardId, TimeValue keepAlive, ActionL shard.awaitShardSearchActive(ignored -> { Engine.SearcherSupplier searcherSupplier = null; ReaderContext readerContext = null; + Releasable decreasePitContexts = openPitContexts::decrementAndGet; try { if (openPitContexts.incrementAndGet() > maxOpenPitContext) { throw new OpenSearchRejectedExecutionException( @@ -902,15 +903,16 @@ public void createPitReaderContext(ShardId shardId, TimeValue keepAlive, ActionL searchOperationListener.onNewPitContext(finalReaderContext); readerContext.addOnClose(() -> { - openPitContexts.decrementAndGet(); searchOperationListener.onFreeReaderContext(finalReaderContext); searchOperationListener.onFreePitContext(finalReaderContext); }); + readerContext.addOnClose(decreasePitContexts); // add the newly created pit reader context to active readers putReaderContext(readerContext); readerContext = null; listener.onResponse(finalReaderContext.id()); } catch (Exception exc) { + Releasables.closeWhileHandlingException(decreasePitContexts); Releasables.closeWhileHandlingException(searcherSupplier, readerContext); listener.onFailure(exc); } diff --git a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java index d4d59a527ed2c..8660b1d9e202d 100644 --- a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java @@ -81,6 
+81,7 @@ import org.opensearch.index.seqno.RetentionLeaseNotFoundException; import org.opensearch.index.shard.IllegalIndexShardStateException; import org.opensearch.index.shard.IndexShardState; +import org.opensearch.index.shard.PrimaryShardClosedException; import org.opensearch.index.shard.ShardId; import org.opensearch.index.shard.ShardNotInPrimaryModeException; import org.opensearch.indices.IndexTemplateMissingException; @@ -859,7 +860,8 @@ public void testIds() { ids.put(159, NodeHealthCheckFailureException.class); ids.put(160, NoSeedNodeLeftException.class); ids.put(161, ReplicationFailedException.class); - ids.put(162, MasterTaskThrottlingException.class); + ids.put(162, PrimaryShardClosedException.class); + ids.put(163, MasterTaskThrottlingException.class); Map, Integer> reverse = new HashMap<>(); for (Map.Entry> entry : ids.entrySet()) { diff --git a/server/src/test/java/org/opensearch/action/admin/indices/get/GetIndexRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/get/GetIndexRequestTests.java new file mode 100644 index 0000000000000..f0d3db71c27b7 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/indices/get/GetIndexRequestTests.java @@ -0,0 +1,21 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.get; + +import org.opensearch.action.support.master.info.ClusterInfoRequest; +import org.opensearch.test.OpenSearchTestCase; + +import static org.hamcrest.Matchers.is; + +public class GetIndexRequestTests extends OpenSearchTestCase { + public void testGetIndexRequestExtendsClusterInfoRequestOfDeprecatedClassPath() { + GetIndexRequest getIndexRequest = new GetIndexRequest().indices("test"); + assertThat(getIndexRequest instanceof ClusterInfoRequest, is(true)); + } +} diff --git a/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java b/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java index a5c6e1c12b79c..c03c27f7d7e4d 100644 --- a/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java +++ b/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java @@ -14,6 +14,7 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.LatchedActionListener; import org.opensearch.action.StepListener; +import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; @@ -70,6 +71,8 @@ public class CreatePitControllerTests extends OpenSearchTestCase { ClusterService clusterServiceMock = null; private final ThreadPool threadPool = new TestThreadPool(getClass().getName()); + Settings settings = Settings.builder().put("node.name", CreatePitControllerTests.class.getSimpleName()).build(); + NodeClient client = new NodeClient(settings, threadPool); @Override public void tearDown() throws Exception { @@ -219,7 +222,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); request.setIndices(new String[] { "index" }); - PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, 
transportService, client); CreatePitController controller = new CreatePitController( searchTransportService, clusterServiceMock, @@ -308,7 +311,7 @@ public void sendFreePITContexts( CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); request.setIndices(new String[] { "index" }); - PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client); CreatePitController controller = new CreatePitController( searchTransportService, clusterServiceMock, @@ -406,7 +409,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); request.setIndices(new String[] { "index" }); - PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client); CreatePitController controller = new CreatePitController( searchTransportService, clusterServiceMock, @@ -494,7 +497,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod }; CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); request.setIndices(new String[] { "index" }); - PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client); CreatePitController controller = new CreatePitController( searchTransportService, clusterServiceMock, diff --git a/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java b/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java index 7a1d9a6fe963c..bdc0440a89f69 100644 --- a/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java @@ -13,6 +13,7 @@ import org.opensearch.action.support.ActionFilter; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.Metadata; @@ -62,6 +63,7 @@ public class TransportDeletePitActionTests extends OpenSearchTestCase { ClusterService clusterServiceMock = null; Settings settings = Settings.builder().put("node.name", TransportMultiSearchActionTests.class.getSimpleName()).build(); private ThreadPool threadPool = new ThreadPool(settings); + NodeClient client = new NodeClient(settings, threadPool); @Override public void tearDown() throws Exception { @@ -165,7 +167,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } }; - PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client); TransportDeletePitAction action = new TransportDeletePitAction( transportService, actionFilters, @@ -229,7 +231,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } 
}; - PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService) { + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client) { @Override public void getAllPits(ActionListener getAllPitsListener) { ListPitInfo listPitInfo = new ListPitInfo(getPitId(), 0, 0); @@ -312,7 +314,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } }; - PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client); TransportDeletePitAction action = new TransportDeletePitAction( transportService, actionFilters, @@ -371,7 +373,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } }; - PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client); TransportDeletePitAction action = new TransportDeletePitAction( transportService, actionFilters, @@ -439,7 +441,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } }; - PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client); TransportDeletePitAction action = new TransportDeletePitAction( transportService, actionFilters, @@ -505,7 +507,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } }; - PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService) { + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client) { @Override public void getAllPits(ActionListener getAllPitsListener) { ListPitInfo listPitInfo = new ListPitInfo(getPitId(), 0, 0); @@ -581,7 +583,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } }; - PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService) { + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client) { @Override public void getAllPits(ActionListener getAllPitsListener) { ListPitInfo listPitInfo = new ListPitInfo(getPitId(), 0, 0); @@ -661,7 +663,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } }; - PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService) { + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client) { @Override public void getAllPits(ActionListener getAllPitsListener) { ListPitInfo listPitInfo = new ListPitInfo(getPitId(), 0, 0); diff --git a/server/src/test/java/org/opensearch/action/support/replication/PendingReplicationActionsTests.java b/server/src/test/java/org/opensearch/action/support/replication/PendingReplicationActionsTests.java index 
ec0cefed842cd..66d3b843529ab 100644 --- a/server/src/test/java/org/opensearch/action/support/replication/PendingReplicationActionsTests.java +++ b/server/src/test/java/org/opensearch/action/support/replication/PendingReplicationActionsTests.java @@ -38,6 +38,7 @@ import org.opensearch.common.UUIDs; import org.opensearch.common.unit.TimeValue; import org.opensearch.index.shard.IndexShardClosedException; +import org.opensearch.index.shard.PrimaryShardClosedException; import org.opensearch.index.shard.ShardId; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; @@ -102,7 +103,7 @@ public void testAllocationIdActionWillBeCancelledOnClose() { pendingReplication.addPendingAction(allocationId, action); action.run(); pendingReplication.close(); - expectThrows(IndexShardClosedException.class, future::actionGet); + expectThrows(PrimaryShardClosedException.class, future::actionGet); } private class TestAction extends RetryableAction { diff --git a/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionTests.java b/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionTests.java index 4da32a890fd0e..137aca4966936 100644 --- a/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionTests.java +++ b/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionTests.java @@ -32,6 +32,7 @@ package org.opensearch.action.support.replication; +import org.hamcrest.MatcherAssert; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; @@ -57,6 +58,7 @@ import org.opensearch.index.IndexService; import org.opensearch.index.IndexingPressureService; import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.PrimaryShardClosedException; import org.opensearch.index.shard.ShardId; import org.opensearch.index.shard.ShardNotFoundException; import org.opensearch.index.translog.Translog; @@ -91,6 +93,7 @@ import java.util.stream.Collectors; import static java.util.Collections.emptyMap; +import static org.hamcrest.Matchers.emptyArray; import static org.opensearch.test.ClusterServiceUtils.createClusterService; import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.equalTo; @@ -395,6 +398,48 @@ public void testReplicaProxy() throws InterruptedException, ExecutionException { } } + public void testPrimaryClosedDoesNotFailShard() { + final CapturingTransport transport = new CapturingTransport(); + final TransportService transportService = transport.createTransportService( + clusterService.getSettings(), + threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> clusterService.localNode(), + null, + Collections.emptySet() + ); + transportService.start(); + transportService.acceptIncomingRequests(); + final ShardStateAction shardStateAction = new ShardStateAction(clusterService, transportService, null, null, threadPool); + final TestAction action = new TestAction( + Settings.EMPTY, + "internal:testAction", + transportService, + clusterService, + shardStateAction, + threadPool + ); + final String index = "test"; + final ShardId shardId = new ShardId(index, "_na_", 0); + final ClusterState state = ClusterStateCreationUtils.stateWithActivePrimary(index, true, 1, 0); + ClusterServiceUtils.setState(clusterService, state); + final long primaryTerm = state.metadata().index(index).primaryTerm(0); + final ShardRouting shardRouting = 
state.routingTable().shardRoutingTable(shardId).replicaShards().get(0); + + // Assert that failShardIfNeeded is a no-op for the PrimaryShardClosedException failure + final AtomicInteger callbackCount = new AtomicInteger(0); + action.newReplicasProxy() + .failShardIfNeeded( + shardRouting, + primaryTerm, + "test", + new PrimaryShardClosedException(shardId), + ActionListener.wrap(callbackCount::incrementAndGet) + ); + MatcherAssert.assertThat(transport.getCapturedRequestsAndClear(), emptyArray()); + MatcherAssert.assertThat(callbackCount.get(), equalTo(0)); + } + private class TestAction extends TransportWriteAction { private final boolean withDocumentFailureOnPrimary; diff --git a/server/src/test/java/org/opensearch/index/IndexModuleTests.java b/server/src/test/java/org/opensearch/index/IndexModuleTests.java index 45d93a5a12847..6bfdd9ae16773 100644 --- a/server/src/test/java/org/opensearch/index/IndexModuleTests.java +++ b/server/src/test/java/org/opensearch/index/IndexModuleTests.java @@ -89,7 +89,7 @@ import org.opensearch.index.similarity.NonNegativeScoresSimilarity; import org.opensearch.index.similarity.SimilarityService; import org.opensearch.index.store.FsDirectoryFactory; -import org.opensearch.index.store.RemoteDirectoryFactory; +import org.opensearch.index.store.RemoteSegmentStoreDirectoryFactory; import org.opensearch.indices.IndicesModule; import org.opensearch.indices.IndicesQueryCache; import org.opensearch.indices.analysis.AnalysisModule; @@ -234,7 +234,7 @@ private IndexService newIndexService(IndexModule module) throws IOException { writableRegistry(), () -> false, null, - new RemoteDirectoryFactory(() -> repositoriesService) + new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService) ); } diff --git a/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java b/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java index 1fe1a37dedae0..0008afcc901c7 100644 --- a/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java @@ -112,10 +112,14 @@ public void testUpdateSegments() throws Exception { final NRTReplicationEngine nrtEngine = buildNrtReplicaEngine(globalCheckpoint, nrtEngineStore) ) { // add docs to the primary engine. 
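// generateHistoryOnReplica now takes the operation type directly, so the generated history already
// contains only INDEX operations and the previous stream()/filter step is no longer needed.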
- List operations = generateHistoryOnReplica(between(1, 500), randomBoolean(), randomBoolean(), randomBoolean()) - .stream() - .filter(op -> op.operationType().equals(Engine.Operation.TYPE.INDEX)) - .collect(Collectors.toList()); + List operations = generateHistoryOnReplica( + between(1, 500), + randomBoolean(), + randomBoolean(), + randomBoolean(), + Engine.Operation.TYPE.INDEX + ); + for (Engine.Operation op : operations) { applyOperation(engine, op); applyOperation(nrtEngine, op); diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java index 8c00ab97a46ea..662afa80f65fc 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java @@ -2689,8 +2689,9 @@ public void testRestoreShardFromRemoteStore() throws IOException { storeDirectory.deleteFile(file); } + assertEquals(0, storeDirectory.listAll().length); + Directory remoteDirectory = ((FilterDirectory) ((FilterDirectory) target.remoteStore().directory()).getDelegate()).getDelegate(); - ((BaseDirectoryWrapper) remoteDirectory).setCheckIndexOnClose(false); // extra0 file is added as a part of https://lucene.apache.org/core/7_2_1/test-framework/org/apache/lucene/mockfile/ExtrasFS.html // Safe to remove without impacting the test diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java index af92d821a9043..6b05d67836272 100644 --- a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java @@ -8,132 +8,209 @@ package org.opensearch.index.shard; +import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.store.Directory; -import org.apache.lucene.store.IOContext; -import org.opensearch.test.OpenSearchTestCase; +import org.apache.lucene.store.FilterDirectory; +import org.apache.lucene.tests.store.BaseDirectoryWrapper; +import org.junit.After; +import org.opensearch.action.ActionListener; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.routing.IndexShardRoutingTable; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.common.concurrent.GatedCloseable; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.engine.InternalEngineFactory; +import org.opensearch.index.store.RemoteSegmentStoreDirectory; +import org.opensearch.index.store.Store; +import org.opensearch.threadpool.ThreadPool; import java.io.IOException; -import java.nio.file.NoSuchFileException; +import java.util.Collections; +import java.util.Map; +import java.util.concurrent.CountDownLatch; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.doThrow; +public class RemoteStoreRefreshListenerTests extends IndexShardTestCase { + private IndexShard indexShard; + private RemoteStoreRefreshListener remoteStoreRefreshListener; -public class RemoteStoreRefreshListenerTests extends OpenSearchTestCase { - private Directory storeDirectory; - private Directory remoteDirectory; + public void setup(boolean primary, int numberOfDocs) 
throws IOException { + indexShard = newStartedShard( + primary, + Settings.builder().put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true).build(), + new InternalEngineFactory() + ); - private RemoteStoreRefreshListener remoteStoreRefreshListener; + indexDocs(1, numberOfDocs); + indexShard.refresh("test"); - public void setup(String[] remoteFiles) throws IOException { - storeDirectory = mock(Directory.class); - remoteDirectory = mock(Directory.class); - when(remoteDirectory.listAll()).thenReturn(remoteFiles); - remoteStoreRefreshListener = new RemoteStoreRefreshListener(storeDirectory, remoteDirectory); + remoteStoreRefreshListener = new RemoteStoreRefreshListener(indexShard); } - public void testAfterRefreshFalse() throws IOException { - setup(new String[0]); - remoteStoreRefreshListener.afterRefresh(false); - verify(storeDirectory, times(0)).listAll(); + private void indexDocs(int startDocId, int numberOfDocs) throws IOException { + for (int i = startDocId; i < startDocId + numberOfDocs; i++) { + indexDoc(indexShard, "_doc", Integer.toString(i)); + } } - public void testAfterRefreshTrueNoLocalFiles() throws IOException { - setup(new String[0]); + @After + public void tearDown() throws Exception { + Directory storeDirectory = ((FilterDirectory) ((FilterDirectory) indexShard.store().directory()).getDelegate()).getDelegate(); + ((BaseDirectoryWrapper) storeDirectory).setCheckIndexOnClose(false); + closeShards(indexShard); + super.tearDown(); + } - when(storeDirectory.listAll()).thenReturn(new String[0]); + public void testAfterRefresh() throws IOException { + setup(true, 3); + assertDocs(indexShard, "1", "2", "3"); - remoteStoreRefreshListener.afterRefresh(true); - verify(storeDirectory).listAll(); - verify(remoteDirectory, times(0)).copyFrom(any(), any(), any(), any()); - verify(remoteDirectory, times(0)).deleteFile(any()); - } + try (Store remoteStore = indexShard.remoteStore()) { + RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = + (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) remoteStore.directory()).getDelegate()).getDelegate(); - public void testAfterRefreshOnlyUploadFiles() throws IOException { - setup(new String[0]); + verifyUploadedSegments(remoteSegmentStoreDirectory); - String[] localFiles = new String[] { "segments_1", "0.si", "0.cfs", "0.cfe" }; - when(storeDirectory.listAll()).thenReturn(localFiles); + // This is to check if reading data from remote segment store works as well. 
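// init() is expected to reload the uploaded-segment metadata from the remote store, so verifying
// again after it exercises the remote read path rather than the in-memory state left by the upload.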
+ remoteSegmentStoreDirectory.init(); - remoteStoreRefreshListener.afterRefresh(true); - verify(storeDirectory).listAll(); - verify(remoteDirectory).copyFrom(storeDirectory, "segments_1", "segments_1", IOContext.DEFAULT); - verify(remoteDirectory).copyFrom(storeDirectory, "0.si", "0.si", IOContext.DEFAULT); - verify(remoteDirectory).copyFrom(storeDirectory, "0.cfs", "0.cfs", IOContext.DEFAULT); - verify(remoteDirectory).copyFrom(storeDirectory, "0.cfe", "0.cfe", IOContext.DEFAULT); - verify(remoteDirectory, times(0)).deleteFile(any()); + verifyUploadedSegments(remoteSegmentStoreDirectory); + } } - public void testAfterRefreshOnlyUploadAndDelete() throws IOException { - setup(new String[] { "0.si", "0.cfs" }); + public void testAfterCommit() throws IOException { + setup(true, 3); + assertDocs(indexShard, "1", "2", "3"); + flushShard(indexShard); - String[] localFiles = new String[] { "segments_1", "1.si", "1.cfs", "1.cfe" }; - when(storeDirectory.listAll()).thenReturn(localFiles); + try (Store remoteStore = indexShard.remoteStore()) { + RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = + (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) remoteStore.directory()).getDelegate()).getDelegate(); - remoteStoreRefreshListener.afterRefresh(true); - verify(storeDirectory).listAll(); - verify(remoteDirectory).copyFrom(storeDirectory, "segments_1", "segments_1", IOContext.DEFAULT); - verify(remoteDirectory).copyFrom(storeDirectory, "1.si", "1.si", IOContext.DEFAULT); - verify(remoteDirectory).copyFrom(storeDirectory, "1.cfs", "1.cfs", IOContext.DEFAULT); - verify(remoteDirectory).copyFrom(storeDirectory, "1.cfe", "1.cfe", IOContext.DEFAULT); - verify(remoteDirectory).deleteFile("0.si"); - verify(remoteDirectory).deleteFile("0.cfs"); + verifyUploadedSegments(remoteSegmentStoreDirectory); + + // This is to check if reading data from remote segment store works as well. 
+ remoteSegmentStoreDirectory.init(); + + verifyUploadedSegments(remoteSegmentStoreDirectory); + } } - public void testAfterRefreshOnlyDelete() throws IOException { - setup(new String[] { "0.si", "0.cfs" }); + public void testRefreshAfterCommit() throws IOException { + setup(true, 3); + assertDocs(indexShard, "1", "2", "3"); + flushShard(indexShard); - String[] localFiles = new String[] { "0.si" }; - when(storeDirectory.listAll()).thenReturn(localFiles); + indexDocs(4, 4); + indexShard.refresh("test"); - remoteStoreRefreshListener.afterRefresh(true); - verify(storeDirectory).listAll(); - verify(remoteDirectory, times(0)).copyFrom(any(), any(), any(), any()); - verify(remoteDirectory).deleteFile("0.cfs"); - } + indexDocs(8, 4); + indexShard.refresh("test"); - public void testAfterRefreshTempLocalFile() throws IOException { - setup(new String[0]); + try (Store remoteStore = indexShard.remoteStore()) { + RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = + (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) remoteStore.directory()).getDelegate()).getDelegate(); - String[] localFiles = new String[] { "segments_1", "0.si", "0.cfs.tmp" }; - when(storeDirectory.listAll()).thenReturn(localFiles); - doThrow(new NoSuchFileException("0.cfs.tmp")).when(remoteDirectory) - .copyFrom(storeDirectory, "0.cfs.tmp", "0.cfs.tmp", IOContext.DEFAULT); + verifyUploadedSegments(remoteSegmentStoreDirectory); - remoteStoreRefreshListener.afterRefresh(true); - verify(storeDirectory).listAll(); - verify(remoteDirectory).copyFrom(storeDirectory, "segments_1", "segments_1", IOContext.DEFAULT); - verify(remoteDirectory).copyFrom(storeDirectory, "0.si", "0.si", IOContext.DEFAULT); - verify(remoteDirectory, times(0)).deleteFile(any()); + // This is to check if reading data from remote segment store works as well. + remoteSegmentStoreDirectory.init(); + + verifyUploadedSegments(remoteSegmentStoreDirectory); + } } - public void testAfterRefreshConsecutive() throws IOException { - setup(new String[0]); + public void testAfterMultipleCommits() throws IOException { + setup(true, 3); + assertDocs(indexShard, "1", "2", "3"); - String[] localFiles = new String[] { "segments_1", "0.si", "0.cfs", "0.cfe" }; - when(storeDirectory.listAll()).thenReturn(localFiles); - doThrow(new IOException("0.cfs")).when(remoteDirectory).copyFrom(storeDirectory, "0.cfs", "0.cfe", IOContext.DEFAULT); - doThrow(new IOException("0.cfe")).when(remoteDirectory).copyFrom(storeDirectory, "0.cfe", "0.cfe", IOContext.DEFAULT); + for (int i = 0; i < RemoteStoreRefreshListener.LAST_N_METADATA_FILES_TO_KEEP + 3; i++) { + indexDocs(4 * (i + 1), 4); + flushShard(indexShard); + } + try (Store remoteStore = indexShard.remoteStore()) { + RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = + (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) remoteStore.directory()).getDelegate()).getDelegate(); + + verifyUploadedSegments(remoteSegmentStoreDirectory); + + // This is to check if reading data from remote segment store works as well. 
+ remoteSegmentStoreDirectory.init(); + + verifyUploadedSegments(remoteSegmentStoreDirectory); + } + } + + public void testReplica() throws IOException { + setup(false, 3); remoteStoreRefreshListener.afterRefresh(true); - verify(storeDirectory).listAll(); - verify(remoteDirectory).copyFrom(storeDirectory, "segments_1", "segments_1", IOContext.DEFAULT); - verify(remoteDirectory).copyFrom(storeDirectory, "0.si", "0.si", IOContext.DEFAULT); - verify(remoteDirectory).copyFrom(storeDirectory, "0.cfs", "0.cfs", IOContext.DEFAULT); - verify(remoteDirectory).copyFrom(storeDirectory, "0.cfe", "0.cfe", IOContext.DEFAULT); - verify(remoteDirectory, times(0)).deleteFile(any()); - String[] localFilesSecondRefresh = new String[] { "segments_1", "0.cfs", "1.cfs", "1.cfe" }; - when(storeDirectory.listAll()).thenReturn(localFilesSecondRefresh); + try (Store remoteStore = indexShard.remoteStore()) { + RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = + (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) remoteStore.directory()).getDelegate()).getDelegate(); + + assertEquals(0, remoteSegmentStoreDirectory.getSegmentsUploadedToRemoteStore().size()); + } + } + public void testReplicaPromotion() throws IOException, InterruptedException { + setup(false, 3); remoteStoreRefreshListener.afterRefresh(true); - verify(remoteDirectory).copyFrom(storeDirectory, "0.cfs", "0.cfs", IOContext.DEFAULT); - verify(remoteDirectory).copyFrom(storeDirectory, "1.cfs", "1.cfs", IOContext.DEFAULT); - verify(remoteDirectory).copyFrom(storeDirectory, "1.cfe", "1.cfe", IOContext.DEFAULT); - verify(remoteDirectory).deleteFile("0.si"); + RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = + (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) indexShard.remoteStore().directory()).getDelegate()) + .getDelegate(); + + assertEquals(0, remoteSegmentStoreDirectory.getSegmentsUploadedToRemoteStore().size()); + + final ShardRouting replicaRouting = indexShard.routingEntry(); + promoteReplica( + indexShard, + Collections.singleton(replicaRouting.allocationId().getId()), + new IndexShardRoutingTable.Builder(replicaRouting.shardId()).addShard(replicaRouting).build() + ); + + // The following logic is referenced from IndexShardTests.testPrimaryFillsSeqNoGapsOnPromotion + // ToDo: Add wait logic as part of promoteReplica() + final CountDownLatch latch = new CountDownLatch(1); + indexShard.acquirePrimaryOperationPermit(new ActionListener<>() { + @Override + public void onResponse(Releasable releasable) { + releasable.close(); + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + throw new AssertionError(e); + } + }, ThreadPool.Names.GENERIC, ""); + + latch.await(); + + indexDocs(4, 4); + indexShard.refresh("test"); + remoteStoreRefreshListener.afterRefresh(true); + + verifyUploadedSegments(remoteSegmentStoreDirectory); + + // This is to check if reading data from remote segment store works as well. 
+ remoteSegmentStoreDirectory.init(); + + verifyUploadedSegments(remoteSegmentStoreDirectory); + } + + private void verifyUploadedSegments(RemoteSegmentStoreDirectory remoteSegmentStoreDirectory) throws IOException { + Map uploadedSegments = remoteSegmentStoreDirectory + .getSegmentsUploadedToRemoteStore(); + try (GatedCloseable segmentInfosGatedCloseable = indexShard.getSegmentInfosSnapshot()) { + SegmentInfos segmentInfos = segmentInfosGatedCloseable.get(); + for (String file : segmentInfos.files(true)) { + if (!RemoteStoreRefreshListener.EXCLUDE_FILES.contains(file)) { + assertTrue(uploadedSegments.containsKey(file)); + } + } + } } } diff --git a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java index 23371a39871c7..88a3bdad53d0c 100644 --- a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java @@ -8,11 +8,18 @@ package org.opensearch.index.shard; +import org.junit.Assert; +import org.opensearch.OpenSearchException; +import org.opensearch.action.ActionListener; import org.opensearch.action.delete.DeleteRequest; import org.opensearch.action.index.IndexRequest; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.CancellableThreads; import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.IndexSettings; import org.opensearch.index.engine.DocIdSeqNoAndSource; @@ -21,12 +28,28 @@ import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.replication.OpenSearchIndexLevelReplicationTestCase; +import org.opensearch.index.store.Store; +import org.opensearch.index.store.StoreFileMetadata; +import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.indices.replication.CheckpointInfoResponse; +import org.opensearch.indices.replication.GetSegmentFilesResponse; +import org.opensearch.indices.replication.SegmentReplicationSource; +import org.opensearch.indices.replication.SegmentReplicationSourceFactory; +import org.opensearch.indices.replication.SegmentReplicationState; +import org.opensearch.indices.replication.SegmentReplicationTarget; +import org.opensearch.indices.replication.SegmentReplicationTargetService; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.indices.replication.common.CopyState; import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; import java.io.IOException; +import java.util.Collections; import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import static java.util.Arrays.asList; import static org.hamcrest.Matchers.equalTo; @@ -34,6 +57,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class 
SegmentReplicationIndexShardTests extends OpenSearchIndexLevelReplicationTestCase { @@ -241,6 +265,213 @@ public void testNRTReplicaPromotedAsPrimary() throws Exception { } } + public void testReplicaPromotedWhileReplicating() throws Exception { + try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { + shards.startAll(); + final IndexShard oldPrimary = shards.getPrimary(); + final IndexShard nextPrimary = shards.getReplicas().get(0); + + final int numDocs = shards.indexDocs(randomInt(10)); + oldPrimary.refresh("Test"); + shards.syncGlobalCheckpoint(); + + final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); + final SegmentReplicationTargetService targetService = newTargetService(sourceFactory); + SegmentReplicationSource source = new SegmentReplicationSource() { + @Override + public void getCheckpointMetadata( + long replicationId, + ReplicationCheckpoint checkpoint, + ActionListener listener + ) { + resolveCheckpointInfoResponseListener(listener, oldPrimary); + ShardRouting oldRouting = nextPrimary.shardRouting; + try { + shards.promoteReplicaToPrimary(nextPrimary); + } catch (IOException e) { + Assert.fail("Promotion should not fail"); + } + targetService.shardRoutingChanged(nextPrimary, oldRouting, nextPrimary.shardRouting); + } + + @Override + public void getSegmentFiles( + long replicationId, + ReplicationCheckpoint checkpoint, + List filesToFetch, + Store store, + ActionListener listener + ) { + listener.onResponse(new GetSegmentFilesResponse(Collections.emptyList())); + } + }; + when(sourceFactory.get(any())).thenReturn(source); + startReplicationAndAssertCancellation(nextPrimary, targetService); + // wait for replica to finish being promoted, and assert doc counts. 
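// Acquiring a primary operation permit serves as the wait: the permit should only be granted once
// the promotion work on the shard (including the switch back to InternalEngine asserted below) has
// completed, so counting down the latch inside onResponse makes await() an effective barrier.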
+ final CountDownLatch latch = new CountDownLatch(1); + nextPrimary.acquirePrimaryOperationPermit(new ActionListener<>() { + @Override + public void onResponse(Releasable releasable) { + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + throw new AssertionError(e); + } + }, ThreadPool.Names.GENERIC, ""); + latch.await(); + assertEquals(nextPrimary.getEngine().getClass(), InternalEngine.class); + nextPrimary.refresh("test"); + + oldPrimary.close("demoted", false); + oldPrimary.store().close(); + IndexShard newReplica = shards.addReplicaWithExistingPath(oldPrimary.shardPath(), oldPrimary.routingEntry().currentNodeId()); + shards.recoverReplica(newReplica); + + assertDocCount(nextPrimary, numDocs); + assertDocCount(newReplica, numDocs); + + nextPrimary.refresh("test"); + replicateSegments(nextPrimary, shards.getReplicas()); + final List docsAfterRecovery = getDocIdAndSeqNos(shards.getPrimary()); + for (IndexShard shard : shards.getReplicas()) { + assertThat(shard.routingEntry().toString(), getDocIdAndSeqNos(shard), equalTo(docsAfterRecovery)); + } + } + } + + public void testReplicaClosesWhileReplicating_AfterGetCheckpoint() throws Exception { + try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { + shards.startAll(); + IndexShard primary = shards.getPrimary(); + final IndexShard replica = shards.getReplicas().get(0); + + final int numDocs = shards.indexDocs(randomInt(10)); + primary.refresh("Test"); + + final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); + final SegmentReplicationTargetService targetService = newTargetService(sourceFactory); + SegmentReplicationSource source = new SegmentReplicationSource() { + @Override + public void getCheckpointMetadata( + long replicationId, + ReplicationCheckpoint checkpoint, + ActionListener listener + ) { + // trigger a cancellation by closing the replica. 
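// beforeIndexShardClosed is the same hook fired when a shard is closed; invoking it from inside
// getCheckpointMetadata simulates the replica closing while the checkpoint fetch is still in flight,
// so the ongoing target should be cancelled and the listener should observe the cancellation.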
+ targetService.beforeIndexShardClosed(replica.shardId, replica, Settings.EMPTY); + resolveCheckpointInfoResponseListener(listener, primary); + } + + @Override + public void getSegmentFiles( + long replicationId, + ReplicationCheckpoint checkpoint, + List filesToFetch, + Store store, + ActionListener listener + ) { + Assert.fail("Should not be reached"); + } + }; + when(sourceFactory.get(any())).thenReturn(source); + startReplicationAndAssertCancellation(replica, targetService); + + shards.removeReplica(replica); + closeShards(replica); + } + } + + public void testReplicaClosesWhileReplicating_AfterGetSegmentFiles() throws Exception { + try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { + shards.startAll(); + IndexShard primary = shards.getPrimary(); + final IndexShard replica = shards.getReplicas().get(0); + + final int numDocs = shards.indexDocs(randomInt(10)); + primary.refresh("Test"); + + final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); + final SegmentReplicationTargetService targetService = newTargetService(sourceFactory); + SegmentReplicationSource source = new SegmentReplicationSource() { + @Override + public void getCheckpointMetadata( + long replicationId, + ReplicationCheckpoint checkpoint, + ActionListener listener + ) { + resolveCheckpointInfoResponseListener(listener, primary); + } + + @Override + public void getSegmentFiles( + long replicationId, + ReplicationCheckpoint checkpoint, + List filesToFetch, + Store store, + ActionListener listener + ) { + // randomly resolve the listener, indicating the source has resolved. + listener.onResponse(new GetSegmentFilesResponse(Collections.emptyList())); + targetService.beforeIndexShardClosed(replica.shardId, replica, Settings.EMPTY); + } + }; + when(sourceFactory.get(any())).thenReturn(source); + startReplicationAndAssertCancellation(replica, targetService); + + shards.removeReplica(replica); + closeShards(replica); + } + } + + public void testPrimaryCancelsExecution() throws Exception { + try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { + shards.startAll(); + IndexShard primary = shards.getPrimary(); + final IndexShard replica = shards.getReplicas().get(0); + + final int numDocs = shards.indexDocs(randomInt(10)); + primary.refresh("Test"); + + final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); + final SegmentReplicationTargetService targetService = newTargetService(sourceFactory); + SegmentReplicationSource source = new SegmentReplicationSource() { + @Override + public void getCheckpointMetadata( + long replicationId, + ReplicationCheckpoint checkpoint, + ActionListener listener + ) { + listener.onFailure(new CancellableThreads.ExecutionCancelledException("Cancelled")); + } + + @Override + public void getSegmentFiles( + long replicationId, + ReplicationCheckpoint checkpoint, + List filesToFetch, + Store store, + ActionListener listener + ) {} + }; + when(sourceFactory.get(any())).thenReturn(source); + startReplicationAndAssertCancellation(replica, targetService); + + shards.removeReplica(replica); + closeShards(replica); + } + } + + private SegmentReplicationTargetService newTargetService(SegmentReplicationSourceFactory sourceFactory) { + return new SegmentReplicationTargetService( + threadPool, + new RecoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), + mock(TransportService.class), + 
sourceFactory + ); + } + /** * Assert persisted and searchable doc counts. This method should not be used while docs are concurrently indexed because * it asserts point in time seqNos are relative to the doc counts. @@ -253,4 +484,48 @@ private void assertDocCounts(IndexShard indexShard, int expectedPersistedDocCoun // processed cp should be 1 less than our searchable doc count. assertEquals(expectedSearchableDocCount - 1, indexShard.getProcessedLocalCheckpoint()); } + + private void resolveCheckpointInfoResponseListener(ActionListener listener, IndexShard primary) { + try { + final CopyState copyState = new CopyState(ReplicationCheckpoint.empty(primary.shardId), primary); + listener.onResponse( + new CheckpointInfoResponse( + copyState.getCheckpoint(), + copyState.getMetadataSnapshot(), + copyState.getInfosBytes(), + copyState.getPendingDeleteFiles() + ) + ); + } catch (IOException e) { + logger.error("Unexpected error computing CopyState", e); + Assert.fail("Failed to compute copyState"); + } + } + + private void startReplicationAndAssertCancellation(IndexShard replica, SegmentReplicationTargetService targetService) + throws InterruptedException { + CountDownLatch latch = new CountDownLatch(1); + final SegmentReplicationTarget target = targetService.startReplication( + ReplicationCheckpoint.empty(replica.shardId), + replica, + new SegmentReplicationTargetService.SegmentReplicationListener() { + @Override + public void onReplicationDone(SegmentReplicationState state) { + Assert.fail("Replication should not complete"); + } + + @Override + public void onReplicationFailure(SegmentReplicationState state, OpenSearchException e, boolean sendShardFailure) { + assertTrue(e instanceof CancellableThreads.ExecutionCancelledException); + assertFalse(sendShardFailure); + assertEquals(SegmentReplicationState.Stage.CANCELLED, state.getStage()); + latch.countDown(); + } + } + ); + + latch.await(2, TimeUnit.SECONDS); + assertEquals("Should have resolved listener with failure", 0, latch.getCount()); + assertNull(targetService.get(target.getId())); + } } diff --git a/server/src/test/java/org/opensearch/index/store/RemoteIndexInputTests.java b/server/src/test/java/org/opensearch/index/store/RemoteIndexInputTests.java index 273d3c7e37c56..cd35349e33b59 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteIndexInputTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteIndexInputTests.java @@ -44,6 +44,7 @@ public void testReadByte() throws IOException { when(inputStream.read()).thenReturn(10); assertEquals(10, remoteIndexInput.readByte()); + assertEquals(1, remoteIndexInput.getFilePointer()); verify(inputStream).read(any()); } @@ -52,13 +53,19 @@ public void testReadByteIOException() throws IOException { when(inputStream.read(any())).thenThrow(new IOException("Error reading")); assertThrows(IOException.class, () -> remoteIndexInput.readByte()); + assertEquals(0, remoteIndexInput.getFilePointer()); } public void testReadBytes() throws IOException { - byte[] buffer = new byte[10]; - remoteIndexInput.readBytes(buffer, 10, 20); + byte[] buffer = new byte[20]; + when(inputStream.read(eq(buffer), anyInt(), anyInt())).thenReturn(10).thenReturn(3).thenReturn(6).thenReturn(-1); + remoteIndexInput.readBytes(buffer, 0, 20); - verify(inputStream).read(buffer, 10, 20); + verify(inputStream).read(buffer, 0, 20); + verify(inputStream).read(buffer, 10, 10); + verify(inputStream).read(buffer, 13, 7); + verify(inputStream).read(buffer, 19, 1); + assertEquals(19, remoteIndexInput.getFilePointer()); 
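+ // the stubbed stream returned 10, 3 and 6 bytes before signalling end-of-stream, so the file pointer should have advanced to 10 + 3 + 6 = 19.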
} public void testReadBytesMultipleIterations() throws IOException { @@ -95,20 +102,14 @@ public void testLength() { assertEquals(FILESIZE, remoteIndexInput.length()); } - public void testSeek() throws IOException { - remoteIndexInput.seek(10); - - verify(inputStream).skip(10); - } - - public void testSeekIOException() throws IOException { - when(inputStream.skip(10)).thenThrow(new IOException("Error reading")); - - assertThrows(IOException.class, () -> remoteIndexInput.seek(10)); + public void testSeek() { + assertThrows(UnsupportedOperationException.class, () -> remoteIndexInput.seek(100L)); } - public void testGetFilePointer() { - assertThrows(UnsupportedOperationException.class, () -> remoteIndexInput.getFilePointer()); + public void testGetFilePointer() throws IOException { + when(inputStream.read(any(), eq(0), eq(8))).thenReturn(8); + remoteIndexInput.readBytes(new byte[8], 0, 8); + assertEquals(8, remoteIndexInput.getFilePointer()); } public void testSlice() { diff --git a/server/src/test/java/org/opensearch/index/store/RemoteDirectoryFactoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java similarity index 70% rename from server/src/test/java/org/opensearch/index/store/RemoteDirectoryFactoryTests.java rename to server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java index e8357d2c184bf..0105d0dc309c2 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteDirectoryFactoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java @@ -11,6 +11,7 @@ import org.apache.lucene.store.Directory; import org.junit.Before; import org.mockito.ArgumentCaptor; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStore; @@ -27,29 +28,31 @@ import java.io.IOException; import java.nio.file.Path; import java.util.Collections; +import java.util.List; import java.util.function.Supplier; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.times; -public class RemoteDirectoryFactoryTests extends OpenSearchTestCase { +public class RemoteSegmentStoreDirectoryFactoryTests extends OpenSearchTestCase { private Supplier repositoriesServiceSupplier; private RepositoriesService repositoriesService; - private RemoteDirectoryFactory remoteDirectoryFactory; + private RemoteSegmentStoreDirectoryFactory remoteSegmentStoreDirectoryFactory; @Before public void setup() { repositoriesServiceSupplier = mock(Supplier.class); repositoriesService = mock(RepositoriesService.class); when(repositoriesServiceSupplier.get()).thenReturn(repositoriesService); - remoteDirectoryFactory = new RemoteDirectoryFactory(repositoriesServiceSupplier); + remoteSegmentStoreDirectoryFactory = new RemoteSegmentStoreDirectoryFactory(repositoriesServiceSupplier); } public void testNewDirectory() throws IOException { - Settings settings = Settings.builder().build(); + Settings settings = Settings.builder().put(IndexMetadata.SETTING_INDEX_UUID, "uuid_1").build(); IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("foo", settings); Path tempDir = createTempDir().resolve(indexSettings.getUUID()).resolve("0"); ShardPath shardPath = new ShardPath(false, tempDir, tempDir, new 
ShardId(indexSettings.getIndex(), 0)); @@ -57,20 +60,21 @@ public void testNewDirectory() throws IOException { BlobStore blobStore = mock(BlobStore.class); BlobContainer blobContainer = mock(BlobContainer.class); when(repository.blobStore()).thenReturn(blobStore); + when(repository.basePath()).thenReturn(new BlobPath().add("base_path")); when(blobStore.blobContainer(any())).thenReturn(blobContainer); when(blobContainer.listBlobs()).thenReturn(Collections.emptyMap()); when(repositoriesService.repository("remote_store_repository")).thenReturn(repository); - try (Directory directory = remoteDirectoryFactory.newDirectory("remote_store_repository", indexSettings, shardPath)) { - assertTrue(directory instanceof RemoteDirectory); + try (Directory directory = remoteSegmentStoreDirectoryFactory.newDirectory("remote_store_repository", indexSettings, shardPath)) { + assertTrue(directory instanceof RemoteSegmentStoreDirectory); ArgumentCaptor blobPathCaptor = ArgumentCaptor.forClass(BlobPath.class); - verify(blobStore).blobContainer(blobPathCaptor.capture()); - BlobPath blobPath = blobPathCaptor.getValue(); - assertEquals("foo/0/", blobPath.buildAsString()); + verify(blobStore, times(2)).blobContainer(blobPathCaptor.capture()); + List blobPaths = blobPathCaptor.getAllValues(); + assertEquals("base_path/uuid_1/0/segments/data/", blobPaths.get(0).buildAsString()); + assertEquals("base_path/uuid_1/0/segments/metadata/", blobPaths.get(1).buildAsString()); - directory.listAll(); - verify(blobContainer).listBlobs(); + verify(blobContainer).listBlobsByPrefix(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX); verify(repositoriesService).repository("remote_store_repository"); } } @@ -85,7 +89,7 @@ public void testNewDirectoryRepositoryDoesNotExist() { assertThrows( IllegalArgumentException.class, - () -> remoteDirectoryFactory.newDirectory("remote_store_repository", indexSettings, shardPath) + () -> remoteSegmentStoreDirectoryFactory.newDirectory("remote_store_repository", indexSettings, shardPath) ); } diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java index 4eabfa74625f2..96f14616fb54b 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java @@ -15,6 +15,7 @@ import org.apache.lucene.store.IndexOutput; import org.apache.lucene.tests.util.LuceneTestCase; import org.junit.Before; +import org.opensearch.common.UUIDs; import org.opensearch.common.collect.Set; import org.opensearch.test.OpenSearchTestCase; @@ -129,26 +130,52 @@ public void testInitNoMetadataFile() throws IOException { private Map getDummyMetadata(String prefix, int commitGeneration) { Map metadata = new HashMap<>(); - metadata.put(prefix + ".cfe", prefix + ".cfe::" + prefix + ".cfe__qrt::" + randomIntBetween(1000, 5000)); - metadata.put(prefix + ".cfs", prefix + ".cfs::" + prefix + ".cfs__zxd::" + randomIntBetween(1000, 5000)); - metadata.put(prefix + ".si", prefix + ".si::" + prefix + ".si__yui::" + randomIntBetween(1000, 5000)); + + metadata.put(prefix + ".cfe", prefix + ".cfe::" + prefix + ".cfe__" + UUIDs.base64UUID() + "::" + randomIntBetween(1000, 5000)); + metadata.put(prefix + ".cfs", prefix + ".cfs::" + prefix + ".cfs__" + UUIDs.base64UUID() + "::" + randomIntBetween(1000, 5000)); + metadata.put(prefix + ".si", prefix + ".si::" + prefix + ".si__" + 
UUIDs.base64UUID() + "::" + randomIntBetween(1000, 5000)); metadata.put( "segments_" + commitGeneration, - "segments_" + commitGeneration + "::segments_" + commitGeneration + "__exv::" + randomIntBetween(1000, 5000) + "segments_" + + commitGeneration + + "::segments_" + + commitGeneration + + "__" + + UUIDs.base64UUID() + + "::" + + randomIntBetween(1000, 5000) ); return metadata; } - private void populateMetadata() throws IOException { + private Map> populateMetadata() throws IOException { List metadataFiles = List.of("metadata__1__5__abc", "metadata__1__6__pqr", "metadata__2__1__zxv"); when(remoteMetadataDirectory.listFilesByPrefix(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX)).thenReturn( metadataFiles ); - IndexInput indexInput = mock(IndexInput.class); - Map dummyMetadata = getDummyMetadata("_0", 1); - when(indexInput.readMapOfStrings()).thenReturn(dummyMetadata); - when(remoteMetadataDirectory.openInput("metadata__2__1__zxv", IOContext.DEFAULT)).thenReturn(indexInput); + Map> metadataFilenameContentMapping = Map.of( + "metadata__1__5__abc", + getDummyMetadata("_0", 1), + "metadata__1__6__pqr", + getDummyMetadata("_0", 1), + "metadata__2__1__zxv", + getDummyMetadata("_0", 1) + ); + + IndexInput indexInput1 = mock(IndexInput.class); + when(indexInput1.readMapOfStrings()).thenReturn(metadataFilenameContentMapping.get("metadata__1__5__abc")); + when(remoteMetadataDirectory.openInput("metadata__1__5__abc", IOContext.DEFAULT)).thenReturn(indexInput1); + + IndexInput indexInput2 = mock(IndexInput.class); + when(indexInput2.readMapOfStrings()).thenReturn(metadataFilenameContentMapping.get("metadata__1__6__pqr")); + when(remoteMetadataDirectory.openInput("metadata__1__6__pqr", IOContext.DEFAULT)).thenReturn(indexInput2); + + IndexInput indexInput3 = mock(IndexInput.class); + when(indexInput3.readMapOfStrings()).thenReturn(metadataFilenameContentMapping.get("metadata__2__1__zxv")); + when(remoteMetadataDirectory.openInput("metadata__2__1__zxv", IOContext.DEFAULT)).thenReturn(indexInput3); + + return metadataFilenameContentMapping; } public void testInit() throws IOException { @@ -291,20 +318,39 @@ public void testCopyFromException() throws IOException { } public void testContainsFile() throws IOException { - populateMetadata(); + List metadataFiles = List.of("metadata__1__5__abc"); + when(remoteMetadataDirectory.listFilesByPrefix(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX)).thenReturn( + metadataFiles + ); + + Map metadata = new HashMap<>(); + metadata.put("_0.cfe", "_0.cfe::_0.cfe__" + UUIDs.base64UUID() + "::1234"); + metadata.put("_0.cfs", "_0.cfs::_0.cfs__" + UUIDs.base64UUID() + "::2345"); + + Map> metadataFilenameContentMapping = Map.of("metadata__1__5__abc", metadata); + + IndexInput indexInput1 = mock(IndexInput.class); + when(indexInput1.readMapOfStrings()).thenReturn(metadataFilenameContentMapping.get("metadata__1__5__abc")); + when(remoteMetadataDirectory.openInput("metadata__1__5__abc", IOContext.DEFAULT)).thenReturn(indexInput1); + remoteSegmentStoreDirectory.init(); - // This is not the correct way to add files but the other way is to open up access to fields in UploadedSegmentMetadata Map uploadedSegmentMetadataMap = remoteSegmentStoreDirectory .getSegmentsUploadedToRemoteStore(); - uploadedSegmentMetadataMap.put( - "_100.si", - new RemoteSegmentStoreDirectory.UploadedSegmentMetadata("_100.si", "_100.si__uuid1", "1234") + + assertThrows( + UnsupportedOperationException.class, + () -> uploadedSegmentMetadataMap.put( + "_100.si", + new 
RemoteSegmentStoreDirectory.UploadedSegmentMetadata("_100.si", "_100.si__uuid1", "1234") + ) ); - assertTrue(remoteSegmentStoreDirectory.containsFile("_100.si", "1234")); - assertFalse(remoteSegmentStoreDirectory.containsFile("_100.si", "2345")); - assertFalse(remoteSegmentStoreDirectory.containsFile("_200.si", "1234")); + assertTrue(remoteSegmentStoreDirectory.containsFile("_0.cfe", "1234")); + assertTrue(remoteSegmentStoreDirectory.containsFile("_0.cfs", "2345")); + assertFalse(remoteSegmentStoreDirectory.containsFile("_0.cfe", "1234000")); + assertFalse(remoteSegmentStoreDirectory.containsFile("_0.cfs", "2345000")); + assertFalse(remoteSegmentStoreDirectory.containsFile("_0.si", "23")); } public void testUploadMetadataEmpty() throws IOException { @@ -336,4 +382,84 @@ public void testUploadMetadataNonEmpty() throws IOException { String metadataString = remoteSegmentStoreDirectory.getSegmentsUploadedToRemoteStore().get("_0.si").toString(); verify(indexOutput).writeMapOfStrings(Map.of("_0.si", metadataString)); } + + public void testDeleteStaleCommitsException() throws IOException { + when(remoteMetadataDirectory.listFilesByPrefix(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX)).thenThrow( + new IOException("Error reading") + ); + + assertThrows(IOException.class, () -> remoteSegmentStoreDirectory.deleteStaleSegments(5)); + } + + public void testDeleteStaleCommitsWithinThreshold() throws IOException { + populateMetadata(); + + // popluateMetadata() adds stub to return 3 metadata files + // We are passing lastNMetadataFilesToKeep=5 here so that none of the metadata files will be deleted + remoteSegmentStoreDirectory.deleteStaleSegments(5); + + verify(remoteMetadataDirectory, times(0)).openInput(any(String.class), eq(IOContext.DEFAULT)); + } + + public void testDeleteStaleCommitsActualDelete() throws IOException { + Map> metadataFilenameContentMapping = populateMetadata(); + remoteSegmentStoreDirectory.init(); + + // popluateMetadata() adds stub to return 3 metadata files + // We are passing lastNMetadataFilesToKeep=2 here so that oldest 1 metadata file will be deleted + remoteSegmentStoreDirectory.deleteStaleSegments(2); + + for (String metadata : metadataFilenameContentMapping.get("metadata__1__5__abc").values()) { + String uploadedFilename = metadata.split(RemoteSegmentStoreDirectory.UploadedSegmentMetadata.SEPARATOR)[1]; + verify(remoteDataDirectory).deleteFile(uploadedFilename); + } + ; + verify(remoteMetadataDirectory).deleteFile("metadata__1__5__abc"); + } + + public void testDeleteStaleCommitsActualDeleteIOException() throws IOException { + Map> metadataFilenameContentMapping = populateMetadata(); + remoteSegmentStoreDirectory.init(); + + String segmentFileWithException = metadataFilenameContentMapping.get("metadata__1__5__abc") + .values() + .stream() + .findAny() + .get() + .split(RemoteSegmentStoreDirectory.UploadedSegmentMetadata.SEPARATOR)[1]; + doThrow(new IOException("Error")).when(remoteDataDirectory).deleteFile(segmentFileWithException); + // popluateMetadata() adds stub to return 3 metadata files + // We are passing lastNMetadataFilesToKeep=2 here so that oldest 1 metadata file will be deleted + remoteSegmentStoreDirectory.deleteStaleSegments(2); + + for (String metadata : metadataFilenameContentMapping.get("metadata__1__5__abc").values()) { + String uploadedFilename = metadata.split(RemoteSegmentStoreDirectory.UploadedSegmentMetadata.SEPARATOR)[1]; + verify(remoteDataDirectory).deleteFile(uploadedFilename); + } + ; + verify(remoteMetadataDirectory, 
times(0)).deleteFile("metadata__1__5__abc"); + } + + public void testDeleteStaleCommitsActualDeleteNoSuchFileException() throws IOException { + Map> metadataFilenameContentMapping = populateMetadata(); + remoteSegmentStoreDirectory.init(); + + String segmentFileWithException = metadataFilenameContentMapping.get("metadata__1__5__abc") + .values() + .stream() + .findAny() + .get() + .split(RemoteSegmentStoreDirectory.UploadedSegmentMetadata.SEPARATOR)[1]; + doThrow(new NoSuchFileException(segmentFileWithException)).when(remoteDataDirectory).deleteFile(segmentFileWithException); + // popluateMetadata() adds stub to return 3 metadata files + // We are passing lastNMetadataFilesToKeep=2 here so that oldest 1 metadata file will be deleted + remoteSegmentStoreDirectory.deleteStaleSegments(2); + + for (String metadata : metadataFilenameContentMapping.get("metadata__1__5__abc").values()) { + String uploadedFilename = metadata.split(RemoteSegmentStoreDirectory.UploadedSegmentMetadata.SEPARATOR)[1]; + verify(remoteDataDirectory).deleteFile(uploadedFilename); + } + ; + verify(remoteMetadataDirectory).deleteFile("metadata__1__5__abc"); + } } diff --git a/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java b/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java index 1f2360abde2ad..22481b5a7b99f 100644 --- a/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java +++ b/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java @@ -66,6 +66,7 @@ import org.opensearch.index.shard.PrimaryReplicaSyncer; import org.opensearch.index.shard.ShardId; import org.opensearch.indices.recovery.PeerRecoveryTargetService; +import org.opensearch.indices.replication.SegmentReplicationSourceService; import org.opensearch.indices.replication.SegmentReplicationTargetService; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.repositories.RepositoriesService; @@ -572,6 +573,7 @@ private IndicesClusterStateService createIndicesClusterStateService( threadPool, SegmentReplicationCheckpointPublisher.EMPTY, SegmentReplicationTargetService.NO_OP, + SegmentReplicationSourceService.NO_OP, recoveryTargetService, shardStateAction, null, diff --git a/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java b/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java index 38c55620e1223..f49ee0471b5e8 100644 --- a/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java @@ -14,6 +14,8 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.CancellableThreads; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.IndexService; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; @@ -31,6 +33,8 @@ import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; @@ -154,6 +158,51 @@ public void 
testCancelReplication() throws IOException { assertEquals(0, replications.cachedCopyStateSize()); } + public void testCancelReplication_AfterSendFilesStarts() throws IOException, InterruptedException { + CountDownLatch latch = new CountDownLatch(1); + OngoingSegmentReplications replications = new OngoingSegmentReplications(mockIndicesService, recoverySettings); + // add a doc and refresh so primary has more than one segment. + indexDoc(primary, "1", "{\"foo\" : \"baz\"}", XContentType.JSON, "foobar"); + primary.refresh("Test"); + final CheckpointInfoRequest request = new CheckpointInfoRequest( + 1L, + replica.routingEntry().allocationId().getId(), + primaryDiscoveryNode, + testCheckpoint + ); + final FileChunkWriter segmentSegmentFileChunkWriter = (fileMetadata, position, content, lastChunk, totalTranslogOps, listener) -> { + // cancel the replication as soon as the writer starts sending files. + replications.cancel(replica.routingEntry().allocationId().getId(), "Test"); + }; + final CopyState copyState = replications.prepareForReplication(request, segmentSegmentFileChunkWriter); + assertEquals(1, replications.size()); + assertEquals(1, replications.cachedCopyStateSize()); + getSegmentFilesRequest = new GetSegmentFilesRequest( + 1L, + replica.routingEntry().allocationId().getId(), + replicaDiscoveryNode, + new ArrayList<>(copyState.getMetadataSnapshot().asMap().values()), + testCheckpoint + ); + replications.startSegmentCopy(getSegmentFilesRequest, new ActionListener<>() { + @Override + public void onResponse(GetSegmentFilesResponse getSegmentFilesResponse) { + Assert.fail("Expected onFailure to be invoked."); + } + + @Override + public void onFailure(Exception e) { + assertEquals(CancellableThreads.ExecutionCancelledException.class, e.getClass()); + assertEquals(0, copyState.refCount()); + assertEquals(0, replications.size()); + assertEquals(0, replications.cachedCopyStateSize()); + latch.countDown(); + } + }); + latch.await(2, TimeUnit.SECONDS); + assertEquals("listener should have resolved with failure", 0, latch.getCount()); + } + public void testMultipleReplicasUseSameCheckpoint() throws IOException { IndexShard secondReplica = newShard(primary.shardId(), false); recoverReplica(secondReplica, primary, true); diff --git a/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java b/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java index 6bce74be569c3..323445bee1274 100644 --- a/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java @@ -9,12 +9,14 @@ package org.opensearch.indices.replication; import org.apache.lucene.util.Version; +import org.junit.Assert; import org.opensearch.action.ActionListener; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.CancellableThreads; import org.opensearch.core.internal.io.IOUtils; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; @@ -28,6 +30,8 @@ import java.util.Arrays; import java.util.Collections; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import static org.mockito.Mockito.mock; @@ -126,6 
+130,39 @@ public void testGetSegmentFiles() { assertTrue(capturedRequest.request instanceof GetSegmentFilesRequest); } + public void testGetSegmentFiles_CancelWhileRequestOpen() throws InterruptedException { + CountDownLatch latch = new CountDownLatch(1); + final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint( + indexShard.shardId(), + PRIMARY_TERM, + SEGMENTS_GEN, + SEQ_NO, + VERSION + ); + StoreFileMetadata testMetadata = new StoreFileMetadata("testFile", 1L, "checksum", Version.LATEST); + replicationSource.getSegmentFiles( + REPLICATION_ID, + checkpoint, + Arrays.asList(testMetadata), + mock(Store.class), + new ActionListener<>() { + @Override + public void onResponse(GetSegmentFilesResponse getSegmentFilesResponse) { + Assert.fail("onFailure response expected."); + } + + @Override + public void onFailure(Exception e) { + assertEquals(e.getClass(), CancellableThreads.ExecutionCancelledException.class); + latch.countDown(); + } + } + ); + replicationSource.cancel(); + latch.await(2, TimeUnit.SECONDS); + assertEquals("listener should have resolved in a failure", 0, latch.getCount()); + } + private DiscoveryNode newDiscoveryNode(String nodeName) { return new DiscoveryNode( nodeName, diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java index 2c52772649acc..a6e169dbc3d61 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java @@ -18,6 +18,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.CancellableThreads; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; import org.opensearch.index.store.StoreFileMetadata; @@ -28,6 +29,8 @@ import java.io.IOException; import java.util.Collections; import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import static org.mockito.Mockito.mock; @@ -197,4 +200,47 @@ public void testReplicationAlreadyRunning() throws IOException { handler.sendFiles(getSegmentFilesRequest, mock(ActionListener.class)); Assert.assertThrows(OpenSearchException.class, () -> { handler.sendFiles(getSegmentFilesRequest, mock(ActionListener.class)); }); } + + public void testCancelReplication() throws IOException, InterruptedException { + CountDownLatch latch = new CountDownLatch(1); + chunkWriter = mock(FileChunkWriter.class); + + final ReplicationCheckpoint latestReplicationCheckpoint = primary.getLatestReplicationCheckpoint(); + final CopyState copyState = new CopyState(latestReplicationCheckpoint, primary); + SegmentReplicationSourceHandler handler = new SegmentReplicationSourceHandler( + localNode, + chunkWriter, + threadPool, + copyState, + primary.routingEntry().allocationId().getId(), + 5000, + 1 + ); + + final GetSegmentFilesRequest getSegmentFilesRequest = new GetSegmentFilesRequest( + 1L, + replica.routingEntry().allocationId().getId(), + replicaDiscoveryNode, + Collections.emptyList(), + latestReplicationCheckpoint + ); + + // cancel before xfer starts. Cancels during copy will be tested in SegmentFileTransferHandlerTests, that uses the same + // cancellableThreads. 
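+ // cancelling up front means the sendFiles call below should fail fast via the handler's cancellable threads and report an ExecutionCancelledException to the listener.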
+ handler.cancel("test"); + handler.sendFiles(getSegmentFilesRequest, new ActionListener<>() { + @Override + public void onResponse(GetSegmentFilesResponse getSegmentFilesResponse) { + Assert.fail("Expected failure."); + } + + @Override + public void onFailure(Exception e) { + assertEquals(CancellableThreads.ExecutionCancelledException.class, e.getClass()); + latch.countDown(); + } + }); + latch.await(2, TimeUnit.SECONDS); + assertEquals("listener should have resolved with failure", 0, latch.getCount()); + } } diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java index d3a6d1a97dacc..7d9b0f09f21cd 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java @@ -9,21 +9,22 @@ package org.opensearch.indices.replication; import org.junit.Assert; -import org.mockito.ArgumentCaptor; import org.mockito.Mockito; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; -import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.index.store.Store; +import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; -import org.opensearch.indices.replication.common.ReplicationLuceneIndex; -import org.opensearch.transport.TransportService; +import org.opensearch.indices.replication.common.ReplicationType; import java.io.IOException; +import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -35,12 +36,12 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.times; import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.timeout; import static org.mockito.Mockito.eq; public class SegmentReplicationTargetServiceTests extends IndexShardTestCase { - private IndexShard indexShard; + private IndexShard replicaShard; + private IndexShard primaryShard; private ReplicationCheckpoint checkpoint; private SegmentReplicationSource replicationSource; private SegmentReplicationTargetService sut; @@ -52,20 +53,20 @@ public class SegmentReplicationTargetServiceTests extends IndexShardTestCase { public void setUp() throws Exception { super.setUp(); final Settings settings = Settings.builder() - .put(IndexMetadata.SETTING_REPLICATION_TYPE, "SEGMENT") + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put("node.name", SegmentReplicationTargetServiceTests.class.getSimpleName()) .build(); final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - final RecoverySettings recoverySettings = new RecoverySettings(settings, clusterSettings); - final TransportService transportService = mock(TransportService.class); - indexShard = newStartedShard(false, settings); - checkpoint = new ReplicationCheckpoint(indexShard.shardId(), 0L, 0L, 0L, 0L); + primaryShard = newStartedShard(true); + replicaShard = newShard(false, settings, new 
NRTReplicationEngineFactory()); + recoverReplica(replicaShard, primaryShard, true); + checkpoint = new ReplicationCheckpoint(replicaShard.shardId(), 0L, 0L, 0L, 0L); SegmentReplicationSourceFactory replicationSourceFactory = mock(SegmentReplicationSourceFactory.class); replicationSource = mock(SegmentReplicationSource.class); - when(replicationSourceFactory.get(indexShard)).thenReturn(replicationSource); + when(replicationSourceFactory.get(replicaShard)).thenReturn(replicationSource); - sut = new SegmentReplicationTargetService(threadPool, recoverySettings, transportService, replicationSourceFactory); - initialCheckpoint = indexShard.getLatestReplicationCheckpoint(); + sut = prepareForReplication(primaryShard); + initialCheckpoint = replicaShard.getLatestReplicationCheckpoint(); aheadCheckpoint = new ReplicationCheckpoint( initialCheckpoint.getShardId(), initialCheckpoint.getPrimaryTerm(), @@ -77,44 +78,58 @@ public void setUp() throws Exception { @Override public void tearDown() throws Exception { - closeShards(indexShard); + closeShards(primaryShard, replicaShard); super.tearDown(); } - public void testTargetReturnsSuccess_listenerCompletes() { - final SegmentReplicationTarget target = new SegmentReplicationTarget( - checkpoint, - indexShard, - replicationSource, - new SegmentReplicationTargetService.SegmentReplicationListener() { - @Override - public void onReplicationDone(SegmentReplicationState state) { - assertEquals(SegmentReplicationState.Stage.DONE, state.getStage()); - } + public void testsSuccessfulReplication_listenerCompletes() throws InterruptedException { + CountDownLatch latch = new CountDownLatch(1); + sut.startReplication(checkpoint, replicaShard, new SegmentReplicationTargetService.SegmentReplicationListener() { + @Override + public void onReplicationDone(SegmentReplicationState state) { + assertEquals(SegmentReplicationState.Stage.DONE, state.getStage()); + latch.countDown(); + } - @Override - public void onReplicationFailure(SegmentReplicationState state, OpenSearchException e, boolean sendShardFailure) { - Assert.fail(); - } + @Override + public void onReplicationFailure(SegmentReplicationState state, OpenSearchException e, boolean sendShardFailure) { + logger.error("Unexpected error", e); + Assert.fail("Test should succeed"); } - ); - final SegmentReplicationTarget spy = Mockito.spy(target); - doAnswer(invocation -> { - // set up stage correctly so the transition in markAsDone succeeds on listener completion - moveTargetToFinalStage(target); - final ActionListener listener = invocation.getArgument(0); - listener.onResponse(null); - return null; - }).when(spy).startReplication(any()); - sut.startReplication(spy); + }); + latch.await(2, TimeUnit.SECONDS); + assertEquals(0, latch.getCount()); } - public void testTargetThrowsException() { + public void testReplicationFails() throws InterruptedException { + CountDownLatch latch = new CountDownLatch(1); final OpenSearchException expectedError = new OpenSearchException("Fail"); + SegmentReplicationSource source = new SegmentReplicationSource() { + + @Override + public void getCheckpointMetadata( + long replicationId, + ReplicationCheckpoint checkpoint, + ActionListener listener + ) { + listener.onFailure(expectedError); + } + + @Override + public void getSegmentFiles( + long replicationId, + ReplicationCheckpoint checkpoint, + List filesToFetch, + Store store, + ActionListener listener + ) { + Assert.fail("Should not be called"); + } + }; final SegmentReplicationTarget target = new SegmentReplicationTarget( checkpoint, - 
indexShard, - replicationSource, + replicaShard, + source, new SegmentReplicationTargetService.SegmentReplicationListener() { @Override public void onReplicationDone(SegmentReplicationState state) { @@ -123,24 +138,21 @@ public void onReplicationDone(SegmentReplicationState state) { @Override public void onReplicationFailure(SegmentReplicationState state, OpenSearchException e, boolean sendShardFailure) { - assertEquals(SegmentReplicationState.Stage.INIT, state.getStage()); + // failures leave state object in last entered stage. + assertEquals(SegmentReplicationState.Stage.GET_CHECKPOINT_INFO, state.getStage()); assertEquals(expectedError, e.getCause()); - assertTrue(sendShardFailure); + latch.countDown(); } } ); - final SegmentReplicationTarget spy = Mockito.spy(target); - doAnswer(invocation -> { - final ActionListener listener = invocation.getArgument(0); - listener.onFailure(expectedError); - return null; - }).when(spy).startReplication(any()); - sut.startReplication(spy); + sut.startReplication(target); + latch.await(2, TimeUnit.SECONDS); + assertEquals(0, latch.getCount()); } public void testAlreadyOnNewCheckpoint() { SegmentReplicationTargetService spy = spy(sut); - spy.onNewCheckpoint(indexShard.getLatestReplicationCheckpoint(), indexShard); + spy.onNewCheckpoint(replicaShard.getLatestReplicationCheckpoint(), replicaShard); verify(spy, times(0)).startReplication(any(), any(), any()); } @@ -149,7 +161,7 @@ public void testShardAlreadyReplicating() throws InterruptedException { SegmentReplicationTargetService serviceSpy = spy(sut); final SegmentReplicationTarget target = new SegmentReplicationTarget( checkpoint, - indexShard, + replicaShard, replicationSource, mock(SegmentReplicationTargetService.SegmentReplicationListener.class) ); @@ -161,7 +173,7 @@ public void testShardAlreadyReplicating() throws InterruptedException { doAnswer(invocation -> { final ActionListener listener = invocation.getArgument(0); // a new checkpoint arrives before we've completed. - serviceSpy.onNewCheckpoint(aheadCheckpoint, indexShard); + serviceSpy.onNewCheckpoint(aheadCheckpoint, replicaShard); listener.onResponse(null); latch.countDown(); return null; @@ -173,12 +185,12 @@ public void testShardAlreadyReplicating() throws InterruptedException { // wait for the new checkpoint to arrive, before the listener completes. 
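// the times(0) verification below confirms that the checkpoint which arrived mid-replication did not start a second replication for this shard.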
latch.await(30, TimeUnit.SECONDS); - verify(serviceSpy, times(0)).startReplication(eq(aheadCheckpoint), eq(indexShard), any()); + verify(serviceSpy, times(0)).startReplication(eq(aheadCheckpoint), eq(replicaShard), any()); } public void testNewCheckpointBehindCurrentCheckpoint() { SegmentReplicationTargetService spy = spy(sut); - spy.onNewCheckpoint(checkpoint, indexShard); + spy.onNewCheckpoint(checkpoint, replicaShard); verify(spy, times(0)).startReplication(any(), any(), any()); } @@ -190,22 +202,6 @@ public void testShardNotStarted() throws IOException { closeShards(shard); } - public void testNewCheckpoint_validationPassesAndReplicationFails() throws IOException { - allowShardFailures(); - SegmentReplicationTargetService spy = spy(sut); - IndexShard spyShard = spy(indexShard); - ArgumentCaptor captor = ArgumentCaptor.forClass( - SegmentReplicationTargetService.SegmentReplicationListener.class - ); - doNothing().when(spy).startReplication(any(), any(), any()); - spy.onNewCheckpoint(aheadCheckpoint, spyShard); - verify(spy, times(1)).startReplication(any(), any(), captor.capture()); - SegmentReplicationTargetService.SegmentReplicationListener listener = captor.getValue(); - listener.onFailure(new SegmentReplicationState(new ReplicationLuceneIndex()), new OpenSearchException("testing"), true); - verify(spyShard).failShard(any(), any()); - closeShard(indexShard, false); - } - /** * here we are starting a new shard in PrimaryMode and testing that we don't process a checkpoint on shard when it is in PrimaryMode. */ @@ -215,71 +211,10 @@ public void testRejectCheckpointOnShardPrimaryMode() throws IOException { // Starting a new shard in PrimaryMode. IndexShard primaryShard = newStartedShard(true); IndexShard spyShard = spy(primaryShard); - doNothing().when(spy).startReplication(any(), any(), any()); spy.onNewCheckpoint(aheadCheckpoint, spyShard); // Verify that checkpoint is not processed as shard is in PrimaryMode. 
verify(spy, times(0)).startReplication(any(), any(), any()); closeShards(primaryShard); } - - public void testReplicationOnDone() throws IOException { - SegmentReplicationTargetService spy = spy(sut); - IndexShard spyShard = spy(indexShard); - ReplicationCheckpoint cp = indexShard.getLatestReplicationCheckpoint(); - ReplicationCheckpoint newCheckpoint = new ReplicationCheckpoint( - cp.getShardId(), - cp.getPrimaryTerm(), - cp.getSegmentsGen(), - cp.getSeqNo(), - cp.getSegmentInfosVersion() + 1 - ); - ReplicationCheckpoint anotherNewCheckpoint = new ReplicationCheckpoint( - cp.getShardId(), - cp.getPrimaryTerm(), - cp.getSegmentsGen(), - cp.getSeqNo(), - cp.getSegmentInfosVersion() + 2 - ); - ArgumentCaptor captor = ArgumentCaptor.forClass( - SegmentReplicationTargetService.SegmentReplicationListener.class - ); - doNothing().when(spy).startReplication(any(), any(), any()); - spy.onNewCheckpoint(newCheckpoint, spyShard); - spy.onNewCheckpoint(anotherNewCheckpoint, spyShard); - verify(spy, times(1)).startReplication(eq(newCheckpoint), any(), captor.capture()); - verify(spy, times(1)).onNewCheckpoint(eq(anotherNewCheckpoint), any()); - SegmentReplicationTargetService.SegmentReplicationListener listener = captor.getValue(); - listener.onDone(new SegmentReplicationState(new ReplicationLuceneIndex())); - doNothing().when(spy).onNewCheckpoint(any(), any()); - verify(spy, timeout(0).times(2)).onNewCheckpoint(eq(anotherNewCheckpoint), any()); - closeShard(indexShard, false); - - } - - public void testBeforeIndexShardClosed_CancelsOngoingReplications() { - final SegmentReplicationTarget target = new SegmentReplicationTarget( - checkpoint, - indexShard, - replicationSource, - mock(SegmentReplicationTargetService.SegmentReplicationListener.class) - ); - final SegmentReplicationTarget spy = Mockito.spy(target); - sut.startReplication(spy); - sut.beforeIndexShardClosed(indexShard.shardId(), indexShard, Settings.EMPTY); - verify(spy, times(1)).cancel(any()); - } - - /** - * Move the {@link SegmentReplicationTarget} object through its {@link SegmentReplicationState.Stage} values in order - * until the final, non-terminal stage. 
- */ - private void moveTargetToFinalStage(SegmentReplicationTarget target) { - SegmentReplicationState.Stage[] stageValues = SegmentReplicationState.Stage.values(); - assertEquals(target.state().getStage(), SegmentReplicationState.Stage.INIT); - // Skip the first two stages (DONE and INIT) and iterate until the last value - for (int i = 2; i < stageValues.length; i++) { - target.state().setStage(stageValues[i]); - } - } } diff --git a/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java b/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java index a10f004b2ee97..9a28f1800847e 100644 --- a/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java +++ b/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java @@ -14,6 +14,10 @@ import org.opensearch.action.search.CreatePitController; import org.opensearch.action.search.CreatePitRequest; import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.action.search.DeletePitAction; +import org.opensearch.action.search.DeletePitInfo; +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.DeletePitResponse; import org.opensearch.action.search.PitTestsUtil; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; @@ -33,6 +37,8 @@ import java.util.concurrent.ExecutionException; import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.Matchers.blankOrNullString; +import static org.hamcrest.Matchers.not; import static org.opensearch.action.search.PitTestsUtil.assertSegments; import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; @@ -282,6 +288,52 @@ public void testMaxOpenPitContexts() throws Exception { validatePitStats("index", 0, maxPitContexts, 0); } + public void testCreatePitMoreThanMaxOpenPitContexts() throws Exception { + createIndex("index"); + client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + SearchService service = getInstanceFromNode(SearchService.class); + + try { + for (int i = 0; i < 1000; i++) { + client().execute(CreatePitAction.INSTANCE, request).get(); + } + } catch (Exception ex) { + assertTrue( + ex.getMessage() + .contains( + "Trying to create too many Point In Time contexts. " + + "Must be less than or equal to: [" + + SearchService.MAX_OPEN_PIT_CONTEXT.get(Settings.EMPTY) + + "]. " + + "This limit can be set by changing the [search.max_open_pit_context] setting." + ) + ); + } + final int maxPitContexts = SearchService.MAX_OPEN_PIT_CONTEXT.get(Settings.EMPTY); + validatePitStats("index", maxPitContexts, 0, 0); + // deleteall + DeletePitRequest deletePITRequest = new DeletePitRequest("_all"); + + /** + * When we invoke delete again, returns success after clearing the remaining readers. 
Asserting reader context + * not found exceptions don't result in failures ( as deletion in one node is successful ) + */ + ActionFuture execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); + DeletePitResponse deletePITResponse = execute.get(); + for (DeletePitInfo deletePitInfo : deletePITResponse.getDeletePitResults()) { + assertThat(deletePitInfo.getPitId(), not(blankOrNullString())); + assertTrue(deletePitInfo.isSuccessful()); + } + validatePitStats("index", 0, maxPitContexts, 0); + client().execute(CreatePitAction.INSTANCE, request).get(); + validatePitStats("index", 1, maxPitContexts, 0); + service.doClose(); + validatePitStats("index", 0, maxPitContexts + 1, 0); + } + public void testOpenPitContextsConcurrently() throws Exception { createIndex("index"); final int maxPitContexts = SearchService.MAX_OPEN_PIT_CONTEXT.get(Settings.EMPTY); diff --git a/server/src/test/java/org/opensearch/search/pit/RestCreatePitActionTests.java b/server/src/test/java/org/opensearch/search/pit/RestCreatePitActionTests.java new file mode 100644 index 0000000000000..5ca384daedbff --- /dev/null +++ b/server/src/test/java/org/opensearch/search/pit/RestCreatePitActionTests.java @@ -0,0 +1,78 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.pit; + +import org.apache.lucene.util.SetOnce; +import org.opensearch.action.ActionListener; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.client.node.NodeClient; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.search.RestCreatePitAction; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.client.NoOpNodeClient; +import org.opensearch.test.rest.FakeRestChannel; +import org.opensearch.test.rest.FakeRestRequest; + +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; + +/** + * Tests to verify behavior of create pit rest action + */ +public class RestCreatePitActionTests extends OpenSearchTestCase { + public void testRestCreatePit() throws Exception { + SetOnce createPitCalled = new SetOnce<>(); + RestCreatePitAction action = new RestCreatePitAction(); + try (NodeClient nodeClient = new NoOpNodeClient(this.getTestName()) { + @Override + public void createPit(CreatePitRequest request, ActionListener listener) { + createPitCalled.set(true); + assertThat(request.getKeepAlive().getStringRep(), equalTo("1m")); + assertFalse(request.shouldAllowPartialPitCreation()); + } + }) { + Map params = new HashMap<>(); + params.put("keep_alive", "1m"); + params.put("allow_partial_pit_creation", "false"); + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withParams(params) + .withMethod(RestRequest.Method.POST) + .build(); + FakeRestChannel channel = new FakeRestChannel(request, false, 0); + action.handleRequest(request, channel, nodeClient); + + assertThat(createPitCalled.get(), equalTo(true)); + } + } + + public void testRestCreatePitDefaultPartialCreation() throws Exception { + SetOnce createPitCalled = new SetOnce<>(); + RestCreatePitAction action = new RestCreatePitAction(); + try (NodeClient nodeClient = new NoOpNodeClient(this.getTestName()) { + @Override + public void createPit(CreatePitRequest request, ActionListener listener) { + createPitCalled.set(true); + 
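+ // only keep_alive is supplied in this test, so allow_partial_pit_creation should fall back to its default of true.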
assertThat(request.getKeepAlive().getStringRep(), equalTo("1m")); + assertTrue(request.shouldAllowPartialPitCreation()); + } + }) { + Map params = new HashMap<>(); + params.put("keep_alive", "1m"); + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withParams(params) + .withMethod(RestRequest.Method.POST) + .build(); + FakeRestChannel channel = new FakeRestChannel(request, false, 0); + action.handleRequest(request, channel, nodeClient); + + assertThat(createPitCalled.get(), equalTo(true)); + } + } +} diff --git a/server/src/test/java/org/opensearch/search/pit/RestDeletePitActionTests.java b/server/src/test/java/org/opensearch/search/pit/RestDeletePitActionTests.java new file mode 100644 index 0000000000000..0bfa16aafe1e3 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/pit/RestDeletePitActionTests.java @@ -0,0 +1,133 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.pit; + +import org.apache.lucene.util.SetOnce; +import org.opensearch.action.ActionListener; +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.DeletePitResponse; +import org.opensearch.client.node.NodeClient; +import org.opensearch.common.bytes.BytesArray; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.search.RestDeletePitAction; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.client.NoOpNodeClient; +import org.opensearch.test.rest.FakeRestChannel; +import org.opensearch.test.rest.FakeRestRequest; + +import java.util.Collections; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; + +/** + * Tests to verify the behavior of rest delete pit action for list delete and delete all PIT endpoints + */ +public class RestDeletePitActionTests extends OpenSearchTestCase { + public void testParseDeletePitRequestWithInvalidJsonThrowsException() throws Exception { + RestDeletePitAction action = new RestDeletePitAction(); + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withContent( + new BytesArray("{invalid_json}"), + XContentType.JSON + ).build(); + Exception e = expectThrows(IllegalArgumentException.class, () -> action.prepareRequest(request, null)); + assertThat(e.getMessage(), equalTo("Failed to parse request body")); + } + + public void testDeletePitWithBody() throws Exception { + SetOnce pitCalled = new SetOnce<>(); + try (NodeClient nodeClient = new NoOpNodeClient(this.getTestName()) { + @Override + public void deletePits(DeletePitRequest request, ActionListener listener) { + pitCalled.set(true); + assertThat(request.getPitIds(), hasSize(1)); + assertThat(request.getPitIds().get(0), equalTo("BODY")); + } + }) { + RestDeletePitAction action = new RestDeletePitAction(); + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withContent( + new BytesArray("{\"pit_id\": [\"BODY\"]}"), + XContentType.JSON + ).build(); + FakeRestChannel channel = new FakeRestChannel(request, false, 0); + action.handleRequest(request, channel, nodeClient); + + assertThat(pitCalled.get(), equalTo(true)); + } + } + + public void testDeleteAllPit() throws Exception { + SetOnce pitCalled = new SetOnce<>(); + try (NodeClient nodeClient = new NoOpNodeClient(this.getTestName()) { + @Override + public void 
deletePits(DeletePitRequest request, ActionListener listener) { + pitCalled.set(true); + assertThat(request.getPitIds(), hasSize(1)); + assertThat(request.getPitIds().get(0), equalTo("_all")); + } + }) { + RestDeletePitAction action = new RestDeletePitAction(); + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withPath("/_all").build(); + FakeRestChannel channel = new FakeRestChannel(request, false, 0); + action.handleRequest(request, channel, nodeClient); + + assertThat(pitCalled.get(), equalTo(true)); + } + } + + public void testDeleteAllPitWithBody() { + SetOnce pitCalled = new SetOnce<>(); + try (NodeClient nodeClient = new NoOpNodeClient(this.getTestName()) { + @Override + public void deletePits(DeletePitRequest request, ActionListener listener) { + pitCalled.set(true); + assertThat(request.getPitIds(), hasSize(1)); + assertThat(request.getPitIds().get(0), equalTo("_all")); + } + }) { + RestDeletePitAction action = new RestDeletePitAction(); + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withContent( + new BytesArray("{\"pit_id\": [\"BODY\"]}"), + XContentType.JSON + ).withPath("/_all").build(); + FakeRestChannel channel = new FakeRestChannel(request, false, 0); + + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> action.handleRequest(request, channel, nodeClient) + ); + assertTrue(ex.getMessage().contains("request [GET /_all] does not support having a body")); + } + } + + public void testDeletePitQueryStringParamsShouldThrowException() { + SetOnce pitCalled = new SetOnce<>(); + try (NodeClient nodeClient = new NoOpNodeClient(this.getTestName()) { + @Override + public void deletePits(DeletePitRequest request, ActionListener listener) { + pitCalled.set(true); + assertThat(request.getPitIds(), hasSize(2)); + assertThat(request.getPitIds().get(0), equalTo("QUERY_STRING")); + assertThat(request.getPitIds().get(1), equalTo("QUERY_STRING_1")); + } + }) { + RestDeletePitAction action = new RestDeletePitAction(); + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withParams( + Collections.singletonMap("pit_id", "QUERY_STRING,QUERY_STRING_1") + ).build(); + FakeRestChannel channel = new FakeRestChannel(request, false, 0); + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> action.handleRequest(request, channel, nodeClient) + ); + assertTrue(ex.getMessage().contains("unrecognized param")); + } + } +} diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 4b8eec70f2c1a..ff4005d9bcedf 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -172,7 +172,7 @@ import org.opensearch.index.seqno.GlobalCheckpointSyncAction; import org.opensearch.index.seqno.RetentionLeaseSyncer; import org.opensearch.index.shard.PrimaryReplicaSyncer; -import org.opensearch.index.store.RemoteDirectoryFactory; +import org.opensearch.index.store.RemoteSegmentStoreDirectoryFactory; import org.opensearch.indices.IndicesModule; import org.opensearch.indices.IndicesService; import org.opensearch.indices.ShardLimitValidator; @@ -185,6 +185,7 @@ import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.replication.SegmentReplicationSourceFactory; +import 
org.opensearch.indices.replication.SegmentReplicationSourceService; import org.opensearch.indices.replication.SegmentReplicationTargetService; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.ingest.IngestService; @@ -1826,7 +1827,7 @@ public void onFailure(final Exception e) { emptyMap(), null, emptyMap(), - new RemoteDirectoryFactory(() -> repositoriesService) + new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService) ); final RecoverySettings recoverySettings = new RecoverySettings(settings, clusterSettings); snapshotShardsService = new SnapshotShardsService( @@ -1857,6 +1858,7 @@ public void onFailure(final Exception e) { transportService, new SegmentReplicationSourceFactory(transportService, recoverySettings, clusterService) ), + SegmentReplicationSourceService.NO_OP, shardStateAction, new NodeMappingRefreshAction(transportService, metadataMappingService), repositoriesService, diff --git a/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java index 174747d306ff5..af754d77560cc 100644 --- a/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java @@ -1070,6 +1070,22 @@ public List generateHistoryOnReplica( boolean allowGapInSeqNo, boolean allowDuplicate, boolean includeNestedDocs + ) throws Exception { + return generateHistoryOnReplica( + numOps, + allowGapInSeqNo, + allowDuplicate, + includeNestedDocs, + randomFrom(Engine.Operation.TYPE.values()) + ); + } + + public List generateHistoryOnReplica( + int numOps, + boolean allowGapInSeqNo, + boolean allowDuplicate, + boolean includeNestedDocs, + Engine.Operation.TYPE opType ) throws Exception { long seqNo = 0; final int maxIdValue = randomInt(numOps * 2); @@ -1077,7 +1093,6 @@ public List generateHistoryOnReplica( CheckedBiFunction nestedParsedDocFactory = nestedParsedDocFactory(); for (int i = 0; i < numOps; i++) { final String id = Integer.toString(randomInt(maxIdValue)); - final Engine.Operation.TYPE opType = randomFrom(Engine.Operation.TYPE.values()); final boolean isNestedDoc = includeNestedDocs && opType == Engine.Operation.TYPE.INDEX && randomBoolean(); final int nestedValues = between(0, 3); final long startTime = threadPool.relativeTimeInNanos(); diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index f446538acccbb..1b40cb4f2dfa3 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -59,12 +59,15 @@ import org.opensearch.common.CheckedFunction; import org.opensearch.common.Nullable; import org.opensearch.common.UUIDs; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.blobstore.fs.FsBlobContainer; +import org.opensearch.common.blobstore.fs.FsBlobStore; import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.lucene.uid.Versions; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BigArrays; import 
org.opensearch.common.xcontent.XContentType; import org.opensearch.core.internal.io.IOUtils; @@ -88,6 +91,8 @@ import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.similarity.SimilarityService; import org.opensearch.index.snapshots.IndexShardSnapshotStatus; +import org.opensearch.index.store.RemoteDirectory; +import org.opensearch.index.store.RemoteSegmentStoreDirectory; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.index.translog.InternalTranslogFactory; @@ -106,7 +111,10 @@ import org.opensearch.indices.replication.CheckpointInfoResponse; import org.opensearch.indices.replication.GetSegmentFilesResponse; import org.opensearch.indices.replication.SegmentReplicationSource; +import org.opensearch.indices.replication.SegmentReplicationSourceFactory; +import org.opensearch.indices.replication.SegmentReplicationState; import org.opensearch.indices.replication.SegmentReplicationTarget; +import org.opensearch.indices.replication.SegmentReplicationTargetService; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.replication.common.CopyState; @@ -121,8 +129,11 @@ import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; import java.io.IOException; +import java.util.ArrayList; +import java.nio.file.Path; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; @@ -139,7 +150,9 @@ import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; import static org.opensearch.cluster.routing.TestShardRouting.newShardRouting; /** @@ -532,7 +545,10 @@ protected IndexShard newShard( ShardId shardId = shardPath.getShardId(); NodeEnvironment.NodePath remoteNodePath = new NodeEnvironment.NodePath(createTempDir()); ShardPath remoteShardPath = new ShardPath(false, remoteNodePath.resolve(shardId), remoteNodePath.resolve(shardId), shardId); - storeProvider = is -> createStore(is, remoteShardPath); + RemoteDirectory dataDirectory = newRemoteDirectory(remoteShardPath.resolveIndex()); + RemoteDirectory metadataDirectory = newRemoteDirectory(remoteShardPath.resolveIndex()); + RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = new RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory); + storeProvider = is -> createStore(shardId, is, remoteSegmentStoreDirectory); remoteStore = storeProvider.apply(indexSettings); } indexShard = new IndexShard( @@ -570,6 +586,13 @@ protected IndexShard newShard( return indexShard; } + private RemoteDirectory newRemoteDirectory(Path f) throws IOException { + FsBlobStore fsBlobStore = new FsBlobStore(1024, f, false); + BlobPath blobPath = new BlobPath(); + BlobContainer fsBlobContainer = new FsBlobContainer(fsBlobStore, blobPath, f); + return new RemoteDirectory(fsBlobContainer); + } + /** * Takes an existing shard, closes it and starts a new initialing shard at the same location * @@ -1154,35 +1177,40 @@ public static Engine.Warmer createTestWarmer(IndexSettings indexSettings) { } /** - * Segment Replication specific test method - Replicate segments to a list of replicas from a 
given primary. - * This test will use a real {@link SegmentReplicationTarget} for each replica with a mock {@link SegmentReplicationSource} that - * writes all segments directly to the target. + * Segment Replication specific test method - Creates a {@link SegmentReplicationTargetService} to perform replications that has + * been configured to return the given primaryShard's current segments. + * + * @param primaryShard {@link IndexShard} - The primary shard to replicate from. */ - public final void replicateSegments(IndexShard primaryShard, List replicaShards) throws IOException, InterruptedException { - final CountDownLatch countDownLatch = new CountDownLatch(replicaShards.size()); - Store.MetadataSnapshot primaryMetadata; - try (final GatedCloseable segmentInfosSnapshot = primaryShard.getSegmentInfosSnapshot()) { - final SegmentInfos primarySegmentInfos = segmentInfosSnapshot.get(); - primaryMetadata = primaryShard.store().getMetadata(primarySegmentInfos); - } - final CopyState copyState = new CopyState(ReplicationCheckpoint.empty(primaryShard.shardId), primaryShard); - - final ReplicationCollection replicationCollection = new ReplicationCollection<>(logger, threadPool); - final SegmentReplicationSource source = new SegmentReplicationSource() { + public final SegmentReplicationTargetService prepareForReplication(IndexShard primaryShard) { + final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); + final SegmentReplicationTargetService targetService = new SegmentReplicationTargetService( + threadPool, + new RecoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), + mock(TransportService.class), + sourceFactory + ); + final SegmentReplicationSource replicationSource = new SegmentReplicationSource() { @Override public void getCheckpointMetadata( long replicationId, ReplicationCheckpoint checkpoint, ActionListener listener ) { - listener.onResponse( - new CheckpointInfoResponse( - copyState.getCheckpoint(), - copyState.getMetadataSnapshot(), - copyState.getInfosBytes(), - copyState.getPendingDeleteFiles() - ) - ); + try { + final CopyState copyState = new CopyState(ReplicationCheckpoint.empty(primaryShard.shardId), primaryShard); + listener.onResponse( + new CheckpointInfoResponse( + copyState.getCheckpoint(), + copyState.getMetadataSnapshot(), + copyState.getInfosBytes(), + copyState.getPendingDeleteFiles() + ) + ); + } catch (IOException e) { + logger.error("Unexpected error computing CopyState", e); + Assert.fail("Failed to compute copyState"); + } } @Override @@ -1194,9 +1222,7 @@ public void getSegmentFiles( ActionListener listener ) { try ( - final ReplicationCollection.ReplicationRef replicationRef = replicationCollection.get( - replicationId - ) + final ReplicationCollection.ReplicationRef replicationRef = targetService.get(replicationId) ) { writeFileChunks(replicationRef.get(), primaryShard, filesToFetch.toArray(new StoreFileMetadata[] {})); } catch (IOException e) { @@ -1205,15 +1231,43 @@ public void getSegmentFiles( listener.onResponse(new GetSegmentFilesResponse(filesToFetch)); } }; + when(sourceFactory.get(any())).thenReturn(replicationSource); + return targetService; + } + + /** + * Segment Replication specific test method - Replicate segments to a list of replicas from a given primary. + * This test will use a real {@link SegmentReplicationTarget} for each replica with a mock {@link SegmentReplicationSource} that + * writes all segments directly to the target. 
+ * @param primaryShard - {@link IndexShard} The current primary shard. + * @param replicaShards - Replicas that will be updated. + * @return {@link List} List of target components orchestrating replication. + */ + public final List replicateSegments(IndexShard primaryShard, List replicaShards) + throws IOException, InterruptedException { + final SegmentReplicationTargetService targetService = prepareForReplication(primaryShard); + return replicateSegments(targetService, primaryShard, replicaShards); + } + public final List replicateSegments( + SegmentReplicationTargetService targetService, + IndexShard primaryShard, + List replicaShards + ) throws IOException, InterruptedException { + final CountDownLatch countDownLatch = new CountDownLatch(replicaShards.size()); + Store.MetadataSnapshot primaryMetadata; + try (final GatedCloseable segmentInfosSnapshot = primaryShard.getSegmentInfosSnapshot()) { + final SegmentInfos primarySegmentInfos = segmentInfosSnapshot.get(); + primaryMetadata = primaryShard.store().getMetadata(primarySegmentInfos); + } + List ids = new ArrayList<>(); for (IndexShard replica : replicaShards) { - final SegmentReplicationTarget target = new SegmentReplicationTarget( + final SegmentReplicationTarget target = targetService.startReplication( ReplicationCheckpoint.empty(replica.shardId), replica, - source, - new ReplicationListener() { + new SegmentReplicationTargetService.SegmentReplicationListener() { @Override - public void onDone(ReplicationState state) { + public void onReplicationDone(SegmentReplicationState state) { try (final GatedCloseable snapshot = replica.getSegmentInfosSnapshot()) { final SegmentInfos replicaInfos = snapshot.get(); final Store.MetadataSnapshot replicaMetadata = replica.store().getMetadata(replicaInfos); @@ -1224,31 +1278,22 @@ public void onDone(ReplicationState state) { assertEquals(primaryMetadata.getCommitUserData(), replicaMetadata.getCommitUserData()); } catch (Exception e) { throw ExceptionsHelper.convertToRuntime(e); + } finally { + countDownLatch.countDown(); } - countDownLatch.countDown(); } @Override - public void onFailure(ReplicationState state, OpenSearchException e, boolean sendShardFailure) { + public void onReplicationFailure(SegmentReplicationState state, OpenSearchException e, boolean sendShardFailure) { logger.error("Unexpected replication failure in test", e); Assert.fail("test replication should not fail: " + e); } } ); - replicationCollection.start(target, TimeValue.timeValueMillis(5000)); - target.startReplication(new ActionListener<>() { - @Override - public void onResponse(Void o) { - replicationCollection.markAsDone(target.getId()); - } - - @Override - public void onFailure(Exception e) { - replicationCollection.fail(target.getId(), new OpenSearchException("Segment Replication failed", e), true); - } - }); + ids.add(target); + countDownLatch.await(1, TimeUnit.SECONDS); } - countDownLatch.await(3, TimeUnit.SECONDS); + return ids; } private void writeFileChunks(SegmentReplicationTarget target, IndexShard primary, StoreFileMetadata[] files) throws IOException {
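For the RestDeletePitAction tests above, the transport-level entry point they stub is NodeClient#deletePits, which the REST handler calls after resolving either the "_all" path or the pit_id query parameter into a DeletePitRequest. A minimal sketch of that call, assuming a DeletePitRequest constructor that takes the PIT ids (the diff only shows the getPitIds() accessor) and a DeletePitResponse-typed listener:

    // Illustrative only: the constructor form and response type are assumptions;
    // deletePits(request, listener) is the client method the tests above override.
    DeletePitRequest deleteAll = new DeletePitRequest(Collections.singletonList("_all"));
    client.deletePits(deleteAll, ActionListener.wrap(
        response -> logger.info("deleted all PIT contexts"),
        e -> logger.error("deleting all PIT contexts failed", e)
    ));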
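The EngineTestCase change above adds an overload that pins the operation type for a generated replica history instead of drawing a random type per operation. A minimal sketch of how a test might use it, assuming EngineTestCase's applyOperations helper (or equivalent replay logic) to feed the history to an engine under test:

    // Generate 100 replica-side operations that are all INDEX operations
    // (no deletes or no-ops), allowing sequence-number gaps but no duplicates
    // and no nested documents.
    List<Engine.Operation> ops = generateHistoryOnReplica(
        100,
        true,   // allowGapInSeqNo
        false,  // allowDuplicate
        false,  // includeNestedDocs
        Engine.Operation.TYPE.INDEX
    );
    applyOperations(replicaEngine, ops);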
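The newRemoteDirectory helper above backs the test-only remote store with the local filesystem. The layering it sets up, sketched with the same constructors the diff uses (the temp path is illustrative, and the calls belong in a helper that declares throws IOException), is a blob store wrapped in blob containers, wrapped in RemoteDirectory instances for segment data and metadata:

    // Local-filesystem stand-in for a remote segment store.
    Path remotePath = createTempDir();
    FsBlobStore blobStore = new FsBlobStore(1024, remotePath, false);
    BlobPath blobPath = new BlobPath();
    RemoteDirectory dataDirectory =
        new RemoteDirectory(new FsBlobContainer(blobStore, blobPath, remotePath));
    RemoteDirectory metadataDirectory =
        new RemoteDirectory(new FsBlobContainer(blobStore, blobPath, remotePath));
    RemoteSegmentStoreDirectory remoteSegmentStoreDirectory =
        new RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory);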
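Taken together, prepareForReplication and the two replicateSegments overloads above give a test two entry points: the two-argument form builds the SegmentReplicationTargetService internally, while the three-argument form lets the test construct the service once and reuse it across replication rounds. A minimal usage sketch, assuming shards created and started through the usual IndexShardTestCase helpers (indexDoc and the primaryShard/replicaShard fixtures stand in for whatever setup the test already has):

    // Wire a target service whose mocked source serves the primary's current segments.
    SegmentReplicationTargetService targetService = prepareForReplication(primaryShard);

    // Put something on the primary worth copying.
    indexDoc(primaryShard, "_doc", "1");
    primaryShard.refresh("test");

    // Replicate onto the replica and wait for the round to complete; the listener
    // installed by replicateSegments asserts replica metadata matches the primary.
    List<SegmentReplicationTarget> targets =
        replicateSegments(targetService, primaryShard, Arrays.asList(replicaShard));
    assertEquals(1, targets.size());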