From db7f4b08480f2a8941b22a5fa958cf5b9fff32b9 Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Wed, 23 Mar 2022 07:26:18 -0500 Subject: [PATCH 001/653] [Upgrade] Lucene 9.1 release (#2560) Upgrades to the official 9.1 release Signed-off-by: Nicholas Walter Knize --- buildSrc/version.properties | 2 +- .../lucene-expressions-9.1.0-snapshot-ea989fe8f30.jar.sha1 | 1 - .../lang-expression/licenses/lucene-expressions-9.1.0.jar.sha1 | 1 + .../lucene-analysis-icu-9.1.0-snapshot-ea989fe8f30.jar.sha1 | 1 - .../analysis-icu/licenses/lucene-analysis-icu-9.1.0.jar.sha1 | 1 + ...lucene-analysis-kuromoji-9.1.0-snapshot-ea989fe8f30.jar.sha1 | 1 - .../licenses/lucene-analysis-kuromoji-9.1.0.jar.sha1 | 1 + .../lucene-analysis-nori-9.1.0-snapshot-ea989fe8f30.jar.sha1 | 1 - .../analysis-nori/licenses/lucene-analysis-nori-9.1.0.jar.sha1 | 1 + ...lucene-analysis-phonetic-9.1.0-snapshot-ea989fe8f30.jar.sha1 | 1 - .../licenses/lucene-analysis-phonetic-9.1.0.jar.sha1 | 1 + .../lucene-analysis-smartcn-9.1.0-snapshot-ea989fe8f30.jar.sha1 | 1 - .../licenses/lucene-analysis-smartcn-9.1.0.jar.sha1 | 1 + .../lucene-analysis-stempel-9.1.0-snapshot-ea989fe8f30.jar.sha1 | 1 - .../licenses/lucene-analysis-stempel-9.1.0.jar.sha1 | 1 + ...cene-analysis-morfologik-9.1.0-snapshot-ea989fe8f30.jar.sha1 | 1 - .../licenses/lucene-analysis-morfologik-9.1.0.jar.sha1 | 1 + .../lucene-analysis-common-9.1.0-snapshot-ea989fe8f30.jar.sha1 | 1 - server/licenses/lucene-analysis-common-9.1.0.jar.sha1 | 1 + .../lucene-backward-codecs-9.1.0-snapshot-ea989fe8f30.jar.sha1 | 1 - server/licenses/lucene-backward-codecs-9.1.0.jar.sha1 | 1 + server/licenses/lucene-core-9.1.0-snapshot-ea989fe8f30.jar.sha1 | 1 - server/licenses/lucene-core-9.1.0.jar.sha1 | 1 + .../lucene-grouping-9.1.0-snapshot-ea989fe8f30.jar.sha1 | 1 - server/licenses/lucene-grouping-9.1.0.jar.sha1 | 1 + .../lucene-highlighter-9.1.0-snapshot-ea989fe8f30.jar.sha1 | 1 - server/licenses/lucene-highlighter-9.1.0.jar.sha1 | 1 + 
server/licenses/lucene-join-9.1.0-snapshot-ea989fe8f30.jar.sha1 | 1 - server/licenses/lucene-join-9.1.0.jar.sha1 | 1 + .../licenses/lucene-memory-9.1.0-snapshot-ea989fe8f30.jar.sha1 | 1 - server/licenses/lucene-memory-9.1.0.jar.sha1 | 1 + server/licenses/lucene-misc-9.1.0-snapshot-ea989fe8f30.jar.sha1 | 1 - server/licenses/lucene-misc-9.1.0.jar.sha1 | 1 + .../licenses/lucene-queries-9.1.0-snapshot-ea989fe8f30.jar.sha1 | 1 - server/licenses/lucene-queries-9.1.0.jar.sha1 | 1 + .../lucene-queryparser-9.1.0-snapshot-ea989fe8f30.jar.sha1 | 1 - server/licenses/lucene-queryparser-9.1.0.jar.sha1 | 1 + .../licenses/lucene-sandbox-9.1.0-snapshot-ea989fe8f30.jar.sha1 | 1 - server/licenses/lucene-sandbox-9.1.0.jar.sha1 | 1 + .../lucene-spatial-extras-9.1.0-snapshot-ea989fe8f30.jar.sha1 | 1 - server/licenses/lucene-spatial-extras-9.1.0.jar.sha1 | 1 + .../lucene-spatial3d-9.1.0-snapshot-ea989fe8f30.jar.sha1 | 1 - server/licenses/lucene-spatial3d-9.1.0.jar.sha1 | 1 + .../licenses/lucene-suggest-9.1.0-snapshot-ea989fe8f30.jar.sha1 | 1 - server/licenses/lucene-suggest-9.1.0.jar.sha1 | 1 + 45 files changed, 23 insertions(+), 23 deletions(-) delete mode 100644 modules/lang-expression/licenses/lucene-expressions-9.1.0-snapshot-ea989fe8f30.jar.sha1 create mode 100644 modules/lang-expression/licenses/lucene-expressions-9.1.0.jar.sha1 delete mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.1.0-snapshot-ea989fe8f30.jar.sha1 create mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.1.0.jar.sha1 delete mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.1.0-snapshot-ea989fe8f30.jar.sha1 create mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.1.0.jar.sha1 delete mode 100644 plugins/analysis-nori/licenses/lucene-analysis-nori-9.1.0-snapshot-ea989fe8f30.jar.sha1 create mode 100644 plugins/analysis-nori/licenses/lucene-analysis-nori-9.1.0.jar.sha1 delete mode 100644 
plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.1.0-snapshot-ea989fe8f30.jar.sha1 create mode 100644 plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.1.0.jar.sha1 delete mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.1.0-snapshot-ea989fe8f30.jar.sha1 create mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.1.0.jar.sha1 delete mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.1.0-snapshot-ea989fe8f30.jar.sha1 create mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.1.0.jar.sha1 delete mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.1.0-snapshot-ea989fe8f30.jar.sha1 create mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.1.0.jar.sha1 delete mode 100644 server/licenses/lucene-analysis-common-9.1.0-snapshot-ea989fe8f30.jar.sha1 create mode 100644 server/licenses/lucene-analysis-common-9.1.0.jar.sha1 delete mode 100644 server/licenses/lucene-backward-codecs-9.1.0-snapshot-ea989fe8f30.jar.sha1 create mode 100644 server/licenses/lucene-backward-codecs-9.1.0.jar.sha1 delete mode 100644 server/licenses/lucene-core-9.1.0-snapshot-ea989fe8f30.jar.sha1 create mode 100644 server/licenses/lucene-core-9.1.0.jar.sha1 delete mode 100644 server/licenses/lucene-grouping-9.1.0-snapshot-ea989fe8f30.jar.sha1 create mode 100644 server/licenses/lucene-grouping-9.1.0.jar.sha1 delete mode 100644 server/licenses/lucene-highlighter-9.1.0-snapshot-ea989fe8f30.jar.sha1 create mode 100644 server/licenses/lucene-highlighter-9.1.0.jar.sha1 delete mode 100644 server/licenses/lucene-join-9.1.0-snapshot-ea989fe8f30.jar.sha1 create mode 100644 server/licenses/lucene-join-9.1.0.jar.sha1 delete mode 100644 server/licenses/lucene-memory-9.1.0-snapshot-ea989fe8f30.jar.sha1 create mode 100644 server/licenses/lucene-memory-9.1.0.jar.sha1 delete mode 100644 server/licenses/lucene-misc-9.1.0-snapshot-ea989fe8f30.jar.sha1 create mode 
100644 server/licenses/lucene-misc-9.1.0.jar.sha1 delete mode 100644 server/licenses/lucene-queries-9.1.0-snapshot-ea989fe8f30.jar.sha1 create mode 100644 server/licenses/lucene-queries-9.1.0.jar.sha1 delete mode 100644 server/licenses/lucene-queryparser-9.1.0-snapshot-ea989fe8f30.jar.sha1 create mode 100644 server/licenses/lucene-queryparser-9.1.0.jar.sha1 delete mode 100644 server/licenses/lucene-sandbox-9.1.0-snapshot-ea989fe8f30.jar.sha1 create mode 100644 server/licenses/lucene-sandbox-9.1.0.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-extras-9.1.0-snapshot-ea989fe8f30.jar.sha1 create mode 100644 server/licenses/lucene-spatial-extras-9.1.0.jar.sha1 delete mode 100644 server/licenses/lucene-spatial3d-9.1.0-snapshot-ea989fe8f30.jar.sha1 create mode 100644 server/licenses/lucene-spatial3d-9.1.0.jar.sha1 delete mode 100644 server/licenses/lucene-suggest-9.1.0-snapshot-ea989fe8f30.jar.sha1 create mode 100644 server/licenses/lucene-suggest-9.1.0.jar.sha1 diff --git a/buildSrc/version.properties b/buildSrc/version.properties index b5e14cd24bd93..34934d63a8975 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ opensearch = 2.0.0 -lucene = 9.1.0-snapshot-ea989fe8f30 +lucene = 9.1.0 bundled_jdk_vendor = adoptium bundled_jdk = 17.0.2+8 diff --git a/modules/lang-expression/licenses/lucene-expressions-9.1.0-snapshot-ea989fe8f30.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.1.0-snapshot-ea989fe8f30.jar.sha1 deleted file mode 100644 index fb85ff4827c36..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-9.1.0-snapshot-ea989fe8f30.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c7317bb4e72b820a516e0c8a90beac5acc82c2e2 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-9.1.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.1.0.jar.sha1 new file mode 100644 index 0000000000000..c825e197188fc --- /dev/null +++ 
b/modules/lang-expression/licenses/lucene-expressions-9.1.0.jar.sha1 @@ -0,0 +1 @@ +2711abb758d101fc738c35a6867ee7559da5308b \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.1.0-snapshot-ea989fe8f30.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.1.0-snapshot-ea989fe8f30.jar.sha1 deleted file mode 100644 index 2f0a6ad50e337..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.1.0-snapshot-ea989fe8f30.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -77930f802430648980eded22ca6ed47fedaeaba4 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.1.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.1.0.jar.sha1 new file mode 100644 index 0000000000000..b7733cfa9a00a --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.1.0.jar.sha1 @@ -0,0 +1 @@ +e9b429da553560fa0c363ffc04c774f957c56e14 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.1.0-snapshot-ea989fe8f30.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.1.0-snapshot-ea989fe8f30.jar.sha1 deleted file mode 100644 index a0d112dd733ab..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.1.0-snapshot-ea989fe8f30.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c66f568fa9138c6ab6f3abf1efbfab3c7b5991d4 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.1.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.1.0.jar.sha1 new file mode 100644 index 0000000000000..f5b818a206e7a --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.1.0.jar.sha1 @@ -0,0 +1 @@ +b247f8a877237b4663e4ab7d86fae21c68a58ea5 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.1.0-snapshot-ea989fe8f30.jar.sha1 
b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.1.0-snapshot-ea989fe8f30.jar.sha1 deleted file mode 100644 index a3f939bfe9e05..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.1.0-snapshot-ea989fe8f30.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e8c47600ea859b999a5f5647341b0350b03dafcd \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.1.0.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.1.0.jar.sha1 new file mode 100644 index 0000000000000..4d22255d10316 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.1.0.jar.sha1 @@ -0,0 +1 @@ +30e24b42fb0440911e702a531f4373bf397eb8c6 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.1.0-snapshot-ea989fe8f30.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.1.0-snapshot-ea989fe8f30.jar.sha1 deleted file mode 100644 index e2006546433fd..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.1.0-snapshot-ea989fe8f30.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6f0f5c71052beee26e4ce99e1147ce406234f417 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.1.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.1.0.jar.sha1 new file mode 100644 index 0000000000000..a0607e6158cdd --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.1.0.jar.sha1 @@ -0,0 +1 @@ +18a321d93836ea2856a5302d192e9dc99c647c6e \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.1.0-snapshot-ea989fe8f30.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.1.0-snapshot-ea989fe8f30.jar.sha1 deleted file mode 100644 index e675c5774f5a4..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.1.0-snapshot-ea989fe8f30.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-32aad8b8491df3c9862e7fe75e98bccdb6a25bda \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.1.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.1.0.jar.sha1 new file mode 100644 index 0000000000000..bff959139a86c --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.1.0.jar.sha1 @@ -0,0 +1 @@ +41c847f39a15bb8495be8c9d8a098974be15f74b \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.1.0-snapshot-ea989fe8f30.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.1.0-snapshot-ea989fe8f30.jar.sha1 deleted file mode 100644 index 053f5c97d65dc..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.1.0-snapshot-ea989fe8f30.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ef546cfaaf727d93c4e86ddc7f77b525af135623 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.1.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.1.0.jar.sha1 new file mode 100644 index 0000000000000..39d25d7872ea9 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.1.0.jar.sha1 @@ -0,0 +1 @@ +ee7995231b181aa0a01f5aef8775562e269f5ef7 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.1.0-snapshot-ea989fe8f30.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.1.0-snapshot-ea989fe8f30.jar.sha1 deleted file mode 100644 index e5a2a0b0b4ab3..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.1.0-snapshot-ea989fe8f30.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -21c3511469f67019804e41a8d83ffc5c36de6479 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.1.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.1.0.jar.sha1 new file mode 100644 index 
0000000000000..9f07f122205d9 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.1.0.jar.sha1 @@ -0,0 +1 @@ +575c458431396baa7f01a546173807f27b12a087 \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.1.0-snapshot-ea989fe8f30.jar.sha1 b/server/licenses/lucene-analysis-common-9.1.0-snapshot-ea989fe8f30.jar.sha1 deleted file mode 100644 index 6ef0f1eafc345..0000000000000 --- a/server/licenses/lucene-analysis-common-9.1.0-snapshot-ea989fe8f30.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bafd720282a371efe7b0e7238f9dee7e2ad3a586 \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.1.0.jar.sha1 b/server/licenses/lucene-analysis-common-9.1.0.jar.sha1 new file mode 100644 index 0000000000000..4d2a9cf9451cc --- /dev/null +++ b/server/licenses/lucene-analysis-common-9.1.0.jar.sha1 @@ -0,0 +1 @@ +240e3997fb139ff001e022124c89b686b5a8498d \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.1.0-snapshot-ea989fe8f30.jar.sha1 b/server/licenses/lucene-backward-codecs-9.1.0-snapshot-ea989fe8f30.jar.sha1 deleted file mode 100644 index 017333945a866..0000000000000 --- a/server/licenses/lucene-backward-codecs-9.1.0-snapshot-ea989fe8f30.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -597fe288a252a14c0876451c97afee2b4529f85a \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.1.0.jar.sha1 b/server/licenses/lucene-backward-codecs-9.1.0.jar.sha1 new file mode 100644 index 0000000000000..b6df56db28cd6 --- /dev/null +++ b/server/licenses/lucene-backward-codecs-9.1.0.jar.sha1 @@ -0,0 +1 @@ +de23bdacb09e8b39cbe876ff79c7a5b2ecc1faa6 \ No newline at end of file diff --git a/server/licenses/lucene-core-9.1.0-snapshot-ea989fe8f30.jar.sha1 b/server/licenses/lucene-core-9.1.0-snapshot-ea989fe8f30.jar.sha1 deleted file mode 100644 index a2ba0f0ffa43c..0000000000000 --- a/server/licenses/lucene-core-9.1.0-snapshot-ea989fe8f30.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-80cd2fff33ced89924771c7079d42bf82f1266f6 \ No newline at end of file diff --git a/server/licenses/lucene-core-9.1.0.jar.sha1 b/server/licenses/lucene-core-9.1.0.jar.sha1 new file mode 100644 index 0000000000000..45e7ae47dae3e --- /dev/null +++ b/server/licenses/lucene-core-9.1.0.jar.sha1 @@ -0,0 +1 @@ +0375603f1dacd8266526404faf0088a2ac8ec2ff \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.1.0-snapshot-ea989fe8f30.jar.sha1 b/server/licenses/lucene-grouping-9.1.0-snapshot-ea989fe8f30.jar.sha1 deleted file mode 100644 index ac0c1be0f952b..0000000000000 --- a/server/licenses/lucene-grouping-9.1.0-snapshot-ea989fe8f30.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7059f47668a2942c60ad03b1d58eca8dcb010e4e \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.1.0.jar.sha1 b/server/licenses/lucene-grouping-9.1.0.jar.sha1 new file mode 100644 index 0000000000000..be423fdde04f7 --- /dev/null +++ b/server/licenses/lucene-grouping-9.1.0.jar.sha1 @@ -0,0 +1 @@ +703308505e62fa7dcb0bf64fdb6d95d335941bdc \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.1.0-snapshot-ea989fe8f30.jar.sha1 b/server/licenses/lucene-highlighter-9.1.0-snapshot-ea989fe8f30.jar.sha1 deleted file mode 100644 index fa08ed63f7c44..0000000000000 --- a/server/licenses/lucene-highlighter-9.1.0-snapshot-ea989fe8f30.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3c841ca23eb08a939fa49ba4af249c3b6d849c42 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.1.0.jar.sha1 b/server/licenses/lucene-highlighter-9.1.0.jar.sha1 new file mode 100644 index 0000000000000..c130c27ed4c37 --- /dev/null +++ b/server/licenses/lucene-highlighter-9.1.0.jar.sha1 @@ -0,0 +1 @@ +7f1925f6ef985000399a277ca17b8f67d3056838 \ No newline at end of file diff --git a/server/licenses/lucene-join-9.1.0-snapshot-ea989fe8f30.jar.sha1 b/server/licenses/lucene-join-9.1.0-snapshot-ea989fe8f30.jar.sha1 deleted file mode 100644 index 2a3e2a9107a60..0000000000000 --- 
a/server/licenses/lucene-join-9.1.0-snapshot-ea989fe8f30.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4984e041ae68f5939c01e41b2c9648ae2c021340 \ No newline at end of file diff --git a/server/licenses/lucene-join-9.1.0.jar.sha1 b/server/licenses/lucene-join-9.1.0.jar.sha1 new file mode 100644 index 0000000000000..b678051ddaf26 --- /dev/null +++ b/server/licenses/lucene-join-9.1.0.jar.sha1 @@ -0,0 +1 @@ +e7d39da8e623c99ee8da8bcc0185b2d908aca4b3 \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.1.0-snapshot-ea989fe8f30.jar.sha1 b/server/licenses/lucene-memory-9.1.0-snapshot-ea989fe8f30.jar.sha1 deleted file mode 100644 index eefd08d222ef8..0000000000000 --- a/server/licenses/lucene-memory-9.1.0-snapshot-ea989fe8f30.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fead9467ce65469579168eb0f47e014fdb3c63d9 \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.1.0.jar.sha1 b/server/licenses/lucene-memory-9.1.0.jar.sha1 new file mode 100644 index 0000000000000..a07b052e9c332 --- /dev/null +++ b/server/licenses/lucene-memory-9.1.0.jar.sha1 @@ -0,0 +1 @@ +209166fd48dae3261ccf26990fe600332b8fb373 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.1.0-snapshot-ea989fe8f30.jar.sha1 b/server/licenses/lucene-misc-9.1.0-snapshot-ea989fe8f30.jar.sha1 deleted file mode 100644 index 226f97cf6f3bc..0000000000000 --- a/server/licenses/lucene-misc-9.1.0-snapshot-ea989fe8f30.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d98ab1966b8ca53b70fe071281bcea27d602ec30 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.1.0.jar.sha1 b/server/licenses/lucene-misc-9.1.0.jar.sha1 new file mode 100644 index 0000000000000..8627e481c6214 --- /dev/null +++ b/server/licenses/lucene-misc-9.1.0.jar.sha1 @@ -0,0 +1 @@ +905d93b6389060cf4b0cb464ffa8fa2db81b60e7 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.1.0-snapshot-ea989fe8f30.jar.sha1 b/server/licenses/lucene-queries-9.1.0-snapshot-ea989fe8f30.jar.sha1 deleted file mode 
100644 index c151e6b76e21a..0000000000000 --- a/server/licenses/lucene-queries-9.1.0-snapshot-ea989fe8f30.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -895e27127ae55031e35e152da8be941bd55f7f6a \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.1.0.jar.sha1 b/server/licenses/lucene-queries-9.1.0.jar.sha1 new file mode 100644 index 0000000000000..9e81da7ca5c15 --- /dev/null +++ b/server/licenses/lucene-queries-9.1.0.jar.sha1 @@ -0,0 +1 @@ +c50fc971573910ea239ee6f275e9257b6b6bdd48 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.1.0-snapshot-ea989fe8f30.jar.sha1 b/server/licenses/lucene-queryparser-9.1.0-snapshot-ea989fe8f30.jar.sha1 deleted file mode 100644 index b73b7152aed05..0000000000000 --- a/server/licenses/lucene-queryparser-9.1.0-snapshot-ea989fe8f30.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1433392237ea01ef35f4e2ffc52f496b0669624c \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.1.0.jar.sha1 b/server/licenses/lucene-queryparser-9.1.0.jar.sha1 new file mode 100644 index 0000000000000..fb04adf2051d0 --- /dev/null +++ b/server/licenses/lucene-queryparser-9.1.0.jar.sha1 @@ -0,0 +1 @@ +383eb69b12f9d9c98c44237155f50c870c9a34b9 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.1.0-snapshot-ea989fe8f30.jar.sha1 b/server/licenses/lucene-sandbox-9.1.0-snapshot-ea989fe8f30.jar.sha1 deleted file mode 100644 index d441dd2f8cb31..0000000000000 --- a/server/licenses/lucene-sandbox-9.1.0-snapshot-ea989fe8f30.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b0688963ca8288f5a3e47ca6e4b38bc2fde780e7 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.1.0.jar.sha1 b/server/licenses/lucene-sandbox-9.1.0.jar.sha1 new file mode 100644 index 0000000000000..429a84de46f3c --- /dev/null +++ b/server/licenses/lucene-sandbox-9.1.0.jar.sha1 @@ -0,0 +1 @@ +0c728684e750a63f881998fbe27afd897f739762 \ No newline at end of file diff --git 
a/server/licenses/lucene-spatial-extras-9.1.0-snapshot-ea989fe8f30.jar.sha1 b/server/licenses/lucene-spatial-extras-9.1.0-snapshot-ea989fe8f30.jar.sha1 deleted file mode 100644 index 5ffa78a6e7d87..0000000000000 --- a/server/licenses/lucene-spatial-extras-9.1.0-snapshot-ea989fe8f30.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -206e8918a726710c8a6fb927e59adf26c6ad5bed \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.1.0.jar.sha1 b/server/licenses/lucene-spatial-extras-9.1.0.jar.sha1 new file mode 100644 index 0000000000000..7078cbc05fff7 --- /dev/null +++ b/server/licenses/lucene-spatial-extras-9.1.0.jar.sha1 @@ -0,0 +1 @@ +94d7d107c399cd11d407b94fa62f5677fe86f63b \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.1.0-snapshot-ea989fe8f30.jar.sha1 b/server/licenses/lucene-spatial3d-9.1.0-snapshot-ea989fe8f30.jar.sha1 deleted file mode 100644 index 8c4bb08303c34..0000000000000 --- a/server/licenses/lucene-spatial3d-9.1.0-snapshot-ea989fe8f30.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3d1e26c37b45bdf2ef598d16468220ab33983a8f \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.1.0.jar.sha1 b/server/licenses/lucene-spatial3d-9.1.0.jar.sha1 new file mode 100644 index 0000000000000..604e8ed054ac1 --- /dev/null +++ b/server/licenses/lucene-spatial3d-9.1.0.jar.sha1 @@ -0,0 +1 @@ +7717b300bc14dfa9eb4b7d5970d8e25a60010e64 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.1.0-snapshot-ea989fe8f30.jar.sha1 b/server/licenses/lucene-suggest-9.1.0-snapshot-ea989fe8f30.jar.sha1 deleted file mode 100644 index 3c8d9b87da0e5..0000000000000 --- a/server/licenses/lucene-suggest-9.1.0-snapshot-ea989fe8f30.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -69ab05339614766c732fef7c037cc5b676bd40dc \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.1.0.jar.sha1 b/server/licenses/lucene-suggest-9.1.0.jar.sha1 new file mode 100644 index 0000000000000..4562a19706634 --- /dev/null +++ 
b/server/licenses/lucene-suggest-9.1.0.jar.sha1 @@ -0,0 +1 @@ +957fca507eba94dbc3ef0d02377839be49bbe619 \ No newline at end of file From c1d5491baf02b5ea0223d3075a5e1fc288d54bcf Mon Sep 17 00:00:00 2001 From: Suraj Singh <79435743+dreamer-89@users.noreply.github.com> Date: Wed, 23 Mar 2022 06:15:17 -0700 Subject: [PATCH 002/653] [Type removal] Remove deprecation warning on use of _type in doc scripts (#2564) Signed-off-by: Suraj Singh --- .../java/org/opensearch/search/lookup/LeafDocLookup.java | 9 --------- 1 file changed, 9 deletions(-) diff --git a/server/src/main/java/org/opensearch/search/lookup/LeafDocLookup.java b/server/src/main/java/org/opensearch/search/lookup/LeafDocLookup.java index 82daa94d92146..716476101ac48 100644 --- a/server/src/main/java/org/opensearch/search/lookup/LeafDocLookup.java +++ b/server/src/main/java/org/opensearch/search/lookup/LeafDocLookup.java @@ -33,7 +33,6 @@ import org.apache.lucene.index.LeafReaderContext; import org.opensearch.ExceptionsHelper; -import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.ScriptDocValues; import org.opensearch.index.mapper.MappedFieldType; @@ -50,10 +49,6 @@ public class LeafDocLookup implements Map> { - private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(LeafDocLookup.class); - static final String TYPES_DEPRECATION_KEY = "type-field-doc-lookup"; - static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Looking up doc types [_type] in scripts is deprecated."; - private final Map> localCacheFieldData = new HashMap<>(4); private final MapperService mapperService; @@ -78,10 +73,6 @@ public void setDocument(int docId) { @Override public ScriptDocValues get(Object key) { - // deprecate _type - if ("_type".equals(key)) { - DEPRECATION_LOGGER.deprecate(TYPES_DEPRECATION_KEY, TYPES_DEPRECATION_MESSAGE); - } // assume its a string... 
String fieldName = key.toString(); ScriptDocValues scriptValues = localCacheFieldData.get(fieldName); From 511ac884fe49e6d97738d42a67ffa1819396ca2a Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Wed, 23 Mar 2022 13:26:36 -0500 Subject: [PATCH 003/653] [Bug] Fix InboundDecoder version compat check (#2570) Change InboundDecoder ensureVersionCompatibility check for onOrAfter V_2_0_0 instead of explicit version check. This way bug fix and minor versions will correctly handshake in a mixed 1.x Cluster. Signed-off-by: Nicholas Walter Knize --- .../src/main/java/org/opensearch/transport/InboundDecoder.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/opensearch/transport/InboundDecoder.java b/server/src/main/java/org/opensearch/transport/InboundDecoder.java index bd1d384fd37da..9cfb4a79161e7 100644 --- a/server/src/main/java/org/opensearch/transport/InboundDecoder.java +++ b/server/src/main/java/org/opensearch/transport/InboundDecoder.java @@ -217,7 +217,7 @@ static IllegalStateException ensureVersionCompatibility(Version remoteVersion, V // handshake. This looks odd but it's required to establish the connection correctly we check for real compatibility // once the connection is established final Version compatibilityVersion = isHandshake ? currentVersion.minimumCompatibilityVersion() : currentVersion; - if ((currentVersion.equals(Version.V_2_0_0) && remoteVersion.equals(Version.fromId(6079999))) == false + if ((currentVersion.onOrAfter(Version.V_2_0_0) && remoteVersion.equals(Version.fromId(6079999))) == false && remoteVersion.isCompatible(compatibilityVersion) == false) { final Version minCompatibilityVersion = isHandshake ? compatibilityVersion : compatibilityVersion.minimumCompatibilityVersion(); String msg = "Received " + (isHandshake ? 
"handshake " : "") + "message from unsupported version: ["; From 2e9f89a89efb2b44adc783d378e433bce79273c0 Mon Sep 17 00:00:00 2001 From: Vacha Shah Date: Wed, 23 Mar 2022 12:56:11 -0700 Subject: [PATCH 004/653] Adding signoff option for version workflow PR (#2572) Signed-off-by: Vacha Shah --- .github/workflows/version.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/version.yml b/.github/workflows/version.yml index b42e7c4f2f317..030689642677a 100644 --- a/.github/workflows/version.yml +++ b/.github/workflows/version.yml @@ -59,6 +59,7 @@ jobs: base: ${{ env.BASE }} branch: 'create-pull-request/patch-${{ env.BASE }}' commit-message: Incremented version to ${{ env.NEXT_VERSION }} + signoff: true delete-branch: true title: '[AUTO] Incremented version to ${{ env.NEXT_VERSION }}.' body: | @@ -83,6 +84,7 @@ jobs: base: ${{ env.BASE_X }} branch: 'create-pull-request/patch-${{ env.BASE_X }}' commit-message: Added bwc version ${{ env.NEXT_VERSION }} + signoff: true delete-branch: true title: '[AUTO] [${{ env.BASE_X }}] Added bwc version ${{ env.NEXT_VERSION }}.' body: | @@ -107,6 +109,7 @@ jobs: base: main branch: 'create-pull-request/patch-main' commit-message: Added bwc version ${{ env.NEXT_VERSION }} + signoff: true delete-branch: true title: '[AUTO] [main] Added bwc version ${{ env.NEXT_VERSION }}.' 
body: | From b6ca0d1f78e765f509d6b52af8f488548fdddf94 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Thu, 24 Mar 2022 14:20:31 -0400 Subject: [PATCH 005/653] Concurrent Searching (Experimental) (#1500) * Concurrent Searching (Experimental) Signed-off-by: Andriy Redko * Addressingf code review comments Signed-off-by: Andriy Redko --- .../plugins/concurrent-search/build.gradle | 42 + .../search/ConcurrentSegmentSearchPlugin.java | 53 + .../org/opensearch/search/package-info.java | 12 + .../query/ConcurrentQueryPhaseSearcher.java | 119 ++ .../opensearch/search/query/package-info.java | 12 + .../profile/query/QueryProfilerTests.java | 316 ++++ .../search/query/QueryPhaseTests.java | 1335 +++++++++++++++++ .../search/query/QueryProfilePhaseTests.java | 1182 +++++++++++++++ .../common/lucene/MinimumScoreCollector.java | 4 + .../lucene/search/FilteredCollector.java | 4 + .../search/DefaultSearchContext.java | 8 +- .../search/aggregations/AggregationPhase.java | 42 +- .../search/internal/ContextIndexSearcher.java | 33 +- .../internal/FilteredSearchContext.java | 6 +- .../search/internal/SearchContext.java | 6 +- .../opensearch/search/profile/Profilers.java | 2 +- .../InternalProfileCollectorManager.java | 89 ++ .../query/ProfileCollectorManager.java | 17 + .../query/EarlyTerminatingCollector.java | 4 + .../EarlyTerminatingCollectorManager.java | 74 + .../query/EarlyTerminatingListener.java | 22 + .../query/FilteredCollectorManager.java | 45 + .../search/query/MinimumCollectorManager.java | 44 + .../search/query/MultiCollectorWrapper.java | 58 + .../search/query/QueryCollectorContext.java | 71 +- .../query/QueryCollectorManagerContext.java | 99 ++ .../opensearch/search/query/QueryPhase.java | 4 +- .../search/query/ReduceableSearchResult.java | 23 + .../search/query/TopDocsCollectorContext.java | 338 ++++- .../query/TotalHitCountCollectorManager.java | 106 ++ .../search/DefaultSearchContextTests.java | 38 +- .../search/SearchCancellationTests.java | 9 +- 
.../internal/ContextIndexSearcherTests.java | 3 +- .../profile/query/QueryProfilerTests.java | 32 +- .../search/query/QueryPhaseTests.java | 285 +++- .../search/query/QueryProfilePhaseTests.java | 1158 ++++++++++++++ .../aggregations/AggregatorTestCase.java | 3 +- .../opensearch/test/TestSearchContext.java | 36 +- 38 files changed, 5563 insertions(+), 171 deletions(-) create mode 100644 sandbox/plugins/concurrent-search/build.gradle create mode 100644 sandbox/plugins/concurrent-search/src/main/java/org/opensearch/search/ConcurrentSegmentSearchPlugin.java create mode 100644 sandbox/plugins/concurrent-search/src/main/java/org/opensearch/search/package-info.java create mode 100644 sandbox/plugins/concurrent-search/src/main/java/org/opensearch/search/query/ConcurrentQueryPhaseSearcher.java create mode 100644 sandbox/plugins/concurrent-search/src/main/java/org/opensearch/search/query/package-info.java create mode 100644 sandbox/plugins/concurrent-search/src/test/java/org/opensearch/search/profile/query/QueryProfilerTests.java create mode 100644 sandbox/plugins/concurrent-search/src/test/java/org/opensearch/search/query/QueryPhaseTests.java create mode 100644 sandbox/plugins/concurrent-search/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java create mode 100644 server/src/main/java/org/opensearch/search/profile/query/InternalProfileCollectorManager.java create mode 100644 server/src/main/java/org/opensearch/search/profile/query/ProfileCollectorManager.java create mode 100644 server/src/main/java/org/opensearch/search/query/EarlyTerminatingCollectorManager.java create mode 100644 server/src/main/java/org/opensearch/search/query/EarlyTerminatingListener.java create mode 100644 server/src/main/java/org/opensearch/search/query/FilteredCollectorManager.java create mode 100644 server/src/main/java/org/opensearch/search/query/MinimumCollectorManager.java create mode 100644 server/src/main/java/org/opensearch/search/query/MultiCollectorWrapper.java create mode 
100644 server/src/main/java/org/opensearch/search/query/QueryCollectorManagerContext.java create mode 100644 server/src/main/java/org/opensearch/search/query/ReduceableSearchResult.java create mode 100644 server/src/main/java/org/opensearch/search/query/TotalHitCountCollectorManager.java create mode 100644 server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java diff --git a/sandbox/plugins/concurrent-search/build.gradle b/sandbox/plugins/concurrent-search/build.gradle new file mode 100644 index 0000000000000..acc3cb5092cd8 --- /dev/null +++ b/sandbox/plugins/concurrent-search/build.gradle @@ -0,0 +1,42 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +apply plugin: 'opensearch.opensearchplugin' +apply plugin: 'opensearch.yaml-rest-test' + +opensearchplugin { + name 'concurrent-search' + description 'The experimental plugin which implements concurrent search over Apache Lucene segments' + classname 'org.opensearch.search.ConcurrentSegmentSearchPlugin' + licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt') + noticeFile rootProject.file('NOTICE.txt') +} + +yamlRestTest.enabled = false; +testingConventions.enabled = false; \ No newline at end of file diff --git a/sandbox/plugins/concurrent-search/src/main/java/org/opensearch/search/ConcurrentSegmentSearchPlugin.java b/sandbox/plugins/concurrent-search/src/main/java/org/opensearch/search/ConcurrentSegmentSearchPlugin.java new file mode 100644 index 0000000000000..da999e40f0f07 --- /dev/null +++ b/sandbox/plugins/concurrent-search/src/main/java/org/opensearch/search/ConcurrentSegmentSearchPlugin.java @@ -0,0 +1,53 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search; + +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.concurrent.OpenSearchExecutors; +import org.opensearch.plugins.Plugin; +import org.opensearch.plugins.SearchPlugin; +import org.opensearch.search.query.ConcurrentQueryPhaseSearcher; +import org.opensearch.search.query.QueryPhaseSearcher; +import org.opensearch.threadpool.ExecutorBuilder; +import org.opensearch.threadpool.FixedExecutorBuilder; +import org.opensearch.threadpool.ThreadPool; + +import java.util.Collections; +import java.util.List; +import java.util.Optional; + +/** + * The experimental plugin which implements the concurrent search over Apache Lucene segments. 
+ */ +public class ConcurrentSegmentSearchPlugin extends Plugin implements SearchPlugin { + private static final String INDEX_SEARCHER = "index_searcher"; + + /** + * Default constructor + */ + public ConcurrentSegmentSearchPlugin() {} + + @Override + public Optional getQueryPhaseSearcher() { + return Optional.of(new ConcurrentQueryPhaseSearcher()); + } + + @Override + public List> getExecutorBuilders(Settings settings) { + final int allocatedProcessors = OpenSearchExecutors.allocatedProcessors(settings); + return Collections.singletonList( + new FixedExecutorBuilder(settings, INDEX_SEARCHER, allocatedProcessors, 1000, "thread_pool." + INDEX_SEARCHER) + ); + } + + @Override + public Optional getIndexSearcherExecutorProvider() { + return Optional.of((ThreadPool threadPool) -> threadPool.executor(INDEX_SEARCHER)); + } +} diff --git a/sandbox/plugins/concurrent-search/src/main/java/org/opensearch/search/package-info.java b/sandbox/plugins/concurrent-search/src/main/java/org/opensearch/search/package-info.java new file mode 100644 index 0000000000000..041f914fab7d7 --- /dev/null +++ b/sandbox/plugins/concurrent-search/src/main/java/org/opensearch/search/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * The implementation of the experimental plugin which implements the concurrent search over Apache Lucene segments. 
+ */ +package org.opensearch.search; diff --git a/sandbox/plugins/concurrent-search/src/main/java/org/opensearch/search/query/ConcurrentQueryPhaseSearcher.java b/sandbox/plugins/concurrent-search/src/main/java/org/opensearch/search/query/ConcurrentQueryPhaseSearcher.java new file mode 100644 index 0000000000000..65f339838a40b --- /dev/null +++ b/sandbox/plugins/concurrent-search/src/main/java/org/opensearch/search/query/ConcurrentQueryPhaseSearcher.java @@ -0,0 +1,119 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.query; + +import static org.opensearch.search.query.TopDocsCollectorContext.createTopDocsCollectorContext; + +import java.io.IOException; +import java.util.LinkedList; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.search.Collector; +import org.apache.lucene.search.CollectorManager; +import org.apache.lucene.search.Query; +import org.opensearch.search.internal.ContextIndexSearcher; +import org.opensearch.search.internal.SearchContext; +import org.opensearch.search.profile.query.ProfileCollectorManager; +import org.opensearch.search.query.QueryPhase.DefaultQueryPhaseSearcher; +import org.opensearch.search.query.QueryPhase.TimeExceededException; + +/** + * The implementation of the {@link QueryPhaseSearcher} which attempts to use concurrent + * search of Apache Lucene segments if it has been enabled. 
+ */ +public class ConcurrentQueryPhaseSearcher extends DefaultQueryPhaseSearcher { + private static final Logger LOGGER = LogManager.getLogger(ConcurrentQueryPhaseSearcher.class); + + /** + * Default constructor + */ + public ConcurrentQueryPhaseSearcher() {} + + @Override + protected boolean searchWithCollector( + SearchContext searchContext, + ContextIndexSearcher searcher, + Query query, + LinkedList collectors, + boolean hasFilterCollector, + boolean hasTimeout + ) throws IOException { + boolean couldUseConcurrentSegmentSearch = allowConcurrentSegmentSearch(searcher); + + // TODO: support aggregations + if (searchContext.aggregations() != null) { + couldUseConcurrentSegmentSearch = false; + LOGGER.debug("Unable to use concurrent search over index segments (experimental): aggregations are present"); + } + + if (couldUseConcurrentSegmentSearch) { + LOGGER.debug("Using concurrent search over index segments (experimental)"); + return searchWithCollectorManager(searchContext, searcher, query, collectors, hasFilterCollector, hasTimeout); + } else { + return super.searchWithCollector(searchContext, searcher, query, collectors, hasFilterCollector, hasTimeout); + } + } + + private static boolean searchWithCollectorManager( + SearchContext searchContext, + ContextIndexSearcher searcher, + Query query, + LinkedList collectorContexts, + boolean hasFilterCollector, + boolean timeoutSet + ) throws IOException { + // create the top docs collector last when the other collectors are known + final TopDocsCollectorContext topDocsFactory = createTopDocsCollectorContext(searchContext, hasFilterCollector); + // add the top docs collector, the first collector context in the chain + collectorContexts.addFirst(topDocsFactory); + + final QuerySearchResult queryResult = searchContext.queryResult(); + final CollectorManager collectorManager; + + // TODO: support aggregations in concurrent segment search flow + if (searchContext.aggregations() != null) { + throw new 
UnsupportedOperationException("The concurrent segment search does not support aggregations yet"); + } + + if (searchContext.getProfilers() != null) { + final ProfileCollectorManager profileCollectorManager = + QueryCollectorManagerContext.createQueryCollectorManagerWithProfiler(collectorContexts); + searchContext.getProfilers().getCurrentQueryProfiler().setCollector(profileCollectorManager); + collectorManager = profileCollectorManager; + } else { + // Create multi collector manager instance + collectorManager = QueryCollectorManagerContext.createMultiCollectorManager(collectorContexts); + } + + try { + final ReduceableSearchResult result = searcher.search(query, collectorManager); + result.reduce(queryResult); + } catch (EarlyTerminatingCollector.EarlyTerminationException e) { + queryResult.terminatedEarly(true); + } catch (TimeExceededException e) { + assert timeoutSet : "TimeExceededException thrown even though timeout wasn't set"; + if (searchContext.request().allowPartialSearchResults() == false) { + // Can't rethrow TimeExceededException because not serializable + throw new QueryPhaseExecutionException(searchContext.shardTarget(), "Time exceeded"); + } + queryResult.searchTimedOut(true); + } + if (searchContext.terminateAfter() != SearchContext.DEFAULT_TERMINATE_AFTER && queryResult.terminatedEarly() == null) { + queryResult.terminatedEarly(false); + } + + return topDocsFactory.shouldRescore(); + } + + private static boolean allowConcurrentSegmentSearch(final ContextIndexSearcher searcher) { + return (searcher.getExecutor() != null); + } + +} diff --git a/sandbox/plugins/concurrent-search/src/main/java/org/opensearch/search/query/package-info.java b/sandbox/plugins/concurrent-search/src/main/java/org/opensearch/search/query/package-info.java new file mode 100644 index 0000000000000..0f98ae7682a84 --- /dev/null +++ b/sandbox/plugins/concurrent-search/src/main/java/org/opensearch/search/query/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: 
Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * {@link org.opensearch.search.query.QueryPhaseSearcher} implementation for concurrent search + */ +package org.opensearch.search.query; diff --git a/sandbox/plugins/concurrent-search/src/test/java/org/opensearch/search/profile/query/QueryProfilerTests.java b/sandbox/plugins/concurrent-search/src/test/java/org/opensearch/search/profile/query/QueryProfilerTests.java new file mode 100644 index 0000000000000..51cb3c8c0cddc --- /dev/null +++ b/sandbox/plugins/concurrent-search/src/test/java/org/opensearch/search/profile/query/QueryProfilerTests.java @@ -0,0 +1,316 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.profile.query; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field.Store; +import org.apache.lucene.document.StringField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.Explanation; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.LRUQueryCache; +import org.apache.lucene.search.LeafCollector; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryCachingPolicy; +import org.apache.lucene.search.QueryVisitor; +import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.ScorerSupplier; +import org.apache.lucene.search.Sort; +import 
org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.TotalHitCountCollector; +import org.apache.lucene.search.Weight; +import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.apache.lucene.tests.search.RandomApproximationQuery; +import org.apache.lucene.tests.util.TestUtil; +import org.opensearch.core.internal.io.IOUtils; +import org.opensearch.search.internal.ContextIndexSearcher; +import org.opensearch.search.profile.ProfileResult; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; + +public class QueryProfilerTests extends OpenSearchTestCase { + + private Directory dir; + private IndexReader reader; + private ContextIndexSearcher searcher; + private ExecutorService executor; + + @ParametersFactory + public static Collection concurrency() { + return Arrays.asList(new Integer[] { 0 }, new Integer[] { 5 }); + } + + public QueryProfilerTests(int concurrency) { + this.executor = (concurrency > 0) ? 
Executors.newFixedThreadPool(concurrency) : null; + } + + @Before + public void setUp() throws Exception { + super.setUp(); + + dir = newDirectory(); + RandomIndexWriter w = new RandomIndexWriter(random(), dir); + final int numDocs = TestUtil.nextInt(random(), 1, 20); + for (int i = 0; i < numDocs; ++i) { + final int numHoles = random().nextInt(5); + for (int j = 0; j < numHoles; ++j) { + w.addDocument(new Document()); + } + Document doc = new Document(); + doc.add(new StringField("foo", "bar", Store.NO)); + w.addDocument(doc); + } + reader = w.getReader(); + w.close(); + searcher = new ContextIndexSearcher( + reader, + IndexSearcher.getDefaultSimilarity(), + IndexSearcher.getDefaultQueryCache(), + ALWAYS_CACHE_POLICY, + true, + executor + ); + } + + @After + public void tearDown() throws Exception { + super.tearDown(); + + LRUQueryCache cache = (LRUQueryCache) searcher.getQueryCache(); + assertThat(cache.getHitCount(), equalTo(0L)); + assertThat(cache.getCacheCount(), equalTo(0L)); + assertThat(cache.getTotalCount(), equalTo(cache.getMissCount())); + assertThat(cache.getCacheSize(), equalTo(0L)); + + if (executor != null) { + ThreadPool.terminate(executor, 10, TimeUnit.SECONDS); + } + + IOUtils.close(reader, dir); + dir = null; + reader = null; + searcher = null; + } + + public void testBasic() throws IOException { + QueryProfiler profiler = new QueryProfiler(executor != null); + searcher.setProfiler(profiler); + Query query = new TermQuery(new Term("foo", "bar")); + searcher.search(query, 1); + List results = profiler.getTree(); + assertEquals(1, results.size()); + Map breakdown = results.get(0).getTimeBreakdown(); + assertThat(breakdown.get(QueryTimingType.CREATE_WEIGHT.toString()), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.BUILD_SCORER.toString()), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.NEXT_DOC.toString()), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.ADVANCE.toString()), equalTo(0L)); + 
assertThat(breakdown.get(QueryTimingType.SCORE.toString()), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.MATCH.toString()), equalTo(0L)); + + assertThat(breakdown.get(QueryTimingType.CREATE_WEIGHT.toString() + "_count"), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.BUILD_SCORER.toString() + "_count"), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.NEXT_DOC.toString() + "_count"), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.ADVANCE.toString() + "_count"), equalTo(0L)); + assertThat(breakdown.get(QueryTimingType.SCORE.toString() + "_count"), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.MATCH.toString() + "_count"), equalTo(0L)); + + long rewriteTime = profiler.getRewriteTime(); + assertThat(rewriteTime, greaterThan(0L)); + } + + public void testNoScoring() throws IOException { + QueryProfiler profiler = new QueryProfiler(executor != null); + searcher.setProfiler(profiler); + Query query = new TermQuery(new Term("foo", "bar")); + searcher.search(query, 1, Sort.INDEXORDER); // scores are not needed + List results = profiler.getTree(); + assertEquals(1, results.size()); + Map breakdown = results.get(0).getTimeBreakdown(); + assertThat(breakdown.get(QueryTimingType.CREATE_WEIGHT.toString()), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.BUILD_SCORER.toString()), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.NEXT_DOC.toString()), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.ADVANCE.toString()), equalTo(0L)); + assertThat(breakdown.get(QueryTimingType.SCORE.toString()), equalTo(0L)); + assertThat(breakdown.get(QueryTimingType.MATCH.toString()), equalTo(0L)); + + assertThat(breakdown.get(QueryTimingType.CREATE_WEIGHT.toString() + "_count"), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.BUILD_SCORER.toString() + "_count"), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.NEXT_DOC.toString() + "_count"), greaterThan(0L)); + 
assertThat(breakdown.get(QueryTimingType.ADVANCE.toString() + "_count"), equalTo(0L)); + assertThat(breakdown.get(QueryTimingType.SCORE.toString() + "_count"), equalTo(0L)); + assertThat(breakdown.get(QueryTimingType.MATCH.toString() + "_count"), equalTo(0L)); + + long rewriteTime = profiler.getRewriteTime(); + assertThat(rewriteTime, greaterThan(0L)); + } + + public void testUseIndexStats() throws IOException { + QueryProfiler profiler = new QueryProfiler(executor != null); + searcher.setProfiler(profiler); + Query query = new TermQuery(new Term("foo", "bar")); + searcher.count(query); // will use index stats + List results = profiler.getTree(); + assertEquals(1, results.size()); + ProfileResult result = results.get(0); + assertEquals(0, (long) result.getTimeBreakdown().get("build_scorer_count")); + + long rewriteTime = profiler.getRewriteTime(); + assertThat(rewriteTime, greaterThan(0L)); + } + + public void testApproximations() throws IOException { + QueryProfiler profiler = new QueryProfiler(executor != null); + searcher.setProfiler(profiler); + Query query = new RandomApproximationQuery(new TermQuery(new Term("foo", "bar")), random()); + searcher.count(query); + List results = profiler.getTree(); + assertEquals(1, results.size()); + Map breakdown = results.get(0).getTimeBreakdown(); + assertThat(breakdown.get(QueryTimingType.CREATE_WEIGHT.toString()), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.BUILD_SCORER.toString()), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.NEXT_DOC.toString()), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.ADVANCE.toString()), equalTo(0L)); + assertThat(breakdown.get(QueryTimingType.SCORE.toString()), equalTo(0L)); + assertThat(breakdown.get(QueryTimingType.MATCH.toString()), greaterThan(0L)); + + assertThat(breakdown.get(QueryTimingType.CREATE_WEIGHT.toString() + "_count"), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.BUILD_SCORER.toString() + "_count"), greaterThan(0L)); 
+ assertThat(breakdown.get(QueryTimingType.NEXT_DOC.toString() + "_count"), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.ADVANCE.toString() + "_count"), equalTo(0L)); + assertThat(breakdown.get(QueryTimingType.SCORE.toString() + "_count"), equalTo(0L)); + assertThat(breakdown.get(QueryTimingType.MATCH.toString() + "_count"), greaterThan(0L)); + + long rewriteTime = profiler.getRewriteTime(); + assertThat(rewriteTime, greaterThan(0L)); + } + + public void testCollector() throws IOException { + TotalHitCountCollector collector = new TotalHitCountCollector(); + ProfileCollector profileCollector = new ProfileCollector(collector); + assertEquals(0, profileCollector.getTime()); + final LeafCollector leafCollector = profileCollector.getLeafCollector(reader.leaves().get(0)); + assertThat(profileCollector.getTime(), greaterThan(0L)); + long time = profileCollector.getTime(); + leafCollector.setScorer(null); + assertThat(profileCollector.getTime(), greaterThan(time)); + time = profileCollector.getTime(); + leafCollector.collect(0); + assertThat(profileCollector.getTime(), greaterThan(time)); + } + + private static class DummyQuery extends Query { + + @Override + public String toString(String field) { + return getClass().getSimpleName(); + } + + @Override + public boolean equals(Object obj) { + return this == obj; + } + + @Override + public int hashCode() { + return 0; + } + + @Override + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { + return new Weight(this) { + @Override + public Explanation explain(LeafReaderContext context, int doc) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public Scorer scorer(LeafReaderContext context) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public ScorerSupplier scorerSupplier(LeafReaderContext context) throws IOException { + return new ScorerSupplier() { + + @Override + public Scorer 
get(long loadCost) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public long cost() { + return 42; + } + }; + } + + @Override + public boolean isCacheable(LeafReaderContext ctx) { + return true; + } + }; + } + + @Override + public void visit(QueryVisitor visitor) { + visitor.visitLeaf(this); + } + } + + public void testScorerSupplier() throws IOException { + Directory dir = newDirectory(); + IndexWriter w = new IndexWriter(dir, newIndexWriterConfig()); + w.addDocument(new Document()); + DirectoryReader reader = DirectoryReader.open(w); + w.close(); + IndexSearcher s = newSearcher(reader); + s.setQueryCache(null); + Weight weight = s.createWeight(s.rewrite(new DummyQuery()), randomFrom(ScoreMode.values()), 1f); + // exception when getting the scorer + expectThrows(UnsupportedOperationException.class, () -> weight.scorer(s.getIndexReader().leaves().get(0))); + // no exception, means scorerSupplier is delegated + weight.scorerSupplier(s.getIndexReader().leaves().get(0)); + reader.close(); + dir.close(); + } + + private static final QueryCachingPolicy ALWAYS_CACHE_POLICY = new QueryCachingPolicy() { + + @Override + public void onUse(Query query) {} + + @Override + public boolean shouldCache(Query query) throws IOException { + return true; + } + + }; +} diff --git a/sandbox/plugins/concurrent-search/src/test/java/org/opensearch/search/query/QueryPhaseTests.java b/sandbox/plugins/concurrent-search/src/test/java/org/opensearch/search/query/QueryPhaseTests.java new file mode 100644 index 0000000000000..83a0a63a6a5c8 --- /dev/null +++ b/sandbox/plugins/concurrent-search/src/test/java/org/opensearch/search/query/QueryPhaseTests.java @@ -0,0 +1,1335 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.search.query; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.apache.lucene.analysis.standard.StandardAnalyzer; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field.Store; +import org.apache.lucene.document.LatLonDocValuesField; +import org.apache.lucene.document.LatLonPoint; +import org.apache.lucene.document.LongPoint; +import org.apache.lucene.document.NumericDocValuesField; +import org.apache.lucene.document.SortedDocValuesField; +import org.apache.lucene.document.SortedSetDocValuesField; +import org.apache.lucene.document.StringField; +import org.apache.lucene.document.TextField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NoMergePolicy; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.BooleanClause; +import 
org.apache.lucene.search.BooleanClause.Occur; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.Collector; +import org.apache.lucene.search.CollectorManager; +import org.apache.lucene.search.ConstantScoreQuery; +import org.apache.lucene.search.DocValuesFieldExistsQuery; +import org.apache.lucene.search.FieldComparator; +import org.apache.lucene.search.FieldDoc; +import org.apache.lucene.search.FilterCollector; +import org.apache.lucene.search.FilterLeafCollector; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.LeafCollector; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.MultiTermQuery; +import org.apache.lucene.search.PrefixQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.Sort; +import org.apache.lucene.search.SortField; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.TopDocs; +import org.apache.lucene.search.TopFieldDocs; +import org.apache.lucene.search.TotalHitCountCollector; +import org.apache.lucene.search.TotalHits; +import org.apache.lucene.search.Weight; +import org.apache.lucene.search.grouping.CollapseTopFieldDocs; +import org.apache.lucene.search.join.BitSetProducer; +import org.apache.lucene.search.join.ScoreMode; +import org.apache.lucene.queries.spans.SpanNearQuery; +import org.apache.lucene.queries.spans.SpanTermQuery; +import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.FixedBitSet; +import org.opensearch.action.search.SearchShardTask; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.mapper.DateFieldMapper; +import org.opensearch.index.mapper.MappedFieldType; +import org.opensearch.index.mapper.MapperService; +import 
org.opensearch.index.mapper.NumberFieldMapper; +import org.opensearch.index.mapper.NumberFieldMapper.NumberFieldType; +import org.opensearch.index.mapper.NumberFieldMapper.NumberType; +import org.opensearch.index.query.ParsedQuery; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.index.search.OpenSearchToParentBlockJoinQuery; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.IndexShardTestCase; +import org.opensearch.lucene.queries.MinDocQuery; +import org.opensearch.search.DocValueFormat; +import org.opensearch.search.collapse.CollapseBuilder; +import org.opensearch.search.internal.ContextIndexSearcher; +import org.opensearch.search.internal.ScrollContext; +import org.opensearch.search.internal.SearchContext; +import org.opensearch.search.sort.SortAndFormats; +import org.opensearch.tasks.TaskCancelledException; +import org.opensearch.test.TestSearchContext; +import org.opensearch.threadpool.ThreadPool; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; + +import static org.opensearch.search.query.TopDocsCollectorContext.hasInfMaxScore; +import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import static org.mockito.Mockito.spy; + +public class QueryPhaseTests extends IndexShardTestCase { + + private IndexShard indexShard; + private final ExecutorService executor; + private final QueryPhaseSearcher queryPhaseSearcher; + + @ParametersFactory 
+ public static Collection concurrency() { + return Arrays.asList( + new Object[] { 0, QueryPhase.DEFAULT_QUERY_PHASE_SEARCHER }, + new Object[] { 5, new ConcurrentQueryPhaseSearcher() } + ); + } + + public QueryPhaseTests(int concurrency, QueryPhaseSearcher queryPhaseSearcher) { + this.executor = (concurrency > 0) ? Executors.newFixedThreadPool(concurrency) : null; + this.queryPhaseSearcher = queryPhaseSearcher; + } + + @Override + public Settings threadPoolSettings() { + return Settings.builder().put(super.threadPoolSettings()).put("thread_pool.search.min_queue_size", 10).build(); + } + + @Override + public void setUp() throws Exception { + super.setUp(); + indexShard = newShard(true); + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + closeShards(indexShard); + + if (executor != null) { + ThreadPool.terminate(executor, 10, TimeUnit.SECONDS); + } + } + + private void countTestCase(Query query, IndexReader reader, boolean shouldCollectSearch, boolean shouldCollectCount) throws Exception { + ContextIndexSearcher searcher = shouldCollectSearch + ? newContextSearcher(reader, executor) + : newEarlyTerminationContextSearcher(reader, 0, executor); + TestSearchContext context = new TestSearchContext(null, indexShard, searcher); + context.parsedQuery(new ParsedQuery(query)); + context.setSize(0); + context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); + final boolean rescore = QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); + assertFalse(rescore); + + ContextIndexSearcher countSearcher = shouldCollectCount + ? 
newContextSearcher(reader, executor) + : newEarlyTerminationContextSearcher(reader, 0, executor); + assertEquals(countSearcher.count(query), context.queryResult().topDocs().topDocs.totalHits.value); + } + + private void countTestCase(boolean withDeletions) throws Exception { + Directory dir = newDirectory(); + IndexWriterConfig iwc = newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE); + RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); + final int numDocs = scaledRandomIntBetween(600, 900); + for (int i = 0; i < numDocs; ++i) { + Document doc = new Document(); + if (randomBoolean()) { + doc.add(new StringField("foo", "bar", Store.NO)); + doc.add(new SortedSetDocValuesField("foo", new BytesRef("bar"))); + doc.add(new SortedSetDocValuesField("docValuesOnlyField", new BytesRef("bar"))); + doc.add(new LatLonDocValuesField("latLonDVField", 1.0, 1.0)); + doc.add(new LatLonPoint("latLonDVField", 1.0, 1.0)); + } + if (randomBoolean()) { + doc.add(new StringField("foo", "baz", Store.NO)); + doc.add(new SortedSetDocValuesField("foo", new BytesRef("baz"))); + } + if (withDeletions && (rarely() || i == 0)) { + doc.add(new StringField("delete", "yes", Store.NO)); + } + w.addDocument(doc); + } + if (withDeletions) { + w.deleteDocuments(new Term("delete", "yes")); + } + final IndexReader reader = w.getReader(); + Query matchAll = new MatchAllDocsQuery(); + Query matchAllCsq = new ConstantScoreQuery(matchAll); + Query tq = new TermQuery(new Term("foo", "bar")); + Query tCsq = new ConstantScoreQuery(tq); + Query dvfeq = new DocValuesFieldExistsQuery("foo"); + Query dvfeq_points = new DocValuesFieldExistsQuery("latLonDVField"); + Query dvfeqCsq = new ConstantScoreQuery(dvfeq); + // field with doc-values but not indexed will need to collect + Query dvOnlyfeq = new DocValuesFieldExistsQuery("docValuesOnlyField"); + BooleanQuery bq = new BooleanQuery.Builder().add(matchAll, Occur.SHOULD).add(tq, Occur.MUST).build(); + + countTestCase(matchAll, reader, false, 
false); + countTestCase(matchAllCsq, reader, false, false); + countTestCase(tq, reader, withDeletions, withDeletions); + countTestCase(tCsq, reader, withDeletions, withDeletions); + countTestCase(dvfeq, reader, withDeletions, true); + countTestCase(dvfeq_points, reader, withDeletions, true); + countTestCase(dvfeqCsq, reader, withDeletions, true); + countTestCase(dvOnlyfeq, reader, true, true); + countTestCase(bq, reader, true, true); + reader.close(); + w.close(); + dir.close(); + } + + public void testCountWithoutDeletions() throws Exception { + countTestCase(false); + } + + public void testCountWithDeletions() throws Exception { + countTestCase(true); + } + + public void testPostFilterDisablesCountOptimization() throws Exception { + Directory dir = newDirectory(); + final Sort sort = new Sort(new SortField("rank", SortField.Type.INT)); + IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort); + RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); + Document doc = new Document(); + w.addDocument(doc); + w.close(); + + IndexReader reader = DirectoryReader.open(dir); + + TestSearchContext context = new TestSearchContext(null, indexShard, newEarlyTerminationContextSearcher(reader, 0, executor)); + context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); + context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); + + QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); + assertEquals(1, context.queryResult().topDocs().topDocs.totalHits.value); + + context.setSearcher(newContextSearcher(reader, executor)); + context.parsedPostFilter(new ParsedQuery(new MatchNoDocsQuery())); + QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); + assertEquals(0, context.queryResult().topDocs().topDocs.totalHits.value); + reader.close(); + dir.close(); + } + + public void testTerminateAfterWithFilter() throws Exception { + Directory dir = newDirectory(); + final Sort sort = new 
Sort(new SortField("rank", SortField.Type.INT)); + IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort); + RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); + Document doc = new Document(); + for (int i = 0; i < 10; i++) { + doc.add(new StringField("foo", Integer.toString(i), Store.NO)); + } + w.addDocument(doc); + w.close(); + + IndexReader reader = DirectoryReader.open(dir); + + TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader, executor)); + context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); + + context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); + context.terminateAfter(1); + context.setSize(10); + for (int i = 0; i < 10; i++) { + context.parsedPostFilter(new ParsedQuery(new TermQuery(new Term("foo", Integer.toString(i))))); + QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); + assertEquals(1, context.queryResult().topDocs().topDocs.totalHits.value); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); + } + reader.close(); + dir.close(); + } + + public void testMinScoreDisablesCountOptimization() throws Exception { + Directory dir = newDirectory(); + final Sort sort = new Sort(new SortField("rank", SortField.Type.INT)); + IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort); + RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); + Document doc = new Document(); + w.addDocument(doc); + w.close(); + + IndexReader reader = DirectoryReader.open(dir); + TestSearchContext context = new TestSearchContext(null, indexShard, newEarlyTerminationContextSearcher(reader, 0, executor)); + context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); + context.setSize(0); + context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); + QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); + assertEquals(1, 
context.queryResult().topDocs().topDocs.totalHits.value); + + context.minimumScore(100); + QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); + assertEquals(0, context.queryResult().topDocs().topDocs.totalHits.value); + assertEquals(TotalHits.Relation.EQUAL_TO, context.queryResult().topDocs().topDocs.totalHits.relation); + reader.close(); + dir.close(); + } + + public void testQueryCapturesThreadPoolStats() throws Exception { + Directory dir = newDirectory(); + IndexWriterConfig iwc = newIndexWriterConfig(); + RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); + final int numDocs = scaledRandomIntBetween(600, 900); + for (int i = 0; i < numDocs; ++i) { + w.addDocument(new Document()); + } + w.close(); + IndexReader reader = DirectoryReader.open(dir); + TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader, executor)); + context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); + context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); + + QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); + QuerySearchResult results = context.queryResult(); + assertThat(results.serviceTimeEWMA(), greaterThanOrEqualTo(0L)); + assertThat(results.nodeQueueSize(), greaterThanOrEqualTo(0)); + reader.close(); + dir.close(); + } + + public void testInOrderScrollOptimization() throws Exception { + Directory dir = newDirectory(); + final Sort sort = new Sort(new SortField("rank", SortField.Type.INT)); + IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort); + RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); + final int numDocs = scaledRandomIntBetween(600, 900); + for (int i = 0; i < numDocs; ++i) { + w.addDocument(new Document()); + } + w.close(); + IndexReader reader = DirectoryReader.open(dir); + ScrollContext scrollContext = new ScrollContext(); + TestSearchContext context = new TestSearchContext(null, indexShard, 
newContextSearcher(reader, executor), scrollContext); + context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); + scrollContext.lastEmittedDoc = null; + scrollContext.maxScore = Float.NaN; + scrollContext.totalHits = null; + context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); + int size = randomIntBetween(2, 5); + context.setSize(size); + + QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); + assertNull(context.queryResult().terminatedEarly()); + assertThat(context.terminateAfter(), equalTo(0)); + assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs)); + + context.setSearcher(newEarlyTerminationContextSearcher(reader, size, executor)); + QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); + assertThat(context.terminateAfter(), equalTo(size)); + assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0].doc, greaterThanOrEqualTo(size)); + reader.close(); + dir.close(); + } + + public void testTerminateAfterEarlyTermination() throws Exception { + Directory dir = newDirectory(); + IndexWriterConfig iwc = newIndexWriterConfig(); + RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); + final int numDocs = scaledRandomIntBetween(600, 900); + for (int i = 0; i < numDocs; ++i) { + Document doc = new Document(); + if (randomBoolean()) { + doc.add(new StringField("foo", "bar", Store.NO)); + } + if (randomBoolean()) { + doc.add(new StringField("foo", "baz", Store.NO)); + } + doc.add(new NumericDocValuesField("rank", numDocs - i)); + w.addDocument(doc); + } + w.close(); + final IndexReader reader = DirectoryReader.open(dir); + TestSearchContext context = new 
TestSearchContext(null, indexShard, newContextSearcher(reader, executor)); + context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); + context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); + + context.terminateAfter(numDocs); + { + context.setSize(10); + final TestTotalHitCountCollectorManager manager = TestTotalHitCountCollectorManager.create(executor); + context.queryCollectorManagers().put(TotalHitCountCollector.class, manager); + QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); + assertFalse(context.queryResult().terminatedEarly()); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(10)); + assertThat(manager.getTotalHits(), equalTo(numDocs)); + } + + context.terminateAfter(1); + { + context.setSize(1); + QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); + assertTrue(context.queryResult().terminatedEarly()); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); + + context.setSize(0); + QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); + assertTrue(context.queryResult().terminatedEarly()); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); + } + + { + context.setSize(1); + QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); + assertTrue(context.queryResult().terminatedEarly()); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); + } + { + context.setSize(1); + BooleanQuery bq = new BooleanQuery.Builder().add(new TermQuery(new Term("foo", 
"bar")), Occur.SHOULD) + .add(new TermQuery(new Term("foo", "baz")), Occur.SHOULD) + .build(); + context.parsedQuery(new ParsedQuery(bq)); + QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); + assertTrue(context.queryResult().terminatedEarly()); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); + + context.setSize(0); + context.parsedQuery(new ParsedQuery(bq)); + QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); + assertTrue(context.queryResult().terminatedEarly()); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); + } + { + context.setSize(1); + final TestTotalHitCountCollectorManager manager = TestTotalHitCountCollectorManager.create(executor, 1); + context.queryCollectorManagers().put(TotalHitCountCollector.class, manager); + QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); + assertTrue(context.queryResult().terminatedEarly()); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); + assertThat(manager.getTotalHits(), equalTo(1)); + context.queryCollectorManagers().clear(); + } + { + context.setSize(0); + final TestTotalHitCountCollectorManager manager = TestTotalHitCountCollectorManager.create(executor, 1); + context.queryCollectorManagers().put(TotalHitCountCollector.class, manager); + QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); + assertTrue(context.queryResult().terminatedEarly()); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); + assertThat(manager.getTotalHits(), 
equalTo(1)); + } + + // tests with trackTotalHits and terminateAfter + context.terminateAfter(10); + context.setSize(0); + for (int trackTotalHits : new int[] { -1, 3, 76, 100 }) { + context.trackTotalHitsUpTo(trackTotalHits); + final TestTotalHitCountCollectorManager manager = TestTotalHitCountCollectorManager.create(executor); + context.queryCollectorManagers().put(TotalHitCountCollector.class, manager); + QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); + assertTrue(context.queryResult().terminatedEarly()); + if (trackTotalHits == -1) { + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(0L)); + } else { + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) Math.min(trackTotalHits, 10))); + } + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); + // The concurrent search terminates the collection when the number of hits is reached by each + // concurrent collector. In this case, in general, the number of results are multiplied by the number of + // slices (as the unit of concurrency). To address that, we have to use the shared global state, + // much as HitsThresholdChecker does. 
+ if (executor == null) { + assertThat(manager.getTotalHits(), equalTo(10)); + } + } + + context.terminateAfter(7); + context.setSize(10); + for (int trackTotalHits : new int[] { -1, 3, 75, 100 }) { + context.trackTotalHitsUpTo(trackTotalHits); + QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); + assertTrue(context.queryResult().terminatedEarly()); + if (trackTotalHits == -1) { + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(0L)); + } else { + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(7L)); + } + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(7)); + } + reader.close(); + dir.close(); + } + + public void testIndexSortingEarlyTermination() throws Exception { + Directory dir = newDirectory(); + final Sort sort = new Sort(new SortField("rank", SortField.Type.INT)); + IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort); + RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); + final int numDocs = scaledRandomIntBetween(600, 900); + for (int i = 0; i < numDocs; ++i) { + Document doc = new Document(); + if (randomBoolean()) { + doc.add(new StringField("foo", "bar", Store.NO)); + } + if (randomBoolean()) { + doc.add(new StringField("foo", "baz", Store.NO)); + } + doc.add(new NumericDocValuesField("rank", numDocs - i)); + w.addDocument(doc); + } + w.close(); + + final IndexReader reader = DirectoryReader.open(dir); + TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader, executor)); + context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); + context.setSize(1); + context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); + context.sort(new SortAndFormats(sort, new DocValueFormat[] { DocValueFormat.RAW })); + + QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); + 
assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class)); + FieldDoc fieldDoc = (FieldDoc) context.queryResult().topDocs().topDocs.scoreDocs[0]; + assertThat(fieldDoc.fields[0], equalTo(1)); + + { + context.parsedPostFilter(new ParsedQuery(new MinDocQuery(1))); + QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); + assertNull(context.queryResult().terminatedEarly()); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(numDocs - 1L)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class)); + assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2))); + context.parsedPostFilter(null); + + final TestTotalHitCountCollectorManager manager = TestTotalHitCountCollectorManager.create(executor, sort); + context.queryCollectorManagers().put(TotalHitCountCollector.class, manager); + QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); + assertNull(context.queryResult().terminatedEarly()); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class)); + assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2))); + // When searching concurrently, each executors short-circuits when "size" is reached, + // including total hits collector + assertThat(manager.getTotalHits(), lessThanOrEqualTo(numDocs)); + + context.queryCollectorManagers().clear(); + } + + { + context.setSearcher(newEarlyTerminationContextSearcher(reader, 1, executor)); + 
context.trackTotalHitsUpTo(SearchContext.TRACK_TOTAL_HITS_DISABLED); + QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); + assertNull(context.queryResult().terminatedEarly()); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class)); + assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2))); + + QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); + assertNull(context.queryResult().terminatedEarly()); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class)); + assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2))); + } + reader.close(); + dir.close(); + } + + public void testIndexSortScrollOptimization() throws Exception { + Directory dir = newDirectory(); + final Sort indexSort = new Sort(new SortField("rank", SortField.Type.INT), new SortField("tiebreaker", SortField.Type.INT)); + IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(indexSort); + RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); + final int numDocs = scaledRandomIntBetween(600, 900); + for (int i = 0; i < numDocs; ++i) { + Document doc = new Document(); + doc.add(new NumericDocValuesField("rank", random().nextInt())); + doc.add(new NumericDocValuesField("tiebreaker", i)); + w.addDocument(doc); + } + if (randomBoolean()) { + w.forceMerge(randomIntBetween(1, 10)); + } + w.close(); + + final IndexReader reader = DirectoryReader.open(dir); + List searchSortAndFormats = new ArrayList<>(); + searchSortAndFormats.add(new SortAndFormats(indexSort, new DocValueFormat[] { DocValueFormat.RAW, DocValueFormat.RAW })); + // search sort is a prefix of the index sort + searchSortAndFormats.add(new SortAndFormats(new Sort(indexSort.getSort()[0]), new DocValueFormat[] { 
DocValueFormat.RAW })); + for (SortAndFormats searchSortAndFormat : searchSortAndFormats) { + ScrollContext scrollContext = new ScrollContext(); + TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader, executor), scrollContext); + context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); + scrollContext.lastEmittedDoc = null; + scrollContext.maxScore = Float.NaN; + scrollContext.totalHits = null; + context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); + context.setSize(10); + context.sort(searchSortAndFormat); + + QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); + assertNull(context.queryResult().terminatedEarly()); + assertThat(context.terminateAfter(), equalTo(0)); + assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs)); + int sizeMinus1 = context.queryResult().topDocs().topDocs.scoreDocs.length - 1; + FieldDoc lastDoc = (FieldDoc) context.queryResult().topDocs().topDocs.scoreDocs[sizeMinus1]; + + context.setSearcher(newEarlyTerminationContextSearcher(reader, 10, executor)); + QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); + assertNull(context.queryResult().terminatedEarly()); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); + assertThat(context.terminateAfter(), equalTo(0)); + assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs)); + FieldDoc firstDoc = (FieldDoc) context.queryResult().topDocs().topDocs.scoreDocs[0]; + for (int i = 0; i < searchSortAndFormat.sort.getSort().length; i++) { + @SuppressWarnings("unchecked") + FieldComparator comparator = (FieldComparator) searchSortAndFormat.sort.getSort()[i].getComparator( + i, + false + ); + int cmp = comparator.compareValues(firstDoc.fields[i], lastDoc.fields[i]); + if (cmp == 0) { + 
continue; + } + assertThat(cmp, equalTo(1)); + break; + } + } + reader.close(); + dir.close(); + } + + public void testDisableTopScoreCollection() throws Exception { + Directory dir = newDirectory(); + IndexWriterConfig iwc = newIndexWriterConfig(new StandardAnalyzer()); + RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); + Document doc = new Document(); + final int numDocs = 2 * scaledRandomIntBetween(50, 450); + for (int i = 0; i < numDocs; i++) { + doc.clear(); + if (i % 2 == 0) { + doc.add(new TextField("title", "foo bar", Store.NO)); + } else { + doc.add(new TextField("title", "foo", Store.NO)); + } + w.addDocument(doc); + } + w.close(); + + IndexReader reader = DirectoryReader.open(dir); + TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader, executor)); + context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); + Query q = new SpanNearQuery.Builder("title", true).addClause(new SpanTermQuery(new Term("title", "foo"))) + .addClause(new SpanTermQuery(new Term("title", "bar"))) + .build(); + + context.parsedQuery(new ParsedQuery(q)); + context.setSize(3); + context.trackTotalHitsUpTo(3); + TopDocsCollectorContext topDocsContext = TopDocsCollectorContext.createTopDocsCollectorContext(context, false); + assertEquals(topDocsContext.create(null).scoreMode(), org.apache.lucene.search.ScoreMode.COMPLETE); + QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); + assertEquals(numDocs / 2, context.queryResult().topDocs().topDocs.totalHits.value); + assertEquals(context.queryResult().topDocs().topDocs.totalHits.relation, TotalHits.Relation.EQUAL_TO); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(3)); + + context.sort(new SortAndFormats(new Sort(new SortField("other", SortField.Type.INT)), new DocValueFormat[] { DocValueFormat.RAW })); + topDocsContext = TopDocsCollectorContext.createTopDocsCollectorContext(context, false); + 
assertEquals(topDocsContext.create(null).scoreMode(), org.apache.lucene.search.ScoreMode.TOP_DOCS); + QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); + assertEquals(numDocs / 2, context.queryResult().topDocs().topDocs.totalHits.value); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(3)); + assertEquals(context.queryResult().topDocs().topDocs.totalHits.relation, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO); + + reader.close(); + dir.close(); + } + + public void testEnhanceSortOnNumeric() throws Exception { + final String fieldNameLong = "long-field"; + final String fieldNameDate = "date-field"; + MappedFieldType fieldTypeLong = new NumberFieldMapper.NumberFieldType(fieldNameLong, NumberFieldMapper.NumberType.LONG); + MappedFieldType fieldTypeDate = new DateFieldMapper.DateFieldType(fieldNameDate); + MapperService mapperService = mock(MapperService.class); + when(mapperService.fieldType(fieldNameLong)).thenReturn(fieldTypeLong); + when(mapperService.fieldType(fieldNameDate)).thenReturn(fieldTypeDate); + // enough docs to have a tree with several leaf nodes + final int numDocs = 3500 * 5; + Directory dir = newDirectory(); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(null)); + long firstValue = randomLongBetween(-10000000L, 10000000L); + long longValue = firstValue; + long dateValue = randomLongBetween(0, 3000000000000L); + for (int i = 1; i <= numDocs; ++i) { + Document doc = new Document(); + + doc.add(new LongPoint(fieldNameLong, longValue)); + doc.add(new NumericDocValuesField(fieldNameLong, longValue)); + + doc.add(new LongPoint(fieldNameDate, dateValue)); + doc.add(new NumericDocValuesField(fieldNameDate, dateValue)); + writer.addDocument(doc); + longValue++; + dateValue++; + if (i % 3500 == 0) writer.commit(); + } + writer.close(); + final IndexReader reader = DirectoryReader.open(dir); + final SortField sortFieldLong = new SortField(fieldNameLong, SortField.Type.LONG); + 
sortFieldLong.setMissingValue(Long.MAX_VALUE); + final SortField sortFieldDate = new SortField(fieldNameDate, SortField.Type.LONG); + sortFieldDate.setMissingValue(Long.MAX_VALUE); + DocValueFormat dateFormat = fieldTypeDate.docValueFormat(null, null); + final Sort longSort = new Sort(sortFieldLong); + final Sort longDateSort = new Sort(sortFieldLong, sortFieldDate); + final Sort dateSort = new Sort(sortFieldDate); + final Sort dateLongSort = new Sort(sortFieldDate, sortFieldLong); + SortAndFormats longSortAndFormats = new SortAndFormats(longSort, new DocValueFormat[] { DocValueFormat.RAW }); + SortAndFormats longDateSortAndFormats = new SortAndFormats(longDateSort, new DocValueFormat[] { DocValueFormat.RAW, dateFormat }); + SortAndFormats dateSortAndFormats = new SortAndFormats(dateSort, new DocValueFormat[] { dateFormat }); + SortAndFormats dateLongSortAndFormats = new SortAndFormats(dateLongSort, new DocValueFormat[] { dateFormat, DocValueFormat.RAW }); + ParsedQuery query = new ParsedQuery(new MatchAllDocsQuery()); + SearchShardTask task = new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()); + + // 1. Test a sort on long field + { + TestSearchContext searchContext = spy(new TestSearchContext(null, indexShard, newContextSearcher(reader, executor))); + when(searchContext.mapperService()).thenReturn(mapperService); + searchContext.sort(longSortAndFormats); + searchContext.parsedQuery(query); + searchContext.setTask(task); + searchContext.setSize(10); + QueryPhase.executeInternal(searchContext.withCleanQueryResult(), queryPhaseSearcher); + assertSortResults(searchContext.queryResult().topDocs().topDocs, (long) numDocs, false); + } + + // 2. 
Test a sort on long field + date field + { + TestSearchContext searchContext = spy(new TestSearchContext(null, indexShard, newContextSearcher(reader, executor))); + when(searchContext.mapperService()).thenReturn(mapperService); + searchContext.sort(longDateSortAndFormats); + searchContext.parsedQuery(query); + searchContext.setTask(task); + searchContext.setSize(10); + QueryPhase.executeInternal(searchContext.withCleanQueryResult(), queryPhaseSearcher); + assertSortResults(searchContext.queryResult().topDocs().topDocs, (long) numDocs, true); + } + + // 3. Test a sort on date field + { + TestSearchContext searchContext = spy(new TestSearchContext(null, indexShard, newContextSearcher(reader, executor))); + when(searchContext.mapperService()).thenReturn(mapperService); + searchContext.sort(dateSortAndFormats); + searchContext.parsedQuery(query); + searchContext.setTask(task); + searchContext.setSize(10); + QueryPhase.executeInternal(searchContext.withCleanQueryResult(), queryPhaseSearcher); + assertSortResults(searchContext.queryResult().topDocs().topDocs, (long) numDocs, false); + } + + // 4. Test a sort on date field + long field + { + TestSearchContext searchContext = spy(new TestSearchContext(null, indexShard, newContextSearcher(reader, executor))); + when(searchContext.mapperService()).thenReturn(mapperService); + searchContext.sort(dateLongSortAndFormats); + searchContext.parsedQuery(query); + searchContext.setTask(task); + searchContext.setSize(10); + QueryPhase.executeInternal(searchContext.withCleanQueryResult(), queryPhaseSearcher); + assertSortResults(searchContext.queryResult().topDocs().topDocs, (long) numDocs, true); + } + + // 5. 
Test that sort optimization is run when from > 0 and size = 0 + { + TestSearchContext searchContext = spy(new TestSearchContext(null, indexShard, newContextSearcher(reader, executor))); + when(searchContext.mapperService()).thenReturn(mapperService); + searchContext.sort(longSortAndFormats); + searchContext.parsedQuery(query); + searchContext.setTask(task); + searchContext.from(5); + searchContext.setSize(0); + QueryPhase.executeInternal(searchContext.withCleanQueryResult(), queryPhaseSearcher); + assertSortResults(searchContext.queryResult().topDocs().topDocs, (long) numDocs, false); + } + + // 6. Test that sort optimization works with from = 0 and size= 0 + { + TestSearchContext searchContext = spy(new TestSearchContext(null, indexShard, newContextSearcher(reader, executor))); + when(searchContext.mapperService()).thenReturn(mapperService); + searchContext.sort(longSortAndFormats); + searchContext.parsedQuery(query); + searchContext.setTask(task); + searchContext.setSize(0); + QueryPhase.executeInternal(searchContext.withCleanQueryResult(), queryPhaseSearcher); + } + + // 7. 
Test that sort optimization works with search after + { + TestSearchContext searchContext = spy(new TestSearchContext(null, indexShard, newContextSearcher(reader, executor))); + when(searchContext.mapperService()).thenReturn(mapperService); + int afterDocument = (int) randomLongBetween(0, 50); + long afterValue = firstValue + afterDocument; + FieldDoc after = new FieldDoc(afterDocument, Float.NaN, new Long[] { afterValue }); + searchContext.searchAfter(after); + searchContext.sort(longSortAndFormats); + searchContext.parsedQuery(query); + searchContext.setTask(task); + searchContext.setSize(10); + QueryPhase.executeInternal(searchContext.withCleanQueryResult(), queryPhaseSearcher); + final TopDocs topDocs = searchContext.queryResult().topDocs().topDocs; + long topValue = (long) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]; + assertThat(topValue, greaterThan(afterValue)); + assertSortResults(topDocs, (long) numDocs, false); + + final TotalHits totalHits = topDocs.totalHits; + assertEquals(TotalHits.Relation.EQUAL_TO, totalHits.relation); + assertEquals(numDocs, totalHits.value); + } + + reader.close(); + dir.close(); + } + + public void testMaxScoreQueryVisitor() { + BitSetProducer producer = context -> new FixedBitSet(1); + Query query = new OpenSearchToParentBlockJoinQuery(new MatchAllDocsQuery(), producer, ScoreMode.Avg, "nested"); + assertTrue(hasInfMaxScore(query)); + + query = new OpenSearchToParentBlockJoinQuery(new MatchAllDocsQuery(), producer, ScoreMode.None, "nested"); + assertFalse(hasInfMaxScore(query)); + + for (Occur occur : Occur.values()) { + query = new BooleanQuery.Builder().add( + new OpenSearchToParentBlockJoinQuery(new MatchAllDocsQuery(), producer, ScoreMode.Avg, "nested"), + occur + ).build(); + if (occur == Occur.MUST) { + assertTrue(hasInfMaxScore(query)); + } else { + assertFalse(hasInfMaxScore(query)); + } + + query = new BooleanQuery.Builder().add( + new BooleanQuery.Builder().add( + new OpenSearchToParentBlockJoinQuery(new 
MatchAllDocsQuery(), producer, ScoreMode.Avg, "nested"), + occur + ).build(), + occur + ).build(); + if (occur == Occur.MUST) { + assertTrue(hasInfMaxScore(query)); + } else { + assertFalse(hasInfMaxScore(query)); + } + + query = new BooleanQuery.Builder().add( + new BooleanQuery.Builder().add( + new OpenSearchToParentBlockJoinQuery(new MatchAllDocsQuery(), producer, ScoreMode.Avg, "nested"), + occur + ).build(), + Occur.FILTER + ).build(); + assertFalse(hasInfMaxScore(query)); + + query = new BooleanQuery.Builder().add( + new BooleanQuery.Builder().add(new SpanTermQuery(new Term("field", "foo")), occur) + .add(new OpenSearchToParentBlockJoinQuery(new MatchAllDocsQuery(), producer, ScoreMode.Avg, "nested"), occur) + .build(), + occur + ).build(); + if (occur == Occur.MUST) { + assertTrue(hasInfMaxScore(query)); + } else { + assertFalse(hasInfMaxScore(query)); + } + } + } + + // assert score docs are in order and their number is as expected + private void assertSortResults(TopDocs topDocs, long expectedNumDocs, boolean isDoubleSort) { + if (topDocs.totalHits.relation == TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO) { + assertThat(topDocs.totalHits.value, lessThanOrEqualTo(expectedNumDocs)); + } else { + assertEquals(topDocs.totalHits.value, expectedNumDocs); + } + long cur1, cur2; + long prev1 = Long.MIN_VALUE; + long prev2 = Long.MIN_VALUE; + for (ScoreDoc scoreDoc : topDocs.scoreDocs) { + cur1 = (long) ((FieldDoc) scoreDoc).fields[0]; + assertThat(cur1, greaterThanOrEqualTo(prev1)); // test that docs are properly sorted on the first sort + if (isDoubleSort) { + cur2 = (long) ((FieldDoc) scoreDoc).fields[1]; + if (cur1 == prev1) { + assertThat(cur2, greaterThanOrEqualTo(prev2)); // test that docs are properly sorted on the secondary sort + } + prev2 = cur2; + } + prev1 = cur1; + } + } + + public void testMinScore() throws Exception { + Directory dir = newDirectory(); + IndexWriterConfig iwc = newIndexWriterConfig(); + RandomIndexWriter w = new 
RandomIndexWriter(random(), dir, iwc); + for (int i = 0; i < 10; i++) { + Document doc = new Document(); + doc.add(new StringField("foo", "bar", Store.NO)); + doc.add(new StringField("filter", "f1", Store.NO)); + w.addDocument(doc); + } + w.close(); + + IndexReader reader = DirectoryReader.open(dir); + TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader, executor)); + context.parsedQuery( + new ParsedQuery( + new BooleanQuery.Builder().add(new TermQuery(new Term("foo", "bar")), Occur.MUST) + .add(new TermQuery(new Term("filter", "f1")), Occur.SHOULD) + .build() + ) + ); + context.minimumScore(0.01f); + context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); + context.setSize(1); + context.trackTotalHitsUpTo(5); + + QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); + assertEquals(10, context.queryResult().topDocs().topDocs.totalHits.value); + + reader.close(); + dir.close(); + } + + public void testMaxScore() throws Exception { + Directory dir = newDirectory(); + final Sort sort = new Sort(new SortField("filter", SortField.Type.STRING)); + IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort); + RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); + + final int numDocs = scaledRandomIntBetween(600, 900); + for (int i = 0; i < numDocs; i++) { + Document doc = new Document(); + doc.add(new StringField("foo", "bar", Store.NO)); + doc.add(new StringField("filter", "f1" + ((i > 0) ? " " + Integer.toString(i) : ""), Store.NO)); + doc.add(new SortedDocValuesField("filter", newBytesRef("f1" + ((i > 0) ? 
" " + Integer.toString(i) : "")))); + w.addDocument(doc); + } + w.close(); + + IndexReader reader = DirectoryReader.open(dir); + TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader, executor)); + context.trackScores(true); + context.parsedQuery( + new ParsedQuery( + new BooleanQuery.Builder().add(new TermQuery(new Term("foo", "bar")), Occur.MUST) + .add(new TermQuery(new Term("filter", "f1")), Occur.SHOULD) + .build() + ) + ); + context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); + context.setSize(1); + context.trackTotalHitsUpTo(5); + + QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); + assertFalse(Float.isNaN(context.queryResult().getMaxScore())); + assertEquals(1, context.queryResult().topDocs().topDocs.scoreDocs.length); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, greaterThanOrEqualTo(6L)); + + context.sort(new SortAndFormats(sort, new DocValueFormat[] { DocValueFormat.RAW })); + QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); + assertFalse(Float.isNaN(context.queryResult().getMaxScore())); + assertEquals(1, context.queryResult().topDocs().topDocs.scoreDocs.length); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, greaterThanOrEqualTo(6L)); + + context.trackScores(false); + QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); + assertTrue(Float.isNaN(context.queryResult().getMaxScore())); + assertEquals(1, context.queryResult().topDocs().topDocs.scoreDocs.length); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, greaterThanOrEqualTo(6L)); + + reader.close(); + dir.close(); + } + + public void testCollapseQuerySearchResults() throws Exception { + Directory dir = newDirectory(); + final Sort sort = new Sort(new SortField("user", SortField.Type.INT)); + IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort); + 
RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); + + // Always end up with uneven buckets so collapsing is predictable + final int numDocs = 2 * scaledRandomIntBetween(600, 900) - 1; + for (int i = 0; i < numDocs; i++) { + Document doc = new Document(); + doc.add(new StringField("foo", "bar", Store.NO)); + doc.add(new NumericDocValuesField("user", i & 1)); + w.addDocument(doc); + } + w.close(); + + IndexReader reader = DirectoryReader.open(dir); + QueryShardContext queryShardContext = mock(QueryShardContext.class); + when(queryShardContext.fieldMapper("user")).thenReturn( + new NumberFieldType("user", NumberType.INTEGER, true, false, true, false, null, Collections.emptyMap()) + ); + + TestSearchContext context = new TestSearchContext(queryShardContext, indexShard, newContextSearcher(reader, executor)); + context.collapse(new CollapseBuilder("user").build(context.getQueryShardContext())); + context.trackScores(true); + context.parsedQuery(new ParsedQuery(new TermQuery(new Term("foo", "bar")))); + context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); + context.setSize(2); + context.trackTotalHitsUpTo(5); + + QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); + assertFalse(Float.isNaN(context.queryResult().getMaxScore())); + assertEquals(2, context.queryResult().topDocs().topDocs.scoreDocs.length); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); + assertThat(context.queryResult().topDocs().topDocs, instanceOf(CollapseTopFieldDocs.class)); + + CollapseTopFieldDocs topDocs = (CollapseTopFieldDocs) context.queryResult().topDocs().topDocs; + assertThat(topDocs.collapseValues.length, equalTo(2)); + assertThat(topDocs.collapseValues[0], equalTo(0L)); // user == 0 + assertThat(topDocs.collapseValues[1], equalTo(1L)); // user == 1 + + context.sort(new SortAndFormats(sort, new DocValueFormat[] { DocValueFormat.RAW })); + 
QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); + assertFalse(Float.isNaN(context.queryResult().getMaxScore())); + assertEquals(2, context.queryResult().topDocs().topDocs.scoreDocs.length); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); + assertThat(context.queryResult().topDocs().topDocs, instanceOf(CollapseTopFieldDocs.class)); + + topDocs = (CollapseTopFieldDocs) context.queryResult().topDocs().topDocs; + assertThat(topDocs.collapseValues.length, equalTo(2)); + assertThat(topDocs.collapseValues[0], equalTo(0L)); // user == 0 + assertThat(topDocs.collapseValues[1], equalTo(1L)); // user == 1 + + context.trackScores(false); + QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); + assertTrue(Float.isNaN(context.queryResult().getMaxScore())); + assertEquals(2, context.queryResult().topDocs().topDocs.scoreDocs.length); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); + assertThat(context.queryResult().topDocs().topDocs, instanceOf(CollapseTopFieldDocs.class)); + + topDocs = (CollapseTopFieldDocs) context.queryResult().topDocs().topDocs; + assertThat(topDocs.collapseValues.length, equalTo(2)); + assertThat(topDocs.collapseValues[0], equalTo(0L)); // user == 0 + assertThat(topDocs.collapseValues[1], equalTo(1L)); // user == 1 + + reader.close(); + dir.close(); + } + + public void testCancellationDuringPreprocess() throws IOException { + try (Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter(random(), dir, newIndexWriterConfig())) { + + for (int i = 0; i < 10; i++) { + Document doc = new Document(); + StringBuilder sb = new StringBuilder(); + for (int j = 0; j < i; j++) { + sb.append('a'); + } + doc.add(new StringField("foo", sb.toString(), Store.NO)); + w.addDocument(doc); + } + w.flush(); + w.close(); + + try (IndexReader reader = DirectoryReader.open(dir)) { + TestSearchContext context = 
new TestSearchContextWithRewriteAndCancellation( + null, + indexShard, + newContextSearcher(reader, executor) + ); + PrefixQuery prefixQuery = new PrefixQuery(new Term("foo", "a")); + prefixQuery.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_REWRITE); + context.parsedQuery(new ParsedQuery(prefixQuery)); + SearchShardTask task = mock(SearchShardTask.class); + when(task.isCancelled()).thenReturn(true); + context.setTask(task); + expectThrows(TaskCancelledException.class, () -> new QueryPhase().preProcess(context)); + } + } + } + + private static class TestSearchContextWithRewriteAndCancellation extends TestSearchContext { + + private TestSearchContextWithRewriteAndCancellation( + QueryShardContext queryShardContext, + IndexShard indexShard, + ContextIndexSearcher searcher + ) { + super(queryShardContext, indexShard, searcher); + } + + @Override + public void preProcess(boolean rewrite) { + try { + searcher().rewrite(query()); + } catch (IOException e) { + fail("IOException shouldn't be thrown"); + } + } + + @Override + public boolean lowLevelCancellation() { + return true; + } + } + + private static ContextIndexSearcher newContextSearcher(IndexReader reader, ExecutorService executor) throws IOException { + return new ContextIndexSearcher( + reader, + IndexSearcher.getDefaultSimilarity(), + IndexSearcher.getDefaultQueryCache(), + IndexSearcher.getDefaultQueryCachingPolicy(), + true, + executor + ); + } + + private static ContextIndexSearcher newEarlyTerminationContextSearcher(IndexReader reader, int size, ExecutorService executor) + throws IOException { + return new ContextIndexSearcher( + reader, + IndexSearcher.getDefaultSimilarity(), + IndexSearcher.getDefaultQueryCache(), + IndexSearcher.getDefaultQueryCachingPolicy(), + true, + executor + ) { + + @Override + public void search(List leaves, Weight weight, Collector collector) throws IOException { + final Collector in = new AssertingEarlyTerminationFilterCollector(collector, size); + super.search(leaves, weight, 
in); + } + }; + } + + // used to check that numeric long or date sort optimization was run + private static ContextIndexSearcher newOptimizedContextSearcher(IndexReader reader, int queryType, ExecutorService executor) + throws IOException { + return new ContextIndexSearcher( + reader, + IndexSearcher.getDefaultSimilarity(), + IndexSearcher.getDefaultQueryCache(), + IndexSearcher.getDefaultQueryCachingPolicy(), + true, + executor + ) { + + @Override + public void search( + Query query, + CollectorManager manager, + QuerySearchResult result, + DocValueFormat[] formats, + TotalHits totalHits + ) throws IOException { + assertTrue(query instanceof BooleanQuery); + List clauses = ((BooleanQuery) query).clauses(); + assertTrue(clauses.size() == 2); + assertTrue(clauses.get(0).getOccur() == Occur.FILTER); + assertTrue(clauses.get(1).getOccur() == Occur.SHOULD); + if (queryType == 0) { + assertTrue( + clauses.get(1).getQuery().getClass() == LongPoint.newDistanceFeatureQuery("random_field", 1, 1, 1).getClass() + ); + } + if (queryType == 1) assertTrue(clauses.get(1).getQuery() instanceof DocValuesFieldExistsQuery); + super.search(query, manager, result, formats, totalHits); + } + + @Override + public void search( + List leaves, + Weight weight, + @SuppressWarnings("rawtypes") CollectorManager manager, + QuerySearchResult result, + DocValueFormat[] formats, + TotalHits totalHits + ) throws IOException { + final Query query = weight.getQuery(); + assertTrue(query instanceof BooleanQuery); + List clauses = ((BooleanQuery) query).clauses(); + assertTrue(clauses.size() == 2); + assertTrue(clauses.get(0).getOccur() == Occur.FILTER); + assertTrue(clauses.get(1).getOccur() == Occur.SHOULD); + if (queryType == 0) { + assertTrue( + clauses.get(1).getQuery().getClass() == LongPoint.newDistanceFeatureQuery("random_field", 1, 1, 1).getClass() + ); + } + if (queryType == 1) assertTrue(clauses.get(1).getQuery() instanceof DocValuesFieldExistsQuery); + super.search(leaves, weight, manager, 
result, formats, totalHits); + } + + @Override + public void search(List leaves, Weight weight, Collector collector) throws IOException { + if (getExecutor() == null) { + assert (false); // should not be there, expected to search with CollectorManager + } else { + super.search(leaves, weight, collector); + } + } + }; + } + + private static class TestTotalHitCountCollectorManager extends TotalHitCountCollectorManager { + private int totalHits; + private final TotalHitCountCollector collector; + private final Integer teminateAfter; + + static TestTotalHitCountCollectorManager create(final ExecutorService executor) { + return create(executor, null, null); + } + + static TestTotalHitCountCollectorManager create(final ExecutorService executor, final Integer teminateAfter) { + return create(executor, null, teminateAfter); + } + + static TestTotalHitCountCollectorManager create(final ExecutorService executor, final Sort sort) { + return create(executor, sort, null); + } + + static TestTotalHitCountCollectorManager create(final ExecutorService executor, final Sort sort, final Integer teminateAfter) { + if (executor == null) { + return new TestTotalHitCountCollectorManager(new TotalHitCountCollector(), sort); + } else { + return new TestTotalHitCountCollectorManager(sort, teminateAfter); + } + } + + private TestTotalHitCountCollectorManager(final TotalHitCountCollector collector, final Sort sort) { + super(sort); + this.collector = collector; + this.teminateAfter = null; + } + + private TestTotalHitCountCollectorManager(final Sort sort, final Integer teminateAfter) { + super(sort); + this.collector = null; + this.teminateAfter = teminateAfter; + } + + @Override + public TotalHitCountCollector newCollector() throws IOException { + return (collector == null) ? 
super.newCollector() : collector; + } + + @Override + public ReduceableSearchResult reduce(Collection collectors) throws IOException { + final ReduceableSearchResult result = super.reduce(collectors); + totalHits = collectors.stream().mapToInt(TotalHitCountCollector::getTotalHits).sum(); + + if (teminateAfter != null) { + assertThat(totalHits, greaterThanOrEqualTo(teminateAfter)); + totalHits = Math.min(totalHits, teminateAfter); + } + + return result; + } + + public int getTotalHits() { + return (collector == null) ? totalHits : collector.getTotalHits(); + } + } + + private static class AssertingEarlyTerminationFilterCollector extends FilterCollector { + private final int size; + + AssertingEarlyTerminationFilterCollector(Collector in, int size) { + super(in); + this.size = size; + } + + @Override + public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException { + final LeafCollector in = super.getLeafCollector(context); + return new FilterLeafCollector(in) { + int collected; + + @Override + public void collect(int doc) throws IOException { + assert collected <= size : "should not collect more than " + size + " doc per segment, got " + collected; + ++collected; + super.collect(doc); + } + }; + } + } +} diff --git a/sandbox/plugins/concurrent-search/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java b/sandbox/plugins/concurrent-search/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java new file mode 100644 index 0000000000000..d2cb77f529793 --- /dev/null +++ b/sandbox/plugins/concurrent-search/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java @@ -0,0 +1,1182 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.query; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.apache.lucene.analysis.standard.StandardAnalyzer; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field.Store; +import org.apache.lucene.document.NumericDocValuesField; +import org.apache.lucene.document.SortedDocValuesField; +import org.apache.lucene.document.StringField; +import org.apache.lucene.document.TextField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.Term; +import org.apache.lucene.queries.spans.SpanNearQuery; +import org.apache.lucene.queries.spans.SpanTermQuery; +import org.apache.lucene.search.BooleanClause.Occur; +import org.apache.lucene.search.grouping.CollapseTopFieldDocs; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.Collector; +import org.apache.lucene.search.FieldComparator; +import org.apache.lucene.search.FieldDoc; +import org.apache.lucene.search.FilterCollector; +import org.apache.lucene.search.FilterLeafCollector; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.LeafCollector; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.Sort; +import org.apache.lucene.search.SortField; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.TotalHits; +import org.apache.lucene.search.Weight; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.apache.lucene.store.Directory; +import org.opensearch.action.search.SearchShardTask; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.ToXContent; +import org.opensearch.common.xcontent.XContentBuilder; 
+import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.index.mapper.NumberFieldMapper.NumberFieldType; +import org.opensearch.index.mapper.NumberFieldMapper.NumberType; +import org.opensearch.index.query.ParsedQuery; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.IndexShardTestCase; +import org.opensearch.lucene.queries.MinDocQuery; +import org.opensearch.search.DocValueFormat; +import org.opensearch.search.collapse.CollapseBuilder; +import org.opensearch.search.internal.ContextIndexSearcher; +import org.opensearch.search.internal.ScrollContext; +import org.opensearch.search.internal.SearchContext; +import org.opensearch.search.profile.ProfileResult; +import org.opensearch.search.profile.ProfileShardResult; +import org.opensearch.search.profile.SearchProfileShardResults; +import org.opensearch.search.profile.query.CollectorResult; +import org.opensearch.search.profile.query.QueryProfileShardResult; +import org.opensearch.search.sort.SortAndFormats; +import org.opensearch.test.TestSearchContext; +import org.opensearch.threadpool.ThreadPool; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.function.Consumer; + +import static org.hamcrest.CoreMatchers.not; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static 
org.hamcrest.Matchers.instanceOf; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import static org.hamcrest.Matchers.hasSize; + +public class QueryProfilePhaseTests extends IndexShardTestCase { + + private IndexShard indexShard; + private final ExecutorService executor; + private final QueryPhaseSearcher queryPhaseSearcher; + + @ParametersFactory + public static Collection concurrency() { + return Arrays.asList( + new Object[] { 0, QueryPhase.DEFAULT_QUERY_PHASE_SEARCHER }, + new Object[] { 5, new ConcurrentQueryPhaseSearcher() } + ); + } + + public QueryProfilePhaseTests(int concurrency, QueryPhaseSearcher queryPhaseSearcher) { + this.executor = (concurrency > 0) ? Executors.newFixedThreadPool(concurrency) : null; + this.queryPhaseSearcher = queryPhaseSearcher; + } + + @Override + public Settings threadPoolSettings() { + return Settings.builder().put(super.threadPoolSettings()).put("thread_pool.search.min_queue_size", 10).build(); + } + + @Override + public void setUp() throws Exception { + super.setUp(); + indexShard = newShard(true); + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + closeShards(indexShard); + + if (executor != null) { + ThreadPool.terminate(executor, 10, TimeUnit.SECONDS); + } + } + + public void testPostFilterDisablesCountOptimization() throws Exception { + Directory dir = newDirectory(); + final Sort sort = new Sort(new SortField("rank", SortField.Type.INT)); + IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort); + RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); + Document doc = new Document(); + w.addDocument(doc); + w.close(); + + IndexReader reader = DirectoryReader.open(dir); + + TestSearchContext context = new TestSearchContext(null, indexShard, newEarlyTerminationContextSearcher(reader, 0, executor)); + context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); + context.parsedQuery(new ParsedQuery(new 
MatchAllDocsQuery())); + + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher); + assertEquals(1, context.queryResult().topDocs().topDocs.totalHits.value); + assertProfileData(context, "MatchAllDocsQuery", query -> { + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }, collector -> { + assertThat(collector.getReason(), equalTo("search_count")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), empty()); + }); + + context.setSearcher(newContextSearcher(reader, executor)); + context.parsedPostFilter(new ParsedQuery(new MatchNoDocsQuery())); + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher); + assertEquals(0, context.queryResult().topDocs().topDocs.totalHits.value); + assertProfileData(context, collector -> { + assertThat(collector.getReason(), equalTo("search_post_filter")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), hasSize(1)); + assertThat(collector.getProfiledChildren().get(0).getReason(), equalTo("search_count")); + assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L)); + }, (query) -> { + assertThat(query.getQueryName(), equalTo("MatchNoDocsQuery")); + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }, (query) -> { + 
assertThat(query.getQueryName(), equalTo("MatchAllDocsQuery")); + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }); + + reader.close(); + dir.close(); + } + + public void testTerminateAfterWithFilter() throws Exception { + Directory dir = newDirectory(); + final Sort sort = new Sort(new SortField("rank", SortField.Type.INT)); + IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort); + RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); + Document doc = new Document(); + for (int i = 0; i < 10; i++) { + doc.add(new StringField("foo", Integer.toString(i), Store.NO)); + } + w.addDocument(doc); + w.close(); + + IndexReader reader = DirectoryReader.open(dir); + + TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader, executor)); + context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); + + context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); + context.terminateAfter(1); + context.setSize(10); + for (int i = 0; i < 10; i++) { + context.parsedPostFilter(new ParsedQuery(new TermQuery(new Term("foo", Integer.toString(i))))); + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher); + assertEquals(1, context.queryResult().topDocs().topDocs.totalHits.value); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); + assertProfileData(context, collector -> { + assertThat(collector.getReason(), equalTo("search_post_filter")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), hasSize(1)); + assertThat(collector.getProfiledChildren().get(0).getReason(), 
equalTo("search_terminate_after_count")); + assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren().get(0).getProfiledChildren(), hasSize(1)); + assertThat(collector.getProfiledChildren().get(0).getProfiledChildren().get(0).getReason(), equalTo("search_top_hits")); + assertThat(collector.getProfiledChildren().get(0).getProfiledChildren().get(0).getTime(), greaterThan(0L)); + }, (query) -> { + assertThat(query.getQueryName(), equalTo("TermQuery")); + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }, (query) -> { + assertThat(query.getQueryName(), equalTo("MatchAllDocsQuery")); + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), equalTo(1L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }); + } + reader.close(); + dir.close(); + } + + public void testMinScoreDisablesCountOptimization() throws Exception { + Directory dir = newDirectory(); + final Sort sort = new Sort(new SortField("rank", SortField.Type.INT)); + IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort); + RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); + Document doc = new Document(); + w.addDocument(doc); + w.close(); + + IndexReader reader = DirectoryReader.open(dir); + TestSearchContext context = new TestSearchContext(null, indexShard, newEarlyTerminationContextSearcher(reader, 0, executor)); + context.parsedQuery(new ParsedQuery(new 
MatchAllDocsQuery())); + context.setSize(0); + context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher); + assertEquals(1, context.queryResult().topDocs().topDocs.totalHits.value); + assertProfileData(context, "MatchAllDocsQuery", query -> { + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }, collector -> { + assertThat(collector.getReason(), equalTo("search_count")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), empty()); + }); + + context.minimumScore(100); + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher); + assertEquals(0, context.queryResult().topDocs().topDocs.totalHits.value); + assertEquals(TotalHits.Relation.EQUAL_TO, context.queryResult().topDocs().topDocs.totalHits.relation); + assertProfileData(context, "MatchAllDocsQuery", query -> { + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), greaterThanOrEqualTo(100L)); + assertThat(query.getTimeBreakdown().get("score_count"), equalTo(1L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }, collector -> { + assertThat(collector.getReason(), equalTo("search_min_score")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), hasSize(1)); + assertThat(collector.getProfiledChildren().get(0).getReason(), equalTo("search_count")); + 
assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L)); + }); + + reader.close(); + dir.close(); + } + + public void testInOrderScrollOptimization() throws Exception { + Directory dir = newDirectory(); + final Sort sort = new Sort(new SortField("rank", SortField.Type.INT)); + IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort); + RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); + final int numDocs = scaledRandomIntBetween(600, 900); + for (int i = 0; i < numDocs; ++i) { + w.addDocument(new Document()); + } + w.close(); + IndexReader reader = DirectoryReader.open(dir); + ScrollContext scrollContext = new ScrollContext(); + TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader, executor), scrollContext); + context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); + scrollContext.lastEmittedDoc = null; + scrollContext.maxScore = Float.NaN; + scrollContext.totalHits = null; + context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); + int size = randomIntBetween(2, 5); + context.setSize(size); + + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); + assertNull(context.queryResult().terminatedEarly()); + assertThat(context.terminateAfter(), equalTo(0)); + assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs)); + assertProfileData(context, "MatchAllDocsQuery", query -> { + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }, collector -> { + 
assertThat(collector.getReason(), equalTo("search_top_hits")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), empty()); + }); + + context.setSearcher(newEarlyTerminationContextSearcher(reader, size, executor)); + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); + assertThat(context.terminateAfter(), equalTo(size)); + assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0].doc, greaterThanOrEqualTo(size)); + assertProfileData(context, "ConstantScoreQuery", query -> { + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score"), equalTo(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score_count"), equalTo(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }, collector -> { + assertThat(collector.getReason(), equalTo("search_terminate_after_count")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), hasSize(1)); + assertThat(collector.getProfiledChildren().get(0).getReason(), equalTo("search_top_hits")); + assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L)); + }); + + reader.close(); + dir.close(); + } + + public void 
testTerminateAfterEarlyTermination() throws Exception { + Directory dir = newDirectory(); + IndexWriterConfig iwc = newIndexWriterConfig(); + RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); + final int numDocs = scaledRandomIntBetween(600, 900); + for (int i = 0; i < numDocs; ++i) { + Document doc = new Document(); + if (randomBoolean()) { + doc.add(new StringField("foo", "bar", Store.NO)); + } + if (randomBoolean()) { + doc.add(new StringField("foo", "baz", Store.NO)); + } + doc.add(new NumericDocValuesField("rank", numDocs - i)); + w.addDocument(doc); + } + w.close(); + final IndexReader reader = DirectoryReader.open(dir); + TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader, executor)); + context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); + context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); + + context.terminateAfter(1); + { + context.setSize(1); + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher); + assertTrue(context.queryResult().terminatedEarly()); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); + assertProfileData(context, "MatchAllDocsQuery", query -> { + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }, collector -> { + assertThat(collector.getReason(), equalTo("search_terminate_after_count")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), hasSize(1)); + assertThat(collector.getProfiledChildren().get(0).getReason(), 
equalTo("search_top_hits")); + assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L)); + }); + + context.setSize(0); + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher); + assertTrue(context.queryResult().terminatedEarly()); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); + assertProfileData(context, "MatchAllDocsQuery", query -> { + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }, collector -> { + assertThat(collector.getReason(), equalTo("search_terminate_after_count")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), hasSize(1)); + assertThat(collector.getProfiledChildren().get(0).getReason(), equalTo("search_count")); + assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L)); + }); + } + + { + context.setSize(1); + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher); + assertTrue(context.queryResult().terminatedEarly()); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); + assertProfileData(context, "MatchAllDocsQuery", query -> { + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + 
assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }, collector -> { + assertThat(collector.getReason(), equalTo("search_terminate_after_count")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), hasSize(1)); + assertThat(collector.getProfiledChildren().get(0).getReason(), equalTo("search_top_hits")); + assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L)); + }); + } + { + context.setSize(1); + BooleanQuery bq = new BooleanQuery.Builder().add(new TermQuery(new Term("foo", "bar")), Occur.SHOULD) + .add(new TermQuery(new Term("foo", "baz")), Occur.SHOULD) + .build(); + context.parsedQuery(new ParsedQuery(bq)); + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher); + assertTrue(context.queryResult().terminatedEarly()); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); + assertProfileData(context, "BooleanQuery", query -> { + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + + assertThat(query.getProfiledChildren(), hasSize(2)); + assertThat(query.getProfiledChildren().get(0).getQueryName(), equalTo("TermQuery")); + assertThat(query.getProfiledChildren().get(0).getTime(), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + + assertThat(query.getProfiledChildren().get(1).getQueryName(), equalTo("TermQuery")); + 
assertThat(query.getProfiledChildren().get(1).getTime(), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }, collector -> { + assertThat(collector.getReason(), equalTo("search_terminate_after_count")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), hasSize(1)); + assertThat(collector.getProfiledChildren().get(0).getReason(), equalTo("search_top_hits")); + assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L)); + }); + context.setSize(0); + context.parsedQuery(new ParsedQuery(bq)); + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher); + assertTrue(context.queryResult().terminatedEarly()); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); + + assertProfileData(context, "BooleanQuery", query -> { + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + + assertThat(query.getProfiledChildren(), hasSize(2)); + assertThat(query.getProfiledChildren().get(0).getQueryName(), equalTo("TermQuery")); + assertThat(query.getProfiledChildren().get(0).getTime(), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + 
assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score"), equalTo(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score_count"), equalTo(0L)); + + assertThat(query.getProfiledChildren().get(1).getQueryName(), equalTo("TermQuery")); + assertThat(query.getProfiledChildren().get(1).getTime(), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("score"), equalTo(0L)); + assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("score_count"), equalTo(0L)); + }, collector -> { + assertThat(collector.getReason(), equalTo("search_terminate_after_count")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), hasSize(1)); + assertThat(collector.getProfiledChildren().get(0).getReason(), equalTo("search_count")); + assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L)); + }); + } + + context.terminateAfter(7); + context.setSize(10); + for (int trackTotalHits : new int[] { -1, 3, 75, 100 }) { + context.trackTotalHitsUpTo(trackTotalHits); + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher); + assertTrue(context.queryResult().terminatedEarly()); + if (trackTotalHits == -1) { + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(0L)); + } else { + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(7L)); + } + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(7)); + assertProfileData(context, "BooleanQuery", query -> { + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); + 
assertThat(query.getTimeBreakdown().get("score_count"), greaterThanOrEqualTo(7L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + + assertThat(query.getProfiledChildren(), hasSize(2)); + assertThat(query.getProfiledChildren().get(0).getQueryName(), equalTo("TermQuery")); + assertThat(query.getProfiledChildren().get(0).getTime(), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score_count"), greaterThan(0L)); + + assertThat(query.getProfiledChildren().get(1).getQueryName(), equalTo("TermQuery")); + assertThat(query.getProfiledChildren().get(1).getTime(), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("score"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("score_count"), greaterThan(0L)); + }, collector -> { + assertThat(collector.getReason(), equalTo("search_terminate_after_count")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), hasSize(1)); + assertThat(collector.getProfiledChildren().get(0).getReason(), equalTo("search_top_hits")); + assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L)); + }); + } + + reader.close(); + dir.close(); + } + + public void testIndexSortingEarlyTermination() throws Exception { + Directory dir = 
newDirectory(); + final Sort sort = new Sort(new SortField("rank", SortField.Type.INT)); + IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort); + RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); + final int numDocs = scaledRandomIntBetween(600, 900); + for (int i = 0; i < numDocs; ++i) { + Document doc = new Document(); + if (randomBoolean()) { + doc.add(new StringField("foo", "bar", Store.NO)); + } + if (randomBoolean()) { + doc.add(new StringField("foo", "baz", Store.NO)); + } + doc.add(new NumericDocValuesField("rank", numDocs - i)); + w.addDocument(doc); + } + w.close(); + + final IndexReader reader = DirectoryReader.open(dir); + TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader, executor)); + context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); + context.setSize(1); + context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); + context.sort(new SortAndFormats(sort, new DocValueFormat[] { DocValueFormat.RAW })); + + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class)); + FieldDoc fieldDoc = (FieldDoc) context.queryResult().topDocs().topDocs.scoreDocs[0]; + assertThat(fieldDoc.fields[0], equalTo(1)); + assertProfileData(context, "MatchAllDocsQuery", query -> { + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }, collector -> { + 
assertThat(collector.getReason(), equalTo("search_top_hits")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), empty()); + }); + + { + context.parsedPostFilter(new ParsedQuery(new MinDocQuery(1))); + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher); + assertNull(context.queryResult().terminatedEarly()); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(numDocs - 1L)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class)); + assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2))); + assertProfileData(context, collector -> { + assertThat(collector.getReason(), equalTo("search_post_filter")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), hasSize(1)); + assertThat(collector.getProfiledChildren().get(0).getReason(), equalTo("search_top_hits")); + assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L)); + }, (query) -> { + assertThat(query.getQueryName(), equalTo("MinDocQuery")); + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }, (query) -> { + assertThat(query.getQueryName(), equalTo("MatchAllDocsQuery")); + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + 
assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }); + context.parsedPostFilter(null); + } + + { + context.setSearcher(newEarlyTerminationContextSearcher(reader, 1, executor)); + context.trackTotalHitsUpTo(SearchContext.TRACK_TOTAL_HITS_DISABLED); + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher); + assertNull(context.queryResult().terminatedEarly()); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class)); + assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2))); + assertProfileData(context, "MatchAllDocsQuery", query -> { + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }, collector -> { + assertThat(collector.getReason(), equalTo("search_top_hits")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), empty()); + }); + + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher); + assertNull(context.queryResult().terminatedEarly()); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class)); + assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2))); + assertProfileData(context, "MatchAllDocsQuery", query -> { + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); + 
assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }, collector -> { + assertThat(collector.getReason(), equalTo("search_top_hits")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), empty()); + }); + } + + reader.close(); + dir.close(); + } + + public void testIndexSortScrollOptimization() throws Exception { + Directory dir = newDirectory(); + final Sort indexSort = new Sort(new SortField("rank", SortField.Type.INT), new SortField("tiebreaker", SortField.Type.INT)); + IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(indexSort); + RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); + final int numDocs = scaledRandomIntBetween(600, 900); + for (int i = 0; i < numDocs; ++i) { + Document doc = new Document(); + doc.add(new NumericDocValuesField("rank", random().nextInt())); + doc.add(new NumericDocValuesField("tiebreaker", i)); + w.addDocument(doc); + } + if (randomBoolean()) { + w.forceMerge(randomIntBetween(1, 10)); + } + w.close(); + + final IndexReader reader = DirectoryReader.open(dir); + List searchSortAndFormats = new ArrayList<>(); + searchSortAndFormats.add(new SortAndFormats(indexSort, new DocValueFormat[] { DocValueFormat.RAW, DocValueFormat.RAW })); + // search sort is a prefix of the index sort + searchSortAndFormats.add(new SortAndFormats(new Sort(indexSort.getSort()[0]), new DocValueFormat[] { DocValueFormat.RAW })); + for (SortAndFormats searchSortAndFormat : searchSortAndFormats) { + ScrollContext scrollContext = new ScrollContext(); + TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader, executor), scrollContext); + context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); + scrollContext.lastEmittedDoc = null; + scrollContext.maxScore = Float.NaN; + scrollContext.totalHits = null; + context.setTask(new SearchShardTask(123L, "", 
"", "", null, Collections.emptyMap())); + context.setSize(10); + context.sort(searchSortAndFormat); + + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); + assertNull(context.queryResult().terminatedEarly()); + assertThat(context.terminateAfter(), equalTo(0)); + assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs)); + assertProfileData(context, "MatchAllDocsQuery", query -> { + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }, collector -> { + assertThat(collector.getReason(), equalTo("search_top_hits")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), empty()); + }); + + int sizeMinus1 = context.queryResult().topDocs().topDocs.scoreDocs.length - 1; + FieldDoc lastDoc = (FieldDoc) context.queryResult().topDocs().topDocs.scoreDocs[sizeMinus1]; + + context.setSearcher(newEarlyTerminationContextSearcher(reader, 10, executor)); + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher); + assertNull(context.queryResult().terminatedEarly()); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); + assertThat(context.terminateAfter(), equalTo(0)); + assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs)); + assertProfileData(context, "ConstantScoreQuery", query -> { + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), 
equalTo(0L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + + assertThat(query.getProfiledChildren(), hasSize(1)); + assertThat(query.getProfiledChildren().get(0).getQueryName(), equalTo("SearchAfterSortedDocQuery")); + assertThat(query.getProfiledChildren().get(0).getTime(), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score"), equalTo(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score_count"), equalTo(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }, collector -> { + assertThat(collector.getReason(), equalTo("search_top_hits")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), empty()); + }); + FieldDoc firstDoc = (FieldDoc) context.queryResult().topDocs().topDocs.scoreDocs[0]; + for (int i = 0; i < searchSortAndFormat.sort.getSort().length; i++) { + @SuppressWarnings("unchecked") + FieldComparator comparator = (FieldComparator) searchSortAndFormat.sort.getSort()[i].getComparator(i, true); + int cmp = comparator.compareValues(firstDoc.fields[i], lastDoc.fields[i]); + if (cmp == 0) { + continue; + } + assertThat(cmp, equalTo(1)); + break; + } + } + reader.close(); + dir.close(); + } + + public void testDisableTopScoreCollection() throws Exception { + Directory dir = newDirectory(); + IndexWriterConfig iwc = newIndexWriterConfig(new StandardAnalyzer()); + RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); + Document doc = new Document(); + final int numDocs = 2 * scaledRandomIntBetween(50, 450); + for (int i = 0; i < numDocs; i++) { + doc.clear(); + if (i % 2 == 0) { + doc.add(new TextField("title", "foo bar", Store.NO)); + } else { + 
doc.add(new TextField("title", "foo", Store.NO)); + } + w.addDocument(doc); + } + w.close(); + + IndexReader reader = DirectoryReader.open(dir); + TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader, executor)); + context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); + Query q = new SpanNearQuery.Builder("title", true).addClause(new SpanTermQuery(new Term("title", "foo"))) + .addClause(new SpanTermQuery(new Term("title", "bar"))) + .build(); + + context.parsedQuery(new ParsedQuery(q)); + context.setSize(3); + context.trackTotalHitsUpTo(3); + TopDocsCollectorContext topDocsContext = TopDocsCollectorContext.createTopDocsCollectorContext(context, false); + assertEquals(topDocsContext.create(null).scoreMode(), org.apache.lucene.search.ScoreMode.COMPLETE); + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher); + assertEquals(numDocs / 2, context.queryResult().topDocs().topDocs.totalHits.value); + assertEquals(context.queryResult().topDocs().topDocs.totalHits.relation, TotalHits.Relation.EQUAL_TO); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(3)); + assertProfileData(context, "SpanNearQuery", query -> { + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }, collector -> { + assertThat(collector.getReason(), equalTo("search_top_hits")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), empty()); + }); + + context.sort(new SortAndFormats(new Sort(new SortField("other", SortField.Type.INT)), new DocValueFormat[] { DocValueFormat.RAW })); + topDocsContext = 
TopDocsCollectorContext.createTopDocsCollectorContext(context, false); + assertEquals(topDocsContext.create(null).scoreMode(), org.apache.lucene.search.ScoreMode.TOP_DOCS); + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher); + assertEquals(numDocs / 2, context.queryResult().topDocs().topDocs.totalHits.value); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(3)); + assertEquals(context.queryResult().topDocs().topDocs.totalHits.relation, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO); + assertProfileData(context, "SpanNearQuery", query -> { + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }, collector -> { + assertThat(collector.getReason(), equalTo("search_top_hits")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), empty()); + }); + + reader.close(); + dir.close(); + } + + public void testMinScore() throws Exception { + Directory dir = newDirectory(); + IndexWriterConfig iwc = newIndexWriterConfig(); + RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); + for (int i = 0; i < 10; i++) { + Document doc = new Document(); + doc.add(new StringField("foo", "bar", Store.NO)); + doc.add(new StringField("filter", "f1", Store.NO)); + w.addDocument(doc); + } + w.close(); + + IndexReader reader = DirectoryReader.open(dir); + TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader, executor)); + context.parsedQuery( + new ParsedQuery( + new BooleanQuery.Builder().add(new TermQuery(new Term("foo", "bar")), Occur.MUST) + .add(new TermQuery(new Term("filter", "f1")), Occur.SHOULD) + .build() + ) + ); + 
context.minimumScore(0.01f); + context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); + context.setSize(1); + context.trackTotalHitsUpTo(5); + + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher); + assertEquals(10, context.queryResult().topDocs().topDocs.totalHits.value); + assertProfileData(context, "BooleanQuery", query -> { + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), equalTo(10L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + + assertThat(query.getProfiledChildren(), hasSize(2)); + assertThat(query.getProfiledChildren().get(0).getQueryName(), equalTo("TermQuery")); + assertThat(query.getProfiledChildren().get(0).getTime(), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + + assertThat(query.getProfiledChildren().get(1).getQueryName(), equalTo("TermQuery")); + assertThat(query.getProfiledChildren().get(1).getTime(), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }, collector -> { + assertThat(collector.getReason(), equalTo("search_min_score")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), hasSize(1)); + assertThat(collector.getProfiledChildren().get(0).getReason(), equalTo("search_top_hits")); + assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L)); + }); + + reader.close(); + 
dir.close(); + } + + public void testMaxScore() throws Exception { + Directory dir = newDirectory(); + final Sort sort = new Sort(new SortField("filter", SortField.Type.STRING)); + IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort); + RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); + + final int numDocs = scaledRandomIntBetween(600, 900); + for (int i = 0; i < numDocs; i++) { + Document doc = new Document(); + doc.add(new StringField("foo", "bar", Store.NO)); + doc.add(new StringField("filter", "f1" + ((i > 0) ? " " + Integer.toString(i) : ""), Store.NO)); + doc.add(new SortedDocValuesField("filter", newBytesRef("f1" + ((i > 0) ? " " + Integer.toString(i) : "")))); + w.addDocument(doc); + } + w.close(); + + IndexReader reader = DirectoryReader.open(dir); + TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader, executor)); + context.trackScores(true); + context.parsedQuery( + new ParsedQuery( + new BooleanQuery.Builder().add(new TermQuery(new Term("foo", "bar")), Occur.MUST) + .add(new TermQuery(new Term("filter", "f1")), Occur.SHOULD) + .build() + ) + ); + context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); + context.setSize(1); + context.trackTotalHitsUpTo(5); + + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher); + assertFalse(Float.isNaN(context.queryResult().getMaxScore())); + assertEquals(1, context.queryResult().topDocs().topDocs.scoreDocs.length); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, greaterThanOrEqualTo(6L)); + assertProfileData(context, "BooleanQuery", query -> { + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), greaterThanOrEqualTo(6L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + 
assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + + assertThat(query.getProfiledChildren(), hasSize(2)); + assertThat(query.getProfiledChildren().get(0).getQueryName(), equalTo("TermQuery")); + assertThat(query.getProfiledChildren().get(0).getTime(), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + + assertThat(query.getProfiledChildren().get(1).getQueryName(), equalTo("TermQuery")); + assertThat(query.getProfiledChildren().get(1).getTime(), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }, collector -> { + assertThat(collector.getReason(), equalTo("search_top_hits")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), empty()); + }); + + context.sort(new SortAndFormats(sort, new DocValueFormat[] { DocValueFormat.RAW })); + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher); + assertFalse(Float.isNaN(context.queryResult().getMaxScore())); + assertEquals(1, context.queryResult().topDocs().topDocs.scoreDocs.length); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, greaterThanOrEqualTo(6L)); + assertProfileData(context, "BooleanQuery", query -> { + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), greaterThanOrEqualTo(6L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + + assertThat(query.getProfiledChildren(), 
hasSize(2)); + assertThat(query.getProfiledChildren().get(0).getQueryName(), equalTo("TermQuery")); + assertThat(query.getProfiledChildren().get(0).getTime(), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + + assertThat(query.getProfiledChildren().get(1).getQueryName(), equalTo("TermQuery")); + assertThat(query.getProfiledChildren().get(1).getTime(), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }, collector -> { + assertThat(collector.getReason(), equalTo("search_top_hits")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), empty()); + }); + + reader.close(); + dir.close(); + } + + public void testCollapseQuerySearchResults() throws Exception { + Directory dir = newDirectory(); + final Sort sort = new Sort(new SortField("user", SortField.Type.INT)); + IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort); + RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); + + // Always end up with uneven buckets so collapsing is predictable + final int numDocs = 2 * scaledRandomIntBetween(600, 900) - 1; + for (int i = 0; i < numDocs; i++) { + Document doc = new Document(); + doc.add(new StringField("foo", "bar", Store.NO)); + doc.add(new NumericDocValuesField("user", i & 1)); + w.addDocument(doc); + } + w.close(); + + IndexReader reader = DirectoryReader.open(dir); + QueryShardContext queryShardContext = mock(QueryShardContext.class); + when(queryShardContext.fieldMapper("user")).thenReturn( + new NumberFieldType("user", NumberType.INTEGER, true, false, true, false, null, Collections.emptyMap()) + ); + + TestSearchContext context = 
new TestSearchContext(queryShardContext, indexShard, newContextSearcher(reader, executor)); + context.collapse(new CollapseBuilder("user").build(context.getQueryShardContext())); + context.trackScores(true); + context.parsedQuery(new ParsedQuery(new TermQuery(new Term("foo", "bar")))); + context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); + context.setSize(2); + context.trackTotalHitsUpTo(5); + + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher); + assertFalse(Float.isNaN(context.queryResult().getMaxScore())); + assertEquals(2, context.queryResult().topDocs().topDocs.scoreDocs.length); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); + assertThat(context.queryResult().topDocs().topDocs, instanceOf(CollapseTopFieldDocs.class)); + + assertProfileData(context, "TermQuery", query -> { + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), greaterThanOrEqualTo(6L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + assertThat(query.getProfiledChildren(), empty()); + }, collector -> { + assertThat(collector.getReason(), equalTo("search_top_hits")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), empty()); + }); + + context.sort(new SortAndFormats(sort, new DocValueFormat[] { DocValueFormat.RAW })); + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher); + assertFalse(Float.isNaN(context.queryResult().getMaxScore())); + assertEquals(2, context.queryResult().topDocs().topDocs.scoreDocs.length); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); + 
assertThat(context.queryResult().topDocs().topDocs, instanceOf(CollapseTopFieldDocs.class)); + + assertProfileData(context, "TermQuery", query -> { + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), greaterThanOrEqualTo(6L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + assertThat(query.getProfiledChildren(), empty()); + }, collector -> { + assertThat(collector.getReason(), equalTo("search_top_hits")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), empty()); + }); + + reader.close(); + dir.close(); + } + + private void assertProfileData(SearchContext context, String type, Consumer query, Consumer collector) + throws IOException { + assertProfileData(context, collector, (profileResult) -> { + assertThat(profileResult.getQueryName(), equalTo(type)); + assertThat(profileResult.getTime(), greaterThan(0L)); + query.accept(profileResult); + }); + } + + private void assertProfileData(SearchContext context, Consumer collector, Consumer query1) + throws IOException { + assertProfileData(context, Arrays.asList(query1), collector, false); + } + + private void assertProfileData( + SearchContext context, + Consumer collector, + Consumer query1, + Consumer query2 + ) throws IOException { + assertProfileData(context, Arrays.asList(query1, query2), collector, false); + } + + private final void assertProfileData( + SearchContext context, + List> queries, + Consumer collector, + boolean debug + ) throws IOException { + assertThat(context.getProfilers(), not(nullValue())); + + final ProfileShardResult result = SearchProfileShardResults.buildShardResults(context.getProfilers(), null); + if (debug) { + final SearchProfileShardResults results = new SearchProfileShardResults( + 
Collections.singletonMap(indexShard.shardId().toString(), result) + ); + + try (final XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint()) { + builder.startObject(); + results.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + builder.flush(); + + final OutputStream out = builder.getOutputStream(); + assertThat(out, instanceOf(ByteArrayOutputStream.class)); + + logger.info(new String(((ByteArrayOutputStream) out).toByteArray(), StandardCharsets.UTF_8)); + } + } + + assertThat(result.getQueryProfileResults(), hasSize(1)); + + final QueryProfileShardResult queryProfileShardResult = result.getQueryProfileResults().get(0); + assertThat(queryProfileShardResult.getQueryResults(), hasSize(queries.size())); + + for (int i = 0; i < queries.size(); ++i) { + queries.get(i).accept(queryProfileShardResult.getQueryResults().get(i)); + } + + collector.accept(queryProfileShardResult.getCollectorResult()); + } + + private static ContextIndexSearcher newContextSearcher(IndexReader reader, ExecutorService executor) throws IOException { + return new ContextIndexSearcher( + reader, + IndexSearcher.getDefaultSimilarity(), + IndexSearcher.getDefaultQueryCache(), + IndexSearcher.getDefaultQueryCachingPolicy(), + true, + executor + ); + } + + private static ContextIndexSearcher newEarlyTerminationContextSearcher(IndexReader reader, int size, ExecutorService executor) + throws IOException { + return new ContextIndexSearcher( + reader, + IndexSearcher.getDefaultSimilarity(), + IndexSearcher.getDefaultQueryCache(), + IndexSearcher.getDefaultQueryCachingPolicy(), + true, + executor + ) { + + @Override + public void search(List leaves, Weight weight, Collector collector) throws IOException { + final Collector in = new AssertingEarlyTerminationFilterCollector(collector, size); + super.search(leaves, weight, in); + } + }; + } + + private static class AssertingEarlyTerminationFilterCollector extends FilterCollector { + private final int size; + + 
AssertingEarlyTerminationFilterCollector(Collector in, int size) { + super(in); + this.size = size; + } + + @Override + public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException { + final LeafCollector in = super.getLeafCollector(context); + return new FilterLeafCollector(in) { + int collected; + + @Override + public void collect(int doc) throws IOException { + assert collected <= size : "should not collect more than " + size + " doc per segment, got " + collected; + ++collected; + super.collect(doc); + } + }; + } + } +} diff --git a/server/src/main/java/org/opensearch/common/lucene/MinimumScoreCollector.java b/server/src/main/java/org/opensearch/common/lucene/MinimumScoreCollector.java index 81c98c862d2b2..a883e111f7c95 100644 --- a/server/src/main/java/org/opensearch/common/lucene/MinimumScoreCollector.java +++ b/server/src/main/java/org/opensearch/common/lucene/MinimumScoreCollector.java @@ -55,6 +55,10 @@ public MinimumScoreCollector(Collector collector, float minimumScore) { this.minimumScore = minimumScore; } + public Collector getCollector() { + return collector; + } + @Override public void setScorer(Scorable scorer) throws IOException { if (!(scorer instanceof ScoreCachingWrappingScorer)) { diff --git a/server/src/main/java/org/opensearch/common/lucene/search/FilteredCollector.java b/server/src/main/java/org/opensearch/common/lucene/search/FilteredCollector.java index 331b67a40878f..2dcb0578fd23d 100644 --- a/server/src/main/java/org/opensearch/common/lucene/search/FilteredCollector.java +++ b/server/src/main/java/org/opensearch/common/lucene/search/FilteredCollector.java @@ -53,6 +53,10 @@ public FilteredCollector(Collector collector, Weight filter) { this.filter = filter; } + public Collector getCollector() { + return collector; + } + @Override public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException { final ScorerSupplier filterScorerSupplier = filter.scorerSupplier(context); diff --git 
a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java index bfe8eed05ea9b..6fd78b834344d 100644 --- a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java @@ -36,6 +36,7 @@ import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.Collector; +import org.apache.lucene.search.CollectorManager; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; @@ -82,6 +83,7 @@ import org.opensearch.search.profile.Profilers; import org.opensearch.search.query.QueryPhaseExecutionException; import org.opensearch.search.query.QuerySearchResult; +import org.opensearch.search.query.ReduceableSearchResult; import org.opensearch.search.rescore.RescoreContext; import org.opensearch.search.slice.SliceBuilder; import org.opensearch.search.sort.SortAndFormats; @@ -163,7 +165,7 @@ final class DefaultSearchContext extends SearchContext { private Profilers profilers; private final Map searchExtBuilders = new HashMap<>(); - private final Map, Collector> queryCollectors = new HashMap<>(); + private final Map, CollectorManager> queryCollectorManagers = new HashMap<>(); private final QueryShardContext queryShardContext; private final FetchPhase fetchPhase; @@ -823,8 +825,8 @@ public long getRelativeTimeInMillis() { } @Override - public Map, Collector> queryCollectors() { - return queryCollectors; + public Map, CollectorManager> queryCollectorManagers() { + return queryCollectorManagers; } @Override diff --git a/server/src/main/java/org/opensearch/search/aggregations/AggregationPhase.java b/server/src/main/java/org/opensearch/search/aggregations/AggregationPhase.java index be62b33adb356..5a837a6e14c5a 100644 --- 
a/server/src/main/java/org/opensearch/search/aggregations/AggregationPhase.java +++ b/server/src/main/java/org/opensearch/search/aggregations/AggregationPhase.java @@ -32,6 +32,7 @@ package org.opensearch.search.aggregations; import org.apache.lucene.search.Collector; +import org.apache.lucene.search.CollectorManager; import org.apache.lucene.search.Query; import org.opensearch.common.inject.Inject; import org.opensearch.common.lucene.search.Queries; @@ -40,9 +41,11 @@ import org.opensearch.search.profile.query.CollectorResult; import org.opensearch.search.profile.query.InternalProfileCollector; import org.opensearch.search.query.QueryPhaseExecutionException; +import org.opensearch.search.query.ReduceableSearchResult; import java.io.IOException; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.List; @@ -68,17 +71,18 @@ public void preProcess(SearchContext context) { } context.aggregations().aggregators(aggregators); if (!collectors.isEmpty()) { - Collector collector = MultiBucketCollector.wrap(collectors); - ((BucketCollector) collector).preCollection(); - if (context.getProfilers() != null) { - collector = new InternalProfileCollector( - collector, - CollectorResult.REASON_AGGREGATION, - // TODO: report on child aggs as well - Collections.emptyList() - ); - } - context.queryCollectors().put(AggregationPhase.class, collector); + final Collector collector = createCollector(context, collectors); + context.queryCollectorManagers().put(AggregationPhase.class, new CollectorManager() { + @Override + public Collector newCollector() throws IOException { + return collector; + } + + @Override + public ReduceableSearchResult reduce(Collection collectors) throws IOException { + throw new UnsupportedOperationException("The concurrent aggregation over index segments is not supported"); + } + }); } } catch (IOException e) { throw new AggregationInitializationException("Could not initialize aggregators", e); @@ -147,6 +151,20 
@@ public void execute(SearchContext context) { // disable aggregations so that they don't run on next pages in case of scrolling context.aggregations(null); - context.queryCollectors().remove(AggregationPhase.class); + context.queryCollectorManagers().remove(AggregationPhase.class); + } + + private Collector createCollector(SearchContext context, List collectors) throws IOException { + Collector collector = MultiBucketCollector.wrap(collectors); + ((BucketCollector) collector).preCollection(); + if (context.getProfilers() != null) { + collector = new InternalProfileCollector( + collector, + CollectorResult.REASON_AGGREGATION, + // TODO: report on child aggs as well + Collections.emptyList() + ); + } + return collector; } } diff --git a/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java b/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java index 2cc15d4c65b96..2fb5ababe19ad 100644 --- a/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java +++ b/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java @@ -96,16 +96,6 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable { private QueryProfiler profiler; private MutableQueryTimeout cancellable; - public ContextIndexSearcher( - IndexReader reader, - Similarity similarity, - QueryCache queryCache, - QueryCachingPolicy queryCachingPolicy, - boolean wrapWithExitableDirectoryReader - ) throws IOException { - this(reader, similarity, queryCache, queryCachingPolicy, new MutableQueryTimeout(), wrapWithExitableDirectoryReader, null); - } - public ContextIndexSearcher( IndexReader reader, Similarity similarity, @@ -233,6 +223,25 @@ public void search( result.topDocs(new TopDocsAndMaxScore(mergedTopDocs, Float.NaN), formats); } + public void search( + Query query, + CollectorManager manager, + QuerySearchResult result, + DocValueFormat[] formats, + TotalHits totalHits + ) throws IOException { + 
TopFieldDocs mergedTopDocs = search(query, manager); + // Lucene sets shards indexes during merging of topDocs from different collectors + // We need to reset shard index; OpenSearch will set shard index later during reduce stage + for (ScoreDoc scoreDoc : mergedTopDocs.scoreDocs) { + scoreDoc.shardIndex = -1; + } + if (totalHits != null) { // we have already precalculated totalHits for the whole index + mergedTopDocs = new TopFieldDocs(totalHits, mergedTopDocs.scoreDocs, mergedTopDocs.fields); + } + result.topDocs(new TopDocsAndMaxScore(mergedTopDocs, Float.NaN), formats); + } + @Override protected void search(List leaves, Weight weight, Collector collector) throws IOException { for (LeafReaderContext ctx : leaves) { // search each subreader @@ -420,8 +429,4 @@ public void clear() { runnables.clear(); } } - - public boolean allowConcurrentSegmentSearch() { - return (getExecutor() != null); - } } diff --git a/server/src/main/java/org/opensearch/search/internal/FilteredSearchContext.java b/server/src/main/java/org/opensearch/search/internal/FilteredSearchContext.java index 6d77558ec3bd0..961d45b0011ef 100644 --- a/server/src/main/java/org/opensearch/search/internal/FilteredSearchContext.java +++ b/server/src/main/java/org/opensearch/search/internal/FilteredSearchContext.java @@ -33,6 +33,7 @@ package org.opensearch.search.internal; import org.apache.lucene.search.Collector; +import org.apache.lucene.search.CollectorManager; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.Query; import org.opensearch.action.search.SearchShardTask; @@ -61,6 +62,7 @@ import org.opensearch.search.fetch.subphase.highlight.SearchHighlightContext; import org.opensearch.search.profile.Profilers; import org.opensearch.search.query.QuerySearchResult; +import org.opensearch.search.query.ReduceableSearchResult; import org.opensearch.search.rescore.RescoreContext; import org.opensearch.search.sort.SortAndFormats; import 
org.opensearch.search.suggest.SuggestionSearchContext; @@ -492,8 +494,8 @@ public Profilers getProfilers() { } @Override - public Map, Collector> queryCollectors() { - return in.queryCollectors(); + public Map, CollectorManager> queryCollectorManagers() { + return in.queryCollectorManagers(); } @Override diff --git a/server/src/main/java/org/opensearch/search/internal/SearchContext.java b/server/src/main/java/org/opensearch/search/internal/SearchContext.java index 7ff0eaed4be63..0c24fbee76335 100644 --- a/server/src/main/java/org/opensearch/search/internal/SearchContext.java +++ b/server/src/main/java/org/opensearch/search/internal/SearchContext.java @@ -32,6 +32,7 @@ package org.opensearch.search.internal; import org.apache.lucene.search.Collector; +import org.apache.lucene.search.CollectorManager; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.Query; import org.opensearch.action.search.SearchShardTask; @@ -66,6 +67,7 @@ import org.opensearch.search.fetch.subphase.highlight.SearchHighlightContext; import org.opensearch.search.profile.Profilers; import org.opensearch.search.query.QuerySearchResult; +import org.opensearch.search.query.ReduceableSearchResult; import org.opensearch.search.rescore.RescoreContext; import org.opensearch.search.sort.SortAndFormats; import org.opensearch.search.suggest.SuggestionSearchContext; @@ -388,8 +390,8 @@ public final boolean hasOnlySuggest() { */ public abstract long getRelativeTimeInMillis(); - /** Return a view of the additional query collectors that should be run for this context. */ - public abstract Map, Collector> queryCollectors(); + /** Return a view of the additional query collector managers that should be run for this context. 
*/ + public abstract Map, CollectorManager> queryCollectorManagers(); public abstract QueryShardContext getQueryShardContext(); diff --git a/server/src/main/java/org/opensearch/search/profile/Profilers.java b/server/src/main/java/org/opensearch/search/profile/Profilers.java index 6b9be0167b50f..3cc9b1710d420 100644 --- a/server/src/main/java/org/opensearch/search/profile/Profilers.java +++ b/server/src/main/java/org/opensearch/search/profile/Profilers.java @@ -57,7 +57,7 @@ public Profilers(ContextIndexSearcher searcher) { /** Switch to a new profile. */ public QueryProfiler addQueryProfiler() { - QueryProfiler profiler = new QueryProfiler(searcher.allowConcurrentSegmentSearch()); + QueryProfiler profiler = new QueryProfiler(searcher.getExecutor() != null); searcher.setProfiler(profiler); queryProfilers.add(profiler); return profiler; diff --git a/server/src/main/java/org/opensearch/search/profile/query/InternalProfileCollectorManager.java b/server/src/main/java/org/opensearch/search/profile/query/InternalProfileCollectorManager.java new file mode 100644 index 0000000000000..a35c22a6a2457 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/profile/query/InternalProfileCollectorManager.java @@ -0,0 +1,89 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.profile.query; + +import org.apache.lucene.search.Collector; +import org.apache.lucene.search.CollectorManager; +import org.opensearch.search.query.EarlyTerminatingListener; +import org.opensearch.search.query.ReduceableSearchResult; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; + +public class InternalProfileCollectorManager + implements + ProfileCollectorManager, + EarlyTerminatingListener { + private final CollectorManager manager; + private final String reason; + private final List children; + private long time = 0; + + public InternalProfileCollectorManager( + CollectorManager manager, + String reason, + List children + ) { + this.manager = manager; + this.reason = reason; + this.children = children; + } + + @Override + public InternalProfileCollector newCollector() throws IOException { + return new InternalProfileCollector(manager.newCollector(), reason, children); + } + + @SuppressWarnings("unchecked") + @Override + public ReduceableSearchResult reduce(Collection collectors) throws IOException { + final Collection subs = new ArrayList<>(); + + for (final InternalProfileCollector collector : collectors) { + subs.add(collector.getCollector()); + time += collector.getTime(); + } + + return ((CollectorManager) manager).reduce(subs); + } + + @Override + public String getReason() { + return reason; + } + + @Override + public long getTime() { + return time; + } + + @Override + public Collection children() { + return children; + } + + @Override + public String getName() { + return manager.getClass().getSimpleName(); + } + + @Override + public CollectorResult getCollectorTree() { + return InternalProfileCollector.doGetCollectorTree(this); + } + + @Override + public void onEarlyTermination(int maxCountHits, boolean forcedTermination) { + if (manager instanceof EarlyTerminatingListener) { + ((EarlyTerminatingListener) manager).onEarlyTermination(maxCountHits, 
forcedTermination); + } + } +} diff --git a/server/src/main/java/org/opensearch/search/profile/query/ProfileCollectorManager.java b/server/src/main/java/org/opensearch/search/profile/query/ProfileCollectorManager.java new file mode 100644 index 0000000000000..7037988401fce --- /dev/null +++ b/server/src/main/java/org/opensearch/search/profile/query/ProfileCollectorManager.java @@ -0,0 +1,17 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.profile.query; + +import org.apache.lucene.search.Collector; +import org.apache.lucene.search.CollectorManager; + +/** + * Collector manager which supports profiling + */ +public interface ProfileCollectorManager extends CollectorManager, InternalProfileComponent {} diff --git a/server/src/main/java/org/opensearch/search/query/EarlyTerminatingCollector.java b/server/src/main/java/org/opensearch/search/query/EarlyTerminatingCollector.java index 3ee8430522891..56cb49835dcc4 100644 --- a/server/src/main/java/org/opensearch/search/query/EarlyTerminatingCollector.java +++ b/server/src/main/java/org/opensearch/search/query/EarlyTerminatingCollector.java @@ -95,6 +95,10 @@ public void collect(int doc) throws IOException { }; } + Collector getCollector() { + return in; + } + /** * Returns true if this collector has early terminated. 
*/ diff --git a/server/src/main/java/org/opensearch/search/query/EarlyTerminatingCollectorManager.java b/server/src/main/java/org/opensearch/search/query/EarlyTerminatingCollectorManager.java new file mode 100644 index 0000000000000..32fbb24d16436 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/query/EarlyTerminatingCollectorManager.java @@ -0,0 +1,74 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.query; + +import org.apache.lucene.search.Collector; +import org.apache.lucene.search.CollectorManager; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; + +public class EarlyTerminatingCollectorManager + implements + CollectorManager, + EarlyTerminatingListener { + + private final CollectorManager manager; + private final int maxCountHits; + private boolean forceTermination; + + EarlyTerminatingCollectorManager(CollectorManager manager, int maxCountHits, boolean forceTermination) { + this.manager = manager; + this.maxCountHits = maxCountHits; + this.forceTermination = forceTermination; + } + + @Override + public EarlyTerminatingCollector newCollector() throws IOException { + return new EarlyTerminatingCollector(manager.newCollector(), maxCountHits, false /* forced termination is not supported */); + } + + @SuppressWarnings("unchecked") + @Override + public ReduceableSearchResult reduce(Collection collectors) throws IOException { + final List innerCollectors = new ArrayList<>(collectors.size()); + + boolean didTerminateEarly = false; + for (EarlyTerminatingCollector collector : collectors) { + innerCollectors.add((C) collector.getCollector()); + if (collector.hasEarlyTerminated()) { + didTerminateEarly = true; + } + } + + if (didTerminateEarly) { + onEarlyTermination(maxCountHits, 
forceTermination); + + final ReduceableSearchResult result = manager.reduce(innerCollectors); + return new ReduceableSearchResult() { + @Override + public void reduce(QuerySearchResult r) throws IOException { + result.reduce(r); + r.terminatedEarly(true); + } + }; + } + + return manager.reduce(innerCollectors); + } + + @Override + public void onEarlyTermination(int maxCountHits, boolean forcedTermination) { + if (manager instanceof EarlyTerminatingListener) { + ((EarlyTerminatingListener) manager).onEarlyTermination(maxCountHits, forcedTermination); + } + } +} diff --git a/server/src/main/java/org/opensearch/search/query/EarlyTerminatingListener.java b/server/src/main/java/org/opensearch/search/query/EarlyTerminatingListener.java new file mode 100644 index 0000000000000..dd6793266a7ca --- /dev/null +++ b/server/src/main/java/org/opensearch/search/query/EarlyTerminatingListener.java @@ -0,0 +1,22 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.query; + +/** + * Early termination event listener. It is used during concurrent segment search + * to propagate the early termination intent. 
+ */ +public interface EarlyTerminatingListener { + /** + * Early termination event notification + * @param maxCountHits desired maximum number of hits + * @param forcedTermination :true" if forced termination has been requested, "false" otherwise + */ + void onEarlyTermination(int maxCountHits, boolean forcedTermination); +} diff --git a/server/src/main/java/org/opensearch/search/query/FilteredCollectorManager.java b/server/src/main/java/org/opensearch/search/query/FilteredCollectorManager.java new file mode 100644 index 0000000000000..ef47cf2a388f3 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/query/FilteredCollectorManager.java @@ -0,0 +1,45 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.query; + +import org.apache.lucene.search.Collector; +import org.apache.lucene.search.CollectorManager; +import org.apache.lucene.search.Weight; +import org.opensearch.common.lucene.search.FilteredCollector; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; + +class FilteredCollectorManager implements CollectorManager { + private final CollectorManager manager; + private final Weight filter; + + FilteredCollectorManager(CollectorManager manager, Weight filter) { + this.manager = manager; + this.filter = filter; + } + + @Override + public FilteredCollector newCollector() throws IOException { + return new FilteredCollector(manager.newCollector(), filter); + } + + @Override + @SuppressWarnings("unchecked") + public ReduceableSearchResult reduce(Collection collectors) throws IOException { + final Collection subCollectors = new ArrayList<>(); + + for (final FilteredCollector collector : collectors) { + subCollectors.add(collector.getCollector()); + } + + return ((CollectorManager) manager).reduce(subCollectors); + } +} diff --git 
a/server/src/main/java/org/opensearch/search/query/MinimumCollectorManager.java b/server/src/main/java/org/opensearch/search/query/MinimumCollectorManager.java new file mode 100644 index 0000000000000..22b25222b639d --- /dev/null +++ b/server/src/main/java/org/opensearch/search/query/MinimumCollectorManager.java @@ -0,0 +1,44 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.query; + +import org.apache.lucene.search.Collector; +import org.apache.lucene.search.CollectorManager; +import org.opensearch.common.lucene.MinimumScoreCollector; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; + +class MinimumCollectorManager implements CollectorManager { + private final CollectorManager manager; + private final float minimumScore; + + MinimumCollectorManager(CollectorManager manager, float minimumScore) { + this.manager = manager; + this.minimumScore = minimumScore; + } + + @Override + public MinimumScoreCollector newCollector() throws IOException { + return new MinimumScoreCollector(manager.newCollector(), minimumScore); + } + + @Override + @SuppressWarnings("unchecked") + public ReduceableSearchResult reduce(Collection collectors) throws IOException { + final Collection subCollectors = new ArrayList<>(); + + for (final MinimumScoreCollector collector : collectors) { + subCollectors.add(collector.getCollector()); + } + + return ((CollectorManager) manager).reduce(subCollectors); + } +} diff --git a/server/src/main/java/org/opensearch/search/query/MultiCollectorWrapper.java b/server/src/main/java/org/opensearch/search/query/MultiCollectorWrapper.java new file mode 100644 index 0000000000000..0ee423b48caeb --- /dev/null +++ b/server/src/main/java/org/opensearch/search/query/MultiCollectorWrapper.java @@ -0,0 +1,58 @@ +/* + * 
SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.query; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.Collector; +import org.apache.lucene.search.LeafCollector; +import org.apache.lucene.search.MultiCollector; +import org.apache.lucene.search.ScoreMode; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; + +/** + * Wraps MultiCollector and provide access to underlying collectors. + * Please check out https://github.com/apache/lucene/pull/455. + */ +public class MultiCollectorWrapper implements Collector { + private final MultiCollector delegate; + private final Collection collectors; + + MultiCollectorWrapper(MultiCollector delegate, Collection collectors) { + this.delegate = delegate; + this.collectors = collectors; + } + + @Override + public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException { + return delegate.getLeafCollector(context); + } + + @Override + public ScoreMode scoreMode() { + return delegate.scoreMode(); + } + + public Collection getCollectors() { + return collectors; + } + + public static Collector wrap(Collector... 
collectors) { + final List collectorsList = Arrays.asList(collectors); + final Collector collector = MultiCollector.wrap(collectorsList); + if (collector instanceof MultiCollector) { + return new MultiCollectorWrapper((MultiCollector) collector, collectorsList); + } else { + return collector; + } + } +} diff --git a/server/src/main/java/org/opensearch/search/query/QueryCollectorContext.java b/server/src/main/java/org/opensearch/search/query/QueryCollectorContext.java index d1ff855888f0b..95ad514adf97d 100644 --- a/server/src/main/java/org/opensearch/search/query/QueryCollectorContext.java +++ b/server/src/main/java/org/opensearch/search/query/QueryCollectorContext.java @@ -33,6 +33,7 @@ package org.opensearch.search.query; import org.apache.lucene.search.Collector; +import org.apache.lucene.search.CollectorManager; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MultiCollector; import org.apache.lucene.search.Query; @@ -42,6 +43,7 @@ import org.opensearch.common.lucene.MinimumScoreCollector; import org.opensearch.common.lucene.search.FilteredCollector; import org.opensearch.search.profile.query.InternalProfileCollector; +import org.opensearch.search.profile.query.InternalProfileCollectorManager; import java.io.IOException; import java.util.ArrayList; @@ -54,7 +56,7 @@ import static org.opensearch.search.profile.query.CollectorResult.REASON_SEARCH_POST_FILTER; import static org.opensearch.search.profile.query.CollectorResult.REASON_SEARCH_TERMINATE_AFTER_COUNT; -abstract class QueryCollectorContext { +public abstract class QueryCollectorContext { private static final Collector EMPTY_COLLECTOR = new SimpleCollector() { @Override public void collect(int doc) {} @@ -77,6 +79,8 @@ public ScoreMode scoreMode() { */ abstract Collector create(Collector in) throws IOException; + abstract CollectorManager createManager(CollectorManager in) throws IOException; + /** * Wraps this collector with a profiler */ @@ -85,6 +89,18 @@ protected 
InternalProfileCollector createWithProfiler(InternalProfileCollector i return new InternalProfileCollector(collector, profilerName, in != null ? Collections.singletonList(in) : Collections.emptyList()); } + /** + * Wraps this collector manager with a profiler + */ + protected InternalProfileCollectorManager createWithProfiler(InternalProfileCollectorManager in) throws IOException { + final CollectorManager manager = createManager(in); + return new InternalProfileCollectorManager( + manager, + profilerName, + in != null ? Collections.singletonList(in) : Collections.emptyList() + ); + } + /** * Post-process result after search execution. * @@ -126,6 +142,11 @@ static QueryCollectorContext createMinScoreCollectorContext(float minScore) { Collector create(Collector in) { return new MinimumScoreCollector(in, minScore); } + + @Override + CollectorManager createManager(CollectorManager in) throws IOException { + return new MinimumCollectorManager(in, minScore); + } }; } @@ -139,35 +160,58 @@ Collector create(Collector in) throws IOException { final Weight filterWeight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1f); return new FilteredCollector(in, filterWeight); } + + @Override + CollectorManager createManager(CollectorManager in) throws IOException { + final Weight filterWeight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1f); + return new FilteredCollectorManager(in, filterWeight); + } }; } /** - * Creates a multi collector from the provided subs + * Creates a multi collector manager from the provided subs */ - static QueryCollectorContext createMultiCollectorContext(Collection subs) { + static QueryCollectorContext createMultiCollectorContext( + Collection> subs + ) { return new QueryCollectorContext(REASON_SEARCH_MULTI) { @Override - Collector create(Collector in) { + Collector create(Collector in) throws IOException { List subCollectors = new ArrayList<>(); subCollectors.add(in); - 
subCollectors.addAll(subs); + for (CollectorManager manager : subs) { + subCollectors.add(manager.newCollector()); + } return MultiCollector.wrap(subCollectors); } @Override - protected InternalProfileCollector createWithProfiler(InternalProfileCollector in) { + protected InternalProfileCollector createWithProfiler(InternalProfileCollector in) throws IOException { final List subCollectors = new ArrayList<>(); subCollectors.add(in); - if (subs.stream().anyMatch((col) -> col instanceof InternalProfileCollector == false)) { - throw new IllegalArgumentException("non-profiling collector"); - } - for (Collector collector : subs) { + + for (CollectorManager manager : subs) { + final Collector collector = manager.newCollector(); + if (!(collector instanceof InternalProfileCollector)) { + throw new IllegalArgumentException("non-profiling collector"); + } subCollectors.add((InternalProfileCollector) collector); } + final Collector collector = MultiCollector.wrap(subCollectors); return new InternalProfileCollector(collector, REASON_SEARCH_MULTI, subCollectors); } + + @Override + CollectorManager createManager( + CollectorManager in + ) throws IOException { + final List> managers = new ArrayList<>(); + managers.add(in); + managers.addAll(subs); + return QueryCollectorManagerContext.createOpaqueCollectorManager(managers); + } }; } @@ -192,6 +236,13 @@ Collector create(Collector in) { this.collector = MultiCollector.wrap(subCollectors); return collector; } + + @Override + CollectorManager createManager( + CollectorManager in + ) throws IOException { + return new EarlyTerminatingCollectorManager<>(in, numHits, true); + } }; } } diff --git a/server/src/main/java/org/opensearch/search/query/QueryCollectorManagerContext.java b/server/src/main/java/org/opensearch/search/query/QueryCollectorManagerContext.java new file mode 100644 index 0000000000000..c98f4884bb030 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/query/QueryCollectorManagerContext.java @@ -0,0 +1,99 @@ 
+/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.query; + +import org.apache.lucene.search.Collector; +import org.apache.lucene.search.CollectorManager; +import org.apache.lucene.search.MultiCollectorManager; +import org.opensearch.search.profile.query.InternalProfileCollectorManager; +import org.opensearch.search.profile.query.ProfileCollectorManager; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; + +public abstract class QueryCollectorManagerContext { + private static class QueryCollectorManager implements CollectorManager { + private final MultiCollectorManager manager; + + private QueryCollectorManager(Collection> managers) { + this.manager = new MultiCollectorManager(managers.toArray(new CollectorManager[0])); + } + + @Override + public Collector newCollector() throws IOException { + return manager.newCollector(); + } + + @Override + public ReduceableSearchResult reduce(Collection collectors) throws IOException { + final Object[] results = manager.reduce(collectors); + + final ReduceableSearchResult[] transformed = new ReduceableSearchResult[results.length]; + for (int i = 0; i < results.length; ++i) { + assert results[i] instanceof ReduceableSearchResult; + transformed[i] = (ReduceableSearchResult) results[i]; + } + + return reduceWith(transformed); + } + + protected ReduceableSearchResult reduceWith(final ReduceableSearchResult[] results) { + return (QuerySearchResult result) -> { + for (final ReduceableSearchResult r : results) { + r.reduce(result); + } + }; + } + } + + private static class OpaqueQueryCollectorManager extends QueryCollectorManager { + private OpaqueQueryCollectorManager(Collection> managers) { + super(managers); + } + + @Override + protected ReduceableSearchResult reduceWith(final 
ReduceableSearchResult[] results) { + return (QuerySearchResult result) -> {}; + } + } + + public static CollectorManager createOpaqueCollectorManager( + List> managers + ) throws IOException { + return new OpaqueQueryCollectorManager(managers); + } + + public static CollectorManager createMultiCollectorManager( + List collectors + ) throws IOException { + final Collection> managers = new ArrayList<>(); + + CollectorManager manager = null; + for (QueryCollectorContext ctx : collectors) { + manager = ctx.createManager(manager); + managers.add(manager); + } + + return new QueryCollectorManager(managers); + } + + public static ProfileCollectorManager createQueryCollectorManagerWithProfiler( + List collectors + ) throws IOException { + InternalProfileCollectorManager manager = null; + + for (QueryCollectorContext ctx : collectors) { + manager = ctx.createWithProfiler(manager); + } + + return manager; + } +} diff --git a/server/src/main/java/org/opensearch/search/query/QueryPhase.java b/server/src/main/java/org/opensearch/search/query/QueryPhase.java index 3edbc16cd613f..1501067ec7983 100644 --- a/server/src/main/java/org/opensearch/search/query/QueryPhase.java +++ b/server/src/main/java/org/opensearch/search/query/QueryPhase.java @@ -238,9 +238,9 @@ static boolean executeInternal(SearchContext searchContext, QueryPhaseSearcher q // this collector can filter documents during the collection hasFilterCollector = true; } - if (searchContext.queryCollectors().isEmpty() == false) { + if (searchContext.queryCollectorManagers().isEmpty() == false) { // plug in additional collectors, like aggregations - collectors.add(createMultiCollectorContext(searchContext.queryCollectors().values())); + collectors.add(createMultiCollectorContext(searchContext.queryCollectorManagers().values())); } if (searchContext.minimumScore() != null) { // apply the minimum score after multi collector so we filter aggs as well diff --git 
a/server/src/main/java/org/opensearch/search/query/ReduceableSearchResult.java b/server/src/main/java/org/opensearch/search/query/ReduceableSearchResult.java new file mode 100644 index 0000000000000..48e8d7198ea3b --- /dev/null +++ b/server/src/main/java/org/opensearch/search/query/ReduceableSearchResult.java @@ -0,0 +1,23 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.query; + +import java.io.IOException; + +/** + * The search result callback returned by reduce phase of the collector manager. + */ +public interface ReduceableSearchResult { + /** + * Apply the reduce operation to the query search results + * @param result query search results + * @throws IOException exception if reduce operation failed + */ + void reduce(QuerySearchResult result) throws IOException; +} diff --git a/server/src/main/java/org/opensearch/search/query/TopDocsCollectorContext.java b/server/src/main/java/org/opensearch/search/query/TopDocsCollectorContext.java index 9cf7dca3c4caf..5f19462a2c33a 100644 --- a/server/src/main/java/org/opensearch/search/query/TopDocsCollectorContext.java +++ b/server/src/main/java/org/opensearch/search/query/TopDocsCollectorContext.java @@ -44,6 +44,7 @@ import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.Collector; +import org.apache.lucene.search.CollectorManager; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.FieldDoc; @@ -80,6 +81,9 @@ import org.opensearch.search.sort.SortAndFormats; import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.Objects; import java.util.function.Supplier; @@ -89,7 +93,7 @@ /** * A {@link 
QueryCollectorContext} that creates top docs collector */ -abstract class TopDocsCollectorContext extends QueryCollectorContext { +public abstract class TopDocsCollectorContext extends QueryCollectorContext { protected final int numHits; TopDocsCollectorContext(String profilerName, int numHits) { @@ -107,7 +111,7 @@ final int numHits() { /** * Returns true if the top docs should be re-scored after initial search */ - boolean shouldRescore() { + public boolean shouldRescore() { return false; } @@ -115,6 +119,8 @@ static class EmptyTopDocsCollectorContext extends TopDocsCollectorContext { private final Sort sort; private final Collector collector; private final Supplier hitCountSupplier; + private final int trackTotalHitsUpTo; + private final int hitCount; /** * Ctr @@ -132,16 +138,18 @@ private EmptyTopDocsCollectorContext( ) throws IOException { super(REASON_SEARCH_COUNT, 0); this.sort = sortAndFormats == null ? null : sortAndFormats.sort; - if (trackTotalHitsUpTo == SearchContext.TRACK_TOTAL_HITS_DISABLED) { + this.trackTotalHitsUpTo = trackTotalHitsUpTo; + if (this.trackTotalHitsUpTo == SearchContext.TRACK_TOTAL_HITS_DISABLED) { this.collector = new EarlyTerminatingCollector(new TotalHitCountCollector(), 0, false); // for bwc hit count is set to 0, it will be converted to -1 by the coordinating node this.hitCountSupplier = () -> new TotalHits(0, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO); + this.hitCount = Integer.MIN_VALUE; } else { TotalHitCountCollector hitCountCollector = new TotalHitCountCollector(); // implicit total hit counts are valid only when there is no filter collector in the chain - int hitCount = hasFilterCollector ? -1 : shortcutTotalHitCount(reader, query); - if (hitCount == -1) { - if (trackTotalHitsUpTo == SearchContext.TRACK_TOTAL_HITS_ACCURATE) { + this.hitCount = hasFilterCollector ? 
-1 : shortcutTotalHitCount(reader, query); + if (this.hitCount == -1) { + if (this.trackTotalHitsUpTo == SearchContext.TRACK_TOTAL_HITS_ACCURATE) { this.collector = hitCountCollector; this.hitCountSupplier = () -> new TotalHits(hitCountCollector.getTotalHits(), TotalHits.Relation.EQUAL_TO); } else { @@ -159,6 +167,39 @@ private EmptyTopDocsCollectorContext( } } + @Override + CollectorManager createManager(CollectorManager in) throws IOException { + assert in == null; + + CollectorManager manager = null; + + if (trackTotalHitsUpTo == SearchContext.TRACK_TOTAL_HITS_DISABLED) { + manager = new EarlyTerminatingCollectorManager<>( + new TotalHitCountCollectorManager.Empty(new TotalHits(0, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO), sort), + 0, + false + ); + } else { + if (hitCount == -1) { + if (trackTotalHitsUpTo != SearchContext.TRACK_TOTAL_HITS_ACCURATE) { + manager = new EarlyTerminatingCollectorManager<>( + new TotalHitCountCollectorManager(sort), + trackTotalHitsUpTo, + false + ); + } + } else { + manager = new EarlyTerminatingCollectorManager<>( + new TotalHitCountCollectorManager.Empty(new TotalHits(hitCount, TotalHits.Relation.EQUAL_TO), sort), + 0, + false + ); + } + } + + return manager; + } + @Override Collector create(Collector in) { assert in == null; @@ -181,7 +222,11 @@ void postProcess(QuerySearchResult result) { static class CollapsingTopDocsCollectorContext extends TopDocsCollectorContext { private final DocValueFormat[] sortFmt; private final CollapsingTopDocsCollector topDocsCollector; + private final Collector collector; private final Supplier maxScoreSupplier; + private final CollapseContext collapseContext; + private final boolean trackMaxScore; + private final Sort sort; /** * Ctr @@ -199,30 +244,94 @@ private CollapsingTopDocsCollectorContext( super(REASON_SEARCH_TOP_HITS, numHits); assert numHits > 0; assert collapseContext != null; - Sort sort = sortAndFormats == null ? 
Sort.RELEVANCE : sortAndFormats.sort; + this.sort = sortAndFormats == null ? Sort.RELEVANCE : sortAndFormats.sort; this.sortFmt = sortAndFormats == null ? new DocValueFormat[] { DocValueFormat.RAW } : sortAndFormats.formats; + this.collapseContext = collapseContext; this.topDocsCollector = collapseContext.createTopDocs(sort, numHits); + this.trackMaxScore = trackMaxScore; - MaxScoreCollector maxScoreCollector; + MaxScoreCollector maxScoreCollector = null; if (trackMaxScore) { maxScoreCollector = new MaxScoreCollector(); maxScoreSupplier = maxScoreCollector::getMaxScore; } else { + maxScoreCollector = null; maxScoreSupplier = () -> Float.NaN; } + + this.collector = MultiCollector.wrap(topDocsCollector, maxScoreCollector); } @Override Collector create(Collector in) throws IOException { assert in == null; - return topDocsCollector; + return collector; } @Override void postProcess(QuerySearchResult result) throws IOException { - CollapseTopFieldDocs topDocs = topDocsCollector.getTopDocs(); + final CollapseTopFieldDocs topDocs = topDocsCollector.getTopDocs(); result.topDocs(new TopDocsAndMaxScore(topDocs, maxScoreSupplier.get()), sortFmt); } + + @Override + CollectorManager createManager(CollectorManager in) throws IOException { + return new CollectorManager() { + @Override + public Collector newCollector() throws IOException { + MaxScoreCollector maxScoreCollector = null; + + if (trackMaxScore) { + maxScoreCollector = new MaxScoreCollector(); + } + + return MultiCollectorWrapper.wrap(collapseContext.createTopDocs(sort, numHits), maxScoreCollector); + } + + @Override + public ReduceableSearchResult reduce(Collection collectors) throws IOException { + final Collection subs = new ArrayList<>(); + for (final Collector collector : collectors) { + if (collector instanceof MultiCollectorWrapper) { + subs.addAll(((MultiCollectorWrapper) collector).getCollectors()); + } else { + subs.add(collector); + } + } + + final Collection topFieldDocs = new ArrayList(); + float maxScore = 
Float.NaN; + + for (final Collector collector : subs) { + if (collector instanceof CollapsingTopDocsCollector) { + topFieldDocs.add(((CollapsingTopDocsCollector) collector).getTopDocs()); + } else if (collector instanceof MaxScoreCollector) { + float score = ((MaxScoreCollector) collector).getMaxScore(); + if (Float.isNaN(maxScore)) { + maxScore = score; + } else { + maxScore = Math.max(maxScore, score); + } + } + } + + return reduceWith(topFieldDocs, maxScore); + } + }; + } + + protected ReduceableSearchResult reduceWith(final Collection topFieldDocs, float maxScore) { + return (QuerySearchResult result) -> { + final CollapseTopFieldDocs topDocs = CollapseTopFieldDocs.merge( + sort, + 0, + numHits, + topFieldDocs.toArray(new CollapseTopFieldDocs[0]), + true + ); + result.topDocs(new TopDocsAndMaxScore(topDocs, maxScore), sortFmt); + }; + } } abstract static class SimpleTopDocsCollectorContext extends TopDocsCollectorContext { @@ -240,11 +349,38 @@ private static TopDocsCollector createCollector( } } + private static CollectorManager, ? 
extends TopDocs> createCollectorManager( + @Nullable SortAndFormats sortAndFormats, + int numHits, + @Nullable ScoreDoc searchAfter, + int hitCountThreshold + ) { + if (sortAndFormats == null) { + // See please https://github.com/apache/lucene/pull/450, should be fixed in 9.x + if (searchAfter != null) { + return TopScoreDocCollector.createSharedManager( + numHits, + new FieldDoc(searchAfter.doc, searchAfter.score), + hitCountThreshold + ); + } else { + return TopScoreDocCollector.createSharedManager(numHits, null, hitCountThreshold); + } + } else { + return TopFieldCollector.createSharedManager(sortAndFormats.sort, numHits, (FieldDoc) searchAfter, hitCountThreshold); + } + } + protected final @Nullable SortAndFormats sortAndFormats; private final Collector collector; private final Supplier totalHitsSupplier; private final Supplier topDocsSupplier; private final Supplier maxScoreSupplier; + private final ScoreDoc searchAfter; + private final int trackTotalHitsUpTo; + private final boolean trackMaxScore; + private final boolean hasInfMaxScore; + private final int hitCount; /** * Ctr @@ -269,24 +405,30 @@ private SimpleTopDocsCollectorContext( ) throws IOException { super(REASON_SEARCH_TOP_HITS, numHits); this.sortAndFormats = sortAndFormats; + this.searchAfter = searchAfter; + this.trackTotalHitsUpTo = trackTotalHitsUpTo; + this.trackMaxScore = trackMaxScore; + this.hasInfMaxScore = hasInfMaxScore(query); final TopDocsCollector topDocsCollector; - if ((sortAndFormats == null || SortField.FIELD_SCORE.equals(sortAndFormats.sort.getSort()[0])) && hasInfMaxScore(query)) { + if ((sortAndFormats == null || SortField.FIELD_SCORE.equals(sortAndFormats.sort.getSort()[0])) && hasInfMaxScore) { // disable max score optimization since we have a mandatory clause // that doesn't track the maximum score topDocsCollector = createCollector(sortAndFormats, numHits, searchAfter, Integer.MAX_VALUE); topDocsSupplier = new CachedSupplier<>(topDocsCollector::topDocs); totalHitsSupplier = 
() -> topDocsSupplier.get().totalHits; + hitCount = Integer.MIN_VALUE; } else if (trackTotalHitsUpTo == SearchContext.TRACK_TOTAL_HITS_DISABLED) { // don't compute hit counts via the collector topDocsCollector = createCollector(sortAndFormats, numHits, searchAfter, 1); topDocsSupplier = new CachedSupplier<>(topDocsCollector::topDocs); totalHitsSupplier = () -> new TotalHits(0, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO); + hitCount = -1; } else { // implicit total hit counts are valid only when there is no filter collector in the chain - final int hitCount = hasFilterCollector ? -1 : shortcutTotalHitCount(reader, query); - if (hitCount == -1) { + this.hitCount = hasFilterCollector ? -1 : shortcutTotalHitCount(reader, query); + if (this.hitCount == -1) { topDocsCollector = createCollector(sortAndFormats, numHits, searchAfter, trackTotalHitsUpTo); topDocsSupplier = new CachedSupplier<>(topDocsCollector::topDocs); totalHitsSupplier = () -> topDocsSupplier.get().totalHits; @@ -294,7 +436,7 @@ private SimpleTopDocsCollectorContext( // don't compute hit counts via the collector topDocsCollector = createCollector(sortAndFormats, numHits, searchAfter, 1); topDocsSupplier = new CachedSupplier<>(topDocsCollector::topDocs); - totalHitsSupplier = () -> new TotalHits(hitCount, TotalHits.Relation.EQUAL_TO); + totalHitsSupplier = () -> new TotalHits(this.hitCount, TotalHits.Relation.EQUAL_TO); } } MaxScoreCollector maxScoreCollector = null; @@ -315,7 +457,98 @@ private SimpleTopDocsCollectorContext( } this.collector = MultiCollector.wrap(topDocsCollector, maxScoreCollector); + } + + private class SimpleTopDocsCollectorManager + implements + CollectorManager, + EarlyTerminatingListener { + private Integer terminatedAfter; + private final CollectorManager, ? 
extends TopDocs> manager; + + private SimpleTopDocsCollectorManager() { + if ((sortAndFormats == null || SortField.FIELD_SCORE.equals(sortAndFormats.sort.getSort()[0])) && hasInfMaxScore) { + // disable max score optimization since we have a mandatory clause + // that doesn't track the maximum score + manager = createCollectorManager(sortAndFormats, numHits, searchAfter, Integer.MAX_VALUE); + } else if (trackTotalHitsUpTo == SearchContext.TRACK_TOTAL_HITS_DISABLED) { + // don't compute hit counts via the collector + manager = createCollectorManager(sortAndFormats, numHits, searchAfter, 1); + } else { + // implicit total hit counts are valid only when there is no filter collector in the chain + if (hitCount == -1) { + manager = createCollectorManager(sortAndFormats, numHits, searchAfter, trackTotalHitsUpTo); + } else { + // don't compute hit counts via the collector + manager = createCollectorManager(sortAndFormats, numHits, searchAfter, 1); + } + } + } + + @Override + public void onEarlyTermination(int maxCountHits, boolean forcedTermination) { + terminatedAfter = maxCountHits; + } + + @Override + public Collector newCollector() throws IOException { + MaxScoreCollector maxScoreCollector = null; + + if (sortAndFormats != null && trackMaxScore) { + maxScoreCollector = new MaxScoreCollector(); + } + + return MultiCollectorWrapper.wrap(manager.newCollector(), maxScoreCollector); + } + + @SuppressWarnings("unchecked") + @Override + public ReduceableSearchResult reduce(Collection collectors) throws IOException { + final Collection> topDocsCollectors = new ArrayList<>(); + final Collection maxScoreCollectors = new ArrayList<>(); + + for (final Collector collector : collectors) { + if (collector instanceof MultiCollectorWrapper) { + for (final Collector sub : (((MultiCollectorWrapper) collector).getCollectors())) { + if (sub instanceof TopDocsCollector) { + topDocsCollectors.add((TopDocsCollector) sub); + } else if (sub instanceof MaxScoreCollector) { + 
maxScoreCollectors.add((MaxScoreCollector) sub); + } + } + } else if (collector instanceof TopDocsCollector) { + topDocsCollectors.add((TopDocsCollector) collector); + } else if (collector instanceof MaxScoreCollector) { + maxScoreCollectors.add((MaxScoreCollector) collector); + } + } + + float maxScore = Float.NaN; + for (final MaxScoreCollector collector : maxScoreCollectors) { + float score = collector.getMaxScore(); + if (Float.isNaN(maxScore)) { + maxScore = score; + } else { + maxScore = Math.max(maxScore, score); + } + } + final TopDocs topDocs = ((CollectorManager, ? extends TopDocs>) manager).reduce(topDocsCollectors); + return reduceWith(topDocs, maxScore, terminatedAfter); + } + } + + @Override + CollectorManager createManager(CollectorManager in) throws IOException { + assert in == null; + return new SimpleTopDocsCollectorManager(); + } + + protected ReduceableSearchResult reduceWith(final TopDocs topDocs, final float maxScore, final Integer terminatedAfter) { + return (QuerySearchResult result) -> { + final TopDocsAndMaxScore topDocsAndMaxScore = newTopDocs(topDocs, maxScore, terminatedAfter); + result.topDocs(topDocsAndMaxScore, sortAndFormats == null ? 
null : sortAndFormats.formats); + }; } @Override @@ -324,6 +557,50 @@ Collector create(Collector in) { return collector; } + TopDocsAndMaxScore newTopDocs(final TopDocs topDocs, final float maxScore, final Integer terminatedAfter) { + TotalHits totalHits = null; + + if ((sortAndFormats == null || SortField.FIELD_SCORE.equals(sortAndFormats.sort.getSort()[0])) && hasInfMaxScore) { + totalHits = topDocs.totalHits; + } else if (trackTotalHitsUpTo == SearchContext.TRACK_TOTAL_HITS_DISABLED) { + // don't compute hit counts via the collector + totalHits = new TotalHits(0, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO); + } else { + if (hitCount == -1) { + totalHits = topDocs.totalHits; + } else { + totalHits = new TotalHits(hitCount, TotalHits.Relation.EQUAL_TO); + } + } + + // Since we cannot support early forced termination, we have to simulate it by + // artificially reducing the number of total hits and doc scores. + ScoreDoc[] scoreDocs = topDocs.scoreDocs; + if (terminatedAfter != null) { + if (totalHits.value > terminatedAfter) { + totalHits = new TotalHits(terminatedAfter, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO); + } + + if (scoreDocs != null && scoreDocs.length > terminatedAfter) { + scoreDocs = Arrays.copyOf(scoreDocs, terminatedAfter); + } + } + + final TopDocs newTopDocs; + if (topDocs instanceof TopFieldDocs) { + TopFieldDocs fieldDocs = (TopFieldDocs) topDocs; + newTopDocs = new TopFieldDocs(totalHits, scoreDocs, fieldDocs.fields); + } else { + newTopDocs = new TopDocs(totalHits, scoreDocs); + } + + if (Float.isNaN(maxScore) && newTopDocs.scoreDocs.length > 0 && sortAndFormats == null) { + return new TopDocsAndMaxScore(newTopDocs, newTopDocs.scoreDocs[0].score); + } else { + return new TopDocsAndMaxScore(newTopDocs, maxScore); + } + } + TopDocsAndMaxScore newTopDocs() { TopDocs in = topDocsSupplier.get(); float maxScore = maxScoreSupplier.get(); @@ -373,6 +650,35 @@ private ScrollingTopDocsCollectorContext( this.numberOfShards = numberOfShards; } + 
@Override + protected ReduceableSearchResult reduceWith(final TopDocs topDocs, final float maxScore, final Integer terminatedAfter) { + return (QuerySearchResult result) -> { + final TopDocsAndMaxScore topDocsAndMaxScore = newTopDocs(topDocs, maxScore, terminatedAfter); + + if (scrollContext.totalHits == null) { + // first round + scrollContext.totalHits = topDocsAndMaxScore.topDocs.totalHits; + scrollContext.maxScore = topDocsAndMaxScore.maxScore; + } else { + // subsequent round: the total number of hits and + // the maximum score were computed on the first round + topDocsAndMaxScore.topDocs.totalHits = scrollContext.totalHits; + topDocsAndMaxScore.maxScore = scrollContext.maxScore; + } + + if (numberOfShards == 1) { + // if we fetch the document in the same roundtrip, we already know the last emitted doc + if (topDocsAndMaxScore.topDocs.scoreDocs.length > 0) { + // set the last emitted doc + scrollContext.lastEmittedDoc = topDocsAndMaxScore.topDocs.scoreDocs[topDocsAndMaxScore.topDocs.scoreDocs.length + - 1]; + } + } + + result.topDocs(topDocsAndMaxScore, sortAndFormats == null ? null : sortAndFormats.formats); + }; + } + @Override void postProcess(QuerySearchResult result) throws IOException { final TopDocsAndMaxScore topDocs = newTopDocs(); @@ -457,7 +763,7 @@ static int shortcutTotalHitCount(IndexReader reader, Query query) throws IOExcep * Creates a {@link TopDocsCollectorContext} from the provided searchContext. * @param hasFilterCollector True if the collector chain contains at least one collector that can filters document. 
*/ - static TopDocsCollectorContext createTopDocsCollectorContext(SearchContext searchContext, boolean hasFilterCollector) + public static TopDocsCollectorContext createTopDocsCollectorContext(SearchContext searchContext, boolean hasFilterCollector) throws IOException { final IndexReader reader = searchContext.searcher().getIndexReader(); final Query query = searchContext.query(); @@ -515,7 +821,7 @@ static TopDocsCollectorContext createTopDocsCollectorContext(SearchContext searc hasFilterCollector ) { @Override - boolean shouldRescore() { + public boolean shouldRescore() { return rescore; } }; diff --git a/server/src/main/java/org/opensearch/search/query/TotalHitCountCollectorManager.java b/server/src/main/java/org/opensearch/search/query/TotalHitCountCollectorManager.java new file mode 100644 index 0000000000000..6d4159c977743 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/query/TotalHitCountCollectorManager.java @@ -0,0 +1,106 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.query; + +import org.apache.lucene.search.CollectorManager; +import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.Sort; +import org.apache.lucene.search.TopDocs; +import org.apache.lucene.search.TopFieldDocs; +import org.apache.lucene.search.TotalHitCountCollector; +import org.apache.lucene.search.TotalHits; +import org.opensearch.common.lucene.Lucene; +import org.opensearch.common.lucene.search.TopDocsAndMaxScore; + +import java.io.IOException; +import java.util.Collection; + +public class TotalHitCountCollectorManager + implements + CollectorManager, + EarlyTerminatingListener { + + private static final TotalHitCountCollector EMPTY_COLLECTOR = new TotalHitCountCollector() { + @Override + public void collect(int doc) {} + + @Override + public ScoreMode scoreMode() { + return ScoreMode.COMPLETE_NO_SCORES; + } + }; + + private final Sort sort; + private Integer terminatedAfter; + + public TotalHitCountCollectorManager(final Sort sort) { + this.sort = sort; + } + + @Override + public void onEarlyTermination(int maxCountHits, boolean forcedTermination) { + terminatedAfter = maxCountHits; + } + + @Override + public TotalHitCountCollector newCollector() throws IOException { + return new TotalHitCountCollector(); + } + + @Override + public ReduceableSearchResult reduce(Collection collectors) throws IOException { + return (QuerySearchResult result) -> { + final TotalHits.Relation relation = (terminatedAfter != null) + ? TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO + : TotalHits.Relation.EQUAL_TO; + + int totalHits = collectors.stream().mapToInt(TotalHitCountCollector::getTotalHits).sum(); + if (terminatedAfter != null && totalHits > terminatedAfter) { + totalHits = terminatedAfter; + } + + final TotalHits totalHitCount = new TotalHits(totalHits, relation); + final TopDocs topDocs = (sort != null) + ? 
new TopFieldDocs(totalHitCount, Lucene.EMPTY_SCORE_DOCS, sort.getSort()) + : new TopDocs(totalHitCount, Lucene.EMPTY_SCORE_DOCS); + + result.topDocs(new TopDocsAndMaxScore(topDocs, Float.NaN), null); + }; + } + + static class Empty implements CollectorManager { + private final TotalHits totalHits; + private final Sort sort; + + Empty(final TotalHits totalHits, final Sort sort) { + this.totalHits = totalHits; + this.sort = sort; + } + + @Override + public TotalHitCountCollector newCollector() throws IOException { + return EMPTY_COLLECTOR; + } + + @Override + public ReduceableSearchResult reduce(Collection collectors) throws IOException { + return (QuerySearchResult result) -> { + final TopDocs topDocs; + + if (sort != null) { + topDocs = new TopFieldDocs(totalHits, Lucene.EMPTY_SCORE_DOCS, sort.getSort()); + } else { + topDocs = new TopDocs(totalHits, Lucene.EMPTY_SCORE_DOCS); + } + + result.topDocs(new TopDocsAndMaxScore(topDocs, Float.NaN), null); + }; + } + } +} diff --git a/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java b/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java index e1cf74bdd6aeb..f6ca12f1c514c 100644 --- a/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java +++ b/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java @@ -32,6 +32,8 @@ package org.opensearch.search; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.index.IndexReader; import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.IndexSearcher; @@ -76,7 +78,12 @@ import org.opensearch.threadpool.ThreadPool; import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; import java.util.UUID; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; import java.util.function.Function; import java.util.function.Supplier; @@ -91,6 +98,25 
@@ import static org.mockito.Mockito.when; public class DefaultSearchContextTests extends OpenSearchTestCase { + private final ExecutorService executor; + + @ParametersFactory + public static Collection concurrency() { + return Arrays.asList(new Integer[] { 0 }, new Integer[] { 5 }); + } + + public DefaultSearchContextTests(int concurrency) { + this.executor = (concurrency > 0) ? Executors.newFixedThreadPool(concurrency) : null; + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + + if (executor != null) { + ThreadPool.terminate(executor, 10, TimeUnit.SECONDS); + } + } public void testPreProcess() throws Exception { TimeValue timeout = new TimeValue(randomIntBetween(1, 100)); @@ -183,7 +209,7 @@ protected Engine.Searcher acquireSearcherInternal(String source) { false, Version.CURRENT, false, - null + executor ); contextWithoutScroll.from(300); contextWithoutScroll.close(); @@ -225,7 +251,7 @@ protected Engine.Searcher acquireSearcherInternal(String source) { false, Version.CURRENT, false, - null + executor ); context1.from(300); exception = expectThrows(IllegalArgumentException.class, () -> context1.preProcess(false)); @@ -295,7 +321,7 @@ protected Engine.Searcher acquireSearcherInternal(String source) { false, Version.CURRENT, false, - null + executor ); SliceBuilder sliceBuilder = mock(SliceBuilder.class); @@ -334,7 +360,7 @@ protected Engine.Searcher acquireSearcherInternal(String source) { false, Version.CURRENT, false, - null + executor ); ParsedQuery parsedQuery = ParsedQuery.parsedMatchAllQuery(); context3.sliceBuilder(null).parsedQuery(parsedQuery).preProcess(false); @@ -365,7 +391,7 @@ protected Engine.Searcher acquireSearcherInternal(String source) { false, Version.CURRENT, false, - null + executor ); context4.sliceBuilder(new SliceBuilder(1, 2)).parsedQuery(parsedQuery).preProcess(false); Query query1 = context4.query(); @@ -446,7 +472,7 @@ protected Engine.Searcher acquireSearcherInternal(String source) { false, 
Version.CURRENT, false, - null + executor ); assertThat(context.searcher().hasCancellations(), is(false)); context.searcher().addQueryCancellation(() -> {}); diff --git a/server/src/test/java/org/opensearch/search/SearchCancellationTests.java b/server/src/test/java/org/opensearch/search/SearchCancellationTests.java index 1927558f94094..f479f3a1b99f1 100644 --- a/server/src/test/java/org/opensearch/search/SearchCancellationTests.java +++ b/server/src/test/java/org/opensearch/search/SearchCancellationTests.java @@ -108,7 +108,8 @@ public void testAddingCancellationActions() throws IOException { IndexSearcher.getDefaultSimilarity(), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), - true + true, + null ); NullPointerException npe = expectThrows(NullPointerException.class, () -> searcher.addQueryCancellation(null)); assertEquals("cancellation runnable should not be null", npe.getMessage()); @@ -127,7 +128,8 @@ public void testCancellableCollector() throws IOException { IndexSearcher.getDefaultSimilarity(), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), - true + true, + null ); searcher.search(new MatchAllDocsQuery(), collector1); @@ -154,7 +156,8 @@ public void testExitableDirectoryReader() throws IOException { IndexSearcher.getDefaultSimilarity(), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), - true + true, + null ); searcher.addQueryCancellation(cancellation); CompiledAutomaton automaton = new CompiledAutomaton(new RegExp("a.*").toAutomaton()); diff --git a/server/src/test/java/org/opensearch/search/internal/ContextIndexSearcherTests.java b/server/src/test/java/org/opensearch/search/internal/ContextIndexSearcherTests.java index de0a31b9dc04b..eb7dde4b0b2ce 100644 --- a/server/src/test/java/org/opensearch/search/internal/ContextIndexSearcherTests.java +++ b/server/src/test/java/org/opensearch/search/internal/ContextIndexSearcherTests.java @@ -258,7 +258,8 @@ 
public void onRemoval(ShardId shardId, Accountable accountable) { IndexSearcher.getDefaultSimilarity(), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), - true + true, + null ); for (LeafReaderContext context : searcher.getIndexReader().leaves()) { diff --git a/server/src/test/java/org/opensearch/search/profile/query/QueryProfilerTests.java b/server/src/test/java/org/opensearch/search/profile/query/QueryProfilerTests.java index afaab15e1431e..7f4dcdaed2aa1 100644 --- a/server/src/test/java/org/opensearch/search/profile/query/QueryProfilerTests.java +++ b/server/src/test/java/org/opensearch/search/profile/query/QueryProfilerTests.java @@ -32,8 +32,6 @@ package org.opensearch.search.profile.query; -import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; - import org.apache.lucene.document.Document; import org.apache.lucene.document.Field.Store; import org.apache.lucene.document.StringField; @@ -64,18 +62,12 @@ import org.opensearch.search.internal.ContextIndexSearcher; import org.opensearch.search.profile.ProfileResult; import org.opensearch.test.OpenSearchTestCase; -import org.opensearch.threadpool.ThreadPool; import org.junit.After; import org.junit.Before; import java.io.IOException; -import java.util.Arrays; -import java.util.Collection; import java.util.List; import java.util.Map; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -85,16 +77,6 @@ public class QueryProfilerTests extends OpenSearchTestCase { private Directory dir; private IndexReader reader; private ContextIndexSearcher searcher; - private ExecutorService executor; - - @ParametersFactory - public static Collection concurrency() { - return Arrays.asList(new Integer[] { 0 }, new Integer[] { 5 }); - } - - public QueryProfilerTests(int concurrency) { - this.executor = (concurrency > 
0) ? Executors.newFixedThreadPool(concurrency) : null; - } @Before public void setUp() throws Exception { @@ -120,7 +102,7 @@ public void setUp() throws Exception { IndexSearcher.getDefaultQueryCache(), ALWAYS_CACHE_POLICY, true, - executor + null ); } @@ -134,10 +116,6 @@ public void tearDown() throws Exception { assertThat(cache.getTotalCount(), equalTo(cache.getMissCount())); assertThat(cache.getCacheSize(), equalTo(0L)); - if (executor != null) { - ThreadPool.terminate(executor, 10, TimeUnit.SECONDS); - } - IOUtils.close(reader, dir); dir = null; reader = null; @@ -145,7 +123,7 @@ public void tearDown() throws Exception { } public void testBasic() throws IOException { - QueryProfiler profiler = new QueryProfiler(searcher.allowConcurrentSegmentSearch()); + QueryProfiler profiler = new QueryProfiler(false); searcher.setProfiler(profiler); Query query = new TermQuery(new Term("foo", "bar")); searcher.search(query, 1); @@ -171,7 +149,7 @@ public void testBasic() throws IOException { } public void testNoScoring() throws IOException { - QueryProfiler profiler = new QueryProfiler(searcher.allowConcurrentSegmentSearch()); + QueryProfiler profiler = new QueryProfiler(false); searcher.setProfiler(profiler); Query query = new TermQuery(new Term("foo", "bar")); searcher.search(query, 1, Sort.INDEXORDER); // scores are not needed @@ -197,7 +175,7 @@ public void testNoScoring() throws IOException { } public void testUseIndexStats() throws IOException { - QueryProfiler profiler = new QueryProfiler(searcher.allowConcurrentSegmentSearch()); + QueryProfiler profiler = new QueryProfiler(false); searcher.setProfiler(profiler); Query query = new TermQuery(new Term("foo", "bar")); searcher.count(query); // will use index stats @@ -211,7 +189,7 @@ public void testUseIndexStats() throws IOException { } public void testApproximations() throws IOException { - QueryProfiler profiler = new QueryProfiler(searcher.allowConcurrentSegmentSearch()); + QueryProfiler profiler = new 
QueryProfiler(false); searcher.setProfiler(profiler); Query query = new RandomApproximationQuery(new TermQuery(new Term("foo", "bar")), random()); searcher.count(query); diff --git a/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java b/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java index b87c11dce5be2..1232347edea64 100644 --- a/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java +++ b/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java @@ -39,6 +39,7 @@ import org.apache.lucene.document.LatLonPoint; import org.apache.lucene.document.LongPoint; import org.apache.lucene.document.NumericDocValuesField; +import org.apache.lucene.document.SortedDocValuesField; import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.document.StringField; import org.apache.lucene.document.TextField; @@ -77,6 +78,7 @@ import org.apache.lucene.search.TotalHitCountCollector; import org.apache.lucene.search.TotalHits; import org.apache.lucene.search.Weight; +import org.apache.lucene.search.grouping.CollapseTopFieldDocs; import org.apache.lucene.search.join.BitSetProducer; import org.apache.lucene.search.join.ScoreMode; import org.apache.lucene.store.Directory; @@ -88,12 +90,15 @@ import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.NumberFieldMapper; +import org.opensearch.index.mapper.NumberFieldMapper.NumberFieldType; +import org.opensearch.index.mapper.NumberFieldMapper.NumberType; import org.opensearch.index.query.ParsedQuery; import org.opensearch.index.query.QueryShardContext; import org.opensearch.index.search.OpenSearchToParentBlockJoinQuery; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; import org.opensearch.search.DocValueFormat; +import org.opensearch.search.collapse.CollapseBuilder; import 
org.opensearch.search.internal.ContextIndexSearcher; import org.opensearch.search.internal.ScrollContext; import org.opensearch.search.internal.SearchContext; @@ -144,7 +149,7 @@ private void countTestCase(Query query, IndexReader reader, boolean shouldCollec context.parsedQuery(new ParsedQuery(query)); context.setSize(0); context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); - final boolean rescore = QueryPhase.executeInternal(context); + final boolean rescore = QueryPhase.executeInternal(context.withCleanQueryResult()); assertFalse(rescore); ContextIndexSearcher countSearcher = shouldCollectCount @@ -157,7 +162,7 @@ private void countTestCase(boolean withDeletions) throws Exception { Directory dir = newDirectory(); IndexWriterConfig iwc = newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE); RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); - final int numDocs = scaledRandomIntBetween(100, 200); + final int numDocs = scaledRandomIntBetween(600, 900); for (int i = 0; i < numDocs; ++i) { Document doc = new Document(); if (randomBoolean()) { @@ -228,12 +233,12 @@ public void testPostFilterDisablesCountOptimization() throws Exception { context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); - QueryPhase.executeInternal(context); + QueryPhase.executeInternal(context.withCleanQueryResult()); assertEquals(1, context.queryResult().topDocs().topDocs.totalHits.value); context.setSearcher(newContextSearcher(reader)); context.parsedPostFilter(new ParsedQuery(new MatchNoDocsQuery())); - QueryPhase.executeInternal(context); + QueryPhase.executeInternal(context.withCleanQueryResult()); assertEquals(0, context.queryResult().topDocs().topDocs.totalHits.value); reader.close(); dir.close(); @@ -261,7 +266,7 @@ public void testTerminateAfterWithFilter() throws Exception { context.setSize(10); for (int i = 0; i < 10; i++) { 
context.parsedPostFilter(new ParsedQuery(new TermQuery(new Term("foo", Integer.toString(i))))); - QueryPhase.executeInternal(context); + QueryPhase.executeInternal(context.withCleanQueryResult()); assertEquals(1, context.queryResult().topDocs().topDocs.totalHits.value); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); } @@ -283,12 +288,13 @@ public void testMinScoreDisablesCountOptimization() throws Exception { context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); context.setSize(0); context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); - QueryPhase.executeInternal(context); + QueryPhase.executeInternal(context.withCleanQueryResult()); assertEquals(1, context.queryResult().topDocs().topDocs.totalHits.value); context.minimumScore(100); - QueryPhase.executeInternal(context); + QueryPhase.executeInternal(context.withCleanQueryResult()); assertEquals(0, context.queryResult().topDocs().topDocs.totalHits.value); + assertEquals(TotalHits.Relation.EQUAL_TO, context.queryResult().topDocs().topDocs.totalHits.relation); reader.close(); dir.close(); } @@ -297,7 +303,7 @@ public void testQueryCapturesThreadPoolStats() throws Exception { Directory dir = newDirectory(); IndexWriterConfig iwc = newIndexWriterConfig(); RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); - final int numDocs = scaledRandomIntBetween(100, 200); + final int numDocs = scaledRandomIntBetween(600, 900); for (int i = 0; i < numDocs; ++i) { w.addDocument(new Document()); } @@ -307,7 +313,7 @@ public void testQueryCapturesThreadPoolStats() throws Exception { context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); - QueryPhase.executeInternal(context); + QueryPhase.executeInternal(context.withCleanQueryResult()); QuerySearchResult results = context.queryResult(); assertThat(results.serviceTimeEWMA(), greaterThanOrEqualTo(0L)); 
assertThat(results.nodeQueueSize(), greaterThanOrEqualTo(0)); @@ -320,7 +326,7 @@ public void testInOrderScrollOptimization() throws Exception { final Sort sort = new Sort(new SortField("rank", SortField.Type.INT)); IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort); RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); - final int numDocs = scaledRandomIntBetween(100, 200); + final int numDocs = scaledRandomIntBetween(600, 900); for (int i = 0; i < numDocs; ++i) { w.addDocument(new Document()); } @@ -336,14 +342,14 @@ public void testInOrderScrollOptimization() throws Exception { int size = randomIntBetween(2, 5); context.setSize(size); - QueryPhase.executeInternal(context); + QueryPhase.executeInternal(context.withCleanQueryResult()); assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); assertNull(context.queryResult().terminatedEarly()); assertThat(context.terminateAfter(), equalTo(0)); assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs)); context.setSearcher(newEarlyTerminationContextSearcher(reader, size)); - QueryPhase.executeInternal(context); + QueryPhase.executeInternal(context.withCleanQueryResult()); assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); assertThat(context.terminateAfter(), equalTo(size)); assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs)); @@ -356,7 +362,7 @@ public void testTerminateAfterEarlyTermination() throws Exception { Directory dir = newDirectory(); IndexWriterConfig iwc = newIndexWriterConfig(); RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); - final int numDocs = scaledRandomIntBetween(100, 200); + final int numDocs = scaledRandomIntBetween(600, 900); for (int i = 0; i < numDocs; ++i) { Document doc = new Document(); if (randomBoolean()) { @@ -377,25 +383,25 @@ public void testTerminateAfterEarlyTermination() throws Exception { 
context.terminateAfter(numDocs); { context.setSize(10); - TotalHitCountCollector collector = new TotalHitCountCollector(); - context.queryCollectors().put(TotalHitCountCollector.class, collector); - QueryPhase.executeInternal(context); + final TestTotalHitCountCollectorManager manager = TestTotalHitCountCollectorManager.create(); + context.queryCollectorManagers().put(TotalHitCountCollector.class, manager); + QueryPhase.executeInternal(context.withCleanQueryResult()); assertFalse(context.queryResult().terminatedEarly()); assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(10)); - assertThat(collector.getTotalHits(), equalTo(numDocs)); + assertThat(manager.getTotalHits(), equalTo(numDocs)); } context.terminateAfter(1); { context.setSize(1); - QueryPhase.executeInternal(context); + QueryPhase.executeInternal(context.withCleanQueryResult()); assertTrue(context.queryResult().terminatedEarly()); assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); context.setSize(0); - QueryPhase.executeInternal(context); + QueryPhase.executeInternal(context.withCleanQueryResult()); assertTrue(context.queryResult().terminatedEarly()); assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); @@ -403,7 +409,7 @@ public void testTerminateAfterEarlyTermination() throws Exception { { context.setSize(1); - QueryPhase.executeInternal(context); + QueryPhase.executeInternal(context.withCleanQueryResult()); assertTrue(context.queryResult().terminatedEarly()); assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); @@ -414,38 +420,38 @@ public void 
testTerminateAfterEarlyTermination() throws Exception { .add(new TermQuery(new Term("foo", "baz")), Occur.SHOULD) .build(); context.parsedQuery(new ParsedQuery(bq)); - QueryPhase.executeInternal(context); + QueryPhase.executeInternal(context.withCleanQueryResult()); assertTrue(context.queryResult().terminatedEarly()); assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); context.setSize(0); context.parsedQuery(new ParsedQuery(bq)); - QueryPhase.executeInternal(context); + QueryPhase.executeInternal(context.withCleanQueryResult()); assertTrue(context.queryResult().terminatedEarly()); assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); } { context.setSize(1); - TotalHitCountCollector collector = new TotalHitCountCollector(); - context.queryCollectors().put(TotalHitCountCollector.class, collector); - QueryPhase.executeInternal(context); + final TestTotalHitCountCollectorManager manager = TestTotalHitCountCollectorManager.create(); + context.queryCollectorManagers().put(TotalHitCountCollector.class, manager); + QueryPhase.executeInternal(context.withCleanQueryResult()); assertTrue(context.queryResult().terminatedEarly()); assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); - assertThat(collector.getTotalHits(), equalTo(1)); - context.queryCollectors().clear(); + assertThat(manager.getTotalHits(), equalTo(1)); + context.queryCollectorManagers().clear(); } { context.setSize(0); - TotalHitCountCollector collector = new TotalHitCountCollector(); - context.queryCollectors().put(TotalHitCountCollector.class, collector); - QueryPhase.executeInternal(context); + final TestTotalHitCountCollectorManager manager = 
TestTotalHitCountCollectorManager.create(); + context.queryCollectorManagers().put(TotalHitCountCollector.class, manager); + QueryPhase.executeInternal(context.withCleanQueryResult()); assertTrue(context.queryResult().terminatedEarly()); assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); - assertThat(collector.getTotalHits(), equalTo(1)); + assertThat(manager.getTotalHits(), equalTo(1)); } // tests with trackTotalHits and terminateAfter @@ -453,9 +459,9 @@ public void testTerminateAfterEarlyTermination() throws Exception { context.setSize(0); for (int trackTotalHits : new int[] { -1, 3, 76, 100 }) { context.trackTotalHitsUpTo(trackTotalHits); - TotalHitCountCollector collector = new TotalHitCountCollector(); - context.queryCollectors().put(TotalHitCountCollector.class, collector); - QueryPhase.executeInternal(context); + final TestTotalHitCountCollectorManager manager = TestTotalHitCountCollectorManager.create(); + context.queryCollectorManagers().put(TotalHitCountCollector.class, manager); + QueryPhase.executeInternal(context.withCleanQueryResult()); assertTrue(context.queryResult().terminatedEarly()); if (trackTotalHits == -1) { assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(0L)); @@ -463,16 +469,14 @@ public void testTerminateAfterEarlyTermination() throws Exception { assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) Math.min(trackTotalHits, 10))); } assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); - assertThat(collector.getTotalHits(), equalTo(10)); + assertThat(manager.getTotalHits(), equalTo(10)); } context.terminateAfter(7); context.setSize(10); for (int trackTotalHits : new int[] { -1, 3, 75, 100 }) { context.trackTotalHitsUpTo(trackTotalHits); - EarlyTerminatingCollector collector = new EarlyTerminatingCollector(new TotalHitCountCollector(), 1, false); 
- context.queryCollectors().put(EarlyTerminatingCollector.class, collector); - QueryPhase.executeInternal(context); + QueryPhase.executeInternal(context.withCleanQueryResult()); assertTrue(context.queryResult().terminatedEarly()); if (trackTotalHits == -1) { assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(0L)); @@ -490,7 +494,7 @@ public void testIndexSortingEarlyTermination() throws Exception { final Sort sort = new Sort(new SortField("rank", SortField.Type.INT)); IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort); RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); - final int numDocs = scaledRandomIntBetween(100, 200); + final int numDocs = scaledRandomIntBetween(600, 900); for (int i = 0; i < numDocs; ++i) { Document doc = new Document(); if (randomBoolean()) { @@ -511,7 +515,7 @@ public void testIndexSortingEarlyTermination() throws Exception { context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); context.sort(new SortAndFormats(sort, new DocValueFormat[] { DocValueFormat.RAW })); - QueryPhase.executeInternal(context); + QueryPhase.executeInternal(context.withCleanQueryResult()); assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class)); @@ -520,7 +524,7 @@ public void testIndexSortingEarlyTermination() throws Exception { { context.parsedPostFilter(new ParsedQuery(new MinDocQuery(1))); - QueryPhase.executeInternal(context); + QueryPhase.executeInternal(context.withCleanQueryResult()); assertNull(context.queryResult().terminatedEarly()); assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(numDocs - 1L)); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); @@ -528,28 +532,28 @@ public void 
testIndexSortingEarlyTermination() throws Exception { assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2))); context.parsedPostFilter(null); - final TotalHitCountCollector totalHitCountCollector = new TotalHitCountCollector(); - context.queryCollectors().put(TotalHitCountCollector.class, totalHitCountCollector); - QueryPhase.executeInternal(context); + final TestTotalHitCountCollectorManager manager = TestTotalHitCountCollectorManager.create(sort); + context.queryCollectorManagers().put(TotalHitCountCollector.class, manager); + QueryPhase.executeInternal(context.withCleanQueryResult()); assertNull(context.queryResult().terminatedEarly()); assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class)); assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2))); - assertThat(totalHitCountCollector.getTotalHits(), equalTo(numDocs)); - context.queryCollectors().clear(); + assertThat(manager.getTotalHits(), equalTo(numDocs)); + context.queryCollectorManagers().clear(); } { context.setSearcher(newEarlyTerminationContextSearcher(reader, 1)); context.trackTotalHitsUpTo(SearchContext.TRACK_TOTAL_HITS_DISABLED); - QueryPhase.executeInternal(context); + QueryPhase.executeInternal(context.withCleanQueryResult()); assertNull(context.queryResult().terminatedEarly()); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class)); assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2))); - QueryPhase.executeInternal(context); + QueryPhase.executeInternal(context.withCleanQueryResult()); assertNull(context.queryResult().terminatedEarly()); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); 
assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class)); @@ -564,7 +568,7 @@ public void testIndexSortScrollOptimization() throws Exception { final Sort indexSort = new Sort(new SortField("rank", SortField.Type.INT), new SortField("tiebreaker", SortField.Type.INT)); IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(indexSort); RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); - final int numDocs = scaledRandomIntBetween(100, 200); + final int numDocs = scaledRandomIntBetween(600, 900); for (int i = 0; i < numDocs; ++i) { Document doc = new Document(); doc.add(new NumericDocValuesField("rank", random().nextInt())); @@ -592,7 +596,7 @@ public void testIndexSortScrollOptimization() throws Exception { context.setSize(10); context.sort(searchSortAndFormat); - QueryPhase.executeInternal(context); + QueryPhase.executeInternal(context.withCleanQueryResult()); assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); assertNull(context.queryResult().terminatedEarly()); assertThat(context.terminateAfter(), equalTo(0)); @@ -601,7 +605,7 @@ public void testIndexSortScrollOptimization() throws Exception { FieldDoc lastDoc = (FieldDoc) context.queryResult().topDocs().topDocs.scoreDocs[sizeMinus1]; context.setSearcher(newEarlyTerminationContextSearcher(reader, 10)); - QueryPhase.executeInternal(context); + QueryPhase.executeInternal(context.withCleanQueryResult()); assertNull(context.queryResult().terminatedEarly()); assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); assertThat(context.terminateAfter(), equalTo(0)); @@ -630,7 +634,8 @@ public void testDisableTopScoreCollection() throws Exception { IndexWriterConfig iwc = newIndexWriterConfig(new StandardAnalyzer()); RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); Document doc = new Document(); - for (int i = 0; i < 10; i++) { + final int numDocs = 2 * 
scaledRandomIntBetween(50, 450); + for (int i = 0; i < numDocs; i++) { doc.clear(); if (i % 2 == 0) { doc.add(new TextField("title", "foo bar", Store.NO)); @@ -653,16 +658,16 @@ public void testDisableTopScoreCollection() throws Exception { context.trackTotalHitsUpTo(3); TopDocsCollectorContext topDocsContext = TopDocsCollectorContext.createTopDocsCollectorContext(context, false); assertEquals(topDocsContext.create(null).scoreMode(), org.apache.lucene.search.ScoreMode.COMPLETE); - QueryPhase.executeInternal(context); - assertEquals(5, context.queryResult().topDocs().topDocs.totalHits.value); + QueryPhase.executeInternal(context.withCleanQueryResult()); + assertEquals(numDocs / 2, context.queryResult().topDocs().topDocs.totalHits.value); assertEquals(context.queryResult().topDocs().topDocs.totalHits.relation, TotalHits.Relation.EQUAL_TO); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(3)); context.sort(new SortAndFormats(new Sort(new SortField("other", SortField.Type.INT)), new DocValueFormat[] { DocValueFormat.RAW })); topDocsContext = TopDocsCollectorContext.createTopDocsCollectorContext(context, false); assertEquals(topDocsContext.create(null).scoreMode(), org.apache.lucene.search.ScoreMode.TOP_DOCS); - QueryPhase.executeInternal(context); - assertEquals(5, context.queryResult().topDocs().topDocs.totalHits.value); + QueryPhase.executeInternal(context.withCleanQueryResult()); + assertEquals(numDocs / 2, context.queryResult().topDocs().topDocs.totalHits.value); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(3)); assertEquals(context.queryResult().topDocs().topDocs.totalHits.relation, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO); @@ -724,7 +729,7 @@ public void testEnhanceSortOnNumeric() throws Exception { searchContext.parsedQuery(query); searchContext.setTask(task); searchContext.setSize(10); - QueryPhase.executeInternal(searchContext); + QueryPhase.executeInternal(searchContext.withCleanQueryResult()); 
assertSortResults(searchContext.queryResult().topDocs().topDocs, (long) numDocs, false); } @@ -736,7 +741,7 @@ public void testEnhanceSortOnNumeric() throws Exception { searchContext.parsedQuery(query); searchContext.setTask(task); searchContext.setSize(10); - QueryPhase.executeInternal(searchContext); + QueryPhase.executeInternal(searchContext.withCleanQueryResult()); assertSortResults(searchContext.queryResult().topDocs().topDocs, (long) numDocs, true); } @@ -748,7 +753,7 @@ public void testEnhanceSortOnNumeric() throws Exception { searchContext.parsedQuery(query); searchContext.setTask(task); searchContext.setSize(10); - QueryPhase.executeInternal(searchContext); + QueryPhase.executeInternal(searchContext.withCleanQueryResult()); assertSortResults(searchContext.queryResult().topDocs().topDocs, (long) numDocs, false); } @@ -773,7 +778,7 @@ public void testEnhanceSortOnNumeric() throws Exception { searchContext.setTask(task); searchContext.from(5); searchContext.setSize(0); - QueryPhase.executeInternal(searchContext); + QueryPhase.executeInternal(searchContext.withCleanQueryResult()); assertSortResults(searchContext.queryResult().topDocs().topDocs, (long) numDocs, false); } @@ -800,11 +805,15 @@ public void testEnhanceSortOnNumeric() throws Exception { searchContext.parsedQuery(query); searchContext.setTask(task); searchContext.setSize(10); - QueryPhase.executeInternal(searchContext); + QueryPhase.executeInternal(searchContext.withCleanQueryResult()); final TopDocs topDocs = searchContext.queryResult().topDocs().topDocs; long topValue = (long) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]; assertThat(topValue, greaterThan(afterValue)); assertSortResults(topDocs, (long) numDocs, false); + + final TotalHits totalHits = topDocs.totalHits; + assertEquals(TotalHits.Relation.EQUAL_TO, totalHits.relation); + assertEquals(numDocs, totalHits.value); } reader.close(); @@ -916,13 +925,133 @@ public void testMinScore() throws Exception { context.setSize(1); 
context.trackTotalHitsUpTo(5); - QueryPhase.executeInternal(context); + QueryPhase.executeInternal(context.withCleanQueryResult()); assertEquals(10, context.queryResult().topDocs().topDocs.totalHits.value); reader.close(); dir.close(); } + public void testMaxScore() throws Exception { + Directory dir = newDirectory(); + final Sort sort = new Sort(new SortField("filter", SortField.Type.STRING)); + IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort); + RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); + + final int numDocs = scaledRandomIntBetween(600, 900); + for (int i = 0; i < numDocs; i++) { + Document doc = new Document(); + doc.add(new StringField("foo", "bar", Store.NO)); + doc.add(new StringField("filter", "f1" + ((i > 0) ? " " + Integer.toString(i) : ""), Store.NO)); + doc.add(new SortedDocValuesField("filter", newBytesRef("f1" + ((i > 0) ? " " + Integer.toString(i) : "")))); + w.addDocument(doc); + } + w.close(); + + IndexReader reader = DirectoryReader.open(dir); + TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader)); + context.trackScores(true); + context.parsedQuery( + new ParsedQuery( + new BooleanQuery.Builder().add(new TermQuery(new Term("foo", "bar")), Occur.MUST) + .add(new TermQuery(new Term("filter", "f1")), Occur.SHOULD) + .build() + ) + ); + context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); + context.setSize(1); + context.trackTotalHitsUpTo(5); + + QueryPhase.executeInternal(context.withCleanQueryResult()); + assertFalse(Float.isNaN(context.queryResult().getMaxScore())); + assertEquals(1, context.queryResult().topDocs().topDocs.scoreDocs.length); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, greaterThanOrEqualTo(6L)); + + context.sort(new SortAndFormats(sort, new DocValueFormat[] { DocValueFormat.RAW })); + QueryPhase.executeInternal(context.withCleanQueryResult()); + 
assertFalse(Float.isNaN(context.queryResult().getMaxScore())); + assertEquals(1, context.queryResult().topDocs().topDocs.scoreDocs.length); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, greaterThanOrEqualTo(6L)); + + context.trackScores(false); + QueryPhase.executeInternal(context.withCleanQueryResult()); + assertTrue(Float.isNaN(context.queryResult().getMaxScore())); + assertEquals(1, context.queryResult().topDocs().topDocs.scoreDocs.length); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, greaterThanOrEqualTo(6L)); + + reader.close(); + dir.close(); + } + + public void testCollapseQuerySearchResults() throws Exception { + Directory dir = newDirectory(); + final Sort sort = new Sort(new SortField("user", SortField.Type.INT)); + IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort); + RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); + + // Always end up with uneven buckets so collapsing is predictable + final int numDocs = 2 * scaledRandomIntBetween(600, 900) - 1; + for (int i = 0; i < numDocs; i++) { + Document doc = new Document(); + doc.add(new StringField("foo", "bar", Store.NO)); + doc.add(new NumericDocValuesField("user", i & 1)); + w.addDocument(doc); + } + w.close(); + + IndexReader reader = DirectoryReader.open(dir); + QueryShardContext queryShardContext = mock(QueryShardContext.class); + when(queryShardContext.fieldMapper("user")).thenReturn( + new NumberFieldType("user", NumberType.INTEGER, true, false, true, false, null, Collections.emptyMap()) + ); + + TestSearchContext context = new TestSearchContext(queryShardContext, indexShard, newContextSearcher(reader)); + context.collapse(new CollapseBuilder("user").build(context.getQueryShardContext())); + context.trackScores(true); + context.parsedQuery(new ParsedQuery(new TermQuery(new Term("foo", "bar")))); + context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); + context.setSize(2); + 
context.trackTotalHitsUpTo(5); + + QueryPhase.executeInternal(context.withCleanQueryResult()); + assertFalse(Float.isNaN(context.queryResult().getMaxScore())); + assertEquals(2, context.queryResult().topDocs().topDocs.scoreDocs.length); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); + assertThat(context.queryResult().topDocs().topDocs, instanceOf(CollapseTopFieldDocs.class)); + + CollapseTopFieldDocs topDocs = (CollapseTopFieldDocs) context.queryResult().topDocs().topDocs; + assertThat(topDocs.collapseValues.length, equalTo(2)); + assertThat(topDocs.collapseValues[0], equalTo(0L)); // user == 0 + assertThat(topDocs.collapseValues[1], equalTo(1L)); // user == 1 + + context.sort(new SortAndFormats(sort, new DocValueFormat[] { DocValueFormat.RAW })); + QueryPhase.executeInternal(context.withCleanQueryResult()); + assertFalse(Float.isNaN(context.queryResult().getMaxScore())); + assertEquals(2, context.queryResult().topDocs().topDocs.scoreDocs.length); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); + assertThat(context.queryResult().topDocs().topDocs, instanceOf(CollapseTopFieldDocs.class)); + + topDocs = (CollapseTopFieldDocs) context.queryResult().topDocs().topDocs; + assertThat(topDocs.collapseValues.length, equalTo(2)); + assertThat(topDocs.collapseValues[0], equalTo(0L)); // user == 0 + assertThat(topDocs.collapseValues[1], equalTo(1L)); // user == 1 + + context.trackScores(false); + QueryPhase.executeInternal(context.withCleanQueryResult()); + assertTrue(Float.isNaN(context.queryResult().getMaxScore())); + assertEquals(2, context.queryResult().topDocs().topDocs.scoreDocs.length); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); + assertThat(context.queryResult().topDocs().topDocs, instanceOf(CollapseTopFieldDocs.class)); + + topDocs = (CollapseTopFieldDocs) context.queryResult().topDocs().topDocs; + 
assertThat(topDocs.collapseValues.length, equalTo(2)); + assertThat(topDocs.collapseValues[0], equalTo(0L)); // user == 0 + assertThat(topDocs.collapseValues[1], equalTo(1L)); // user == 1 + + reader.close(); + dir.close(); + } + public void testCancellationDuringPreprocess() throws IOException { try (Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter(random(), dir, newIndexWriterConfig())) { @@ -982,7 +1111,8 @@ private static ContextIndexSearcher newContextSearcher(IndexReader reader) throw IndexSearcher.getDefaultSimilarity(), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), - true + true, + null ); } @@ -992,7 +1122,8 @@ private static ContextIndexSearcher newEarlyTerminationContextSearcher(IndexRead IndexSearcher.getDefaultSimilarity(), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), - true + true, + null ) { @Override @@ -1003,6 +1134,32 @@ public void search(List leaves, Weight weight, Collector coll }; } + private static class TestTotalHitCountCollectorManager extends TotalHitCountCollectorManager { + private final TotalHitCountCollector collector; + + static TestTotalHitCountCollectorManager create() { + return create(null); + } + + static TestTotalHitCountCollectorManager create(final Sort sort) { + return new TestTotalHitCountCollectorManager(new TotalHitCountCollector(), sort); + } + + private TestTotalHitCountCollectorManager(final TotalHitCountCollector collector, final Sort sort) { + super(sort); + this.collector = collector; + } + + @Override + public TotalHitCountCollector newCollector() throws IOException { + return collector; + } + + public int getTotalHits() { + return collector.getTotalHits(); + } + } + private static class AssertingEarlyTerminationFilterCollector extends FilterCollector { private final int size; diff --git a/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java 
b/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java new file mode 100644 index 0000000000000..dfa41edb5cff2 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java @@ -0,0 +1,1158 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.query; + +import org.apache.lucene.analysis.standard.StandardAnalyzer; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field.Store; +import org.apache.lucene.document.NumericDocValuesField; +import org.apache.lucene.document.SortedDocValuesField; +import org.apache.lucene.document.StringField; +import org.apache.lucene.document.TextField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.Term; +import org.apache.lucene.queries.spans.SpanNearQuery; +import org.apache.lucene.queries.spans.SpanTermQuery; +import org.apache.lucene.search.BooleanClause.Occur; +import org.apache.lucene.search.grouping.CollapseTopFieldDocs; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.Collector; +import org.apache.lucene.search.FieldComparator; +import org.apache.lucene.search.FieldDoc; +import org.apache.lucene.search.FilterCollector; +import org.apache.lucene.search.FilterLeafCollector; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.LeafCollector; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.Sort; +import org.apache.lucene.search.SortField; +import org.apache.lucene.search.TermQuery; +import 
org.apache.lucene.search.TotalHits; +import org.apache.lucene.search.Weight; +import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.opensearch.action.search.SearchShardTask; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.ToXContent; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.index.mapper.NumberFieldMapper.NumberFieldType; +import org.opensearch.index.mapper.NumberFieldMapper.NumberType; +import org.opensearch.index.query.ParsedQuery; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.IndexShardTestCase; +import org.opensearch.lucene.queries.MinDocQuery; +import org.opensearch.search.DocValueFormat; +import org.opensearch.search.collapse.CollapseBuilder; +import org.opensearch.search.internal.ContextIndexSearcher; +import org.opensearch.search.internal.ScrollContext; +import org.opensearch.search.internal.SearchContext; +import org.opensearch.search.profile.ProfileResult; +import org.opensearch.search.profile.ProfileShardResult; +import org.opensearch.search.profile.SearchProfileShardResults; +import org.opensearch.search.profile.query.CollectorResult; +import org.opensearch.search.profile.query.QueryProfileShardResult; +import org.opensearch.search.sort.SortAndFormats; +import org.opensearch.test.TestSearchContext; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.function.Consumer; + +import static org.hamcrest.CoreMatchers.not; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.equalTo; +import 
static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import static org.hamcrest.Matchers.hasSize; + +public class QueryProfilePhaseTests extends IndexShardTestCase { + + private IndexShard indexShard; + + @Override + public Settings threadPoolSettings() { + return Settings.builder().put(super.threadPoolSettings()).put("thread_pool.search.min_queue_size", 10).build(); + } + + @Override + public void setUp() throws Exception { + super.setUp(); + indexShard = newShard(true); + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + closeShards(indexShard); + } + + public void testPostFilterDisablesCountOptimization() throws Exception { + Directory dir = newDirectory(); + final Sort sort = new Sort(new SortField("rank", SortField.Type.INT)); + IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort); + RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); + Document doc = new Document(); + w.addDocument(doc); + w.close(); + + IndexReader reader = DirectoryReader.open(dir); + + TestSearchContext context = new TestSearchContext(null, indexShard, newEarlyTerminationContextSearcher(reader, 0)); + context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); + context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); + + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers()); + assertEquals(1, context.queryResult().topDocs().topDocs.totalHits.value); + assertProfileData(context, "MatchAllDocsQuery", query -> { + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); + 
assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }, collector -> { + assertThat(collector.getReason(), equalTo("search_count")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), empty()); + }); + + context.setSearcher(newContextSearcher(reader)); + context.parsedPostFilter(new ParsedQuery(new MatchNoDocsQuery())); + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers()); + assertEquals(0, context.queryResult().topDocs().topDocs.totalHits.value); + assertProfileData(context, collector -> { + assertThat(collector.getReason(), equalTo("search_post_filter")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), hasSize(1)); + assertThat(collector.getProfiledChildren().get(0).getReason(), equalTo("search_count")); + assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L)); + }, (query) -> { + assertThat(query.getQueryName(), equalTo("MatchNoDocsQuery")); + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }, (query) -> { + assertThat(query.getQueryName(), equalTo("MatchAllDocsQuery")); + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }); + + reader.close(); + dir.close(); + } + + public void testTerminateAfterWithFilter() 
throws Exception { + Directory dir = newDirectory(); + final Sort sort = new Sort(new SortField("rank", SortField.Type.INT)); + IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort); + RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); + Document doc = new Document(); + for (int i = 0; i < 10; i++) { + doc.add(new StringField("foo", Integer.toString(i), Store.NO)); + } + w.addDocument(doc); + w.close(); + + IndexReader reader = DirectoryReader.open(dir); + + TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader)); + context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); + + context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); + context.terminateAfter(1); + context.setSize(10); + for (int i = 0; i < 10; i++) { + context.parsedPostFilter(new ParsedQuery(new TermQuery(new Term("foo", Integer.toString(i))))); + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers()); + assertEquals(1, context.queryResult().topDocs().topDocs.totalHits.value); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); + assertProfileData(context, collector -> { + assertThat(collector.getReason(), equalTo("search_post_filter")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), hasSize(1)); + assertThat(collector.getProfiledChildren().get(0).getReason(), equalTo("search_terminate_after_count")); + assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren().get(0).getProfiledChildren(), hasSize(1)); + assertThat(collector.getProfiledChildren().get(0).getProfiledChildren().get(0).getReason(), equalTo("search_top_hits")); + assertThat(collector.getProfiledChildren().get(0).getProfiledChildren().get(0).getTime(), greaterThan(0L)); + }, (query) -> { + assertThat(query.getQueryName(), equalTo("TermQuery")); + 
assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }, (query) -> { + assertThat(query.getQueryName(), equalTo("MatchAllDocsQuery")); + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), equalTo(1L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }); + } + reader.close(); + dir.close(); + } + + public void testMinScoreDisablesCountOptimization() throws Exception { + Directory dir = newDirectory(); + final Sort sort = new Sort(new SortField("rank", SortField.Type.INT)); + IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort); + RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); + Document doc = new Document(); + w.addDocument(doc); + w.close(); + + IndexReader reader = DirectoryReader.open(dir); + TestSearchContext context = new TestSearchContext(null, indexShard, newEarlyTerminationContextSearcher(reader, 0)); + context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); + context.setSize(0); + context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers()); + assertEquals(1, context.queryResult().topDocs().topDocs.totalHits.value); + assertProfileData(context, "MatchAllDocsQuery", query -> { + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); + 
assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }, collector -> { + assertThat(collector.getReason(), equalTo("search_count")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), empty()); + }); + + context.minimumScore(100); + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers()); + assertEquals(0, context.queryResult().topDocs().topDocs.totalHits.value); + assertEquals(TotalHits.Relation.EQUAL_TO, context.queryResult().topDocs().topDocs.totalHits.relation); + assertProfileData(context, "MatchAllDocsQuery", query -> { + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), greaterThanOrEqualTo(100L)); + assertThat(query.getTimeBreakdown().get("score_count"), equalTo(1L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }, collector -> { + assertThat(collector.getReason(), equalTo("search_min_score")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), hasSize(1)); + assertThat(collector.getProfiledChildren().get(0).getReason(), equalTo("search_count")); + assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L)); + }); + + reader.close(); + dir.close(); + } + + public void testInOrderScrollOptimization() throws Exception { + Directory dir = newDirectory(); + final Sort sort = new Sort(new SortField("rank", SortField.Type.INT)); + IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort); + RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); + final int numDocs = scaledRandomIntBetween(600, 900); + for (int i = 0; i < numDocs; ++i) { + w.addDocument(new Document()); + } + w.close(); + IndexReader reader = DirectoryReader.open(dir); + 
ScrollContext scrollContext = new ScrollContext(); + TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader), scrollContext); + context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); + scrollContext.lastEmittedDoc = null; + scrollContext.maxScore = Float.NaN; + scrollContext.totalHits = null; + context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); + int size = randomIntBetween(2, 5); + context.setSize(size); + + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers()); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); + assertNull(context.queryResult().terminatedEarly()); + assertThat(context.terminateAfter(), equalTo(0)); + assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs)); + assertProfileData(context, "MatchAllDocsQuery", query -> { + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }, collector -> { + assertThat(collector.getReason(), equalTo("search_top_hits")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), empty()); + }); + + context.setSearcher(newEarlyTerminationContextSearcher(reader, size)); + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers()); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); + assertThat(context.terminateAfter(), equalTo(size)); + assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0].doc, greaterThanOrEqualTo(size)); + assertProfileData(context, 
"ConstantScoreQuery", query -> { + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score"), equalTo(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score_count"), equalTo(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }, collector -> { + assertThat(collector.getReason(), equalTo("search_terminate_after_count")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), hasSize(1)); + assertThat(collector.getProfiledChildren().get(0).getReason(), equalTo("search_top_hits")); + assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L)); + }); + + reader.close(); + dir.close(); + } + + public void testTerminateAfterEarlyTermination() throws Exception { + Directory dir = newDirectory(); + IndexWriterConfig iwc = newIndexWriterConfig(); + RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); + final int numDocs = scaledRandomIntBetween(600, 900); + for (int i = 0; i < numDocs; ++i) { + Document doc = new Document(); + if (randomBoolean()) { + doc.add(new StringField("foo", "bar", Store.NO)); + } + if (randomBoolean()) { + doc.add(new StringField("foo", "baz", Store.NO)); + } + doc.add(new NumericDocValuesField("rank", numDocs - i)); + w.addDocument(doc); + } + w.close(); + final IndexReader reader = DirectoryReader.open(dir); + TestSearchContext context = new TestSearchContext(null, indexShard, 
newContextSearcher(reader)); + context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); + context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); + + context.terminateAfter(1); + { + context.setSize(1); + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers()); + assertTrue(context.queryResult().terminatedEarly()); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); + assertProfileData(context, "MatchAllDocsQuery", query -> { + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }, collector -> { + assertThat(collector.getReason(), equalTo("search_terminate_after_count")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), hasSize(1)); + assertThat(collector.getProfiledChildren().get(0).getReason(), equalTo("search_top_hits")); + assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L)); + }); + + context.setSize(0); + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers()); + assertTrue(context.queryResult().terminatedEarly()); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); + assertProfileData(context, "MatchAllDocsQuery", query -> { + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); + 
assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }, collector -> { + assertThat(collector.getReason(), equalTo("search_terminate_after_count")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), hasSize(1)); + assertThat(collector.getProfiledChildren().get(0).getReason(), equalTo("search_count")); + assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L)); + }); + } + + { + context.setSize(1); + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers()); + assertTrue(context.queryResult().terminatedEarly()); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); + assertProfileData(context, "MatchAllDocsQuery", query -> { + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }, collector -> { + assertThat(collector.getReason(), equalTo("search_terminate_after_count")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), hasSize(1)); + assertThat(collector.getProfiledChildren().get(0).getReason(), equalTo("search_top_hits")); + assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L)); + }); + } + { + context.setSize(1); + BooleanQuery bq = new BooleanQuery.Builder().add(new TermQuery(new Term("foo", "bar")), Occur.SHOULD) + .add(new TermQuery(new Term("foo", "baz")), Occur.SHOULD) + .build(); + context.parsedQuery(new ParsedQuery(bq)); + 
QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers()); + assertTrue(context.queryResult().terminatedEarly()); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); + assertProfileData(context, "BooleanQuery", query -> { + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + + assertThat(query.getProfiledChildren(), hasSize(2)); + assertThat(query.getProfiledChildren().get(0).getQueryName(), equalTo("TermQuery")); + assertThat(query.getProfiledChildren().get(0).getTime(), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + + assertThat(query.getProfiledChildren().get(1).getQueryName(), equalTo("TermQuery")); + assertThat(query.getProfiledChildren().get(1).getTime(), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }, collector -> { + assertThat(collector.getReason(), equalTo("search_terminate_after_count")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), hasSize(1)); + assertThat(collector.getProfiledChildren().get(0).getReason(), equalTo("search_top_hits")); + assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L)); + }); + context.setSize(0); + context.parsedQuery(new ParsedQuery(bq)); + 
QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers()); + assertTrue(context.queryResult().terminatedEarly()); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); + + assertProfileData(context, "BooleanQuery", query -> { + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + + assertThat(query.getProfiledChildren(), hasSize(2)); + assertThat(query.getProfiledChildren().get(0).getQueryName(), equalTo("TermQuery")); + assertThat(query.getProfiledChildren().get(0).getTime(), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score"), equalTo(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score_count"), equalTo(0L)); + + assertThat(query.getProfiledChildren().get(1).getQueryName(), equalTo("TermQuery")); + assertThat(query.getProfiledChildren().get(1).getTime(), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("score"), equalTo(0L)); + assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("score_count"), equalTo(0L)); + }, collector -> { + assertThat(collector.getReason(), 
equalTo("search_terminate_after_count")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), hasSize(1)); + assertThat(collector.getProfiledChildren().get(0).getReason(), equalTo("search_count")); + assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L)); + }); + } + + context.terminateAfter(7); + context.setSize(10); + for (int trackTotalHits : new int[] { -1, 3, 75, 100 }) { + context.trackTotalHitsUpTo(trackTotalHits); + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers()); + assertTrue(context.queryResult().terminatedEarly()); + if (trackTotalHits == -1) { + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(0L)); + } else { + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(7L)); + } + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(7)); + assertProfileData(context, "BooleanQuery", query -> { + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), greaterThanOrEqualTo(7L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + + assertThat(query.getProfiledChildren(), hasSize(2)); + assertThat(query.getProfiledChildren().get(0).getQueryName(), equalTo("TermQuery")); + assertThat(query.getProfiledChildren().get(0).getTime(), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score_count"), greaterThan(0L)); + 
+ assertThat(query.getProfiledChildren().get(1).getQueryName(), equalTo("TermQuery")); + assertThat(query.getProfiledChildren().get(1).getTime(), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("score"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("score_count"), greaterThan(0L)); + }, collector -> { + assertThat(collector.getReason(), equalTo("search_terminate_after_count")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), hasSize(1)); + assertThat(collector.getProfiledChildren().get(0).getReason(), equalTo("search_top_hits")); + assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L)); + }); + } + + reader.close(); + dir.close(); + } + + public void testIndexSortingEarlyTermination() throws Exception { + Directory dir = newDirectory(); + final Sort sort = new Sort(new SortField("rank", SortField.Type.INT)); + IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort); + RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); + final int numDocs = scaledRandomIntBetween(600, 900); + for (int i = 0; i < numDocs; ++i) { + Document doc = new Document(); + if (randomBoolean()) { + doc.add(new StringField("foo", "bar", Store.NO)); + } + if (randomBoolean()) { + doc.add(new StringField("foo", "baz", Store.NO)); + } + doc.add(new NumericDocValuesField("rank", numDocs - i)); + w.addDocument(doc); + } + w.close(); + + final IndexReader reader = DirectoryReader.open(dir); + TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader)); + context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); + context.setSize(1); + context.setTask(new 
SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); + context.sort(new SortAndFormats(sort, new DocValueFormat[] { DocValueFormat.RAW })); + + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers()); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class)); + FieldDoc fieldDoc = (FieldDoc) context.queryResult().topDocs().topDocs.scoreDocs[0]; + assertThat(fieldDoc.fields[0], equalTo(1)); + assertProfileData(context, "MatchAllDocsQuery", query -> { + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }, collector -> { + assertThat(collector.getReason(), equalTo("search_top_hits")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), empty()); + }); + + { + context.parsedPostFilter(new ParsedQuery(new MinDocQuery(1))); + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers()); + assertNull(context.queryResult().terminatedEarly()); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(numDocs - 1L)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class)); + assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2))); + assertProfileData(context, collector -> { + assertThat(collector.getReason(), equalTo("search_post_filter")); + assertThat(collector.getTime(), greaterThan(0L)); + 
assertThat(collector.getProfiledChildren(), hasSize(1)); + assertThat(collector.getProfiledChildren().get(0).getReason(), equalTo("search_top_hits")); + assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L)); + }, (query) -> { + assertThat(query.getQueryName(), equalTo("MinDocQuery")); + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }, (query) -> { + assertThat(query.getQueryName(), equalTo("MatchAllDocsQuery")); + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }); + context.parsedPostFilter(null); + } + + { + context.setSearcher(newEarlyTerminationContextSearcher(reader, 1)); + context.trackTotalHitsUpTo(SearchContext.TRACK_TOTAL_HITS_DISABLED); + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers()); + assertNull(context.queryResult().terminatedEarly()); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class)); + assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2))); + assertProfileData(context, "MatchAllDocsQuery", query -> { + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); + 
assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }, collector -> { + assertThat(collector.getReason(), equalTo("search_top_hits")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), empty()); + }); + + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers()); + assertNull(context.queryResult().terminatedEarly()); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class)); + assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2))); + assertProfileData(context, "MatchAllDocsQuery", query -> { + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }, collector -> { + assertThat(collector.getReason(), equalTo("search_top_hits")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), empty()); + }); + } + + reader.close(); + dir.close(); + } + + public void testIndexSortScrollOptimization() throws Exception { + Directory dir = newDirectory(); + final Sort indexSort = new Sort(new SortField("rank", SortField.Type.INT), new SortField("tiebreaker", SortField.Type.INT)); + IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(indexSort); + RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); + final int numDocs = scaledRandomIntBetween(600, 900); + for (int i = 0; i < numDocs; ++i) { + Document doc = new Document(); + doc.add(new NumericDocValuesField("rank", random().nextInt())); + doc.add(new 
NumericDocValuesField("tiebreaker", i)); + w.addDocument(doc); + } + if (randomBoolean()) { + w.forceMerge(randomIntBetween(1, 10)); + } + w.close(); + + final IndexReader reader = DirectoryReader.open(dir); + List searchSortAndFormats = new ArrayList<>(); + searchSortAndFormats.add(new SortAndFormats(indexSort, new DocValueFormat[] { DocValueFormat.RAW, DocValueFormat.RAW })); + // search sort is a prefix of the index sort + searchSortAndFormats.add(new SortAndFormats(new Sort(indexSort.getSort()[0]), new DocValueFormat[] { DocValueFormat.RAW })); + for (SortAndFormats searchSortAndFormat : searchSortAndFormats) { + ScrollContext scrollContext = new ScrollContext(); + TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader), scrollContext); + context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); + scrollContext.lastEmittedDoc = null; + scrollContext.maxScore = Float.NaN; + scrollContext.totalHits = null; + context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); + context.setSize(10); + context.sort(searchSortAndFormat); + + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers()); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); + assertNull(context.queryResult().terminatedEarly()); + assertThat(context.terminateAfter(), equalTo(0)); + assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs)); + assertProfileData(context, "MatchAllDocsQuery", query -> { + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }, collector -> { + assertThat(collector.getReason(), equalTo("search_top_hits")); + 
assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), empty()); + }); + + int sizeMinus1 = context.queryResult().topDocs().topDocs.scoreDocs.length - 1; + FieldDoc lastDoc = (FieldDoc) context.queryResult().topDocs().topDocs.scoreDocs[sizeMinus1]; + + context.setSearcher(newEarlyTerminationContextSearcher(reader, 10)); + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers()); + assertNull(context.queryResult().terminatedEarly()); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); + assertThat(context.terminateAfter(), equalTo(0)); + assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs)); + assertProfileData(context, "ConstantScoreQuery", query -> { + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + + assertThat(query.getProfiledChildren(), hasSize(1)); + assertThat(query.getProfiledChildren().get(0).getQueryName(), equalTo("SearchAfterSortedDocQuery")); + assertThat(query.getProfiledChildren().get(0).getTime(), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score"), equalTo(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score_count"), equalTo(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }, collector -> { + assertThat(collector.getReason(), equalTo("search_top_hits")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), empty()); + }); + 
FieldDoc firstDoc = (FieldDoc) context.queryResult().topDocs().topDocs.scoreDocs[0]; + for (int i = 0; i < searchSortAndFormat.sort.getSort().length; i++) { + @SuppressWarnings("unchecked") + FieldComparator comparator = (FieldComparator) searchSortAndFormat.sort.getSort()[i].getComparator( + i, + false + ); + int cmp = comparator.compareValues(firstDoc.fields[i], lastDoc.fields[i]); + if (cmp == 0) { + continue; + } + assertThat(cmp, equalTo(1)); + break; + } + } + reader.close(); + dir.close(); + } + + public void testDisableTopScoreCollection() throws Exception { + Directory dir = newDirectory(); + IndexWriterConfig iwc = newIndexWriterConfig(new StandardAnalyzer()); + RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); + Document doc = new Document(); + final int numDocs = 2 * scaledRandomIntBetween(50, 450); + for (int i = 0; i < numDocs; i++) { + doc.clear(); + if (i % 2 == 0) { + doc.add(new TextField("title", "foo bar", Store.NO)); + } else { + doc.add(new TextField("title", "foo", Store.NO)); + } + w.addDocument(doc); + } + w.close(); + + IndexReader reader = DirectoryReader.open(dir); + TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader)); + context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); + Query q = new SpanNearQuery.Builder("title", true).addClause(new SpanTermQuery(new Term("title", "foo"))) + .addClause(new SpanTermQuery(new Term("title", "bar"))) + .build(); + + context.parsedQuery(new ParsedQuery(q)); + context.setSize(3); + context.trackTotalHitsUpTo(3); + TopDocsCollectorContext topDocsContext = TopDocsCollectorContext.createTopDocsCollectorContext(context, false); + assertEquals(topDocsContext.create(null).scoreMode(), org.apache.lucene.search.ScoreMode.COMPLETE); + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers()); + assertEquals(numDocs / 2, context.queryResult().topDocs().topDocs.totalHits.value); + 
assertEquals(context.queryResult().topDocs().topDocs.totalHits.relation, TotalHits.Relation.EQUAL_TO); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(3)); + assertProfileData(context, "SpanNearQuery", query -> { + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }, collector -> { + assertThat(collector.getReason(), equalTo("search_top_hits")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), empty()); + }); + + context.sort(new SortAndFormats(new Sort(new SortField("other", SortField.Type.INT)), new DocValueFormat[] { DocValueFormat.RAW })); + topDocsContext = TopDocsCollectorContext.createTopDocsCollectorContext(context, false); + assertEquals(topDocsContext.create(null).scoreMode(), org.apache.lucene.search.ScoreMode.TOP_DOCS); + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers()); + assertEquals(numDocs / 2, context.queryResult().topDocs().topDocs.totalHits.value); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(3)); + assertEquals(context.queryResult().topDocs().topDocs.totalHits.relation, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO); + assertProfileData(context, "SpanNearQuery", query -> { + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }, collector -> { + assertThat(collector.getReason(), 
equalTo("search_top_hits")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), empty()); + }); + + reader.close(); + dir.close(); + } + + public void testMinScore() throws Exception { + Directory dir = newDirectory(); + IndexWriterConfig iwc = newIndexWriterConfig(); + RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); + for (int i = 0; i < 10; i++) { + Document doc = new Document(); + doc.add(new StringField("foo", "bar", Store.NO)); + doc.add(new StringField("filter", "f1", Store.NO)); + w.addDocument(doc); + } + w.close(); + + IndexReader reader = DirectoryReader.open(dir); + TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader)); + context.parsedQuery( + new ParsedQuery( + new BooleanQuery.Builder().add(new TermQuery(new Term("foo", "bar")), Occur.MUST) + .add(new TermQuery(new Term("filter", "f1")), Occur.SHOULD) + .build() + ) + ); + context.minimumScore(0.01f); + context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); + context.setSize(1); + context.trackTotalHitsUpTo(5); + + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers()); + assertEquals(10, context.queryResult().topDocs().topDocs.totalHits.value); + assertProfileData(context, "BooleanQuery", query -> { + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), equalTo(10L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + + assertThat(query.getProfiledChildren(), hasSize(2)); + assertThat(query.getProfiledChildren().get(0).getQueryName(), equalTo("TermQuery")); + assertThat(query.getProfiledChildren().get(0).getTime(), greaterThan(0L)); + 
assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + + assertThat(query.getProfiledChildren().get(1).getQueryName(), equalTo("TermQuery")); + assertThat(query.getProfiledChildren().get(1).getTime(), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }, collector -> { + assertThat(collector.getReason(), equalTo("search_min_score")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), hasSize(1)); + assertThat(collector.getProfiledChildren().get(0).getReason(), equalTo("search_top_hits")); + assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L)); + }); + + reader.close(); + dir.close(); + } + + public void testMaxScore() throws Exception { + Directory dir = newDirectory(); + final Sort sort = new Sort(new SortField("filter", SortField.Type.STRING)); + IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort); + RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); + + final int numDocs = scaledRandomIntBetween(600, 900); + for (int i = 0; i < numDocs; i++) { + Document doc = new Document(); + doc.add(new StringField("foo", "bar", Store.NO)); + doc.add(new StringField("filter", "f1" + ((i > 0) ? " " + Integer.toString(i) : ""), Store.NO)); + doc.add(new SortedDocValuesField("filter", newBytesRef("f1" + ((i > 0) ? 
" " + Integer.toString(i) : "")))); + w.addDocument(doc); + } + w.close(); + + IndexReader reader = DirectoryReader.open(dir); + TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader)); + context.trackScores(true); + context.parsedQuery( + new ParsedQuery( + new BooleanQuery.Builder().add(new TermQuery(new Term("foo", "bar")), Occur.MUST) + .add(new TermQuery(new Term("filter", "f1")), Occur.SHOULD) + .build() + ) + ); + context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); + context.setSize(1); + context.trackTotalHitsUpTo(5); + + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers()); + assertFalse(Float.isNaN(context.queryResult().getMaxScore())); + assertEquals(1, context.queryResult().topDocs().topDocs.scoreDocs.length); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, greaterThanOrEqualTo(6L)); + assertProfileData(context, "BooleanQuery", query -> { + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), greaterThanOrEqualTo(6L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + + assertThat(query.getProfiledChildren(), hasSize(2)); + assertThat(query.getProfiledChildren().get(0).getQueryName(), equalTo("TermQuery")); + assertThat(query.getProfiledChildren().get(0).getTime(), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + + assertThat(query.getProfiledChildren().get(1).getQueryName(), equalTo("TermQuery")); + assertThat(query.getProfiledChildren().get(1).getTime(), greaterThan(0L)); + 
assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }, collector -> { + assertThat(collector.getReason(), equalTo("search_top_hits")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), empty()); + }); + + context.sort(new SortAndFormats(sort, new DocValueFormat[] { DocValueFormat.RAW })); + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers()); + assertFalse(Float.isNaN(context.queryResult().getMaxScore())); + assertEquals(1, context.queryResult().topDocs().topDocs.scoreDocs.length); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, greaterThanOrEqualTo(6L)); + assertProfileData(context, "BooleanQuery", query -> { + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), greaterThanOrEqualTo(6L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + + assertThat(query.getProfiledChildren(), hasSize(2)); + assertThat(query.getProfiledChildren().get(0).getQueryName(), equalTo("TermQuery")); + assertThat(query.getProfiledChildren().get(0).getTime(), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + + assertThat(query.getProfiledChildren().get(1).getQueryName(), equalTo("TermQuery")); + assertThat(query.getProfiledChildren().get(1).getTime(), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight"), greaterThan(0L)); + 
assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + }, collector -> { + assertThat(collector.getReason(), equalTo("search_top_hits")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), empty()); + }); + + reader.close(); + dir.close(); + } + + public void testCollapseQuerySearchResults() throws Exception { + Directory dir = newDirectory(); + final Sort sort = new Sort(new SortField("user", SortField.Type.INT)); + IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort); + RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); + + // Always end up with uneven buckets so collapsing is predictable + final int numDocs = 2 * scaledRandomIntBetween(600, 900) - 1; + for (int i = 0; i < numDocs; i++) { + Document doc = new Document(); + doc.add(new StringField("foo", "bar", Store.NO)); + doc.add(new NumericDocValuesField("user", i & 1)); + w.addDocument(doc); + } + w.close(); + + IndexReader reader = DirectoryReader.open(dir); + QueryShardContext queryShardContext = mock(QueryShardContext.class); + when(queryShardContext.fieldMapper("user")).thenReturn( + new NumberFieldType("user", NumberType.INTEGER, true, false, true, false, null, Collections.emptyMap()) + ); + + TestSearchContext context = new TestSearchContext(queryShardContext, indexShard, newContextSearcher(reader)); + context.collapse(new CollapseBuilder("user").build(context.getQueryShardContext())); + context.trackScores(true); + context.parsedQuery(new ParsedQuery(new TermQuery(new Term("foo", "bar")))); + context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); + context.setSize(2); + context.trackTotalHitsUpTo(5); + + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers()); + assertFalse(Float.isNaN(context.queryResult().getMaxScore())); + assertEquals(2, context.queryResult().topDocs().topDocs.scoreDocs.length); + 
assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); + assertThat(context.queryResult().topDocs().topDocs, instanceOf(CollapseTopFieldDocs.class)); + + assertProfileData(context, "TermQuery", query -> { + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), greaterThanOrEqualTo(6L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + assertThat(query.getProfiledChildren(), empty()); + }, collector -> { + assertThat(collector.getReason(), equalTo("search_top_hits")); + assertThat(collector.getTime(), greaterThan(0L)); + assertThat(collector.getProfiledChildren(), empty()); + }); + + context.sort(new SortAndFormats(sort, new DocValueFormat[] { DocValueFormat.RAW })); + QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers()); + assertFalse(Float.isNaN(context.queryResult().getMaxScore())); + assertEquals(2, context.queryResult().topDocs().topDocs.scoreDocs.length); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); + assertThat(context.queryResult().topDocs().topDocs, instanceOf(CollapseTopFieldDocs.class)); + + assertProfileData(context, "TermQuery", query -> { + assertThat(query.getTimeBreakdown().keySet(), not(empty())); + assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("score_count"), greaterThanOrEqualTo(6L)); + assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + assertThat(query.getProfiledChildren(), empty()); + }, collector -> { + assertThat(collector.getReason(), equalTo("search_top_hits")); + assertThat(collector.getTime(), greaterThan(0L)); + 
assertThat(collector.getProfiledChildren(), empty()); + }); + + reader.close(); + dir.close(); + } + + private void assertProfileData(SearchContext context, String type, Consumer query, Consumer collector) + throws IOException { + assertProfileData(context, collector, (profileResult) -> { + assertThat(profileResult.getQueryName(), equalTo(type)); + assertThat(profileResult.getTime(), greaterThan(0L)); + query.accept(profileResult); + }); + } + + private void assertProfileData(SearchContext context, Consumer collector, Consumer query1) + throws IOException { + assertProfileData(context, Arrays.asList(query1), collector, false); + } + + private void assertProfileData( + SearchContext context, + Consumer collector, + Consumer query1, + Consumer query2 + ) throws IOException { + assertProfileData(context, Arrays.asList(query1, query2), collector, false); + } + + private final void assertProfileData( + SearchContext context, + List> queries, + Consumer collector, + boolean debug + ) throws IOException { + assertThat(context.getProfilers(), not(nullValue())); + + final ProfileShardResult result = SearchProfileShardResults.buildShardResults(context.getProfilers(), null); + if (debug) { + final SearchProfileShardResults results = new SearchProfileShardResults( + Collections.singletonMap(indexShard.shardId().toString(), result) + ); + + try (final XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint()) { + builder.startObject(); + results.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + builder.flush(); + + final OutputStream out = builder.getOutputStream(); + assertThat(out, instanceOf(ByteArrayOutputStream.class)); + + logger.info(new String(((ByteArrayOutputStream) out).toByteArray(), StandardCharsets.UTF_8)); + } + } + + assertThat(result.getQueryProfileResults(), hasSize(1)); + + final QueryProfileShardResult queryProfileShardResult = result.getQueryProfileResults().get(0); + assertThat(queryProfileShardResult.getQueryResults(), 
hasSize(queries.size())); + + for (int i = 0; i < queries.size(); ++i) { + queries.get(i).accept(queryProfileShardResult.getQueryResults().get(i)); + } + + collector.accept(queryProfileShardResult.getCollectorResult()); + } + + private static ContextIndexSearcher newContextSearcher(IndexReader reader) throws IOException { + return new ContextIndexSearcher( + reader, + IndexSearcher.getDefaultSimilarity(), + IndexSearcher.getDefaultQueryCache(), + IndexSearcher.getDefaultQueryCachingPolicy(), + true, + null + ); + } + + private static ContextIndexSearcher newEarlyTerminationContextSearcher(IndexReader reader, int size) throws IOException { + return new ContextIndexSearcher( + reader, + IndexSearcher.getDefaultSimilarity(), + IndexSearcher.getDefaultQueryCache(), + IndexSearcher.getDefaultQueryCachingPolicy(), + true, + null + ) { + + @Override + public void search(List leaves, Weight weight, Collector collector) throws IOException { + final Collector in = new AssertingEarlyTerminationFilterCollector(collector, size); + super.search(leaves, weight, in); + } + }; + } + + private static class AssertingEarlyTerminationFilterCollector extends FilterCollector { + private final int size; + + AssertingEarlyTerminationFilterCollector(Collector in, int size) { + super(in); + this.size = size; + } + + @Override + public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException { + final LeafCollector in = super.getLeafCollector(context); + return new FilterLeafCollector(in) { + int collected; + + @Override + public void collect(int doc) throws IOException { + assert collected <= size : "should not collect more than " + size + " doc per segment, got " + collected; + ++collected; + super.collect(doc); + } + }; + } + } +} diff --git a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java index 38a0253305833..832328cb0242f 100644 --- 
a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java @@ -334,7 +334,8 @@ public boolean shouldCache(Query query) { indexSearcher.getSimilarity(), queryCache, queryCachingPolicy, - false + false, + null ); SearchContext searchContext = mock(SearchContext.class); diff --git a/test/framework/src/main/java/org/opensearch/test/TestSearchContext.java b/test/framework/src/main/java/org/opensearch/test/TestSearchContext.java index 0e91332892a55..0b2235a0afedd 100644 --- a/test/framework/src/main/java/org/opensearch/test/TestSearchContext.java +++ b/test/framework/src/main/java/org/opensearch/test/TestSearchContext.java @@ -32,6 +32,7 @@ package org.opensearch.test; import org.apache.lucene.search.Collector; +import org.apache.lucene.search.CollectorManager; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.Query; import org.opensearch.action.OriginalIndices; @@ -70,6 +71,7 @@ import org.opensearch.search.internal.ShardSearchRequest; import org.opensearch.search.profile.Profilers; import org.opensearch.search.query.QuerySearchResult; +import org.opensearch.search.query.ReduceableSearchResult; import org.opensearch.search.rescore.RescoreContext; import org.opensearch.search.sort.SortAndFormats; import org.opensearch.search.suggest.SuggestionSearchContext; @@ -90,7 +92,7 @@ public class TestSearchContext extends SearchContext { final BigArrays bigArrays; final IndexService indexService; final BitsetFilterCache fixedBitSetFilterCache; - final Map, Collector> queryCollectors = new HashMap<>(); + final Map, CollectorManager> queryCollectorManagers = new HashMap<>(); final IndexShard indexShard; final QuerySearchResult queryResult = new QuerySearchResult(); final QueryShardContext queryShardContext; @@ -110,7 +112,9 @@ public class TestSearchContext extends SearchContext { private SearchContextAggregations aggregations; private 
ScrollContext scrollContext; private FieldDoc searchAfter; - private final long originNanoTime = System.nanoTime(); + private Profilers profilers; + private CollapseContext collapse; + private final Map searchExtBuilders = new HashMap<>(); public TestSearchContext(BigArrays bigArrays, IndexService indexService) { @@ -405,12 +409,13 @@ public FieldDoc searchAfter() { @Override public SearchContext collapse(CollapseContext collapse) { - return null; + this.collapse = collapse; + return this; } @Override public CollapseContext collapse() { - return null; + return collapse; } @Override @@ -596,12 +601,12 @@ public long getRelativeTimeInMillis() { @Override public Profilers getProfilers() { - return null; // no profiling + return profilers; } @Override - public Map, Collector> queryCollectors() { - return queryCollectors; + public Map, CollectorManager> queryCollectorManagers() { + return queryCollectorManagers; } @Override @@ -633,4 +638,21 @@ public void addRescore(RescoreContext rescore) { public ReaderContext readerContext() { throw new UnsupportedOperationException(); } + + /** + * Clean the query results by consuming all of it + */ + public TestSearchContext withCleanQueryResult() { + queryResult.consumeAll(); + profilers = null; + return this; + } + + /** + * Add profilers to the query + */ + public TestSearchContext withProfilers() { + this.profilers = new Profilers(searcher); + return this; + } } From cc0e66b1dcc2cfe17b76bcea1168ebb996fbb090 Mon Sep 17 00:00:00 2001 From: Owais Kazi Date: Thu, 24 Mar 2022 19:50:54 -0700 Subject: [PATCH 006/653] Replaced "master" terminology in Log message (#2575) Changed the log message to cluster-manager from master Signed-off-by: Owais Kazi --- .../rest/discovery/Zen2RestApiIT.java | 2 +- .../UnsafeBootstrapAndDetachCommandIT.java | 20 +++--- .../DedicatedClusterSnapshotRestoreIT.java | 20 +++--- .../AddVotingConfigExclusionsRequest.java | 10 +-- .../cluster/InternalClusterInfoService.java | 10 +-- 
.../coordination/ClusterBootstrapService.java | 4 +- .../ClusterFormationFailureHelper.java | 14 ++-- .../cluster/coordination/Coordinator.java | 22 ++++--- .../coordination/DetachClusterCommand.java | 4 +- .../cluster/coordination/JoinHelper.java | 2 +- .../cluster/coordination/Reconfigurator.java | 4 +- .../UnsafeBootstrapMasterCommand.java | 10 +-- .../cluster/node/DiscoveryNodeRole.java | 2 +- .../cluster/node/DiscoveryNodes.java | 4 +- .../HandshakingTransportAddressConnector.java | 7 +- .../org/opensearch/discovery/PeerFinder.java | 6 +- .../opensearch/env/NodeRepurposeCommand.java | 10 +-- .../opensearch/gateway/GatewayMetaState.java | 6 +- .../IncrementalClusterStateWriter.java | 3 +- .../gateway/PersistedClusterStateService.java | 2 +- .../PersistentTasksClusterService.java | 2 +- .../repositories/blobstore/package-info.java | 2 +- ...AddVotingConfigExclusionsRequestTests.java | 2 +- ...tAddVotingConfigExclusionsActionTests.java | 4 +- .../ClusterBootstrapServiceTests.java | 2 +- .../ClusterFormationFailureHelperTests.java | 64 +++++++++---------- .../coordination/CoordinatorTests.java | 8 ++- .../discovery/AbstractDisruptionTestCase.java | 2 +- .../AbstractCoordinatorTestCase.java | 2 +- .../opensearch/test/InternalTestCluster.java | 7 +- 30 files changed, 135 insertions(+), 122 deletions(-) diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/rest/discovery/Zen2RestApiIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/rest/discovery/Zen2RestApiIT.java index 198cc11d824e7..f7899d91e0cb9 100644 --- a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/rest/discovery/Zen2RestApiIT.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/rest/discovery/Zen2RestApiIT.java @@ -176,7 +176,7 @@ public void testFailsOnUnknownNode() throws Exception { assertThat(e.getResponse().getStatusLine().getStatusCode(), is(400)); assertThat( e.getMessage(), - 
Matchers.containsString("add voting config exclusions request for [invalid] matched no master-eligible nodes") + Matchers.containsString("add voting config exclusions request for [invalid] matched no cluster-manager-eligible nodes") ); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java index 1447379b93ec8..292469c6e7b79 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java @@ -287,7 +287,7 @@ public void test3MasterNodes2Failed() throws Exception { internalCluster().setBootstrapMasterNodeIndex(2); List masterNodes = new ArrayList<>(); - logger.info("--> start 1st master-eligible node"); + logger.info("--> start 1st cluster-manager-eligible node"); masterNodes.add( internalCluster().startMasterOnlyNode( Settings.builder().put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s").build() @@ -299,7 +299,7 @@ public void test3MasterNodes2Failed() throws Exception { Settings.builder().put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s").build() ); // node ordinal 1 - logger.info("--> start 2nd and 3rd master-eligible nodes and bootstrap"); + logger.info("--> start 2nd and 3rd cluster-manager-eligible nodes and bootstrap"); masterNodes.addAll(internalCluster().startMasterOnlyNodes(2)); // node ordinals 2 and 3 logger.info("--> wait for all nodes to join the cluster"); @@ -335,19 +335,19 @@ public void test3MasterNodes2Failed() throws Exception { assertTrue(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)); }); - logger.info("--> try to unsafely bootstrap 1st master-eligible node, while node lock is held"); + logger.info("--> try to unsafely 
bootstrap 1st cluster-manager-eligible node, while node lock is held"); Environment environmentMaster1 = TestEnvironment.newEnvironment( Settings.builder().put(internalCluster().getDefaultSettings()).put(master1DataPathSettings).build() ); expectThrows(() -> unsafeBootstrap(environmentMaster1), UnsafeBootstrapMasterCommand.FAILED_TO_OBTAIN_NODE_LOCK_MSG); - logger.info("--> stop 1st master-eligible node and data-only node"); + logger.info("--> stop 1st cluster-manager-eligible node and data-only node"); NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(masterNodes.get(0))); assertBusy(() -> internalCluster().getInstance(GatewayMetaState.class, dataNode).allPendingAsyncStatesWritten()); internalCluster().stopRandomDataNode(); - logger.info("--> unsafely-bootstrap 1st master-eligible node"); + logger.info("--> unsafely-bootstrap 1st cluster-manager-eligible node"); MockTerminal terminal = unsafeBootstrap(environmentMaster1, false, true); Metadata metadata = OpenSearchNodeCommand.createPersistedClusterStateService(Settings.EMPTY, nodeEnvironment.nodeDataPaths()) .loadBestOnDiskState().metadata; @@ -363,7 +363,7 @@ public void test3MasterNodes2Failed() throws Exception { ) ); - logger.info("--> start 1st master-eligible node"); + logger.info("--> start 1st cluster-manager-eligible node"); String masterNode2 = internalCluster().startMasterOnlyNode(master1DataPathSettings); logger.info("--> detach-cluster on data-only node"); @@ -399,7 +399,7 @@ public void test3MasterNodes2Failed() throws Exception { IndexMetadata indexMetadata = clusterService().state().metadata().index("test"); assertThat(indexMetadata.getSettings().get(IndexMetadata.SETTING_HISTORY_UUID), notNullValue()); - logger.info("--> detach-cluster on 2nd and 3rd master-eligible nodes"); + logger.info("--> detach-cluster on 2nd and 3rd cluster-manager-eligible nodes"); Environment environmentMaster2 
= TestEnvironment.newEnvironment( Settings.builder().put(internalCluster().getDefaultSettings()).put(master2DataPathSettings).build() ); @@ -409,7 +409,7 @@ public void test3MasterNodes2Failed() throws Exception { ); detachCluster(environmentMaster3, false); - logger.info("--> start 2nd and 3rd master-eligible nodes and ensure 4 nodes stable cluster"); + logger.info("--> start 2nd and 3rd cluster-manager-eligible nodes and ensure 4 nodes stable cluster"); bootstrappedNodes.add(internalCluster().startMasterOnlyNode(master2DataPathSettings)); bootstrappedNodes.add(internalCluster().startMasterOnlyNode(master3DataPathSettings)); ensureStableCluster(4); @@ -422,7 +422,7 @@ public void testAllMasterEligibleNodesFailedDanglingIndexImport() throws Excepti Settings settings = Settings.builder().put(AUTO_IMPORT_DANGLING_INDICES_SETTING.getKey(), true).build(); - logger.info("--> start mixed data and master-eligible node and bootstrap cluster"); + logger.info("--> start mixed data and cluster-manager-eligible node and bootstrap cluster"); String masterNode = internalCluster().startNode(settings); // node ordinal 0 logger.info("--> start data-only node and ensure 2 nodes stable cluster"); @@ -457,7 +457,7 @@ public void testAllMasterEligibleNodesFailedDanglingIndexImport() throws Excepti ); detachCluster(environment, false); - logger.info("--> stop master-eligible node, clear its data and start it again - new cluster should form"); + logger.info("--> stop cluster-manager-eligible node, clear its data and start it again - new cluster should form"); internalCluster().restartNode(masterNode, new InternalTestCluster.RestartCallback() { @Override public boolean clearData(String nodeName) { diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index 47d57e1260b5f..0c392dbe8bbe6 100644 --- 
a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -482,8 +482,8 @@ public void testSnapshotWithStuckNode() throws Exception { try { assertAcked(deleteSnapshotResponseFuture.actionGet()); } catch (SnapshotMissingException ex) { - // When master node is closed during this test, it sometime manages to delete the snapshot files before - // completely stopping. In this case the retried delete snapshot operation on the new master can fail + // When cluster-manager node is closed during this test, it sometime manages to delete the snapshot files before + // completely stopping. In this case the retried delete snapshot operation on the new cluster-manager can fail // with SnapshotMissingException } @@ -759,7 +759,7 @@ public void testRegistrationFailure() { logger.info("--> start first node"); internalCluster().startNode(); logger.info("--> start second node"); - // Make sure the first node is elected as master + // Make sure the first node is elected as cluster-manager internalCluster().startNode(nonMasterNode()); // Register mock repositories for (int i = 0; i < 5; i++) { @@ -836,7 +836,7 @@ public void sendResponse(RestResponse response) { } public void testMasterShutdownDuringSnapshot() throws Exception { - logger.info("--> starting two master nodes and two data nodes"); + logger.info("--> starting two cluster-manager nodes and two data nodes"); internalCluster().startMasterOnlyNodes(2); internalCluster().startDataOnlyNodes(2); @@ -859,7 +859,7 @@ public void testMasterShutdownDuringSnapshot() throws Exception { .setIndices("test-idx") .get(); - logger.info("--> stopping master node"); + logger.info("--> stopping cluster-manager node"); internalCluster().stopCurrentMasterNode(); logger.info("--> wait until the snapshot is done"); @@ -874,7 +874,7 @@ public void testMasterShutdownDuringSnapshot() throws 
Exception { } public void testMasterAndDataShutdownDuringSnapshot() throws Exception { - logger.info("--> starting three master nodes and two data nodes"); + logger.info("--> starting three cluster-manager nodes and two data nodes"); internalCluster().startMasterOnlyNodes(3); internalCluster().startDataOnlyNodes(2); @@ -902,7 +902,7 @@ public void testMasterAndDataShutdownDuringSnapshot() throws Exception { logger.info("--> stopping data node {}", dataNode); stopNode(dataNode); - logger.info("--> stopping master node {} ", masterNode); + logger.info("--> stopping cluster-manager node {} ", masterNode); internalCluster().stopCurrentMasterNode(); logger.info("--> wait until the snapshot is done"); @@ -925,7 +925,7 @@ public void testMasterAndDataShutdownDuringSnapshot() throws Exception { * the cluster. */ public void testRestoreShrinkIndex() throws Exception { - logger.info("--> starting a master node and a data node"); + logger.info("--> starting a cluster-manager node and a data node"); internalCluster().startMasterOnlyNode(); internalCluster().startDataOnlyNode(); @@ -1144,7 +1144,7 @@ public void testDeduplicateIndexMetadata() throws Exception { } public void testDataNodeRestartWithBusyMasterDuringSnapshot() throws Exception { - logger.info("--> starting a master node and two data nodes"); + logger.info("--> starting a cluster-manager node and two data nodes"); internalCluster().startMasterOnlyNode(); internalCluster().startDataOnlyNodes(2); final Path repoPath = randomRepoPath(); @@ -1200,7 +1200,7 @@ public void testDataNodeRestartWithBusyMasterDuringSnapshot() throws Exception { } public void testDataNodeRestartAfterShardSnapshotFailure() throws Exception { - logger.info("--> starting a master node and two data nodes"); + logger.info("--> starting a cluster-manager node and two data nodes"); internalCluster().startMasterOnlyNode(); final List dataNodes = internalCluster().startDataOnlyNodes(2); final Path repoPath = randomRepoPath(); diff --git 
a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java index 99291742145f0..e0e5bf622b99e 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java @@ -54,7 +54,7 @@ import java.util.stream.StreamSupport; /** - * A request to add voting config exclusions for certain master-eligible nodes, and wait for these nodes to be removed from the voting + * A request to add voting config exclusions for certain cluster-manager-eligible nodes, and wait for these nodes to be removed from the voting * configuration. */ public class AddVotingConfigExclusionsRequest extends MasterNodeRequest { @@ -66,7 +66,7 @@ public class AddVotingConfigExclusionsRequest extends MasterNodeRequest resolveVotingConfigExclusions(ClusterState currentSta if (newVotingConfigExclusions.isEmpty()) { throw new IllegalArgumentException( - "add voting config exclusions request for " + Arrays.asList(nodeDescriptions) + " matched no master-eligible nodes" + "add voting config exclusions request for " + + Arrays.asList(nodeDescriptions) + + " matched no cluster-manager-eligible nodes" ); } } else if (nodeIds.length >= 1) { diff --git a/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java b/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java index 05d91fdfd9ebb..5b1c026e5259b 100644 --- a/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java +++ b/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java @@ -77,7 +77,7 @@ * InternalClusterInfoService provides the ClusterInfoService interface, * routinely updated on a timer. 
The timer can be dynamically changed by * setting the cluster.info.update.interval setting (defaulting - * to 30 seconds). The InternalClusterInfoService only runs on the master node. + * to 30 seconds). The InternalClusterInfoService only runs on the cluster-manager node. * Listens for changes in the number of data nodes and immediately submits a * ClusterInfoUpdateJob if a node has been added. * @@ -109,7 +109,7 @@ public class InternalClusterInfoService implements ClusterInfoService, ClusterSt private volatile ImmutableOpenMap leastAvailableSpaceUsages; private volatile ImmutableOpenMap mostAvailableSpaceUsages; private volatile IndicesStatsSummary indicesStatsSummary; - // null if this node is not currently the master + // null if this node is not currently the cluster-manager private final AtomicReference refreshAndRescheduleRunnable = new AtomicReference<>(); private volatile boolean enabled; private volatile TimeValue fetchTimeout; @@ -150,8 +150,8 @@ void setUpdateFrequency(TimeValue updateFrequency) { @Override public void clusterChanged(ClusterChangedEvent event) { if (event.localNodeMaster() && refreshAndRescheduleRunnable.get() == null) { - logger.trace("elected as master, scheduling cluster info update tasks"); - executeRefresh(event.state(), "became master"); + logger.trace("elected as cluster-manager, scheduling cluster info update tasks"); + executeRefresh(event.state(), "became cluster-manager"); final RefreshAndRescheduleRunnable newRunnable = new RefreshAndRescheduleRunnable(); refreshAndRescheduleRunnable.set(newRunnable); @@ -535,7 +535,7 @@ protected void doRun() { if (this == refreshAndRescheduleRunnable.get()) { super.doRun(); } else { - logger.trace("master changed, scheduled refresh job is stale"); + logger.trace("cluster-manager changed, scheduled refresh job is stale"); } } diff --git a/server/src/main/java/org/opensearch/cluster/coordination/ClusterBootstrapService.java 
b/server/src/main/java/org/opensearch/cluster/coordination/ClusterBootstrapService.java index ce34a21e4adb6..8df561149eb3d 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/ClusterBootstrapService.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/ClusterBootstrapService.java @@ -135,7 +135,7 @@ public ClusterBootstrapService( + DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey() + "] set to [" + DiscoveryModule.SINGLE_NODE_DISCOVERY_TYPE - + "] must be master-eligible" + + "] must be cluster-manager-eligible" ); } bootstrapRequirements = Collections.singleton(Node.NODE_NAME_SETTING.get(settings)); @@ -219,7 +219,7 @@ void scheduleUnconfiguredBootstrap() { logger.info( "no discovery configuration found, will perform best-effort cluster bootstrapping after [{}] " - + "unless existing master is discovered", + + "unless existing cluster-manager is discovered", unconfiguredBootstrapTimeout ); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelper.java b/server/src/main/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelper.java index c36a2983a011a..0f419aa7a0937 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelper.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelper.java @@ -192,7 +192,7 @@ String getDescription() { ); if (clusterState.nodes().getLocalNode().isMasterNode() == false) { - return String.format(Locale.ROOT, "master not discovered yet: %s", discoveryStateIgnoringQuorum); + return String.format(Locale.ROOT, "cluster-manager not discovered yet: %s", discoveryStateIgnoringQuorum); } if (clusterState.getLastAcceptedConfiguration().isEmpty()) { @@ -203,14 +203,14 @@ String getDescription() { } else { bootstrappingDescription = String.format( Locale.ROOT, - "this node must discover master-eligible nodes %s to bootstrap a cluster", + "this node must discover 
cluster-manager-eligible nodes %s to bootstrap a cluster", INITIAL_CLUSTER_MANAGER_NODES_SETTING.get(settings) ); } return String.format( Locale.ROOT, - "master not discovered yet, this node has not previously joined a bootstrapped cluster, and %s: %s", + "cluster-manager not discovered yet, this node has not previously joined a bootstrapped cluster, and %s: %s", bootstrappingDescription, discoveryStateIgnoringQuorum ); @@ -221,7 +221,7 @@ String getDescription() { if (clusterState.getLastCommittedConfiguration().equals(VotingConfiguration.MUST_JOIN_ELECTED_MASTER)) { return String.format( Locale.ROOT, - "master not discovered yet and this node was detached from its previous cluster, have discovered %s; %s", + "cluster-manager not discovered yet and this node was detached from its previous cluster, have discovered %s; %s", foundPeers, discoveryWillContinueDescription ); @@ -250,7 +250,7 @@ String getDescription() { return String.format( Locale.ROOT, - "master not discovered or elected yet, an election requires %s, have discovered %s which %s; %s", + "cluster-manager not discovered or elected yet, an election requires %s, have discovered %s which %s; %s", quorumDescription, foundPeers, isQuorumOrNot, @@ -269,8 +269,8 @@ private String describeQuorum(VotingConfiguration votingConfiguration) { if (nodeIds.size() == 1) { if (nodeIds.contains(GatewayMetaState.STALE_STATE_CONFIG_NODE_ID)) { - return "one or more nodes that have already participated as master-eligible nodes in the cluster but this node was " - + "not master-eligible the last time it joined the cluster"; + return "one or more nodes that have already participated as cluster-manager-eligible nodes in the cluster but this node was " + + "not cluster-manager-eligible the last time it joined the cluster"; } else { return "a node with id " + realNodeIds; } diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java 
b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java index 557f11f75d969..89e5b9b4cfbcc 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java @@ -510,7 +510,7 @@ private void startElection() { private void abdicateTo(DiscoveryNode newMaster) { assert Thread.holdsLock(mutex); assert mode == Mode.LEADER : "expected to be leader on abdication but was " + mode; - assert newMaster.isMasterNode() : "should only abdicate to master-eligible node but was " + newMaster; + assert newMaster.isMasterNode() : "should only abdicate to cluster-manager-eligible node but was " + newMaster; final StartJoinRequest startJoinRequest = new StartJoinRequest(newMaster, Math.max(getCurrentTerm(), maxTermSeen) + 1); logger.info("abdicating to {} with term {}", newMaster, startJoinRequest.getTerm()); getLastAcceptedState().nodes().mastersFirstStream().forEach(node -> { @@ -563,7 +563,7 @@ private Join joinLeaderInTerm(StartJoinRequest startJoinRequest) { private void handleJoinRequest(JoinRequest joinRequest, JoinHelper.JoinCallback joinCallback) { assert Thread.holdsLock(mutex) == false; - assert getLocalNode().isMasterNode() : getLocalNode() + " received a join but is not master-eligible"; + assert getLocalNode().isMasterNode() : getLocalNode() + " received a join but is not cluster-manager-eligible"; logger.trace("handleJoinRequest: as {}, handling {}", mode, joinRequest); if (singleNodeDiscovery && joinRequest.getSourceNode().equals(getLocalNode()) == false) { @@ -683,7 +683,7 @@ void becomeCandidate(String method) { void becomeLeader(String method) { assert Thread.holdsLock(mutex) : "Coordinator mutex not held"; assert mode == Mode.CANDIDATE : "expected candidate but was " + mode; - assert getLocalNode().isMasterNode() : getLocalNode() + " became a leader but is not master-eligible"; + assert getLocalNode().isMasterNode() : getLocalNode() + " became a 
leader but is not cluster-manager-eligible"; logger.debug( "{}: coordinator becoming LEADER in term {} (was {}, lastKnownLeader was [{}])", @@ -709,7 +709,7 @@ void becomeLeader(String method) { void becomeFollower(String method, DiscoveryNode leaderNode) { assert Thread.holdsLock(mutex) : "Coordinator mutex not held"; - assert leaderNode.isMasterNode() : leaderNode + " became a leader but is not master-eligible"; + assert leaderNode.isMasterNode() : leaderNode + " became a leader but is not cluster-manager-eligible"; assert mode != Mode.LEADER : "do not switch to follower from leader (should be candidate first)"; if (mode == Mode.FOLLOWER && Optional.of(leaderNode).equals(lastKnownLeader)) { @@ -751,11 +751,11 @@ void becomeFollower(String method, DiscoveryNode leaderNode) { } private void cleanMasterService() { - masterService.submitStateUpdateTask("clean-up after stepping down as master", new LocalClusterUpdateTask() { + masterService.submitStateUpdateTask("clean-up after stepping down as cluster-manager", new LocalClusterUpdateTask() { @Override public void onFailure(String source, Exception e) { // ignore - logger.trace("failed to clean-up after stepping down as master", e); + logger.trace("failed to clean-up after stepping down as cluster-manager", e); } @Override @@ -987,9 +987,9 @@ public boolean setInitialConfiguration(final VotingConfiguration votingConfigura } if (getLocalNode().isMasterNode() == false) { - logger.debug("skip setting initial configuration as local node is not a master-eligible node"); + logger.debug("skip setting initial configuration as local node is not a cluster-manager-eligible node"); throw new CoordinationStateRejectedException( - "this node is not master-eligible, but cluster bootstrapping can only happen on a master-eligible node" + "this node is not cluster-manager-eligible, but cluster bootstrapping can only happen on a cluster-manager-eligible node" ); } @@ -1046,8 +1046,10 @@ ClusterState improveConfiguration(ClusterState 
clusterState) { // exclude any nodes whose ID is in the voting config exclusions list ... final Stream excludedNodeIds = clusterState.getVotingConfigExclusions().stream().map(VotingConfigExclusion::getNodeId); - // ... and also automatically exclude the node IDs of master-ineligible nodes that were previously master-eligible and are still in - // the voting config. We could exclude all the master-ineligible nodes here, but there could be quite a few of them and that makes + // ... and also automatically exclude the node IDs of cluster-manager-ineligible nodes that were previously cluster-manager-eligible + // and are still in + // the voting config. We could exclude all the cluster-manager-ineligible nodes here, but there could be quite a few of them and + // that makes // the logging much harder to follow. final Stream masterIneligibleNodeIdsInVotingConfig = StreamSupport.stream(clusterState.nodes().spliterator(), false) .filter( diff --git a/server/src/main/java/org/opensearch/cluster/coordination/DetachClusterCommand.java b/server/src/main/java/org/opensearch/cluster/coordination/DetachClusterCommand.java index 49d88fd33c724..efa5a5ee600ab 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/DetachClusterCommand.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/DetachClusterCommand.java @@ -47,9 +47,9 @@ public class DetachClusterCommand extends OpenSearchNodeCommand { static final String CONFIRMATION_MSG = DELIMITER + "\n" + "You should only run this tool if you have permanently lost all of the\n" - + "master-eligible nodes in this cluster and you cannot restore the cluster\n" + + "cluster-manager-eligible nodes in this cluster and you cannot restore the cluster\n" + "from a snapshot, or you have already unsafely bootstrapped a new cluster\n" - + "by running `opensearch-node unsafe-bootstrap` on a master-eligible\n" + + "by running `opensearch-node unsafe-bootstrap` on a cluster-manager-eligible\n" + "node that belonged to the 
same cluster as this node. This tool can cause\n" + "arbitrary data loss and its use should be your last resort.\n" + "\n" diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java index 6d2fb99e04f86..5975e5b64214f 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java @@ -489,7 +489,7 @@ public void close(Mode newMode) { pendingAsTasks.put(task, new JoinTaskListener(task, value)); }); - final String stateUpdateSource = "elected-as-master ([" + pendingAsTasks.size() + "] nodes joined)"; + final String stateUpdateSource = "elected-as-cluster-manager ([" + pendingAsTasks.size() + "] nodes joined)"; pendingAsTasks.put(JoinTaskExecutor.newBecomeMasterTask(), (source, e) -> {}); pendingAsTasks.put(JoinTaskExecutor.newFinishElectionTask(), (source, e) -> {}); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Reconfigurator.java b/server/src/main/java/org/opensearch/cluster/coordination/Reconfigurator.java index 26f289f5547d6..b38b0cf0f4693 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Reconfigurator.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/Reconfigurator.java @@ -57,14 +57,14 @@ public class Reconfigurator { * the best resilience it makes automatic adjustments to the voting configuration as master nodes join or leave the cluster. Adjustments * that fix or increase the size of the voting configuration are always a good idea, but the wisdom of reducing the voting configuration * size is less clear. 
For instance, automatically reducing the voting configuration down to a single node means the cluster requires - * this node to operate, which is not resilient: if it broke we could restore every other master-eligible node in the cluster to health + * this node to operate, which is not resilient: if it broke we could restore every other cluster-manager-eligible node in the cluster to health * and still the cluster would be unavailable. However not reducing the voting configuration size can also hamper resilience: in a * five-node cluster we could lose two nodes and by reducing the voting configuration to the remaining three nodes we could tolerate the * loss of a further node before failing. * * We offer two options: either we auto-shrink the voting configuration as long as it contains more than three nodes, or we don't and we * require the user to control the voting configuration manually using the retirement API. The former, default, option, guarantees that - * as long as there have been at least three master-eligible nodes in the cluster and no more than one of them is currently unavailable, + * as long as there have been at least three cluster-manager-eligible nodes in the cluster and no more than one of them is currently unavailable, * then the cluster will still operate, which is what almost everyone wants. Manual control is for users who want different guarantees. 
*/ public static final Setting CLUSTER_AUTO_SHRINK_VOTING_CONFIGURATION = Setting.boolSetting( diff --git a/server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapMasterCommand.java b/server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapMasterCommand.java index e61b6448f6ac9..c6c7e75497e29 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapMasterCommand.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapMasterCommand.java @@ -60,15 +60,15 @@ public class UnsafeBootstrapMasterCommand extends OpenSearchNodeCommand { static final String CONFIRMATION_MSG = DELIMITER + "\n" + "You should only run this tool if you have permanently lost half or more\n" - + "of the master-eligible nodes in this cluster, and you cannot restore the\n" + + "of the cluster-manager-eligible nodes in this cluster, and you cannot restore the\n" + "cluster from a snapshot. This tool can cause arbitrary data loss and its\n" - + "use should be your last resort. If you have multiple surviving master\n" + + "use should be your last resort. 
If you have multiple surviving cluster-manager\n" + "eligible nodes, you should run this tool on the node with the highest\n" + "cluster state (term, version) pair.\n" + "\n" + "Do you want to proceed?\n"; - static final String NOT_MASTER_NODE_MSG = "unsafe-bootstrap tool can only be run on master eligible node"; + static final String NOT_MASTER_NODE_MSG = "unsafe-bootstrap tool can only be run on cluster-manager eligible node"; static final String EMPTY_LAST_COMMITTED_VOTING_CONFIG_MSG = "last committed voting voting configuration is empty, cluster has never been bootstrapped?"; @@ -81,7 +81,9 @@ public class UnsafeBootstrapMasterCommand extends OpenSearchNodeCommand { private OptionSpec applyClusterReadOnlyBlockOption; UnsafeBootstrapMasterCommand() { - super("Forces the successful election of the current node after the permanent loss of the half or more master-eligible nodes"); + super( + "Forces the successful election of the current node after the permanent loss of the half or more cluster-manager-eligible nodes" + ); applyClusterReadOnlyBlockOption = parser.accepts("apply-cluster-read-only-block", "Optional cluster.blocks.read_only setting") .withOptionalArg() .ofType(Boolean.class); diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java index cff1a77f4cdb7..83e35c0ee18ab 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java @@ -206,7 +206,7 @@ public Setting legacySetting() { }; /** - * Represents the role for a master-eligible node. + * Represents the role for a cluster-manager-eligible node. 
* @deprecated As of 2.0, because promoting inclusive language, replaced by {@link #CLUSTER_MANAGER_ROLE} */ @Deprecated diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java index 1097f3bc245ac..8d84869bc8bec 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java @@ -574,7 +574,7 @@ public List addedNodes() { public String shortSummary() { final StringBuilder summary = new StringBuilder(); if (masterNodeChanged()) { - summary.append("master node changed {previous ["); + summary.append("cluster-manager node changed {previous ["); if (previousMasterNode() != null) { summary.append(previousMasterNode()); } @@ -799,7 +799,7 @@ public boolean isLocalNodeElectedMaster() { } /** - * Check if the given name of the node role is 'cluster_manger' or 'master'. + * Check if the given name of the node role is 'cluster_manager' or 'master'. * The method is added for {@link #resolveNodes} to keep the code clear, when support the both above roles. * @deprecated As of 2.0, because promoting inclusive language. MASTER_ROLE is deprecated. * @param matchAttrName a given String for a name of the node role. 
diff --git a/server/src/main/java/org/opensearch/discovery/HandshakingTransportAddressConnector.java b/server/src/main/java/org/opensearch/discovery/HandshakingTransportAddressConnector.java index 906e28cbb6b51..d2e2c5fbea8ac 100644 --- a/server/src/main/java/org/opensearch/discovery/HandshakingTransportAddressConnector.java +++ b/server/src/main/java/org/opensearch/discovery/HandshakingTransportAddressConnector.java @@ -137,7 +137,9 @@ protected void innerOnResponse(DiscoveryNode remoteNode) { if (remoteNode.equals(transportService.getLocalNode())) { listener.onFailure(new ConnectTransportException(remoteNode, "local node found")); } else if (remoteNode.isMasterNode() == false) { - listener.onFailure(new ConnectTransportException(remoteNode, "non-master-eligible node found")); + listener.onFailure( + new ConnectTransportException(remoteNode, "non-cluster-manager-eligible node found") + ); } else { transportService.connectToNode(remoteNode, new ActionListener() { @Override @@ -153,7 +155,8 @@ public void onResponse(Void ignored) { @Override public void onFailure(Exception e) { // we opened a connection and successfully performed a handshake, so we're definitely - // talking to a master-eligible node with a matching cluster name and a good version, + // talking to a cluster-manager-eligible node with a matching cluster name and a good + // version, // but the attempt to open a full connection to its publish address failed; a common // reason is that the remote node is listening on 0.0.0.0 but has made an inappropriate // choice for its publish address. 
diff --git a/server/src/main/java/org/opensearch/discovery/PeerFinder.java b/server/src/main/java/org/opensearch/discovery/PeerFinder.java index 37f07c5d56a9a..fe669e7b6d073 100644 --- a/server/src/main/java/org/opensearch/discovery/PeerFinder.java +++ b/server/src/main/java/org/opensearch/discovery/PeerFinder.java @@ -225,7 +225,7 @@ public List getLastResolvedAddresses() { public interface TransportAddressConnector { /** - * Identify the node at the given address and, if it is a master node and not the local node then establish a full connection to it. + * Identify the node at the given address and, if it is a cluster-manager node and not the local node then establish a full connection to it. */ void connectToRemoteMasterNode(TransportAddress transportAddress, ActionListener listener); } @@ -275,7 +275,7 @@ private boolean handleWakeUp() { return peersRemoved; } - logger.trace("probing master nodes from cluster state: {}", lastAcceptedNodes); + logger.trace("probing cluster-manager nodes from cluster state: {}", lastAcceptedNodes); for (ObjectCursor discoveryNodeObjectCursor : lastAcceptedNodes.getMasterNodes().values()) { startProbe(discoveryNodeObjectCursor.value.getAddress()); } @@ -381,7 +381,7 @@ void establishConnection() { transportAddressConnector.connectToRemoteMasterNode(transportAddress, new ActionListener() { @Override public void onResponse(DiscoveryNode remoteNode) { - assert remoteNode.isMasterNode() : remoteNode + " is not master-eligible"; + assert remoteNode.isMasterNode() : remoteNode + " is not cluster-manager-eligible"; assert remoteNode.equals(getLocalNode()) == false : remoteNode + " is the local node"; synchronized (mutex) { if (active == false) { diff --git a/server/src/main/java/org/opensearch/env/NodeRepurposeCommand.java b/server/src/main/java/org/opensearch/env/NodeRepurposeCommand.java index d14b7df8b747a..cb431a6a5d0de 100644 --- a/server/src/main/java/org/opensearch/env/NodeRepurposeCommand.java +++ 
b/server/src/main/java/org/opensearch/env/NodeRepurposeCommand.java @@ -68,7 +68,7 @@ public class NodeRepurposeCommand extends OpenSearchNodeCommand { static final String NO_SHARD_DATA_TO_CLEAN_UP_FOUND = "No shard data to clean-up found"; public NodeRepurposeCommand() { - super("Repurpose this node to another master/data role, cleaning up any excess persisted data"); + super("Repurpose this node to another cluster-manager/data role, cleaning up any excess persisted data"); } void testExecute(Terminal terminal, OptionSet options, Environment env) throws Exception { @@ -129,7 +129,7 @@ private void processNoMasterNoDataNode(Terminal terminal, Path[] dataPaths, Envi terminal.println(noMasterMessage(indexUUIDs.size(), shardDataPaths.size(), indexMetadataPaths.size())); outputHowToSeeVerboseInformation(terminal); - terminal.println("Node is being re-purposed as no-master and no-data. Clean-up of index data will be performed."); + terminal.println("Node is being re-purposed as no-cluster-manager and no-data. Clean-up of index data will be performed."); confirm(terminal, "Do you want to proceed?"); removePaths(terminal, indexPaths); // clean-up shard dirs @@ -137,7 +137,7 @@ private void processNoMasterNoDataNode(Terminal terminal, Path[] dataPaths, Envi MetadataStateFormat.deleteMetaState(dataPaths); IOUtils.rm(Stream.of(dataPaths).map(path -> path.resolve(INDICES_FOLDER)).toArray(Path[]::new)); - terminal.println("Node successfully repurposed to no-master and no-data."); + terminal.println("Node successfully repurposed to no-cluster-manager and no-data."); } private void processMasterNoDataNode(Terminal terminal, Path[] dataPaths, Environment env) throws IOException { @@ -162,12 +162,12 @@ private void processMasterNoDataNode(Terminal terminal, Path[] dataPaths, Enviro terminal.println(shardMessage(shardDataPaths.size(), indexUUIDs.size())); outputHowToSeeVerboseInformation(terminal); - terminal.println("Node is being re-purposed as master and no-data. 
Clean-up of shard data will be performed."); + terminal.println("Node is being re-purposed as cluster-manager and no-data. Clean-up of shard data will be performed."); confirm(terminal, "Do you want to proceed?"); removePaths(terminal, shardDataPaths); // clean-up shard dirs - terminal.println("Node successfully repurposed to master and no-data."); + terminal.println("Node successfully repurposed to cluster-manager and no-data."); } private ClusterState loadClusterState(Terminal terminal, Environment env, PersistedClusterStateService psf) throws IOException { diff --git a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java index fd978a9c8ed8b..3081c4da8f7a7 100644 --- a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java +++ b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java @@ -89,7 +89,7 @@ * * When started, ensures that this version is compatible with the state stored on disk, and performs a state upgrade if necessary. Note that * the state being loaded when constructing the instance of this class is not necessarily the state that will be used as {@link - * ClusterState#metadata()} because it might be stale or incomplete. Master-eligible nodes must perform an election to find a complete and + * ClusterState#metadata()} because it might be stale or incomplete. Cluster-manager-eligible nodes must perform an election to find a complete and * non-stale state, and master-ineligible nodes receive the real cluster state from the elected master after joining the cluster. */ public class GatewayMetaState implements Closeable { @@ -97,7 +97,7 @@ public class GatewayMetaState implements Closeable { /** * Fake node ID for a voting configuration written by a master-ineligible data node to indicate that its on-disk state is potentially * stale (since it is written asynchronously after application, rather than before acceptance). 
This node ID means that if the node is - * restarted as a master-eligible node then it does not win any elections until it has received a fresh cluster state. + * restarted as a cluster-manager-eligible node then it does not win any elections until it has received a fresh cluster state. */ public static final String STALE_STATE_CONFIG_NODE_ID = "STALE_STATE_CONFIG"; @@ -310,7 +310,7 @@ public void applyClusterState(ClusterChangedEvent event) { } try { - // Hack: This is to ensure that non-master-eligible Zen2 nodes always store a current term + // Hack: This is to ensure that non-cluster-manager-eligible Zen2 nodes always store a current term // that's higher than the last accepted term. // TODO: can we get rid of this hack? if (event.state().term() > incrementalClusterStateWriter.getPreviousManifest().getCurrentTerm()) { diff --git a/server/src/main/java/org/opensearch/gateway/IncrementalClusterStateWriter.java b/server/src/main/java/org/opensearch/gateway/IncrementalClusterStateWriter.java index 4c1a921e9c4ac..4933b70384960 100644 --- a/server/src/main/java/org/opensearch/gateway/IncrementalClusterStateWriter.java +++ b/server/src/main/java/org/opensearch/gateway/IncrementalClusterStateWriter.java @@ -333,7 +333,8 @@ void writeManifestAndCleanup(String reason, Manifest manifest) throws WriteState } catch (WriteStateException e) { // If the Manifest write results in a dirty WriteStateException it's not safe to roll back, removing the new metadata files, // because if the Manifest was actually written to disk and its deletion fails it will reference these new metadata files. - // On master-eligible nodes a dirty WriteStateException here is fatal to the node since we no longer really have any idea + // On cluster-manager-eligible nodes a dirty WriteStateException here is fatal to the node since we no longer really have + // any idea // what the state on disk is and the only sensible response is to start again from scratch. 
if (e.isDirty() == false) { rollback(); diff --git a/server/src/main/java/org/opensearch/gateway/PersistedClusterStateService.java b/server/src/main/java/org/opensearch/gateway/PersistedClusterStateService.java index 8ccf6375239a2..4bcd6bb9fc7a5 100644 --- a/server/src/main/java/org/opensearch/gateway/PersistedClusterStateService.java +++ b/server/src/main/java/org/opensearch/gateway/PersistedClusterStateService.java @@ -108,7 +108,7 @@ import java.util.function.Supplier; /** - * Stores cluster metadata in a bare Lucene index (per data path) split across a number of documents. This is used by master-eligible nodes + * Stores cluster metadata in a bare Lucene index (per data path) split across a number of documents. This is used by cluster-manager-eligible nodes * to record the last-accepted cluster state during publication. The metadata is written incrementally where possible, leaving alone any * documents that have not changed. The index has the following fields: * diff --git a/server/src/main/java/org/opensearch/persistent/PersistentTasksClusterService.java b/server/src/main/java/org/opensearch/persistent/PersistentTasksClusterService.java index 00a5f335338c4..eaa623b53ac1c 100644 --- a/server/src/main/java/org/opensearch/persistent/PersistentTasksClusterService.java +++ b/server/src/main/java/org/opensearch/persistent/PersistentTasksClusterService.java @@ -398,7 +398,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS /** * Returns true if the cluster state change(s) require to reassign some persistent tasks. It can happen in the following - * situations: a node left or is added, the routing table changed, the master node changed, the metadata changed or the + * situations: a node left or is added, the routing table changed, the cluster-manager node changed, the metadata changed or the * persistent tasks changed. 
*/ boolean shouldReassignPersistentTasks(final ClusterChangedEvent event) { diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/package-info.java b/server/src/main/java/org/opensearch/repositories/blobstore/package-info.java index 92ae4b69c45bc..a960cfe70aee7 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/package-info.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/package-info.java @@ -38,7 +38,7 @@ * any {@code BlobStoreRepository} implementation must provide via its implementation of * {@link org.opensearch.repositories.blobstore.BlobStoreRepository#getBlobContainer()}.

* - *

The blob store is written to and read from by master-eligible nodes and data nodes. All metadata related to a snapshot's + *

The blob store is written to and read from by cluster-manager-eligible nodes and data nodes. All metadata related to a snapshot's * scope and health is written by the master node.

*

The data-nodes on the other hand, write the data for each individual shard but do not write any blobs outside of shard directories for * shards that they hold the primary of. For each shard, the data-node holding the shard's primary writes the actual data in form of diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequestTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequestTests.java index 8da65ba13b9cb..a92e4e4a6c536 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequestTests.java @@ -163,7 +163,7 @@ public void testResolve() { IllegalArgumentException.class, () -> makeRequestWithNodeDescriptions("not-a-node").resolveVotingConfigExclusions(clusterState) ).getMessage(), - equalTo("add voting config exclusions request for [not-a-node] matched no master-eligible nodes") + equalTo("add voting config exclusions request for [not-a-node] matched no cluster-manager-eligible nodes") ); assertWarnings(AddVotingConfigExclusionsRequest.DEPRECATION_MESSAGE); } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsActionTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsActionTests.java index a570db040a805..bff0689a153b3 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsActionTests.java @@ -344,7 +344,7 @@ public void testReturnsErrorIfNoMatchingNodeDescriptions() throws InterruptedExc assertThat(rootCause, instanceOf(IllegalArgumentException.class)); 
assertThat( rootCause.getMessage(), - equalTo("add voting config exclusions request for [not-a-node] matched no master-eligible nodes") + equalTo("add voting config exclusions request for [not-a-node] matched no cluster-manager-eligible nodes") ); assertWarnings(AddVotingConfigExclusionsRequest.DEPRECATION_MESSAGE); } @@ -368,7 +368,7 @@ public void testOnlyMatchesMasterEligibleNodes() throws InterruptedException { assertThat(rootCause, instanceOf(IllegalArgumentException.class)); assertThat( rootCause.getMessage(), - equalTo("add voting config exclusions request for [_all, master:false] matched no master-eligible nodes") + equalTo("add voting config exclusions request for [_all, master:false] matched no cluster-manager-eligible nodes") ); assertWarnings(AddVotingConfigExclusionsRequest.DEPRECATION_MESSAGE); } diff --git a/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceTests.java b/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceTests.java index 079b31f31f599..dd55d078fe2c6 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceTests.java @@ -705,7 +705,7 @@ public void testFailBootstrapNonMasterEligibleNodeWithSingleNodeDiscovery() { IllegalArgumentException.class, () -> new ClusterBootstrapService(settings.build(), transportService, () -> emptyList(), () -> false, vc -> fail()) ).getMessage(), - containsString("node with [discovery.type] set to [single-node] must be master-eligible") + containsString("node with [discovery.type] set to [single-node] must be cluster-manager-eligible") ); } } diff --git a/server/src/test/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelperTests.java b/server/src/test/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelperTests.java index 13cdc640008cb..391d7b0e56332 100644 --- 
a/server/src/test/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelperTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelperTests.java @@ -191,7 +191,7 @@ public void testDescriptionOnMasterIneligibleNodes() { new StatusInfo(HEALTHY, "healthy-info") ).getDescription(), is( - "master not discovered yet: have discovered []; discovery will continue using [] from hosts providers " + "cluster-manager not discovered yet: have discovered []; discovery will continue using [] from hosts providers " + "and [] from last-known cluster state; node term 15, last-accepted version 12 in term 4" ) ); @@ -208,7 +208,7 @@ public void testDescriptionOnMasterIneligibleNodes() { new StatusInfo(HEALTHY, "healthy-info") ).getDescription(), is( - "master not discovered yet: have discovered []; discovery will continue using [" + "cluster-manager not discovered yet: have discovered []; discovery will continue using [" + otherAddress + "] from hosts providers and [] from last-known cluster state; node term 16, last-accepted version 12 in term 4" ) @@ -226,7 +226,7 @@ public void testDescriptionOnMasterIneligibleNodes() { new StatusInfo(HEALTHY, "healthy-info") ).getDescription(), is( - "master not discovered yet: have discovered [" + "cluster-manager not discovered yet: have discovered [" + otherNode + "]; discovery will continue using [] from hosts providers " + "and [] from last-known cluster state; node term 17, last-accepted version 12 in term 4" @@ -257,7 +257,7 @@ public void testDescriptionForBWCState() { new StatusInfo(HEALTHY, "healthy-info") ).getDescription(), is( - "master not discovered yet: have discovered []; discovery will continue using [] from hosts providers " + "cluster-manager not discovered yet: have discovered []; discovery will continue using [] from hosts providers " + "and [] from last-known cluster state; node term 15, last-accepted version 42 in term 0" ) ); @@ -328,7 +328,7 @@ public void 
testDescriptionBeforeBootstrapping() { new StatusInfo(HEALTHY, "healthy-info") ).getDescription(), is( - "master not discovered yet, this node has not previously joined a bootstrapped cluster, and " + "cluster-manager not discovered yet, this node has not previously joined a bootstrapped cluster, and " + "[cluster.initial_cluster_manager_nodes] is empty on this node: have discovered []; " + "discovery will continue using [] from hosts providers and [" + localNode @@ -348,7 +348,7 @@ public void testDescriptionBeforeBootstrapping() { new StatusInfo(HEALTHY, "healthy-info") ).getDescription(), is( - "master not discovered yet, this node has not previously joined a bootstrapped cluster, and " + "cluster-manager not discovered yet, this node has not previously joined a bootstrapped cluster, and " + "[cluster.initial_cluster_manager_nodes] is empty on this node: have discovered []; " + "discovery will continue using [" + otherAddress @@ -370,7 +370,7 @@ public void testDescriptionBeforeBootstrapping() { new StatusInfo(HEALTHY, "healthy-info") ).getDescription(), is( - "master not discovered yet, this node has not previously joined a bootstrapped cluster, and " + "cluster-manager not discovered yet, this node has not previously joined a bootstrapped cluster, and " + "[cluster.initial_cluster_manager_nodes] is empty on this node: have discovered [" + otherNode + "]; " @@ -391,8 +391,8 @@ public void testDescriptionBeforeBootstrapping() { new StatusInfo(HEALTHY, "healthy-info") ).getDescription(), is( - "master not discovered yet, this node has not previously joined a bootstrapped cluster, and " - + "this node must discover master-eligible nodes [other] to bootstrap a cluster: have discovered []; " + "cluster-manager not discovered yet, this node has not previously joined a bootstrapped cluster, and " + + "this node must discover cluster-manager-eligible nodes [other] to bootstrap a cluster: have discovered []; " + "discovery will continue using [] from hosts providers and 
[" + localNode + "] from last-known cluster state; node term 4, last-accepted version 7 in term 4" @@ -442,7 +442,7 @@ public void testDescriptionAfterDetachCluster() { new StatusInfo(HEALTHY, "healthy-info") ).getDescription(), is( - "master not discovered yet and this node was detached from its previous cluster, " + "cluster-manager not discovered yet and this node was detached from its previous cluster, " + "have discovered []; " + "discovery will continue using [] from hosts providers and [" + localNode @@ -462,7 +462,7 @@ public void testDescriptionAfterDetachCluster() { new StatusInfo(HEALTHY, "healthy-info") ).getDescription(), is( - "master not discovered yet and this node was detached from its previous cluster, " + "cluster-manager not discovered yet and this node was detached from its previous cluster, " + "have discovered []; " + "discovery will continue using [" + otherAddress @@ -484,7 +484,7 @@ public void testDescriptionAfterDetachCluster() { new StatusInfo(HEALTHY, "healthy-info") ).getDescription(), is( - "master not discovered yet and this node was detached from its previous cluster, " + "cluster-manager not discovered yet and this node was detached from its previous cluster, " + "have discovered [" + otherNode + "]; " @@ -506,7 +506,7 @@ public void testDescriptionAfterDetachCluster() { new StatusInfo(HEALTHY, "healthy-info") ).getDescription(), is( - "master not discovered yet and this node was detached from its previous cluster, " + "cluster-manager not discovered yet and this node was detached from its previous cluster, " + "have discovered [" + yetAnotherNode + "]; " @@ -534,7 +534,7 @@ public void testDescriptionAfterBootstrapping() { new StatusInfo(HEALTHY, "healthy-info") ).getDescription(), is( - "master not discovered or elected yet, an election requires a node with id [otherNode], " + "cluster-manager not discovered or elected yet, an election requires a node with id [otherNode], " + "have discovered [] which is not a quorum; " + 
"discovery will continue using [] from hosts providers and [" + localNode @@ -554,7 +554,7 @@ public void testDescriptionAfterBootstrapping() { new StatusInfo(HEALTHY, "healthy-info") ).getDescription(), is( - "master not discovered or elected yet, an election requires a node with id [otherNode], " + "cluster-manager not discovered or elected yet, an election requires a node with id [otherNode], " + "have discovered [] which is not a quorum; " + "discovery will continue using [" + otherAddress @@ -576,7 +576,7 @@ public void testDescriptionAfterBootstrapping() { new StatusInfo(HEALTHY, "healthy-info") ).getDescription(), is( - "master not discovered or elected yet, an election requires a node with id [otherNode], " + "cluster-manager not discovered or elected yet, an election requires a node with id [otherNode], " + "have discovered [" + otherNode + "] which is a quorum; " @@ -598,7 +598,7 @@ public void testDescriptionAfterBootstrapping() { new StatusInfo(HEALTHY, "healthy-info") ).getDescription(), is( - "master not discovered or elected yet, an election requires a node with id [otherNode], " + "cluster-manager not discovered or elected yet, an election requires a node with id [otherNode], " + "have discovered [" + yetAnotherNode + "] which is not a quorum; " @@ -619,7 +619,7 @@ public void testDescriptionAfterBootstrapping() { new StatusInfo(HEALTHY, "healthy-info") ).getDescription(), is( - "master not discovered or elected yet, an election requires two nodes with ids [n1, n2], " + "cluster-manager not discovered or elected yet, an election requires two nodes with ids [n1, n2], " + "have discovered [] which is not a quorum; " + "discovery will continue using [] from hosts providers and [" + localNode @@ -638,7 +638,7 @@ public void testDescriptionAfterBootstrapping() { new StatusInfo(HEALTHY, "healthy-info") ).getDescription(), is( - "master not discovered or elected yet, an election requires at least 2 nodes with ids from [n1, n2, n3], " + "cluster-manager not 
discovered or elected yet, an election requires at least 2 nodes with ids from [n1, n2, n3], " + "have discovered [] which is not a quorum; " + "discovery will continue using [] from hosts providers and [" + localNode @@ -657,7 +657,7 @@ public void testDescriptionAfterBootstrapping() { new StatusInfo(HEALTHY, "healthy-info") ).getDescription(), is( - "master not discovered or elected yet, an election requires 2 nodes with ids [n1, n2], " + "cluster-manager not discovered or elected yet, an election requires 2 nodes with ids [n1, n2], " + "have discovered [] which is not a quorum; " + "discovery will continue using [] from hosts providers and [" + localNode @@ -676,7 +676,7 @@ public void testDescriptionAfterBootstrapping() { new StatusInfo(HEALTHY, "healthy-info") ).getDescription(), is( - "master not discovered or elected yet, an election requires at least 3 nodes with ids from [n1, n2, n3, n4], " + "cluster-manager not discovered or elected yet, an election requires at least 3 nodes with ids from [n1, n2, n3, n4], " + "have discovered [] which is not a quorum; " + "discovery will continue using [] from hosts providers and [" + localNode @@ -695,7 +695,7 @@ public void testDescriptionAfterBootstrapping() { new StatusInfo(HEALTHY, "healthy-info") ).getDescription(), is( - "master not discovered or elected yet, an election requires at least 3 nodes with ids from [n1, n2, n3, n4, n5], " + "cluster-manager not discovered or elected yet, an election requires at least 3 nodes with ids from [n1, n2, n3, n4, n5], " + "have discovered [] which is not a quorum; " + "discovery will continue using [] from hosts providers and [" + localNode @@ -714,7 +714,7 @@ public void testDescriptionAfterBootstrapping() { new StatusInfo(HEALTHY, "healthy-info") ).getDescription(), is( - "master not discovered or elected yet, an election requires at least 3 nodes with ids from [n1, n2, n3, n4], " + "cluster-manager not discovered or elected yet, an election requires at least 3 nodes with 
ids from [n1, n2, n3, n4], " + "have discovered [] which is not a quorum; " + "discovery will continue using [] from hosts providers and [" + localNode @@ -733,7 +733,7 @@ public void testDescriptionAfterBootstrapping() { new StatusInfo(HEALTHY, "healthy-info") ).getDescription(), is( - "master not discovered or elected yet, an election requires 3 nodes with ids [n1, n2, n3], " + "cluster-manager not discovered or elected yet, an election requires 3 nodes with ids [n1, n2, n3], " + "have discovered [] which is not a quorum; " + "discovery will continue using [] from hosts providers and [" + localNode @@ -752,7 +752,7 @@ public void testDescriptionAfterBootstrapping() { new StatusInfo(HEALTHY, "healthy-info") ).getDescription(), is( - "master not discovered or elected yet, an election requires a node with id [n1], " + "cluster-manager not discovered or elected yet, an election requires a node with id [n1], " + "have discovered [] which is not a quorum; " + "discovery will continue using [] from hosts providers and [" + localNode @@ -771,7 +771,7 @@ public void testDescriptionAfterBootstrapping() { new StatusInfo(HEALTHY, "healthy-info") ).getDescription(), is( - "master not discovered or elected yet, an election requires a node with id [n1] and a node with id [n2], " + "cluster-manager not discovered or elected yet, an election requires a node with id [n1] and a node with id [n2], " + "have discovered [] which is not a quorum; " + "discovery will continue using [] from hosts providers and [" + localNode @@ -790,7 +790,7 @@ public void testDescriptionAfterBootstrapping() { new StatusInfo(HEALTHY, "healthy-info") ).getDescription(), is( - "master not discovered or elected yet, an election requires a node with id [n1] and two nodes with ids [n2, n3], " + "cluster-manager not discovered or elected yet, an election requires a node with id [n1] and two nodes with ids [n2, n3], " + "have discovered [] which is not a quorum; " + "discovery will continue using [] from hosts 
providers and [" + localNode @@ -809,7 +809,7 @@ public void testDescriptionAfterBootstrapping() { new StatusInfo(HEALTHY, "healthy-info") ).getDescription(), is( - "master not discovered or elected yet, an election requires a node with id [n1] and " + "cluster-manager not discovered or elected yet, an election requires a node with id [n1] and " + "at least 2 nodes with ids from [n2, n3, n4], " + "have discovered [] which is not a quorum; " + "discovery will continue using [] from hosts providers and [" @@ -859,7 +859,7 @@ public void testDescriptionAfterBootstrapping() { // nodes from last-known cluster state could be in either order is( oneOf( - "master not discovered or elected yet, an election requires two nodes with ids [n1, n2], " + "cluster-manager not discovered or elected yet, an election requires two nodes with ids [n1, n2], " + "have discovered [] which is not a quorum; " + "discovery will continue using [] from hosts providers and [" + localNode @@ -867,7 +867,7 @@ public void testDescriptionAfterBootstrapping() { + otherMasterNode + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0", - "master not discovered or elected yet, an election requires two nodes with ids [n1, n2], " + "cluster-manager not discovered or elected yet, an election requires two nodes with ids [n1, n2], " + "have discovered [] which is not a quorum; " + "discovery will continue using [] from hosts providers and [" + otherMasterNode @@ -889,8 +889,8 @@ public void testDescriptionAfterBootstrapping() { new StatusInfo(HEALTHY, "healthy-info") ).getDescription(), is( - "master not discovered or elected yet, an election requires one or more nodes that have already participated as " - + "master-eligible nodes in the cluster but this node was not master-eligible the last time it joined the cluster, " + "cluster-manager not discovered or elected yet, an election requires one or more nodes that have already participated as " + + "cluster-manager-eligible nodes 
in the cluster but this node was not cluster-manager-eligible the last time it joined the cluster, " + "have discovered [] which is not a quorum; " + "discovery will continue using [] from hosts providers and [" + localNode diff --git a/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java index 1cdea588564c4..f43d6ff4e6c02 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java @@ -109,7 +109,7 @@ public class CoordinatorTests extends AbstractCoordinatorTestCase { /** * This test was added to verify that state recovery is properly reset on a node after it has become master and successfully * recovered a state (see {@link GatewayService}). The situation which triggers this with a decent likelihood is as follows: - * 3 master-eligible nodes (leader, follower1, follower2), the followers are shut down (leader remains), when followers come back + * 3 cluster-manager-eligible nodes (leader, follower1, follower2), the followers are shut down (leader remains), when followers come back * one of them becomes leader and publishes first state (with STATE_NOT_RECOVERED_BLOCK) to old leader, which accepts it. * Old leader is initiating an election at the same time, and wins election. It becomes leader again, but as it previously * successfully completed state recovery, is never reset to a state where state recovery can be retried. 
@@ -1558,7 +1558,9 @@ public void match(LogEvent event) { final String message = event.getMessage().getFormattedMessage(); assertThat( message, - startsWith("master not discovered or elected yet, an election requires at least 2 nodes with ids from [") + startsWith( + "cluster-manager not discovered or elected yet, an election requires at least 2 nodes with ids from [" + ) ); final List matchingNodes = cluster.clusterNodes.stream() @@ -1729,7 +1731,7 @@ public void testDoesNotPerformElectionWhenRestartingFollower() { if (cluster.clusterNodes.stream().filter(n -> n.getLocalNode().isMasterNode()).count() == 2) { // in the 2-node case, auto-shrinking the voting configuration is required to reduce the voting configuration down to just - // the leader, otherwise restarting the other master-eligible node triggers an election + // the leader, otherwise restarting the other cluster-manager-eligible node triggers an election leader.submitSetAutoShrinkVotingConfiguration(true); cluster.stabilise(2 * DEFAULT_CLUSTER_STATE_UPDATE_DELAY); // 1st delay for the setting update, 2nd for the reconfiguration } diff --git a/server/src/test/java/org/opensearch/discovery/AbstractDisruptionTestCase.java b/server/src/test/java/org/opensearch/discovery/AbstractDisruptionTestCase.java index 5a61300caa89e..e690770b3d0a5 100644 --- a/server/src/test/java/org/opensearch/discovery/AbstractDisruptionTestCase.java +++ b/server/src/test/java/org/opensearch/discovery/AbstractDisruptionTestCase.java @@ -167,7 +167,7 @@ void assertNoMaster(final String node, @Nullable final ClusterBlock expectedBloc assertBusy(() -> { ClusterState state = getNodeClusterState(node); final DiscoveryNodes nodes = state.nodes(); - assertNull("node [" + node + "] still has [" + nodes.getMasterNode() + "] as master", nodes.getMasterNode()); + assertNull("node [" + node + "] still has [" + nodes.getMasterNode() + "] as cluster-manager", nodes.getMasterNode()); if (expectedBlocks != null) { for (ClusterBlockLevel level : 
expectedBlocks.levels()) { assertTrue( diff --git a/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java b/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java index 9841daa5f81b7..6617102c12ffc 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java @@ -321,7 +321,7 @@ class Cluster implements Releasable { ); logger.info( - "--> creating cluster of {} nodes (master-eligible nodes: {}) with initial configuration {}", + "--> creating cluster of {} nodes (cluster-manager-eligible nodes: {}) with initial configuration {}", initialNodeCount, masterEligibleNodeIds, initialConfiguration diff --git a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java index 3a28ec2efdd4b..a7c819609c619 100644 --- a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java @@ -1121,7 +1121,7 @@ private synchronized void reset(boolean wipeData) throws IOException { } assertTrue( - "expected at least one master-eligible node left in " + nodes, + "expected at least one cluster-manager-eligible node left in " + nodes, nodes.isEmpty() || nodes.values().stream().anyMatch(NodeAndClient::isMasterEligible) ); @@ -1848,7 +1848,8 @@ private void restartNode(NodeAndClient nodeAndClient, RestartCallback callback) publishNode(nodeAndClient); if (callback.validateClusterForming() || excludedNodeIds.isEmpty() == false) { - // we have to validate cluster size to ensure that the restarted node has rejoined the cluster if it was master-eligible; + // we have to validate cluster size to ensure that the restarted node has rejoined the cluster if it was + // 
cluster-manager-eligible; validateClusterFormed(); } } @@ -1999,7 +2000,7 @@ public synchronized Set nodesInclude(String index) { /** * Performs cluster bootstrap when node with index {@link #bootstrapMasterNodeIndex} is started - * with the names of all existing and new master-eligible nodes. + * with the names of all existing and new cluster-manager-eligible nodes. * Indexing starts from 0. * If {@link #bootstrapMasterNodeIndex} is -1 (default), this method does nothing. */ From 908682d437ec744395030fe8c3f01973f596de20 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Fri, 25 Mar 2022 14:45:34 -0400 Subject: [PATCH 007/653] Enable merge on refresh and merge on commit on Opensearch (#2535) Enables merge on refresh and merge on commit in Opensearch by way of two new index options: index.merge_on_flush.max_full_flush_merge_wait_time and index.merge_on_flush.enabled. Default merge_on_flush is disabled and wait time is 10s. Signed-off-by: Andriy Redko --- .../common/settings/IndexScopedSettings.java | 2 + .../org/opensearch/index/IndexSettings.java | 50 +++++ .../index/engine/InternalEngine.java | 16 ++ .../index/engine/InternalEngineTests.java | 206 ++++++++++++++++++ 4 files changed, 274 insertions(+) diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index 4c7b3fe25296e..528d6cc9f5e23 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -187,6 +187,8 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexSettings.FINAL_PIPELINE, MetadataIndexStateService.VERIFIED_BEFORE_CLOSE_SETTING, ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_SETTING, + IndexSettings.INDEX_MERGE_ON_FLUSH_ENABLED, + IndexSettings.INDEX_MERGE_ON_FLUSH_MAX_FULL_FLUSH_MERGE_WAIT_TIME, // validate that built-in similarities don't get 
redefined Setting.groupSetting("index.similarity.", (s) -> { diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index 45d9a57442049..aa69417af1897 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -503,6 +503,27 @@ public final class IndexSettings { Setting.Property.IndexScope ); + /** + * Expert: sets the amount of time to wait for merges (during {@link org.apache.lucene.index.IndexWriter#commit} + * or {@link org.apache.lucene.index.IndexWriter#getReader(boolean, boolean)}) returned by MergePolicy.findFullFlushMerges(...). + * If this time is reached, we proceed with the commit based on segments merged up to that point. The merges are not + * aborted, and will still run to completion independent of the commit or getReader call, like natural segment merges. + */ + public static final Setting INDEX_MERGE_ON_FLUSH_MAX_FULL_FLUSH_MERGE_WAIT_TIME = Setting.timeSetting( + "index.merge_on_flush.max_full_flush_merge_wait_time", + new TimeValue(10, TimeUnit.SECONDS), + new TimeValue(0, TimeUnit.MILLISECONDS), + Property.Dynamic, + Property.IndexScope + ); + + public static final Setting INDEX_MERGE_ON_FLUSH_ENABLED = Setting.boolSetting( + "index.merge_on_flush.enabled", + false, + Property.IndexScope, + Property.Dynamic + ); + private final Index index; private final Version version; private final Logger logger; @@ -584,6 +605,15 @@ private void setRetentionLeaseMillis(final TimeValue retentionLease) { */ private volatile int maxRegexLength; + /** + * The max amount of time to wait for merges + */ + private volatile TimeValue maxFullFlushMergeWaitTime; + /** + * Is merge of flush enabled or not + */ + private volatile boolean mergeOnFlushEnabled; + /** * Returns the default search fields for this index. 
*/ @@ -696,6 +726,8 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti mappingTotalFieldsLimit = scopedSettings.get(INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING); mappingDepthLimit = scopedSettings.get(INDEX_MAPPING_DEPTH_LIMIT_SETTING); mappingFieldNameLengthLimit = scopedSettings.get(INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING); + maxFullFlushMergeWaitTime = scopedSettings.get(INDEX_MERGE_ON_FLUSH_MAX_FULL_FLUSH_MERGE_WAIT_TIME); + mergeOnFlushEnabled = scopedSettings.get(INDEX_MERGE_ON_FLUSH_ENABLED); scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING, mergePolicyConfig::setNoCFSRatio); scopedSettings.addSettingsUpdateConsumer( @@ -765,6 +797,8 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti scopedSettings.addSettingsUpdateConsumer(INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING, this::setMappingTotalFieldsLimit); scopedSettings.addSettingsUpdateConsumer(INDEX_MAPPING_DEPTH_LIMIT_SETTING, this::setMappingDepthLimit); scopedSettings.addSettingsUpdateConsumer(INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING, this::setMappingFieldNameLengthLimit); + scopedSettings.addSettingsUpdateConsumer(INDEX_MERGE_ON_FLUSH_MAX_FULL_FLUSH_MERGE_WAIT_TIME, this::setMaxFullFlushMergeWaitTime); + scopedSettings.addSettingsUpdateConsumer(INDEX_MERGE_ON_FLUSH_ENABLED, this::setMergeOnFlushEnabled); } private void setSearchIdleAfter(TimeValue searchIdleAfter) { @@ -1328,4 +1362,20 @@ public long getMappingFieldNameLengthLimit() { private void setMappingFieldNameLengthLimit(long value) { this.mappingFieldNameLengthLimit = value; } + + private void setMaxFullFlushMergeWaitTime(TimeValue timeValue) { + this.maxFullFlushMergeWaitTime = timeValue; + } + + private void setMergeOnFlushEnabled(boolean enabled) { + this.mergeOnFlushEnabled = enabled; + } + + public TimeValue getMaxFullFlushMergeWaitTime() { + return this.maxFullFlushMergeWaitTime; + } + + public boolean isMergeOnFlushEnabled() { + 
return mergeOnFlushEnabled; + } } diff --git a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java index 84090047d68e8..6bef118e0b61f 100644 --- a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java @@ -50,6 +50,7 @@ import org.apache.lucene.index.ShuffleForcedMergePolicy; import org.apache.lucene.index.SoftDeletesRetentionMergePolicy; import org.apache.lucene.index.Term; +import org.apache.lucene.sandbox.index.MergeOnFlushMergePolicy; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.DocIdSetIterator; @@ -2425,6 +2426,21 @@ private IndexWriterConfig getIndexWriterConfig() { // to enable it. mergePolicy = new ShuffleForcedMergePolicy(mergePolicy); } + + if (config().getIndexSettings().isMergeOnFlushEnabled()) { + final long maxFullFlushMergeWaitMillis = config().getIndexSettings().getMaxFullFlushMergeWaitTime().millis(); + if (maxFullFlushMergeWaitMillis > 0) { + iwc.setMaxFullFlushMergeWaitMillis(maxFullFlushMergeWaitMillis); + mergePolicy = new MergeOnFlushMergePolicy(mergePolicy); + } else { + logger.warn( + "The {} is enabled but {} is set to 0, merge on flush will not be activated", + IndexSettings.INDEX_MERGE_ON_FLUSH_ENABLED.getKey(), + IndexSettings.INDEX_MERGE_ON_FLUSH_MAX_FULL_FLUSH_MERGE_WAIT_TIME.getKey() + ); + } + } + iwc.setMergePolicy(new OpenSearchMergePolicy(mergePolicy)); iwc.setSimilarity(engineConfig.getSimilarity()); iwc.setRAMBufferSizeMB(engineConfig.getIndexingBufferSize().getMbFrac()); diff --git a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java index 361013149578e..c33adf3bcb558 100644 --- a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java +++ 
b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java @@ -494,6 +494,212 @@ public void testSegments() throws Exception { } } + public void testMergeSegmentsOnCommitIsDisabled() throws Exception { + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + + final Settings.Builder settings = Settings.builder() + .put(defaultSettings.getSettings()) + .put(IndexSettings.INDEX_MERGE_ON_FLUSH_MAX_FULL_FLUSH_MERGE_WAIT_TIME.getKey(), TimeValue.timeValueMillis(0)) + .put(IndexSettings.INDEX_MERGE_ON_FLUSH_ENABLED.getKey(), true); + final IndexMetadata indexMetadata = IndexMetadata.builder(defaultSettings.getIndexMetadata()).settings(settings).build(); + final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(indexMetadata); + + try ( + Store store = createStore(); + InternalEngine engine = createEngine( + config(indexSettings, store, createTempDir(), NoMergePolicy.INSTANCE, null, null, globalCheckpoint::get) + ) + ) { + assertThat(engine.segments(false), empty()); + int numDocsFirstSegment = randomIntBetween(5, 50); + Set liveDocsFirstSegment = new HashSet<>(); + for (int i = 0; i < numDocsFirstSegment; i++) { + String id = Integer.toString(i); + ParsedDocument doc = testParsedDocument(id, null, testDocument(), B_1, null); + engine.index(indexForDoc(doc)); + liveDocsFirstSegment.add(id); + } + engine.refresh("test"); + List segments = engine.segments(randomBoolean()); + assertThat(segments, hasSize(1)); + assertThat(segments.get(0).getNumDocs(), equalTo(liveDocsFirstSegment.size())); + assertThat(segments.get(0).getDeletedDocs(), equalTo(0)); + assertFalse(segments.get(0).committed); + int deletes = 0; + int updates = 0; + int appends = 0; + int iterations = scaledRandomIntBetween(1, 50); + for (int i = 0; i < iterations && liveDocsFirstSegment.isEmpty() == false; i++) { + String idToUpdate = randomFrom(liveDocsFirstSegment); + liveDocsFirstSegment.remove(idToUpdate); + ParsedDocument doc = 
testParsedDocument(idToUpdate, null, testDocument(), B_1, null); + if (randomBoolean()) { + engine.delete(new Engine.Delete(doc.id(), newUid(doc), primaryTerm.get())); + deletes++; + } else { + engine.index(indexForDoc(doc)); + updates++; + } + if (randomBoolean()) { + engine.index(indexForDoc(testParsedDocument(UUIDs.randomBase64UUID(), null, testDocument(), B_1, null))); + appends++; + } + } + + boolean committed = randomBoolean(); + if (committed) { + engine.flush(); + } + + engine.refresh("test"); + segments = engine.segments(randomBoolean()); + + assertThat(segments, hasSize(2)); + assertThat(segments, hasSize(2)); + assertThat(segments.get(0).getNumDocs(), equalTo(liveDocsFirstSegment.size())); + assertThat(segments.get(0).getDeletedDocs(), equalTo(updates + deletes)); + assertThat(segments.get(0).committed, equalTo(committed)); + + assertThat(segments.get(1).getNumDocs(), equalTo(updates + appends)); + assertThat(segments.get(1).getDeletedDocs(), equalTo(deletes)); // delete tombstones + assertThat(segments.get(1).committed, equalTo(committed)); + } + } + + public void testMergeSegmentsOnCommit() throws Exception { + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + + final Settings.Builder settings = Settings.builder() + .put(defaultSettings.getSettings()) + .put(IndexSettings.INDEX_MERGE_ON_FLUSH_MAX_FULL_FLUSH_MERGE_WAIT_TIME.getKey(), TimeValue.timeValueMillis(5000)) + .put(IndexSettings.INDEX_MERGE_ON_FLUSH_ENABLED.getKey(), true); + final IndexMetadata indexMetadata = IndexMetadata.builder(defaultSettings.getIndexMetadata()).settings(settings).build(); + final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(indexMetadata); + + try ( + Store store = createStore(); + InternalEngine engine = createEngine( + config(indexSettings, store, createTempDir(), NoMergePolicy.INSTANCE, null, null, globalCheckpoint::get) + ) + ) { + assertThat(engine.segments(false), empty()); + int numDocsFirstSegment = 
randomIntBetween(5, 50); + Set liveDocsFirstSegment = new HashSet<>(); + for (int i = 0; i < numDocsFirstSegment; i++) { + String id = Integer.toString(i); + ParsedDocument doc = testParsedDocument(id, null, testDocument(), B_1, null); + engine.index(indexForDoc(doc)); + liveDocsFirstSegment.add(id); + } + engine.refresh("test"); + List segments = engine.segments(randomBoolean()); + assertThat(segments, hasSize(1)); + assertThat(segments.get(0).getNumDocs(), equalTo(liveDocsFirstSegment.size())); + assertThat(segments.get(0).getDeletedDocs(), equalTo(0)); + assertFalse(segments.get(0).committed); + int deletes = 0; + int updates = 0; + int appends = 0; + int iterations = scaledRandomIntBetween(1, 50); + for (int i = 0; i < iterations && liveDocsFirstSegment.isEmpty() == false; i++) { + String idToUpdate = randomFrom(liveDocsFirstSegment); + liveDocsFirstSegment.remove(idToUpdate); + ParsedDocument doc = testParsedDocument(idToUpdate, null, testDocument(), B_1, null); + if (randomBoolean()) { + engine.delete(new Engine.Delete(doc.id(), newUid(doc), primaryTerm.get())); + deletes++; + } else { + engine.index(indexForDoc(doc)); + updates++; + } + if (randomBoolean()) { + engine.index(indexForDoc(testParsedDocument(UUIDs.randomBase64UUID(), null, testDocument(), B_1, null))); + appends++; + } + } + + boolean committed = randomBoolean(); + if (committed) { + engine.flush(); + } + + engine.refresh("test"); + segments = engine.segments(randomBoolean()); + + // All segments have to be merged into one + assertThat(segments, hasSize(1)); + assertThat(segments.get(0).getNumDocs(), equalTo(numDocsFirstSegment + appends - deletes)); + assertThat(segments.get(0).getDeletedDocs(), equalTo(0)); + assertThat(segments.get(0).committed, equalTo(committed)); + } + } + + // this test writes documents to the engine while concurrently flushing/commit + public void testConcurrentMergeSegmentsOnCommit() throws Exception { + final AtomicLong globalCheckpoint = new 
AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + + final Settings.Builder settings = Settings.builder() + .put(defaultSettings.getSettings()) + .put(IndexSettings.INDEX_MERGE_ON_FLUSH_MAX_FULL_FLUSH_MERGE_WAIT_TIME.getKey(), TimeValue.timeValueMillis(5000)) + .put(IndexSettings.INDEX_MERGE_ON_FLUSH_ENABLED.getKey(), true); + final IndexMetadata indexMetadata = IndexMetadata.builder(defaultSettings.getIndexMetadata()).settings(settings).build(); + final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(indexMetadata); + + try ( + Store store = createStore(); + InternalEngine engine = createEngine( + config(indexSettings, store, createTempDir(), NoMergePolicy.INSTANCE, null, null, globalCheckpoint::get) + ) + ) { + final int numIndexingThreads = scaledRandomIntBetween(3, 8); + final int numDocsPerThread = randomIntBetween(500, 1000); + final CyclicBarrier barrier = new CyclicBarrier(numIndexingThreads + 1); + final List indexingThreads = new ArrayList<>(); + final CountDownLatch doneLatch = new CountDownLatch(numIndexingThreads); + // create N indexing threads to index documents simultaneously + for (int threadNum = 0; threadNum < numIndexingThreads; threadNum++) { + final int threadIdx = threadNum; + Thread indexingThread = new Thread(() -> { + try { + barrier.await(); // wait for all threads to start at the same time + // index random number of docs + for (int i = 0; i < numDocsPerThread; i++) { + final String id = "thread" + threadIdx + "#" + i; + ParsedDocument doc = testParsedDocument(id, null, testDocument(), B_1, null); + engine.index(indexForDoc(doc)); + } + } catch (Exception e) { + throw new RuntimeException(e); + } finally { + doneLatch.countDown(); + } + + }); + indexingThreads.add(indexingThread); + } + + // start the indexing threads + for (Thread thread : indexingThreads) { + thread.start(); + } + barrier.await(); // wait for indexing threads to all be ready to start + assertThat(doneLatch.await(10, TimeUnit.SECONDS), is(true)); + + 
boolean committed = randomBoolean(); + if (committed) { + engine.flush(); + } + + engine.refresh("test"); + List segments = engine.segments(randomBoolean()); + + // All segments have to be merged into one + assertThat(segments, hasSize(1)); + assertThat(segments.get(0).getNumDocs(), equalTo(numIndexingThreads * numDocsPerThread)); + assertThat(segments.get(0).committed, equalTo(committed)); + } + } + public void testCommitStats() throws IOException { final AtomicLong maxSeqNo = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); final AtomicLong localCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); From d4ce87bddc409beddf81f5ca583ec120c890edda Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Fri, 25 Mar 2022 17:53:16 -0400 Subject: [PATCH 008/653] Fix build-tools/reaper source/target compatibility to be JDK-11 (#2596) Signed-off-by: Andriy Redko --- buildSrc/reaper/build.gradle | 3 +++ 1 file changed, 3 insertions(+) diff --git a/buildSrc/reaper/build.gradle b/buildSrc/reaper/build.gradle index d5e8d6ebc7099..4ccbec894e30e 100644 --- a/buildSrc/reaper/build.gradle +++ b/buildSrc/reaper/build.gradle @@ -11,6 +11,9 @@ apply plugin: 'java' +targetCompatibility = JavaVersion.VERSION_11 +sourceCompatibility = JavaVersion.VERSION_11 + jar { archiveFileName = "${project.name}.jar" manifest { From f1d35d028ffc807303c75ba677f4c2cff9835041 Mon Sep 17 00:00:00 2001 From: Owais Kazi Date: Fri, 25 Mar 2022 16:45:06 -0700 Subject: [PATCH 009/653] Added jenkinsfile to run gradle check in OpenSearch (#2166) * Added jenkinsfile for gradle check Signed-off-by: Owais Kazi * Added jenkinsfile to run gradle check Signed-off-by: Owais Kazi * PR comment Signed-off-by: Owais Kazi --- jenkins/jenkinsfile | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 jenkins/jenkinsfile diff --git a/jenkins/jenkinsfile b/jenkins/jenkinsfile new file mode 100644 index 0000000000000..113cb27c4a610 --- /dev/null +++ b/jenkins/jenkinsfile @@ -0,0 +1,32 @@ 
+pipeline { + agent { + docker { + label 'AL2-X64' + /* See + https://github.com/opensearch-project/opensearch-build/blob/main/docker/ci/dockerfiles/build.ubuntu18.opensearch.x64.dockerfile + for docker image + */ + image 'opensearchstaging/ci-runner:ci-runner-ubuntu1804-build-v1' + alwaysPull true + } + } + + environment { + JAVA11_HOME="/opt/java/openjdk-11" + JAVA14_HOME="/opt/java/openjdk-14" + JAVA17_HOME="/opt/java/openjdk-17" + JAVA8_HOME="/opt/java/openjdk-8" + JAVA_HOME="/opt/java/openjdk-14" + } + + stages { + stage('gradle-check') { + steps { + script { + sh 'echo gradle check' + sh './gradlew check --no-daemon --no-scan' + } + } + } + } +} From 5dd75bb0aa4cb64c46e99812b5fd9422588067e6 Mon Sep 17 00:00:00 2001 From: Vacha Shah Date: Mon, 28 Mar 2022 09:32:18 -0700 Subject: [PATCH 010/653] Removing SLM check in tests for OpenSearch versions (#2604) Signed-off-by: Vacha Shah --- .../java/org/opensearch/test/rest/OpenSearchRestTestCase.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java b/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java index 27369e79e5dee..9624a9d3d0554 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java @@ -531,7 +531,8 @@ protected boolean waitForAllSnapshotsWiped() { private void wipeCluster() throws Exception { // Clean up SLM policies before trying to wipe snapshots so that no new ones get started by SLM after wiping - if (nodeVersions.first().onOrAfter(LegacyESVersion.V_7_4_0)) { // SLM was introduced in version 7.4 + if (nodeVersions.first().onOrAfter(LegacyESVersion.V_7_4_0) && nodeVersions.first().before(Version.V_1_0_0)) { // SLM was introduced + // in version 7.4 if (preserveSLMPoliciesUponCompletion() == false) { // Clean up SLM policies before trying to wipe 
snapshots so that no new ones get started by SLM after wiping deleteAllSLMPolicies(); From 8b997c1d84fcc3ee99dec915982bdd642a124ea3 Mon Sep 17 00:00:00 2001 From: Sruti Parthiban Date: Mon, 28 Mar 2022 10:00:59 -0700 Subject: [PATCH 011/653] Add resource stats to task framework (#2089) * Add resource stats to task framework Signed-off-by: sruti1312 * Update thread resource info and add tests Signed-off-by: sruti1312 --- .../org/opensearch/client/tasks/TaskInfo.java | 18 +- .../core/tasks/GetTaskResponseTests.java | 18 +- .../tasks/CancelTasksResponseTests.java | 3 +- .../TransportRethrottleActionTests.java | 6 +- .../admin/cluster/node/tasks/TasksIT.java | 3 +- .../rest/action/cat/RestTasksAction.java | 2 + .../org/opensearch/tasks/ResourceStats.java | 28 ++++ .../opensearch/tasks/ResourceStatsType.java | 32 ++++ .../opensearch/tasks/ResourceUsageInfo.java | 108 ++++++++++++ .../opensearch/tasks/ResourceUsageMetric.java | 27 +++ .../main/java/org/opensearch/tasks/Task.java | 155 +++++++++++++++++- .../java/org/opensearch/tasks/TaskInfo.java | 39 ++++- .../opensearch/tasks/TaskResourceStats.java | 106 ++++++++++++ .../opensearch/tasks/TaskResourceUsage.java | 105 ++++++++++++ .../opensearch/tasks/ThreadResourceInfo.java | 54 ++++++ .../admin/cluster/node/tasks/TaskTests.java | 84 +++++++++- .../tasks/CancelTasksResponseTests.java | 2 +- .../tasks/ListTasksResponseTests.java | 18 +- .../org/opensearch/tasks/TaskInfoTests.java | 79 +++++++-- 19 files changed, 844 insertions(+), 43 deletions(-) create mode 100644 server/src/main/java/org/opensearch/tasks/ResourceStats.java create mode 100644 server/src/main/java/org/opensearch/tasks/ResourceStatsType.java create mode 100644 server/src/main/java/org/opensearch/tasks/ResourceUsageInfo.java create mode 100644 server/src/main/java/org/opensearch/tasks/ResourceUsageMetric.java create mode 100644 server/src/main/java/org/opensearch/tasks/TaskResourceStats.java create mode 100644 
server/src/main/java/org/opensearch/tasks/TaskResourceUsage.java create mode 100644 server/src/main/java/org/opensearch/tasks/ThreadResourceInfo.java diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/tasks/TaskInfo.java b/client/rest-high-level/src/main/java/org/opensearch/client/tasks/TaskInfo.java index de8374b283ea6..375f004dc3052 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/tasks/TaskInfo.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/tasks/TaskInfo.java @@ -57,6 +57,7 @@ public class TaskInfo { private TaskId parentTaskId; private final Map status = new HashMap<>(); private final Map headers = new HashMap<>(); + private final Map resourceStats = new HashMap<>(); public TaskInfo(TaskId taskId) { this.taskId = taskId; @@ -150,6 +151,14 @@ public Map getStatus() { return status; } + void setResourceStats(Map resourceStats) { + this.resourceStats.putAll(resourceStats); + } + + public Map getResourceStats() { + return resourceStats; + } + private void noOpParse(Object s) {} public static final ObjectParser.NamedObjectParser PARSER; @@ -170,6 +179,7 @@ private void noOpParse(Object s) {} parser.declareBoolean(TaskInfo::setCancelled, new ParseField("cancelled")); parser.declareString(TaskInfo::setParentTaskId, new ParseField("parent_task_id")); parser.declareObject(TaskInfo::setHeaders, (p, c) -> p.mapStrings(), new ParseField("headers")); + parser.declareObject(TaskInfo::setResourceStats, (p, c) -> p.map(), new ParseField("resource_stats")); PARSER = (XContentParser p, Void v, String name) -> parser.parse(p, new TaskInfo(new TaskId(name)), null); } @@ -188,7 +198,8 @@ && isCancelled() == taskInfo.isCancelled() && Objects.equals(getDescription(), taskInfo.getDescription()) && Objects.equals(getParentTaskId(), taskInfo.getParentTaskId()) && Objects.equals(status, taskInfo.status) - && Objects.equals(getHeaders(), taskInfo.getHeaders()); + && Objects.equals(getHeaders(), taskInfo.getHeaders()) + && 
Objects.equals(getResourceStats(), taskInfo.getResourceStats()); } @Override @@ -204,7 +215,8 @@ public int hashCode() { isCancelled(), getParentTaskId(), status, - getHeaders() + getHeaders(), + getResourceStats() ); } @@ -236,6 +248,8 @@ public String toString() { + status + ", headers=" + headers + + ", resource_stats=" + + resourceStats + '}'; } } diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/core/tasks/GetTaskResponseTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/core/tasks/GetTaskResponseTests.java index 403e295303784..07ee0bedd4777 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/core/tasks/GetTaskResponseTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/core/tasks/GetTaskResponseTests.java @@ -38,6 +38,8 @@ import org.opensearch.common.xcontent.ToXContent; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.tasks.RawTaskStatus; +import org.opensearch.tasks.TaskResourceStats; +import org.opensearch.tasks.TaskResourceUsage; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskId; import org.opensearch.tasks.TaskInfo; @@ -45,6 +47,7 @@ import java.io.IOException; import java.util.Collections; +import java.util.HashMap; import java.util.Map; import static org.opensearch.test.AbstractXContentTestCase.xContentTester; @@ -57,7 +60,7 @@ public void testFromXContent() throws IOException { ) .assertEqualsConsumer(this::assertEqualInstances) .assertToXContentEquivalence(true) - .randomFieldsExcludeFilter(field -> field.endsWith("headers") || field.endsWith("status")) + .randomFieldsExcludeFilter(field -> field.endsWith("headers") || field.endsWith("status") || field.contains("resource_stats")) .test(); } @@ -106,7 +109,8 @@ static TaskInfo randomTaskInfo() { cancellable, cancelled, parentTaskId, - headers + headers, + randomResourceStats() ); } @@ -127,4 +131,14 @@ private static RawTaskStatus randomRawTaskStatus() { throw new 
IllegalStateException(e); } } + + private static TaskResourceStats randomResourceStats() { + return randomBoolean() ? null : new TaskResourceStats(new HashMap() { + { + for (int i = 0; i < randomInt(5); i++) { + put(randomAlphaOfLength(5), new TaskResourceUsage(randomNonNegativeLong(), randomNonNegativeLong())); + } + } + }); + } } diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/tasks/CancelTasksResponseTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/tasks/CancelTasksResponseTests.java index 552a3712eea40..26be36b7162f6 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/tasks/CancelTasksResponseTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/tasks/CancelTasksResponseTests.java @@ -96,7 +96,8 @@ protected CancelTasksResponseTests.ByNodeCancelTasksResponse createServerTestIns cancellable, cancelled, new TaskId("node1", randomLong()), - Collections.singletonMap("x-header-of", "some-value") + Collections.singletonMap("x-header-of", "some-value"), + null ) ); } diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/TransportRethrottleActionTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/TransportRethrottleActionTests.java index a9e1a59b7e443..6456aa0af9aac 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/TransportRethrottleActionTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/TransportRethrottleActionTests.java @@ -131,7 +131,8 @@ public void testRethrottleSuccessfulResponse() { true, false, new TaskId("test", task.getId()), - Collections.emptyMap() + Collections.emptyMap(), + null ) ); sliceStatuses.add(new BulkByScrollTask.StatusOrException(status)); @@ -167,7 +168,8 @@ public void testRethrottleWithSomeSucceeded() { true, false, new TaskId("test", task.getId()), - Collections.emptyMap() + Collections.emptyMap(), + null ) ); sliceStatuses.add(new 
BulkByScrollTask.StatusOrException(status)); diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java index fbac2f7dbff6e..ac0ae44eb732e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java @@ -907,7 +907,8 @@ public void testNodeNotFoundButTaskFound() throws Exception { false, false, TaskId.EMPTY_TASK_ID, - Collections.emptyMap() + Collections.emptyMap(), + null ), new RuntimeException("test") ), diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestTasksAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestTasksAction.java index b87205593ce87..a6624c2f8cfdc 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestTasksAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestTasksAction.java @@ -137,6 +137,7 @@ protected Table getTableWithHeader(final RestRequest request) { // Task detailed info if (detailed) { table.addCell("description", "default:true;alias:desc;desc:task action"); + table.addCell("resource_stats", "default:false;desc:resource consumption info of the task"); } table.endHeaders(); return table; @@ -173,6 +174,7 @@ private void buildRow(Table table, boolean fullId, boolean detailed, DiscoveryNo if (detailed) { table.addCell(taskInfo.getDescription()); + table.addCell(taskInfo.getResourceStats()); } table.endRow(); } diff --git a/server/src/main/java/org/opensearch/tasks/ResourceStats.java b/server/src/main/java/org/opensearch/tasks/ResourceStats.java new file mode 100644 index 0000000000000..aab103ad08dcf --- /dev/null +++ b/server/src/main/java/org/opensearch/tasks/ResourceStats.java @@ -0,0 +1,28 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors 
require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.tasks; + +/** + * Different resource stats are defined. + */ +public enum ResourceStats { + CPU("cpu_time_in_nanos"), + MEMORY("memory_in_bytes"); + + private final String statsName; + + ResourceStats(String statsName) { + this.statsName = statsName; + } + + @Override + public String toString() { + return statsName; + } +} diff --git a/server/src/main/java/org/opensearch/tasks/ResourceStatsType.java b/server/src/main/java/org/opensearch/tasks/ResourceStatsType.java new file mode 100644 index 0000000000000..c670ac5ba689c --- /dev/null +++ b/server/src/main/java/org/opensearch/tasks/ResourceStatsType.java @@ -0,0 +1,32 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.tasks; + +/** Defines the different types of resource stats. */ +public enum ResourceStatsType { + // resource stats of the worker thread reported directly from runnable. 
+ WORKER_STATS("worker_stats", false); + + private final String statsType; + private final boolean onlyForAnalysis; + + ResourceStatsType(String statsType, boolean onlyForAnalysis) { + this.statsType = statsType; + this.onlyForAnalysis = onlyForAnalysis; + } + + public boolean isOnlyForAnalysis() { + return onlyForAnalysis; + } + + @Override + public String toString() { + return statsType; + } +} diff --git a/server/src/main/java/org/opensearch/tasks/ResourceUsageInfo.java b/server/src/main/java/org/opensearch/tasks/ResourceUsageInfo.java new file mode 100644 index 0000000000000..ae58f712b63c2 --- /dev/null +++ b/server/src/main/java/org/opensearch/tasks/ResourceUsageInfo.java @@ -0,0 +1,108 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.tasks; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +import java.util.Collections; +import java.util.EnumMap; +import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; + +/** + * Thread resource usage information for particular resource stats type. + *

+ * It captures the resource usage information like memory, CPU about a particular execution of thread + * for a specific stats type. + */ +public class ResourceUsageInfo { + private static final Logger logger = LogManager.getLogger(ResourceUsageInfo.class); + private final EnumMap statsInfo = new EnumMap<>(ResourceStats.class); + + public ResourceUsageInfo(ResourceUsageMetric... resourceUsageMetrics) { + for (ResourceUsageMetric resourceUsageMetric : resourceUsageMetrics) { + this.statsInfo.put(resourceUsageMetric.getStats(), new ResourceStatsInfo(resourceUsageMetric.getValue())); + } + } + + public void recordResourceUsageMetrics(ResourceUsageMetric... resourceUsageMetrics) { + for (ResourceUsageMetric resourceUsageMetric : resourceUsageMetrics) { + final ResourceStatsInfo resourceStatsInfo = statsInfo.get(resourceUsageMetric.getStats()); + if (resourceStatsInfo != null) { + updateResourceUsageInfo(resourceStatsInfo, resourceUsageMetric); + } else { + throw new IllegalStateException( + "cannot update [" + + resourceUsageMetric.getStats().toString() + + "] entry as its not present current_stats_info:" + + statsInfo + ); + } + } + } + + private void updateResourceUsageInfo(ResourceStatsInfo resourceStatsInfo, ResourceUsageMetric resourceUsageMetric) { + long currentEndValue; + long newEndValue; + do { + currentEndValue = resourceStatsInfo.endValue.get(); + newEndValue = resourceUsageMetric.getValue(); + if (currentEndValue > newEndValue) { + logger.debug( + "dropping resource usage update as the new value is lower than current value [" + + "resource_stats=[{}], " + + "current_end_value={}, " + + "new_end_value={}]", + resourceUsageMetric.getStats(), + currentEndValue, + newEndValue + ); + return; + } + } while (!resourceStatsInfo.endValue.compareAndSet(currentEndValue, newEndValue)); + logger.debug( + "updated resource usage info [resource_stats=[{}], " + "old_end_value={}, new_end_value={}]", + resourceUsageMetric.getStats(), + currentEndValue, + newEndValue + ); 
+ } + + public Map getStatsInfo() { + return Collections.unmodifiableMap(statsInfo); + } + + @Override + public String toString() { + return statsInfo.toString(); + } + + /** + * Defines resource stats information. + */ + static class ResourceStatsInfo { + private final long startValue; + private final AtomicLong endValue; + + private ResourceStatsInfo(long startValue) { + this.startValue = startValue; + this.endValue = new AtomicLong(startValue); + } + + public long getTotalValue() { + return endValue.get() - startValue; + } + + @Override + public String toString() { + return String.valueOf(getTotalValue()); + } + } +} diff --git a/server/src/main/java/org/opensearch/tasks/ResourceUsageMetric.java b/server/src/main/java/org/opensearch/tasks/ResourceUsageMetric.java new file mode 100644 index 0000000000000..0d13ffe6ec01a --- /dev/null +++ b/server/src/main/java/org/opensearch/tasks/ResourceUsageMetric.java @@ -0,0 +1,27 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.tasks; + +public class ResourceUsageMetric { + private final ResourceStats stats; + private final long value; + + public ResourceUsageMetric(ResourceStats stats, long value) { + this.stats = stats; + this.value = value; + } + + public ResourceStats getStats() { + return stats; + } + + public long getValue() { + return value; + } +} diff --git a/server/src/main/java/org/opensearch/tasks/Task.java b/server/src/main/java/org/opensearch/tasks/Task.java index ad9d5c3f04411..62453d08724ce 100644 --- a/server/src/main/java/org/opensearch/tasks/Task.java +++ b/server/src/main/java/org/opensearch/tasks/Task.java @@ -32,6 +32,8 @@ package org.opensearch.tasks; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.opensearch.action.ActionResponse; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.io.stream.NamedWriteable; @@ -39,18 +41,27 @@ import org.opensearch.common.xcontent.ToXContentObject; import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; /** * Current task information */ public class Task { + private static final Logger logger = LogManager.getLogger(Task.class); + /** * The request header to mark tasks with specific ids */ public static final String X_OPAQUE_ID = "X-Opaque-Id"; + private static final String TOTAL = "total"; + private final long id; private final String type; @@ -63,6 +74,8 @@ public class Task { private final Map headers; + private final Map> resourceStats; + /** * The task's start time as a wall clock time since epoch ({@link System#currentTimeMillis()} style). 
*/ @@ -74,7 +87,7 @@ public class Task { private final long startTimeNanos; public Task(long id, String type, String action, String description, TaskId parentTask, Map headers) { - this(id, type, action, description, parentTask, System.currentTimeMillis(), System.nanoTime(), headers); + this(id, type, action, description, parentTask, System.currentTimeMillis(), System.nanoTime(), headers, new ConcurrentHashMap<>()); } public Task( @@ -85,7 +98,8 @@ public Task( TaskId parentTask, long startTime, long startTimeNanos, - Map headers + Map headers, + ConcurrentHashMap> resourceStats ) { this.id = id; this.type = type; @@ -95,6 +109,7 @@ public Task( this.startTime = startTime; this.startTimeNanos = startTimeNanos; this.headers = headers; + this.resourceStats = resourceStats; } /** @@ -108,19 +123,48 @@ public Task( * generate data? */ public final TaskInfo taskInfo(String localNodeId, boolean detailed) { + return taskInfo(localNodeId, detailed, detailed == false); + } + + /** + * Build a version of the task status you can throw over the wire and back + * with the option to include resource stats or not. + * This method is only used during creating TaskResult to avoid storing resource information into the task index. + * + * @param excludeStats should information exclude resource stats. + * By default, detailed flag is used to control including resource information. + * But inorder to avoid storing resource stats into task index as strict mapping is enforced and breaks when adding this field. + * In the future, task-index-mapping.json can be modified to add resource stats. 
+ */ + private TaskInfo taskInfo(String localNodeId, boolean detailed, boolean excludeStats) { String description = null; Task.Status status = null; + TaskResourceStats resourceStats = null; if (detailed) { description = getDescription(); status = getStatus(); } - return taskInfo(localNodeId, description, status); + if (excludeStats == false) { + resourceStats = new TaskResourceStats(new HashMap<>() { + { + put(TOTAL, getTotalResourceStats()); + } + }); + } + return taskInfo(localNodeId, description, status, resourceStats); } /** - * Build a proper {@link TaskInfo} for this task. + * Build a {@link TaskInfo} for this task without resource stats. */ protected final TaskInfo taskInfo(String localNodeId, String description, Status status) { + return taskInfo(localNodeId, description, status, null); + } + + /** + * Build a proper {@link TaskInfo} for this task. + */ + protected final TaskInfo taskInfo(String localNodeId, String description, Status status, TaskResourceStats resourceStats) { return new TaskInfo( new TaskId(localNodeId, getId()), getType(), @@ -132,7 +176,8 @@ protected final TaskInfo taskInfo(String localNodeId, String description, Status this instanceof CancellableTask, this instanceof CancellableTask && ((CancellableTask) this).isCancelled(), parentTask, - headers + headers, + resourceStats ); } @@ -195,6 +240,102 @@ public Status getStatus() { return null; } + /** + * Returns thread level resource consumption of the task + */ + public Map> getResourceStats() { + return Collections.unmodifiableMap(resourceStats); + } + + /** + * Returns current total resource usage of the task. + * Currently, this method is only called on demand, during get and listing of tasks. + * In the future, these values can be cached as an optimization. 
+ */ + public TaskResourceUsage getTotalResourceStats() { + return new TaskResourceUsage(getTotalResourceUtilization(ResourceStats.CPU), getTotalResourceUtilization(ResourceStats.MEMORY)); + } + + /** + * Returns total resource consumption for a specific task stat. + */ + public long getTotalResourceUtilization(ResourceStats stats) { + long totalResourceConsumption = 0L; + for (List threadResourceInfosList : resourceStats.values()) { + for (ThreadResourceInfo threadResourceInfo : threadResourceInfosList) { + final ResourceUsageInfo.ResourceStatsInfo statsInfo = threadResourceInfo.getResourceUsageInfo().getStatsInfo().get(stats); + if (threadResourceInfo.getStatsType().isOnlyForAnalysis() == false && statsInfo != null) { + totalResourceConsumption += statsInfo.getTotalValue(); + } + } + } + return totalResourceConsumption; + } + + /** + * Adds thread's starting resource consumption information + * @param threadId ID of the thread + * @param statsType stats type + * @param resourceUsageMetrics resource consumption metrics of the thread + * @throws IllegalStateException matching active thread entry was found which is not expected. + */ + public void startThreadResourceTracking(long threadId, ResourceStatsType statsType, ResourceUsageMetric... resourceUsageMetrics) { + final List threadResourceInfoList = resourceStats.computeIfAbsent(threadId, k -> new ArrayList<>()); + // active thread entry should not be present in the list + for (ThreadResourceInfo threadResourceInfo : threadResourceInfoList) { + if (threadResourceInfo.getStatsType() == statsType && threadResourceInfo.isActive()) { + throw new IllegalStateException( + "unexpected active thread resource entry present [" + threadId + "]:[" + threadResourceInfo + "]" + ); + } + } + threadResourceInfoList.add(new ThreadResourceInfo(statsType, resourceUsageMetrics)); + } + + /** + * This method is used to update the resource consumption stats so that the data isn't too stale for long-running task. 
+ * If active thread entry is present in the list, the entry is updated. If one is not found, it throws an exception. + * @param threadId ID of the thread + * @param statsType stats type + * @param resourceUsageMetrics resource consumption metrics of the thread + * @throws IllegalStateException if no matching active thread entry was found. + */ + public void updateThreadResourceStats(long threadId, ResourceStatsType statsType, ResourceUsageMetric... resourceUsageMetrics) { + final List threadResourceInfoList = resourceStats.get(threadId); + if (threadResourceInfoList != null) { + for (ThreadResourceInfo threadResourceInfo : threadResourceInfoList) { + // the active entry present in the list is updated + if (threadResourceInfo.getStatsType() == statsType && threadResourceInfo.isActive()) { + threadResourceInfo.recordResourceUsageMetrics(resourceUsageMetrics); + return; + } + } + } + throw new IllegalStateException("cannot update if active thread resource entry is not present"); + } + + /** + * Record the thread's final resource consumption values. + * If active thread entry is present in the list, the entry is updated. If one is not found, it throws an exception. + * @param threadId ID of the thread + * @param statsType stats type + * @param resourceUsageMetrics resource consumption metrics of the thread + * @throws IllegalStateException if no matching active thread entry was found. + */ + public void stopThreadResourceTracking(long threadId, ResourceStatsType statsType, ResourceUsageMetric... 
resourceUsageMetrics) { + final List threadResourceInfoList = resourceStats.get(threadId); + if (threadResourceInfoList != null) { + for (ThreadResourceInfo threadResourceInfo : threadResourceInfoList) { + if (threadResourceInfo.getStatsType() == statsType && threadResourceInfo.isActive()) { + threadResourceInfo.setActive(false); + threadResourceInfo.recordResourceUsageMetrics(resourceUsageMetrics); + return; + } + } + } + throw new IllegalStateException("cannot update final values if active thread resource entry is not present"); + } + /** * Report of the internal status of a task. These can vary wildly from task * to task because each task is implemented differently but we should try @@ -217,12 +358,12 @@ public String getHeader(String header) { } public TaskResult result(DiscoveryNode node, Exception error) throws IOException { - return new TaskResult(taskInfo(node.getId(), true), error); + return new TaskResult(taskInfo(node.getId(), true, true), error); } public TaskResult result(DiscoveryNode node, ActionResponse response) throws IOException { if (response instanceof ToXContent) { - return new TaskResult(taskInfo(node.getId(), true), (ToXContent) response); + return new TaskResult(taskInfo(node.getId(), true, true), (ToXContent) response); } else { throw new IllegalStateException("response has to implement ToXContent to be able to store the results"); } diff --git a/server/src/main/java/org/opensearch/tasks/TaskInfo.java b/server/src/main/java/org/opensearch/tasks/TaskInfo.java index cf77eaf540ee6..e6ba94a71b61d 100644 --- a/server/src/main/java/org/opensearch/tasks/TaskInfo.java +++ b/server/src/main/java/org/opensearch/tasks/TaskInfo.java @@ -86,6 +86,8 @@ public final class TaskInfo implements Writeable, ToXContentFragment { private final Map headers; + private final TaskResourceStats resourceStats; + public TaskInfo( TaskId taskId, String type, @@ -97,7 +99,8 @@ public TaskInfo( boolean cancellable, boolean cancelled, TaskId parentTaskId, - Map headers + 
Map headers, + TaskResourceStats resourceStats ) { if (cancellable == false && cancelled == true) { throw new IllegalArgumentException("task cannot be cancelled"); @@ -113,11 +116,13 @@ public TaskInfo( this.cancelled = cancelled; this.parentTaskId = parentTaskId; this.headers = headers; + this.resourceStats = resourceStats; } /** * Read from a stream. */ + @SuppressWarnings("unchecked") public TaskInfo(StreamInput in) throws IOException { taskId = TaskId.readFromStream(in); type = in.readString(); @@ -137,6 +142,11 @@ public TaskInfo(StreamInput in) throws IOException { } parentTaskId = TaskId.readFromStream(in); headers = in.readMap(StreamInput::readString, StreamInput::readString); + if (in.getVersion().onOrAfter(Version.V_2_0_0)) { + resourceStats = in.readOptionalWriteable(TaskResourceStats::new); + } else { + resourceStats = null; + } } @Override @@ -154,6 +164,9 @@ public void writeTo(StreamOutput out) throws IOException { } parentTaskId.writeTo(out); out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeString); + if (out.getVersion().onOrAfter(Version.V_2_0_0)) { + out.writeOptionalWriteable(resourceStats); + } } public TaskId getTaskId() { @@ -226,6 +239,13 @@ public Map getHeaders() { return headers; } + /** + * Returns the task resource information + */ + public TaskResourceStats getResourceStats() { + return resourceStats; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.field("node", taskId.getNodeId()); @@ -253,6 +273,11 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(attribute.getKey(), attribute.getValue()); } builder.endObject(); + if (resourceStats != null) { + builder.startObject("resource_stats"); + resourceStats.toXContent(builder, params); + builder.endObject(); + } return builder; } @@ -278,6 +303,8 @@ public static TaskInfo fromXContent(XContentParser parser) { // This might happen if we are reading an old 
version of task info headers = Collections.emptyMap(); } + @SuppressWarnings("unchecked") + TaskResourceStats resourceStats = (TaskResourceStats) a[i++]; RawTaskStatus status = statusBytes == null ? null : new RawTaskStatus(statusBytes); TaskId parentTaskId = parentTaskIdString == null ? TaskId.EMPTY_TASK_ID : new TaskId(parentTaskIdString); return new TaskInfo( @@ -291,7 +318,8 @@ public static TaskInfo fromXContent(XContentParser parser) { cancellable, cancelled, parentTaskId, - headers + headers, + resourceStats ); }); static { @@ -309,6 +337,7 @@ public static TaskInfo fromXContent(XContentParser parser) { PARSER.declareBoolean(optionalConstructorArg(), new ParseField("cancelled")); PARSER.declareString(optionalConstructorArg(), new ParseField("parent_task_id")); PARSER.declareObject(optionalConstructorArg(), (p, c) -> p.mapStrings(), new ParseField("headers")); + PARSER.declareObject(optionalConstructorArg(), (p, c) -> TaskResourceStats.fromXContent(p), new ParseField("resource_stats")); } @Override @@ -333,7 +362,8 @@ public boolean equals(Object obj) { && Objects.equals(cancellable, other.cancellable) && Objects.equals(cancelled, other.cancelled) && Objects.equals(status, other.status) - && Objects.equals(headers, other.headers); + && Objects.equals(headers, other.headers) + && Objects.equals(resourceStats, other.resourceStats); } @Override @@ -349,7 +379,8 @@ public int hashCode() { cancellable, cancelled, status, - headers + headers, + resourceStats ); } } diff --git a/server/src/main/java/org/opensearch/tasks/TaskResourceStats.java b/server/src/main/java/org/opensearch/tasks/TaskResourceStats.java new file mode 100644 index 0000000000000..c35e08ebb34ec --- /dev/null +++ b/server/src/main/java/org/opensearch/tasks/TaskResourceStats.java @@ -0,0 +1,106 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.tasks; + +import org.opensearch.common.Strings; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.io.stream.Writeable; +import org.opensearch.common.xcontent.ToXContentFragment; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +/** + * Resource information about a currently running task. + *

+ * Writeable TaskResourceStats objects are used to represent resource + * snapshot information about currently running task. + */ +public class TaskResourceStats implements Writeable, ToXContentFragment { + private final Map resourceUsage; + + public TaskResourceStats(Map resourceUsage) { + this.resourceUsage = Objects.requireNonNull(resourceUsage, "resource usage is required"); + } + + /** + * Read from a stream. + */ + public TaskResourceStats(StreamInput in) throws IOException { + resourceUsage = in.readMap(StreamInput::readString, TaskResourceUsage::readFromStream); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeMap(resourceUsage, StreamOutput::writeString, (stream, stats) -> stats.writeTo(stream)); + } + + public Map getResourceUsageInfo() { + return resourceUsage; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + for (Map.Entry resourceUsageEntry : resourceUsage.entrySet()) { + builder.startObject(resourceUsageEntry.getKey()); + if (resourceUsageEntry.getValue() != null) { + resourceUsageEntry.getValue().toXContent(builder, params); + } + builder.endObject(); + } + return builder; + } + + public static TaskResourceStats fromXContent(XContentParser parser) throws IOException { + XContentParser.Token token = parser.currentToken(); + if (token == null) { + token = parser.nextToken(); + } + if (token == XContentParser.Token.START_OBJECT) { + token = parser.nextToken(); + } + final Map resourceStats = new HashMap<>(); + if (token == XContentParser.Token.FIELD_NAME) { + assert parser.currentToken() == XContentParser.Token.FIELD_NAME : "Expected field name but saw [" + parser.currentToken() + "]"; + do { + // Must point to field name + String fieldName = parser.currentName(); + // And then the value + TaskResourceUsage value = TaskResourceUsage.fromXContent(parser); + resourceStats.put(fieldName, value); + } while (parser.nextToken() == 
XContentParser.Token.FIELD_NAME); + } + return new TaskResourceStats(resourceStats); + } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } + + // Implements equals and hashcode for testing + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != TaskResourceStats.class) { + return false; + } + TaskResourceStats other = (TaskResourceStats) obj; + return Objects.equals(resourceUsage, other.resourceUsage); + } + + @Override + public int hashCode() { + return Objects.hash(resourceUsage); + } +} diff --git a/server/src/main/java/org/opensearch/tasks/TaskResourceUsage.java b/server/src/main/java/org/opensearch/tasks/TaskResourceUsage.java new file mode 100644 index 0000000000000..6af3de2b78c06 --- /dev/null +++ b/server/src/main/java/org/opensearch/tasks/TaskResourceUsage.java @@ -0,0 +1,105 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.tasks; + +import org.opensearch.common.ParseField; +import org.opensearch.common.Strings; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.io.stream.Writeable; +import org.opensearch.common.xcontent.ConstructingObjectParser; +import org.opensearch.common.xcontent.ToXContentFragment; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +import static org.opensearch.common.xcontent.ConstructingObjectParser.constructorArg; + +/** + * Task resource usage information + *

+ * Writeable TaskResourceUsage objects are used to represent resource usage + * information of running tasks. + */ +public class TaskResourceUsage implements Writeable, ToXContentFragment { + private static final ParseField CPU_TIME_IN_NANOS = new ParseField("cpu_time_in_nanos"); + private static final ParseField MEMORY_IN_BYTES = new ParseField("memory_in_bytes"); + + private final long cpuTimeInNanos; + private final long memoryInBytes; + + public TaskResourceUsage(long cpuTimeInNanos, long memoryInBytes) { + this.cpuTimeInNanos = cpuTimeInNanos; + this.memoryInBytes = memoryInBytes; + } + + /** + * Read from a stream. + */ + public static TaskResourceUsage readFromStream(StreamInput in) throws IOException { + return new TaskResourceUsage(in.readVLong(), in.readVLong()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(cpuTimeInNanos); + out.writeVLong(memoryInBytes); + } + + public long getCpuTimeInNanos() { + return cpuTimeInNanos; + } + + public long getMemoryInBytes() { + return memoryInBytes; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(CPU_TIME_IN_NANOS.getPreferredName(), cpuTimeInNanos); + builder.field(MEMORY_IN_BYTES.getPreferredName(), memoryInBytes); + return builder; + } + + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "task_resource_usage", + a -> new TaskResourceUsage((Long) a[0], (Long) a[1]) + ); + + static { + PARSER.declareLong(constructorArg(), CPU_TIME_IN_NANOS); + PARSER.declareLong(constructorArg(), MEMORY_IN_BYTES); + } + + public static TaskResourceUsage fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } + + // Implements equals and hashcode for testing + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != 
TaskResourceUsage.class) { + return false; + } + TaskResourceUsage other = (TaskResourceUsage) obj; + return Objects.equals(cpuTimeInNanos, other.cpuTimeInNanos) && Objects.equals(memoryInBytes, other.memoryInBytes); + } + + @Override + public int hashCode() { + return Objects.hash(cpuTimeInNanos, memoryInBytes); + } +} diff --git a/server/src/main/java/org/opensearch/tasks/ThreadResourceInfo.java b/server/src/main/java/org/opensearch/tasks/ThreadResourceInfo.java new file mode 100644 index 0000000000000..8b45c38c8fb63 --- /dev/null +++ b/server/src/main/java/org/opensearch/tasks/ThreadResourceInfo.java @@ -0,0 +1,54 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.tasks; + +/** + * Resource consumption information about a particular execution of thread. + *

+ * It captures the resource usage information about a particular execution of thread + * for a specific stats type like worker_stats or response_stats etc., + */ +public class ThreadResourceInfo { + private volatile boolean isActive = true; + private final ResourceStatsType statsType; + private final ResourceUsageInfo resourceUsageInfo; + + public ThreadResourceInfo(ResourceStatsType statsType, ResourceUsageMetric... resourceUsageMetrics) { + this.statsType = statsType; + this.resourceUsageInfo = new ResourceUsageInfo(resourceUsageMetrics); + } + + /** + * Updates thread's resource consumption information. + */ + public void recordResourceUsageMetrics(ResourceUsageMetric... resourceUsageMetrics) { + resourceUsageInfo.recordResourceUsageMetrics(resourceUsageMetrics); + } + + public void setActive(boolean isActive) { + this.isActive = isActive; + } + + public boolean isActive() { + return isActive; + } + + public ResourceStatsType getStatsType() { + return statsType; + } + + public ResourceUsageInfo getResourceUsageInfo() { + return resourceUsageInfo; + } + + @Override + public String toString() { + return resourceUsageInfo + ", stats_type=" + statsType + ", is_active=" + isActive; + } +} diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskTests.java index 5f8d5992c9f2f..45db94577f15f 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskTests.java @@ -31,16 +31,23 @@ package org.opensearch.action.admin.cluster.node.tasks; +import org.opensearch.action.search.SearchAction; import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskId; import org.opensearch.tasks.TaskInfo; +import org.opensearch.tasks.ResourceUsageMetric; 
+import org.opensearch.tasks.ResourceStats; +import org.opensearch.tasks.ResourceStatsType; import org.opensearch.test.OpenSearchTestCase; import java.nio.charset.StandardCharsets; import java.util.Collections; import java.util.Map; +import static org.opensearch.tasks.TaskInfoTests.randomResourceStats; + public class TaskTests extends OpenSearchTestCase { public void testTaskInfoToString() { @@ -61,7 +68,8 @@ public void testTaskInfoToString() { cancellable, cancelled, TaskId.EMPTY_TASK_ID, - Collections.singletonMap("foo", "bar") + Collections.singletonMap("foo", "bar"), + randomResourceStats(randomBoolean()) ); String taskInfoString = taskInfo.toString(); Map map = XContentHelper.convertToMap(new BytesArray(taskInfoString.getBytes(StandardCharsets.UTF_8)), true).v2(); @@ -94,7 +102,8 @@ public void testCancellableOptionWhenCancelledTrue() { cancellable, cancelled, TaskId.EMPTY_TASK_ID, - Collections.singletonMap("foo", "bar") + Collections.singletonMap("foo", "bar"), + randomResourceStats(randomBoolean()) ); String taskInfoString = taskInfo.toString(); Map map = XContentHelper.convertToMap(new BytesArray(taskInfoString.getBytes(StandardCharsets.UTF_8)), true).v2(); @@ -120,7 +129,8 @@ public void testCancellableOptionWhenCancelledFalse() { cancellable, cancelled, TaskId.EMPTY_TASK_ID, - Collections.singletonMap("foo", "bar") + Collections.singletonMap("foo", "bar"), + randomResourceStats(randomBoolean()) ); String taskInfoString = taskInfo.toString(); Map map = XContentHelper.convertToMap(new BytesArray(taskInfoString.getBytes(StandardCharsets.UTF_8)), true).v2(); @@ -148,9 +158,75 @@ public void testNonCancellableOption() { cancellable, cancelled, TaskId.EMPTY_TASK_ID, - Collections.singletonMap("foo", "bar") + Collections.singletonMap("foo", "bar"), + randomResourceStats(randomBoolean()) ) ); assertEquals(e.getMessage(), "task cannot be cancelled"); } + + public void testTaskResourceStats() { + final Task task = new Task( + randomLong(), + "transport", + 
SearchAction.NAME, + "description", + new TaskId(randomLong() + ":" + randomLong()), + Collections.emptyMap() + ); + + long totalMemory = 0L; + long totalCPU = 0L; + + // reporting resource consumption events and checking total consumption values + for (int i = 0; i < randomInt(10); i++) { + long initial_memory = randomLongBetween(1, 100); + long initial_cpu = randomLongBetween(1, 100); + + ResourceUsageMetric[] initialTaskResourceMetrics = new ResourceUsageMetric[] { + new ResourceUsageMetric(ResourceStats.MEMORY, initial_memory), + new ResourceUsageMetric(ResourceStats.CPU, initial_cpu) }; + task.startThreadResourceTracking(i, ResourceStatsType.WORKER_STATS, initialTaskResourceMetrics); + + long memory = initial_memory + randomLongBetween(1, 10000); + long cpu = initial_cpu + randomLongBetween(1, 10000); + + totalMemory += memory - initial_memory; + totalCPU += cpu - initial_cpu; + + ResourceUsageMetric[] taskResourceMetrics = new ResourceUsageMetric[] { + new ResourceUsageMetric(ResourceStats.MEMORY, memory), + new ResourceUsageMetric(ResourceStats.CPU, cpu) }; + task.updateThreadResourceStats(i, ResourceStatsType.WORKER_STATS, taskResourceMetrics); + task.stopThreadResourceTracking(i, ResourceStatsType.WORKER_STATS); + } + assertEquals(task.getTotalResourceStats().getMemoryInBytes(), totalMemory); + assertEquals(task.getTotalResourceStats().getCpuTimeInNanos(), totalCPU); + + // updating should throw an IllegalStateException when active entry is not present. 
+ try { + task.updateThreadResourceStats(randomInt(), ResourceStatsType.WORKER_STATS); + fail("update should not be successful as active entry is not present!"); + } catch (IllegalStateException e) { + // pass + } + + // re-adding a thread entry that is already present, should throw an exception + int threadId = randomInt(); + task.startThreadResourceTracking(threadId, ResourceStatsType.WORKER_STATS, new ResourceUsageMetric(ResourceStats.MEMORY, 100)); + try { + task.startThreadResourceTracking(threadId, ResourceStatsType.WORKER_STATS); + fail("add/start should not be successful as active entry is already present!"); + } catch (IllegalStateException e) { + // pass + } + + // existing active entry is present only for memory, update cannot be called with cpu values. + try { + task.updateThreadResourceStats(threadId, ResourceStatsType.WORKER_STATS, new ResourceUsageMetric(ResourceStats.CPU, 200)); + fail("update should not be successful as entry for CPU is not present!"); + } catch (IllegalStateException e) { + // pass + } + } } diff --git a/server/src/test/java/org/opensearch/tasks/CancelTasksResponseTests.java b/server/src/test/java/org/opensearch/tasks/CancelTasksResponseTests.java index 64d2979c2c5a0..c0ec4ca3d31fd 100644 --- a/server/src/test/java/org/opensearch/tasks/CancelTasksResponseTests.java +++ b/server/src/test/java/org/opensearch/tasks/CancelTasksResponseTests.java @@ -62,7 +62,7 @@ protected CancelTasksResponse createTestInstance() { private static List randomTasks() { List randomTasks = new ArrayList<>(); for (int i = 0; i < randomInt(10); i++) { - randomTasks.add(TaskInfoTests.randomTaskInfo()); + randomTasks.add(TaskInfoTests.randomTaskInfo(false)); } return randomTasks; } diff --git a/server/src/test/java/org/opensearch/tasks/ListTasksResponseTests.java b/server/src/test/java/org/opensearch/tasks/ListTasksResponseTests.java index 4d5feb46de1d0..0201509d03a2b 100644 --- a/server/src/test/java/org/opensearch/tasks/ListTasksResponseTests.java +++ 
b/server/src/test/java/org/opensearch/tasks/ListTasksResponseTests.java @@ -45,6 +45,7 @@ import java.net.ConnectException; import java.util.ArrayList; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.function.Predicate; import java.util.function.Supplier; @@ -72,7 +73,12 @@ public void testNonEmptyToString() { true, false, new TaskId("node1", 0), - Collections.singletonMap("foo", "bar") + Collections.singletonMap("foo", "bar"), + new TaskResourceStats(new HashMap() { + { + put("dummy-type1", new TaskResourceUsage(100, 100)); + } + }) ); ListTasksResponse tasksResponse = new ListTasksResponse(singletonList(info), emptyList(), emptyList()); assertEquals( @@ -93,6 +99,12 @@ public void testNonEmptyToString() { + " \"parent_task_id\" : \"node1:0\",\n" + " \"headers\" : {\n" + " \"foo\" : \"bar\"\n" + + " },\n" + + " \"resource_stats\" : {\n" + + " \"dummy-type1\" : {\n" + + " \"cpu_time_in_nanos\" : 100,\n" + + " \"memory_in_bytes\" : 100\n" + + " }\n" + " }\n" + " }\n" + " ]\n" @@ -127,8 +139,8 @@ protected boolean supportsUnknownFields() { @Override protected Predicate getRandomFieldsExcludeFilter() { - // status and headers hold arbitrary content, we can't inject random fields in them - return field -> field.endsWith("status") || field.endsWith("headers"); + // status, headers and resource_stats hold arbitrary content, we can't inject random fields in them + return field -> field.endsWith("status") || field.endsWith("headers") || field.contains("resource_stats"); } @Override diff --git a/server/src/test/java/org/opensearch/tasks/TaskInfoTests.java b/server/src/test/java/org/opensearch/tasks/TaskInfoTests.java index 89b690d81a4ea..7c8cb3230659b 100644 --- a/server/src/test/java/org/opensearch/tasks/TaskInfoTests.java +++ b/server/src/test/java/org/opensearch/tasks/TaskInfoTests.java @@ -77,13 +77,13 @@ protected boolean supportsUnknownFields() { @Override protected Predicate getRandomFieldsExcludeFilter() { - // status 
and headers hold arbitrary content, we can't inject random fields in them - return field -> "status".equals(field) || "headers".equals(field); + // status, headers and resource_stats hold arbitrary content, we can't inject random fields in them + return field -> "status".equals(field) || "headers".equals(field) || field.contains("resource_stats"); } @Override protected TaskInfo mutateInstance(TaskInfo info) { - switch (between(0, 9)) { + switch (between(0, 10)) { case 0: TaskId taskId = new TaskId(info.getTaskId().getNodeId() + randomAlphaOfLength(5), info.getTaskId().getId()); return new TaskInfo( @@ -97,7 +97,8 @@ protected TaskInfo mutateInstance(TaskInfo info) { info.isCancellable(), info.isCancelled(), info.getParentTaskId(), - info.getHeaders() + info.getHeaders(), + info.getResourceStats() ); case 1: return new TaskInfo( @@ -111,7 +112,8 @@ protected TaskInfo mutateInstance(TaskInfo info) { info.isCancellable(), info.isCancelled(), info.getParentTaskId(), - info.getHeaders() + info.getHeaders(), + info.getResourceStats() ); case 2: return new TaskInfo( @@ -125,7 +127,8 @@ protected TaskInfo mutateInstance(TaskInfo info) { info.isCancellable(), info.isCancelled(), info.getParentTaskId(), - info.getHeaders() + info.getHeaders(), + info.getResourceStats() ); case 3: return new TaskInfo( @@ -139,7 +142,8 @@ protected TaskInfo mutateInstance(TaskInfo info) { info.isCancellable(), info.isCancelled(), info.getParentTaskId(), - info.getHeaders() + info.getHeaders(), + info.getResourceStats() ); case 4: Task.Status newStatus = randomValueOtherThan(info.getStatus(), TaskInfoTests::randomRawTaskStatus); @@ -154,7 +158,8 @@ protected TaskInfo mutateInstance(TaskInfo info) { info.isCancellable(), info.isCancelled(), info.getParentTaskId(), - info.getHeaders() + info.getHeaders(), + info.getResourceStats() ); case 5: return new TaskInfo( @@ -168,7 +173,8 @@ protected TaskInfo mutateInstance(TaskInfo info) { info.isCancellable(), info.isCancelled(), info.getParentTaskId(), 
- info.getHeaders() + info.getHeaders(), + info.getResourceStats() ); case 6: return new TaskInfo( @@ -182,7 +188,8 @@ protected TaskInfo mutateInstance(TaskInfo info) { info.isCancellable(), info.isCancelled(), info.getParentTaskId(), - info.getHeaders() + info.getHeaders(), + info.getResourceStats() ); case 7: return new TaskInfo( @@ -196,7 +203,8 @@ protected TaskInfo mutateInstance(TaskInfo info) { info.isCancellable() == false, false, info.getParentTaskId(), - info.getHeaders() + info.getHeaders(), + info.getResourceStats() ); case 8: TaskId parentId = new TaskId(info.getParentTaskId().getNodeId() + randomAlphaOfLength(5), info.getParentTaskId().getId()); @@ -211,7 +219,8 @@ protected TaskInfo mutateInstance(TaskInfo info) { info.isCancellable(), info.isCancelled(), parentId, - info.getHeaders() + info.getHeaders(), + info.getResourceStats() ); case 9: Map headers = info.getHeaders(); @@ -232,7 +241,30 @@ protected TaskInfo mutateInstance(TaskInfo info) { info.isCancellable(), info.isCancelled(), info.getParentTaskId(), - headers + headers, + info.getResourceStats() + ); + case 10: + Map resourceUsageMap; + if (info.getResourceStats() == null) { + resourceUsageMap = new HashMap<>(1); + } else { + resourceUsageMap = new HashMap<>(info.getResourceStats().getResourceUsageInfo()); + } + resourceUsageMap.put(randomAlphaOfLength(5), new TaskResourceUsage(randomNonNegativeLong(), randomNonNegativeLong())); + return new TaskInfo( + info.getTaskId(), + info.getType(), + info.getAction(), + info.getDescription(), + info.getStatus(), + info.getStartTime(), + info.getRunningTimeNanos(), + info.isCancellable(), + info.isCancelled(), + info.getParentTaskId(), + info.getHeaders(), + new TaskResourceStats(resourceUsageMap) ); default: throw new IllegalStateException(); @@ -240,11 +272,15 @@ protected TaskInfo mutateInstance(TaskInfo info) { } static TaskInfo randomTaskInfo() { + return randomTaskInfo(randomBoolean()); + } + + static TaskInfo randomTaskInfo(boolean detailed) { 
TaskId taskId = randomTaskId(); String type = randomAlphaOfLength(5); String action = randomAlphaOfLength(5); - Task.Status status = randomBoolean() ? randomRawTaskStatus() : null; - String description = randomBoolean() ? randomAlphaOfLength(5) : null; + Task.Status status = detailed ? randomRawTaskStatus() : null; + String description = detailed ? randomAlphaOfLength(5) : null; long startTime = randomLong(); long runningTimeNanos = randomLong(); boolean cancellable = randomBoolean(); @@ -264,7 +300,8 @@ static TaskInfo randomTaskInfo() { cancellable, cancelled, parentTaskId, - headers + headers, + randomResourceStats(detailed) ); } @@ -285,4 +322,14 @@ private static RawTaskStatus randomRawTaskStatus() { throw new IllegalStateException(e); } } + + public static TaskResourceStats randomResourceStats(boolean detailed) { + return detailed ? new TaskResourceStats(new HashMap() { + { + for (int i = 0; i < randomInt(5); i++) { + put(randomAlphaOfLength(5), new TaskResourceUsage(randomNonNegativeLong(), randomNonNegativeLong())); + } + } + }) : null; + } } From dd79352baa5d3ca95c5349c72a6f89142f2b6c8e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Mar 2022 12:40:23 -0500 Subject: [PATCH 012/653] Bump json-schema-validator from 1.0.67 to 1.0.68 in /buildSrc (#2610) Bumps [json-schema-validator](https://github.com/networknt/json-schema-validator) from 1.0.67 to 1.0.68. - [Release notes](https://github.com/networknt/json-schema-validator/releases) - [Changelog](https://github.com/networknt/json-schema-validator/blob/master/CHANGELOG.md) - [Commits](https://github.com/networknt/json-schema-validator/compare/1.0.67...1.0.68) --- updated-dependencies: - dependency-name: com.networknt:json-schema-validator dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- buildSrc/build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index f940eec593306..6134e7cd0250e 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -115,7 +115,7 @@ dependencies { api 'de.thetaphi:forbiddenapis:3.2' api 'com.avast.gradle:gradle-docker-compose-plugin:0.14.12' api 'org.apache.maven:maven-model:3.6.2' - api 'com.networknt:json-schema-validator:1.0.67' + api 'com.networknt:json-schema-validator:1.0.68' api "com.fasterxml.jackson.core:jackson-databind:${props.getProperty('jackson')}" testFixturesApi "junit:junit:${props.getProperty('junit')}" From e44706e500b375396bb9e8450909d0a052ff6589 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Mar 2022 12:42:31 -0500 Subject: [PATCH 013/653] Bump jettison from 1.1 to 1.4.1 in /plugins/discovery-azure-classic (#2614) * Bump jettison from 1.1 to 1.4.1 in /plugins/discovery-azure-classic Bumps [jettison](https://github.com/jettison-json/jettison) from 1.1 to 1.4.1. - [Release notes](https://github.com/jettison-json/jettison/releases) - [Commits](https://github.com/jettison-json/jettison/compare/jettison-1.1...jettison-1.4.1) --- updated-dependencies: - dependency-name: org.codehaus.jettison:jettison dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- plugins/discovery-azure-classic/build.gradle | 2 +- plugins/discovery-azure-classic/licenses/jettison-1.1.jar.sha1 | 1 - .../discovery-azure-classic/licenses/jettison-1.4.1.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 plugins/discovery-azure-classic/licenses/jettison-1.1.jar.sha1 create mode 100644 plugins/discovery-azure-classic/licenses/jettison-1.4.1.jar.sha1 diff --git a/plugins/discovery-azure-classic/build.gradle b/plugins/discovery-azure-classic/build.gradle index 28cbc647ac31a..575b8858b16ba 100644 --- a/plugins/discovery-azure-classic/build.gradle +++ b/plugins/discovery-azure-classic/build.gradle @@ -59,7 +59,7 @@ dependencies { api "com.sun.jersey:jersey-client:${versions.jersey}" api "com.sun.jersey:jersey-core:${versions.jersey}" api "com.sun.jersey:jersey-json:${versions.jersey}" - api 'org.codehaus.jettison:jettison:1.1' + api 'org.codehaus.jettison:jettison:1.4.1' api 'com.sun.xml.bind:jaxb-impl:2.2.3-1' // HACK: javax.xml.bind was removed from default modules in java 9, so we pull the api in here, diff --git a/plugins/discovery-azure-classic/licenses/jettison-1.1.jar.sha1 b/plugins/discovery-azure-classic/licenses/jettison-1.1.jar.sha1 deleted file mode 100644 index 53133f3b018e6..0000000000000 --- a/plugins/discovery-azure-classic/licenses/jettison-1.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1a01a2a1218fcf9faa2cc2a6ced025bdea687262 diff --git a/plugins/discovery-azure-classic/licenses/jettison-1.4.1.jar.sha1 b/plugins/discovery-azure-classic/licenses/jettison-1.4.1.jar.sha1 new file mode 100644 index 0000000000000..815d87d917f2e --- /dev/null +++ b/plugins/discovery-azure-classic/licenses/jettison-1.4.1.jar.sha1 @@ -0,0 +1 @@ +8d16bbcbac93446942c9e5da04530159afbe3e65 \ No newline at end of file From 
0216ab24356a2bd6f01b1debd6327942b35575ad Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Mar 2022 12:44:43 -0500 Subject: [PATCH 014/653] Bump google-oauth-client from 1.31.0 to 1.33.1 in /plugins/repository-gcs (#2616) * Bump google-oauth-client in /plugins/repository-gcs Bumps [google-oauth-client](https://github.com/googleapis/google-oauth-java-client) from 1.31.0 to 1.33.1. - [Release notes](https://github.com/googleapis/google-oauth-java-client/releases) - [Changelog](https://github.com/googleapis/google-oauth-java-client/blob/main/CHANGELOG.md) - [Commits](https://github.com/googleapis/google-oauth-java-client/compare/v1.31.0...v1.33.1) --- updated-dependencies: - dependency-name: com.google.oauth-client:google-oauth-client dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- plugins/repository-gcs/build.gradle | 2 +- .../repository-gcs/licenses/google-oauth-client-1.31.0.jar.sha1 | 1 - .../repository-gcs/licenses/google-oauth-client-1.33.1.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-gcs/licenses/google-oauth-client-1.31.0.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/google-oauth-client-1.33.1.jar.sha1 diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index e1ecf3c65a0f9..2cfbd76394bcb 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -69,7 +69,7 @@ dependencies { api 'com.google.cloud:google-cloud-core-http:1.93.3' api 'com.google.auth:google-auth-library-credentials:0.20.0' api 'com.google.auth:google-auth-library-oauth2-http:0.20.0' - api 'com.google.oauth-client:google-oauth-client:1.31.0' + api 
'com.google.oauth-client:google-oauth-client:1.33.1' api 'com.google.api-client:google-api-client:1.30.10' api 'com.google.http-client:google-http-client-appengine:1.35.0' api 'com.google.http-client:google-http-client-jackson2:1.35.0' diff --git a/plugins/repository-gcs/licenses/google-oauth-client-1.31.0.jar.sha1 b/plugins/repository-gcs/licenses/google-oauth-client-1.31.0.jar.sha1 deleted file mode 100644 index 942dbb5d167a4..0000000000000 --- a/plugins/repository-gcs/licenses/google-oauth-client-1.31.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bf1cfbbaa2497d0a841ea0363df4a61170d5823b \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-oauth-client-1.33.1.jar.sha1 b/plugins/repository-gcs/licenses/google-oauth-client-1.33.1.jar.sha1 new file mode 100644 index 0000000000000..3897a85310ec6 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-oauth-client-1.33.1.jar.sha1 @@ -0,0 +1 @@ +0a431f1a677c5f89507591ab47a7ccdb0b18b6f7 \ No newline at end of file From 2425f64baab25fe593937f4b78a2964c1797e90a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Mar 2022 15:02:44 -0400 Subject: [PATCH 015/653] Bump htrace-core4 from 4.1.0-incubating to 4.2.0-incubating in /plugins/repository-hdfs (#2618) * Bump htrace-core4 in /plugins/repository-hdfs Bumps htrace-core4 from 4.1.0-incubating to 4.2.0-incubating. --- updated-dependencies: - dependency-name: org.apache.htrace:htrace-core4 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- plugins/repository-hdfs/build.gradle | 2 +- .../licenses/htrace-core4-4.1.0-incubating.jar.sha1 | 1 - .../licenses/htrace-core4-4.2.0-incubating.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-hdfs/licenses/htrace-core4-4.1.0-incubating.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/htrace-core4-4.2.0-incubating.jar.sha1 diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index dc1f55b686044..19f58bf48366d 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -61,7 +61,7 @@ dependencies { api "org.apache.hadoop:hadoop-client-api:${versions.hadoop3}" runtimeOnly "org.apache.hadoop:hadoop-client-runtime:${versions.hadoop3}" api "org.apache.hadoop:hadoop-hdfs:${versions.hadoop3}" - api 'org.apache.htrace:htrace-core4:4.1.0-incubating' + api 'org.apache.htrace:htrace-core4:4.2.0-incubating' api "org.apache.logging.log4j:log4j-core:${versions.log4j}" api 'org.apache.avro:avro:1.10.2' api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}" diff --git a/plugins/repository-hdfs/licenses/htrace-core4-4.1.0-incubating.jar.sha1 b/plugins/repository-hdfs/licenses/htrace-core4-4.1.0-incubating.jar.sha1 deleted file mode 100644 index 806c624c02cf0..0000000000000 --- a/plugins/repository-hdfs/licenses/htrace-core4-4.1.0-incubating.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -12b3e2adda95e8c41d9d45d33db075137871d2e2 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/htrace-core4-4.2.0-incubating.jar.sha1 b/plugins/repository-hdfs/licenses/htrace-core4-4.2.0-incubating.jar.sha1 new file mode 100644 index 0000000000000..e2eafb09dba00 --- /dev/null +++ 
b/plugins/repository-hdfs/licenses/htrace-core4-4.2.0-incubating.jar.sha1 @@ -0,0 +1 @@ +94b3f1966922bc45d0f8a86a2aa867a4b0df288b \ No newline at end of file From 932cab67bda39b1b57a2ff72f191847914f6fe13 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Mar 2022 15:03:19 -0400 Subject: [PATCH 016/653] Bump asm-tree from 7.2 to 9.2 in /modules/lang-painless (#2617) * Bump asm-tree from 7.2 to 9.2 in /modules/lang-painless Bumps asm-tree from 7.2 to 9.2. --- updated-dependencies: - dependency-name: org.ow2.asm:asm-tree dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- modules/lang-painless/build.gradle | 2 +- modules/lang-painless/licenses/asm-tree-7.2.jar.sha1 | 1 - modules/lang-painless/licenses/asm-tree-9.2.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 modules/lang-painless/licenses/asm-tree-7.2.jar.sha1 create mode 100644 modules/lang-painless/licenses/asm-tree-9.2.jar.sha1 diff --git a/modules/lang-painless/build.gradle b/modules/lang-painless/build.gradle index 7f37b5e76d904..c524f9a7e2f2c 100644 --- a/modules/lang-painless/build.gradle +++ b/modules/lang-painless/build.gradle @@ -47,7 +47,7 @@ testClusters.all { dependencies { api 'org.antlr:antlr4-runtime:4.9.3' api 'org.ow2.asm:asm-util:9.2' - api 'org.ow2.asm:asm-tree:7.2' + api 'org.ow2.asm:asm-tree:9.2' api 'org.ow2.asm:asm-commons:9.2' api 'org.ow2.asm:asm-analysis:7.2' api 'org.ow2.asm:asm:9.2' diff --git a/modules/lang-painless/licenses/asm-tree-7.2.jar.sha1 b/modules/lang-painless/licenses/asm-tree-7.2.jar.sha1 deleted file mode 100644 index 986a1c55f5e8f..0000000000000 --- a/modules/lang-painless/licenses/asm-tree-7.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-3a23cc36edaf8fc5a89cb100182758ccb5991487 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-tree-9.2.jar.sha1 b/modules/lang-painless/licenses/asm-tree-9.2.jar.sha1 new file mode 100644 index 0000000000000..7b486521ecef3 --- /dev/null +++ b/modules/lang-painless/licenses/asm-tree-9.2.jar.sha1 @@ -0,0 +1 @@ +d96c99a30f5e1a19b0e609dbb19a44d8518ac01e \ No newline at end of file From 8f4aec109de82340290d35863217d0ddcd49f383 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Mar 2022 13:13:49 -0700 Subject: [PATCH 017/653] Bump forbiddenapis in /buildSrc/src/testKit/thirdPartyAudit (#2611) Bumps [forbiddenapis](https://github.com/policeman-tools/forbidden-apis) from 3.2 to 3.3. - [Release notes](https://github.com/policeman-tools/forbidden-apis/releases) - [Commits](https://github.com/policeman-tools/forbidden-apis/compare/3.2...3.3) --- updated-dependencies: - dependency-name: de.thetaphi:forbiddenapis dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- buildSrc/src/testKit/thirdPartyAudit/build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/buildSrc/src/testKit/thirdPartyAudit/build.gradle b/buildSrc/src/testKit/thirdPartyAudit/build.gradle index 41e699db94dcf..2c86d28cf0206 100644 --- a/buildSrc/src/testKit/thirdPartyAudit/build.gradle +++ b/buildSrc/src/testKit/thirdPartyAudit/build.gradle @@ -40,7 +40,7 @@ repositories { } dependencies { - forbiddenApisCliJar 'de.thetaphi:forbiddenapis:3.2' + forbiddenApisCliJar 'de.thetaphi:forbiddenapis:3.3' jdkJarHell 'org.opensearch:opensearch-core:current' compileOnly "org.${project.properties.compileOnlyGroup}:${project.properties.compileOnlyVersion}" implementation "org.${project.properties.compileGroup}:${project.properties.compileVersion}" From 223efe68e61aa791b97273d717379d164bd2d00c Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Tue, 29 Mar 2022 12:23:08 -0400 Subject: [PATCH 018/653] Update Gradle to 7.4.1 (#2078) * Update Gradle to 7.4.1 Signed-off-by: Andriy Redko * Address code review comments, added @PathSensitive(PathSensitivity.RELATIVE) where applicable Signed-off-by: Andriy Redko --- buildSrc/build.gradle | 2 +- .../precommit/LicenseHeadersTask.groovy | 5 +++++ .../gradle/precommit/FilePermissionsTask.java | 5 +++++ .../precommit/ForbiddenPatternsTask.java | 5 +++++ .../gradle/precommit/LoggerUsageTask.java | 2 ++ .../gradle/precommit/ThirdPartyAuditTask.java | 2 ++ .../gradle/test/rest/CopyRestApiTask.java | 5 +++++ .../gradle/test/rest/CopyRestTestsTask.java | 5 +++++ .../src/main/resources/minimumGradleVersion | 2 +- gradle/missing-javadoc.gradle | 4 ++++ gradle/wrapper/gradle-wrapper.jar | Bin 59536 -> 59821 bytes gradle/wrapper/gradle-wrapper.properties | 4 ++-- 12 files changed, 37 insertions(+), 4 deletions(-) diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 6134e7cd0250e..1ec66b582aed9 
100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -112,7 +112,7 @@ dependencies { api 'commons-io:commons-io:2.7' api "net.java.dev.jna:jna:5.10.0" api 'gradle.plugin.com.github.johnrengelman:shadow:7.1.2' - api 'de.thetaphi:forbiddenapis:3.2' + api 'de.thetaphi:forbiddenapis:3.3' api 'com.avast.gradle:gradle-docker-compose-plugin:0.14.12' api 'org.apache.maven:maven-model:3.6.2' api 'com.networknt:json-schema-validator:1.0.68' diff --git a/buildSrc/src/main/groovy/org/opensearch/gradle/precommit/LicenseHeadersTask.groovy b/buildSrc/src/main/groovy/org/opensearch/gradle/precommit/LicenseHeadersTask.groovy index b330934ed2d26..b8d0ed2b9c43c 100644 --- a/buildSrc/src/main/groovy/org/opensearch/gradle/precommit/LicenseHeadersTask.groovy +++ b/buildSrc/src/main/groovy/org/opensearch/gradle/precommit/LicenseHeadersTask.groovy @@ -35,7 +35,10 @@ import org.opensearch.gradle.AntTask import org.gradle.api.file.FileCollection import org.gradle.api.tasks.Input import org.gradle.api.tasks.InputFiles +import org.gradle.api.tasks.IgnoreEmptyDirectories; import org.gradle.api.tasks.OutputFile +import org.gradle.api.tasks.PathSensitive +import org.gradle.api.tasks.PathSensitivity import org.gradle.api.tasks.SkipWhenEmpty import java.nio.file.Files @@ -78,6 +81,8 @@ class LicenseHeadersTask extends AntTask { */ @InputFiles @SkipWhenEmpty + @IgnoreEmptyDirectories + @PathSensitive(PathSensitivity.RELATIVE) List getJavaFiles() { return project.sourceSets.collect({it.allJava}) } diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/FilePermissionsTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/FilePermissionsTask.java index 9ffd472151b4b..d525a4a1e2c69 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/FilePermissionsTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/FilePermissionsTask.java @@ -46,8 +46,11 @@ import org.gradle.api.GradleException; import org.gradle.api.file.FileCollection; import 
org.gradle.api.file.FileTree; +import org.gradle.api.tasks.IgnoreEmptyDirectories; import org.gradle.api.tasks.InputFiles; import org.gradle.api.tasks.OutputFile; +import org.gradle.api.tasks.PathSensitive; +import org.gradle.api.tasks.PathSensitivity; import org.gradle.api.tasks.SkipWhenEmpty; import org.gradle.api.tasks.StopExecutionException; import org.gradle.api.tasks.TaskAction; @@ -92,6 +95,8 @@ private static boolean isExecutableFile(File file) { */ @InputFiles @SkipWhenEmpty + @IgnoreEmptyDirectories + @PathSensitive(PathSensitivity.RELATIVE) public FileCollection getFiles() { return GradleUtils.getJavaSourceSets(getProject()) .stream() diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ForbiddenPatternsTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ForbiddenPatternsTask.java index f57c190496452..754743b9b784c 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ForbiddenPatternsTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ForbiddenPatternsTask.java @@ -37,9 +37,12 @@ import org.gradle.api.file.FileCollection; import org.gradle.api.file.FileTree; import org.gradle.api.plugins.JavaPluginConvention; +import org.gradle.api.tasks.IgnoreEmptyDirectories; import org.gradle.api.tasks.Input; import org.gradle.api.tasks.InputFiles; import org.gradle.api.tasks.OutputFile; +import org.gradle.api.tasks.PathSensitive; +import org.gradle.api.tasks.PathSensitivity; import org.gradle.api.tasks.SkipWhenEmpty; import org.gradle.api.tasks.TaskAction; import org.gradle.api.tasks.util.PatternFilterable; @@ -100,6 +103,8 @@ public ForbiddenPatternsTask() { @InputFiles @SkipWhenEmpty + @IgnoreEmptyDirectories + @PathSensitive(PathSensitivity.RELATIVE) public FileCollection getFiles() { return getProject().getConvention() .getPlugin(JavaPluginConvention.class) diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/LoggerUsageTask.java 
b/buildSrc/src/main/java/org/opensearch/gradle/precommit/LoggerUsageTask.java index 1fd092b7f268f..ff9f6619d64e6 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/LoggerUsageTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/LoggerUsageTask.java @@ -37,6 +37,7 @@ import org.gradle.api.plugins.JavaPluginConvention; import org.gradle.api.tasks.CacheableTask; import org.gradle.api.tasks.Classpath; +import org.gradle.api.tasks.IgnoreEmptyDirectories; import org.gradle.api.tasks.InputFiles; import org.gradle.api.tasks.PathSensitive; import org.gradle.api.tasks.PathSensitivity; @@ -79,6 +80,7 @@ public void setClasspath(FileCollection classpath) { @InputFiles @PathSensitive(PathSensitivity.RELATIVE) @SkipWhenEmpty + @IgnoreEmptyDirectories public FileCollection getClassDirectories() { return getProject().getConvention() .getPlugin(JavaPluginConvention.class) diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java index ee68d2740e279..097710b3f1a6e 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java @@ -47,6 +47,7 @@ import org.gradle.api.tasks.CacheableTask; import org.gradle.api.tasks.Classpath; import org.gradle.api.tasks.CompileClasspath; +import org.gradle.api.tasks.IgnoreEmptyDirectories; import org.gradle.api.tasks.Input; import org.gradle.api.tasks.InputFile; import org.gradle.api.tasks.InputFiles; @@ -195,6 +196,7 @@ public Set getMissingClassExcludes() { @Classpath @SkipWhenEmpty + @IgnoreEmptyDirectories public Set getJarsToScan() { // These are SelfResolvingDependency, and some of them backed by file collections, like the Gradle API files, // or dependencies added as `files(...)`, we can't be sure if those are third party or not. 
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestApiTask.java b/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestApiTask.java index 399cd39d236d7..1468c4cb1b537 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestApiTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestApiTask.java @@ -43,9 +43,12 @@ import org.gradle.api.file.FileTree; import org.gradle.api.plugins.JavaPluginConvention; import org.gradle.api.provider.ListProperty; +import org.gradle.api.tasks.IgnoreEmptyDirectories; import org.gradle.api.tasks.Input; import org.gradle.api.tasks.InputFiles; import org.gradle.api.tasks.OutputDirectory; +import org.gradle.api.tasks.PathSensitive; +import org.gradle.api.tasks.PathSensitivity; import org.gradle.api.tasks.SkipWhenEmpty; import org.gradle.api.tasks.SourceSet; import org.gradle.api.tasks.TaskAction; @@ -112,8 +115,10 @@ public boolean isSkipHasRestTestCheck() { return skipHasRestTestCheck; } + @IgnoreEmptyDirectories @SkipWhenEmpty @InputFiles + @PathSensitive(PathSensitivity.RELATIVE) public FileTree getInputDir() { FileTree coreFileTree = null; boolean projectHasYamlRestTests = skipHasRestTestCheck || projectHasYamlRestTests(); diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestTestsTask.java b/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestTestsTask.java index 56ce449f4cf6f..dd94d040cb9d8 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestTestsTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestTestsTask.java @@ -43,9 +43,12 @@ import org.gradle.api.file.FileTree; import org.gradle.api.plugins.JavaPluginConvention; import org.gradle.api.provider.ListProperty; +import org.gradle.api.tasks.IgnoreEmptyDirectories; import org.gradle.api.tasks.Input; import org.gradle.api.tasks.InputFiles; import org.gradle.api.tasks.OutputDirectory; +import org.gradle.api.tasks.PathSensitive; 
+import org.gradle.api.tasks.PathSensitivity; import org.gradle.api.tasks.SkipWhenEmpty; import org.gradle.api.tasks.SourceSet; import org.gradle.api.tasks.TaskAction; @@ -104,8 +107,10 @@ String getSourceSetName() { return sourceSetName; } + @IgnoreEmptyDirectories @SkipWhenEmpty @InputFiles + @PathSensitive(PathSensitivity.RELATIVE) public FileTree getInputDir() { FileTree coreFileTree = null; if (includeCore.get().isEmpty() == false) { diff --git a/buildSrc/src/main/resources/minimumGradleVersion b/buildSrc/src/main/resources/minimumGradleVersion index ba92e72f5775b..6b0e58e78f5ee 100644 --- a/buildSrc/src/main/resources/minimumGradleVersion +++ b/buildSrc/src/main/resources/minimumGradleVersion @@ -1 +1 @@ -6.6.1 \ No newline at end of file +7.4.1 \ No newline at end of file diff --git a/gradle/missing-javadoc.gradle b/gradle/missing-javadoc.gradle index df47a3796c825..05531487f35f3 100644 --- a/gradle/missing-javadoc.gradle +++ b/gradle/missing-javadoc.gradle @@ -7,6 +7,8 @@ */ import javax.annotation.Nullable +import org.gradle.api.tasks.PathSensitive; +import org.gradle.api.tasks.PathSensitivity; import org.gradle.internal.jvm.Jvm /** @@ -178,6 +180,8 @@ configure([ class MissingJavadocTask extends DefaultTask { @InputFiles @SkipWhenEmpty + @IgnoreEmptyDirectories + @PathSensitive(PathSensitivity.RELATIVE) SourceDirectorySet srcDirSet; @OutputDirectory diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar index 7454180f2ae8848c63b8b4dea2cb829da983f2fa..41d9927a4d4fb3f96a785543079b8df6723c946b 100644 GIT binary patch delta 8958 zcmY+KWl$VIlZIh&f(Hri?gR<$?iyT!TL`X;1^2~W7YVSq1qtqM!JWlDxLm%}UESUM zndj}Uny%^UnjhVhFb!8V3s(a#fIy>`VW15{5nuy;_V&a5O#0S&!a4dSkUMz_VHu3S zGA@p9Q$T|Sj}tYGWdjH;Mpp8m&yu&YURcrt{K;R|kM~(*{v%QwrBJIUF+K1kX5ZmF zty3i{d`y0;DgE+de>vN@yYqFPe1Ud{!&G*Q?iUc^V=|H%4~2|N zW+DM)W!`b&V2mQ0Y4u_)uB=P@-2`v|Wm{>CxER1P^ z>c}ZPZ)xxdOCDu59{X^~2id7+6l6x)U}C4Em?H~F`uOxS1?}xMxTV|5@}PlN%Cg$( zwY6c}r60=z5ZA1L 
zTMe;84rLtYvcm?M(H~ZqU;6F7Evo{P7!LGcdwO|qf1w+)MsnvK5^c@Uzj<{ zUoej1>95tuSvDJ|5K6k%&UF*uE6kBn47QJw^yE&#G;u^Z9oYWrK(+oL97hBsUMc_^ z;-lmxebwlB`Er_kXp2$`&o+rPJAN<`WX3ws2K{q@qUp}XTfV{t%KrsZ5vM!Q#4{V& zq>iO$MCiLq#%wXj%`W$_%FRg_WR*quv65TdHhdpV&jlq<=K^K`&!Kl5mA6p4n~p3u zWE{20^hYpn1M}}VmSHBXl1*-)2MP=0_k)EPr#>EoZukiXFDz?Di1I>2@Z^P$pvaF+ zN+qUy63jek2m59;YG)`r^F3-O)0RDIXPhf)XOOdkmu`3SMMSW(g+`Ajt{=h1dt~ks ztrhhP|L4G%5x79N#kwAHh5N){@{fzE7n&%dnisCm65Za<8r_hKvfx4Bg*`%-*-Mvn zFvn~)VP@}1sAyD+B{{8l{EjD10Av&Mz9^Xff*t`lU=q=S#(|>ls520;n3<}X#pyh& z*{CJf7$*&~!9jMnw_D~ikUKJ2+UnXmN6qak{xx%W;BKuXt7@ky!LPI1qk?gDwG@@o zkY+BkIie>{{q==5)kXw(*t#I?__Kwi>`=+s?Gq6X+vtSsaAO&Tf+Bl$vKnzc&%BHM z=loWOQq~n}>l=EL(5&6((ESsQC3^@4jlO5Od{qN#sWV)vqXw}aA>*uvwZopNN(|-T zRTF%5Y_k1R$;(d-)n;hWex{;7b6KgdAVE@&0pd(*qDzBO#YZV%kh%pYt1`hnQ(Fa& zYiDrOTDqk5M7hzp9kI2h!PxNnuJ&xl*zF8sx6!67bA49R1bmUF5bpK&&{eI0U~cH}PM z3aW1$lRb|ItkG5~_eBNu$|I|vYIdAA9a!pVq<+UTx*M}fG`23zxXp&E=FfnY- zEzKj;Cu_s4v>leO7M2-mE(UzKHL4c$c`3dS*19OpLV^4NI*hWWnJQ9lvzP4c;c?do zqrcsKT*i~eIHl0D3r4N{)+RsB6XhrC^;sp2cf_Eq#6*CV;t8v=V!ISe>>9kPgh}NI z=1UZutslxcT$Ad;_P^;Oouoa(cs!Ctpvi>%aQ+Zp=1d|h{W9Wmf7JWxa(~<#tSZ?C%wu4_5F!fc!<@PIBeJ)Nr^$bB6!_Gic_7}c3J{QI~Gg5g5jTp9}V6KYgrgaX>pJt}7$!wOht&KO|+z{Iw@YL|@~D zMww}+lG}rm2^peNx>58ME||ZQxFQeVSX8iogHLq_vXb`>RnoEKaTWBF-$JD#Q4BMv zt2(2Qb*x-?ur1Y(NsW8AdtX0#rDB?O(Vs4_xA(u-o!-tBG03OI!pQD+2UytbL5>lG z*(F)KacHqMa4?dxa(Vcrw>IIAeB$3cx#;;5r2X;HE8|}eYdAgCw#tpXNy7C3w1q`9 zGxZ6;@1G%8shz9e+!K2MO*{_RjO}Jo6eL3{TSZ>nY7)Qs`Dhi5><@oh0r)gT7H-?3 zLDsd^@m%JvrS8sta5`QiZNs^*GT}Hiy^zjK2^Ni%`Z|ma)D2 zuyumbvw$M8$haCTI~6M%d4+P)uX%u{Sfg4Al+F7c6;O-*)DKI7E8izSOKB#FcV{M+ zEvY0FBkq!$J0EW$Cxl}3{JwV^ki-T?q6C30Y5e&p@8Rd?$ST-Ghn*-`tB{k54W<>F z5I)TFpUC!E9298=sk>m#FI4sUDy_!8?51FqqW!9LN1(zuDnB3$!pEUjL>N>RNgAG~-9Xm|1lqHseW(%v&6K(DZ3Pano(1-Qe?3%J&>0`~w^Q-p&@ zg@HjvhJk?*hpF7$9P|gkzz`zBz_5Z!C4_-%fCcAgiSilzFQef!@amHDrW!YZS@?7C zs2Y9~>yqO+rkih?kXztzvnB^6W=f52*iyuZPv$c42$WK7>PHb z6%MYIr5D32KPdwL1hJf{_#jn?`k(taW?mwmZVvrr=y~fNcV$`}v(8};o9AjOJumS4 
z`889O91^pkF+|@$d9wVoZ3;^j;^sUs&Ubo_qD&MTL%O z&*SE0ujG~zm;?x)8TLC&ft))nyI zcg44@*Q{cYT+qGrA=In_X{NNCD+B0w#;@g)jvBU;_8od6U>;7HIo@F*=g8CQUo(u^ z3r4FJ7#<@)MXO&5+DgKE&^>^`r!loe7CWE*1k0*0wLFzSOV8jvlX~WOQ?$1v zk$Or}!;ix0g78^6W;+<=J>z@CBs!<<)HvF(Ls-&`matpesJ5kkjC)6nGB@b{ii6-Uoho$BT%iJgugTOeZ$5Xo4D7Pd< zC*LJh5V@2#5%aBZCgzlQi3@<_!VfiL07ywc)ZbwKPfcR|ElQoS(8x|a7#IR}7#Io= zwg4$8S{egr-NffD)Fg&X9bJSoM25pF&%hf>(T&9bI}=#dPQyNYz;ZZ7EZ=u1n701SWKkZ9n(-qU ztN`sdWL1uxQ1mKS@x11;O|@^AD9!NeoPx}?EKIr!2>1Qq4gjfGU)tr6?Z5l7JAS3j zZeq{vG{rb%DFE4%$szK}d2UzB{4>L?Tv+NAlE*&Nq6g+XauaSI+N2Y8PJLw+aNg1p zbxr|hI8wcMP&&+(Cu|%+Jq|r>+BHk@{AvfBXKiVldN)@}TBS0LdIpnANCVE26WL-} zV}HJ^?m&$Rkq;Zf*i-hoasnpJVyTH__dbGWrB_R55d*>pTyl6(?$EO@>RCmTX1Hzr zT2)rOng?D4FfZ_C49hjMV*UonG2DlG$^+k=Y%|?Dqae4}JOU=8=fgY4Uh!pa9eEqf zFX&WLPu!jArN*^(>|H>dj~g`ONZhaaD%h_HHrHkk%d~TR_RrX{&eM#P@3x=S^%_6h zh=A)A{id16$zEFq@-D7La;kTuE!oopx^9{uA3y<}9 z^bQ@U<&pJV6kq7LRF47&!UAvgkBx=)KS_X!NY28^gQr27P=gKh0+E>$aCx&^vj2uc}ycsfSEP zedhTgUwPx%?;+dESs!g1z}5q9EC+fol}tAH9#fhZQ?q1GjyIaR@}lGCSpM-014T~l zEwriqt~ftwz=@2tn$xP&-rJt?nn5sy8sJ5Roy;pavj@O+tm}d_qmAlvhG(&k>(arz z;e|SiTr+0<&6(-An0*4{7akwUk~Yf4M!!YKj^swp9WOa%al`%R>V7mi z+5+UodFAaPdi4(8_FO&O!Ymb#@yxkuVMrog(7gkj$G@FLA#ENMxG)4f<}S%Fn?Up$+C%{02AgMKa^ z4SFGWp6U>{Q6VRJV}yjxXT*e`1XaX}(dW1F&RNhpTzvCtzuu;LMhMfJ2LBEy?{^GHG!OF!! 
zDvs64TG)?MX&9NCE#H3(M0K>O>`ca0WT2YR>PTe&tn?~0FV!MRtdb@v?MAUG&Ef7v zW%7>H(;Mm)RJkt18GXv!&np z?RUxOrCfs;m{fBz5MVlq59idhov21di5>WXWD-594L-X5;|@kyWi@N+(jLuh=o+5l zGGTi~)nflP_G}Yg5Pi%pl88U4+^*ihDoMP&zA*^xJE_X*Ah!jODrijCqQ^{=&hD7& z^)qv3;cu?olaT3pc{)Kcy9jA2E8I)#Kn8qO>70SQ5P8YSCN=_+_&)qg)OYBg|-k^d3*@jRAeB?;yd-O1A0wJ z?K*RDm|wE<(PBz~+C%2CTtzCTUohxP2*1kE8Of~{KRAvMrO_}NN&@P7SUO{;zx0iK z@or9R8ydYOFZf(cHASCAatL%;62IL27~SmASr(7F&NMr+#gNw@z1VM z_ALFwo3)SoANEwRerBdRV`>y`t72#aF2ConmWQp(Xy|msN9$yxhZ1jAQ67lq{vbC5 zujj|MlGo`6Bfn0TfKgi(k=gq0`K~W+X(@GzYlPI4g0M;owH3yG14rhK>lG8lS{`!K z+Nc@glT-DGz?Ym?v#Hq|_mEdPAlHH5jZuh*6glq!+>Lk$S%ED2@+ea6CE@&1-9a?s znglt|fmIK}fg<9@XgHe4*q!aO<-;Xj$T?IzB-{&2`#eA6rdtCi80mpP&vw(Uytxu$#YzNI_cB>LS zmim>ys;ir;*Dzbr22ZDxO2s;671&J0U<9(n1yj)J zHFNz=ufPcQVEG+ePjB<5C;=H0{>Mi*xD>hQq8`Vi7TjJ$V04$`h3EZGL|}a07oQdR z?{cR(z+d>arn^AUug&voOzzi$ZqaS)blz-z3zr;10x;oP2)|Cyb^WtN2*wNn`YX!Y z+$Pji<7|!XyMCEw4so}xXLU)p)BA~2fl>y2Tt}o9*BPm?AXA8UE8a;>rOgyCwZBFa zyl42y`bc3}+hiZL_|L_LY29vVerM+BVE@YxK>TGm@dHi@Uw*7AIq?QA9?THL603J% zIBJ4y3n8OFzsOI;NH%DZ!MDwMl<#$)d9eVVeqVl(5ZX$PPbt*p_(_9VSXhaUPa9Qu z7)q4vqYKX7ieVSjOmVEbLj4VYtnDpe*0Y&+>0dS^bJ<8s*eHq3tjRAw^+Mu4W^-E= z4;&namG4G;3pVDyPkUw#0kWEO1;HI6M51(1<0|*pa(I!sj}F^)avrE`ShVMKBz}nE zzKgOPMSEp6M>hJzyTHHcjV%W*;Tdb}1xJjCP#=iQuBk_Eho6yCRVp&e!}4IBJ&?ksVc&u#g3+G$oNlJ?mWfADjeBS-Ph3`DKk-~Z70XugH8sq2eba@4 zIC1H_J$`9b$K`J)sGX3d!&>OmC@@rx1TL~NinQOYy72Q_+^&Mg>Ku(fTgaXdr$p_V z#gav1o{k~c>#)u3r@~6v^o)Lf=C{rAlL@!s457pq)pO;Cojx7U{urO4cvXP|E>+dV zmr2?!-5)tk-&*ap^D^2x7NG6nOop2zNFQ9v8-EZ{WCz-h36C)<^|f{V#R_WE^@(T0+d-at5hXX{U?zak*ac-XnyINo+yBD~~3O1I=a z99|CI>502&s-Qi5bv>^2#cQ%ut<4d7KgQ^kE|=%6#VlGiY8$rdJUH{sra;P~cyb_i zeX(kS%w0C?mjhJl9TZp8RS;N~y3(EXEz13oPhOSE4WaTljGkVXWd~|#)vsG6_76I)Kb z8ro?;{j^lxNsaxE-cfP;g(e;mhh3)&ba}li?woV2#7ByioiD>s%L_D;?#;C#z;a(N z-_WY<=SH42m9bFQ>Nb z@4K$@4l8pD7AKxCR>t0%`Qoy9=hA?<<^Vcj8;-E+oBe3ReW1`el8np8E$k{LgFQ}2 z2t8a`wOXFdJ9!5$&mEfD1CnJ)TB+RJih88-Zos9@HZ# zL#{qfbF0ARTXkR@G{lwlOH~nnL)1jcyu!qv2`57S&%oKz0}r{~l9U_UHaJ5!8#nrs 
z?2FrL`mxnzu&{bweD&62)ilz*?pYIvt`T!XFVVA78})p1YEy7 z8fK#s?b~Yo$n7&_a?EBdXH-_W)Z44?!;DFx6pZ?~RArtBI*Qm4~6nX6Z_T*i$bQPE;Qz?DAPstpGSqr-AJ zo%m9cA`oDDm?&dTaoh_>@F>a?!y4qt_;NGN9Z<%SS;fX-cSu|>+Pba22`CRb#|HZa z;{)yHE>M-pc1C0mrnT~80!u&dvVTYFV8xTQ#g;6{c<9d!FDqU%TK5T6h*w*p980D~ zUyCb`y3{-?(mJFP)0*-Nt;mI$-gc4VQumh|rs&j_^R{sgTPF`1Xja2YWstsKFuQ(d zmZMxV$p$|qQUXchu&8%J(9|)B?`~rIx&)LqDS>ob5%gTeTP#Sbny#y*rnJ&?(l=!( zoV~}LJ1DPLnF8oyM(2ScrQ0{Q4m4-BWnS4wilgCW-~~;}pw=&<+HggRD_3c@3RQIr z9+-%!%}u_{`YS=&>h%kPO3ce}>y!d-zqiniNR-b5r97u;+K6HA2tS>Z#cV{+eFI`* zd8RMGAUtX1KWfPV;q<-5JAykS+2sY$2~UX+4461a(%{P#{rwFPu0xpIuYlbgD{C7C z=U{FUarVTYX6ZUq3wE@G^QT4H2Re;n$Fz9cJ>hABl)9T8pozqbA1)H-%1=WKm^QMu zjnUZ&Pu>q+X&6Co*y#@pxc-4waKMInEPGmE_>3@Ym3S*dedSradmc5mlJn`i0vMW6 zhBnGQD^Z;&S0lnS0curqDO@({J7kTtRE+Ra?nl^HP9<)W&C>~`!258f$XDbyQOQXG zP8hhySnarOpgu8xv8@WlXnm(Uk~)_3$Sg0vTbU3 z{W!5B(L3{Yy3K5PN<@jEarAtja`}@KYva&zFRF*s+_%jIXh$T(S=an8?=Ry3H*NRqWgsM`&!#|@kf1>=4q%bFw7^Rhz!z5I zyI^zU8_R1WN9`88Z=n>pIZQ`Ixr~_9G%Q}@A7rd#*%y7G zXl^Id=^ZL?Rx}}gWXCqzj9C6;x(~mAH|$JteXa1MH<6UQig@!Hf~t}B%tP0I|H&;y zO6N0}svOa1a^PyP9N5?4W6VF%=Bj{qHUgc8@siw4bafT=UPFSoQqKgyUX>sXTBZ=x zOh^Ad!{kOM9v{%5y}`-8u*T&C7Vq6mD%GR}UeU(*epO&qgC-CkD;%=l)ZuinSzHM` z{@`j&_vC6dDe{Yb9k@1zeV_K6!l(@=6ucoI=R^cH=6{i71%4W3$J-?<8Qn#$-DMtA z6Qqi)t?4ifrt%3jSA#6ji#{f(($KBL-iQh-xrC||3U3lq`9>r)>X%oLvtimuHW-)} zy}>9~|M>w4eES`g7;iBM%Se5-OP%1U6gNWp3AZqT8C6OlFFfQ$|7LL;tBV)(qlp4K zruar^K8FnJN3@_}B;G`a~H`t|3+6d>q3#`ctTkE-D^1#d9NalQ04lH*qUW2!V zhk7#z8OwHhSl8w14;KctfO8ubZJ4$dEdpXE78wABz=n5*=q9ex3S}`e7x~~V-jmHOhtX2*n+pBslo3uosdE7xABK=V#-t{1Hd~?i z{i~%Bw6NYF+F$aK$M`r#xe=NxhA5=p%i7!$);sd>Q}#`G?Q~fygrMXmZw?0#5#17W}6Tj+&kFexG{!mYl5FoA99}3G9l;3lVQ^ z48^~gsVppE*x91WheqI(A%F0Z#$#1UJP1R12Mj9r)y(A?a+iquX+d8WD4WAQJ_!oq z9rTISr7bPd(GTP57xm$}C}&kjMivi;zi^Y9g3&X0A;ovdJ?{%_wHgt%%9P&N4H z^XzV(uNA4 zAP`hgP6BEN5`YXh|DF~6Pud?~gWfhUKoPX4>z|}0aocC&K+AoV%|SX*N!wGq3|y< zg4lP(04XIPmt6}$N!dTk+pZv>u;MTB{L4hp9uXk7>aS!6jqM2lVr%{)H3$O127TSZ 
z0x9hi0k-P?nWFdQ0K`pykqUIT&jD~B0tHP{ffS(}fZ(aW$oBWTSfHO!A^><6vA?qar%tzN-5NQO zL&|F{nGiQyzNJ+bM$Y`n=Lx^3wTG^o2bGB@cwr1eb+6c-1tN=U+Db;bc~eJ!hwM{SbI=#g?$!PjDB+) zPgU_2EIxocr*EOJG52-~!gml&|D|C2OQ3Y(zAhL}iae4-Ut0F*!z!VEdfw8#`LAi# zhJ_EM*~;S|FMV6y%-SduHjPOI3cFM(GpH|HES<}*=vqY+64%dJYc|k?n6Br7)D#~# zEqO(xepfaf2F{>{E2`xb=AO%A<7RtUq6kU_Iu0m?@0K(+<}u3gVw5fy=Y4CC*{IE3 zLP3YBJ7x+U(os5=&NT%gKi23bbaZ`@;%ln)wp4GpDUT$J8NtFDHJzIe_-t}{!HAsh zJ4<^WovY};)9IKAskSebdQiXv$y5}THuJZ}ouoElIZRui=6lrupV|_Jz=9^&;@HwL;J#@23k?A;k`0Bgf;ioO>W`IQ+4? z7A)eKoY4%+g%=w;=Vm8}H>@U*=*AWNtPqgWRqib#5RTGA@Q=43FrQn3J`GkTUV5yp0U`EOTqjfp+-9;0F8!dMEwwcK%(6`8sDD^aR04 zd6O5vh|Xk?&3dy4f|1QK&Ulf{h6Iq;d-&*ti#Ck>wZFG;GHwc?b;X~eBITx49>2d8 z4HcK&1&DvEGT6kXdzAm4oO8%c}8OBt~8H956_;YP-ss*uMf==a+%w~F>Qkm7r)IAuxuoX}h92$gHqbFUun#8m zWHdy`Zrm#=Pa98x8cO0vd@Tgkr*lm0{dky+Gocr0P8y%HGEI#c3qLqIRc`Oq_C%*; zG+QTr(#Q|yHKv6R@!DmLlwJQ3FAB)Yor-I4zyDyqM4yp5n2TrQH>gRt*Zw0+WI-Sj`EgmYHh=t9! zF6lz^xpqGGpo6!5`sc0a^FVhy_Uxq|@~(1@IIzV)nTpY9sY`CV!?8e&bB8=M&sYEb z2i}fvKdhp9Hs68Y-!QJ<=wE(iQ5+49tqt;Rh|jhYrI5VW-mIz|UY{h8E=rC5sh#DU z?wGgk-Tn!I?+Zer7pHlF_Z^!Kd1qkS3&lv#%s6-<5Y%jQL${cge5=G5Ab?D&|9$Y~ zf%rJC2+=2vg;y0-SJb3<@3%}BO$T$C66q$L_H33a`VUbgW~N(4B=v5(<=My|#|J7q z*Ox4wL4kbJd_~EjLTABSu4U7Jk#`y(6O*U6(k6XxM}CtGZB(H@3~kh*zaGRXM}Iwp zQ%xFk2>@wiZrVCV_G4G~v;NebCQ%T7{SDyPpSv&dT@Cn)Mx@IK*IdNrj{*4pkV4wv z)y0J538h>cpB7iPSzA~x24T`{dzNkpvGIqvt1Dvdq@o-`B=$hkczX8$yFMhsWNK-X zxr$kR$tMD0@W)Vxe1^t9qVmsg&K^F@u84)(n2dttIEAZFN6VD$&tskpG%SI7whGL3 z)DeRiwe&?8m7U{G`oW8!SCi*dM>oYL%UKQnKxV_0RXAEBQg1kStExGEUVwLJ0orGGwb7uv+kPDl7_E2*iD|J*=8A@;XCvwq0aw5oJYN*Yh&o=l} z2z8YKb-fIAH5spql4eXqp*)o2*b>#1@DSt?zZi{GPj0gH&Nm+EI<3^z0w%YTEV4xw zI6$+=Faa|Y4o5i0zm5lOg|&tmnJ806DBovU@Ll6XsA;NRrTK~t*AAJIAS=v-UZ%Pr z$oddI@NRir&erzCwq|)ciJemr-E061j{0Vc@Ys7K(mW|JYj*$+i1Q8XlIK8T?TYS(AXu$`2U zQ@fHxc=AVHl_}cRZQ)w0anMEoqRKKIvS^`<-aMf*FM`NsG&Uowneo+Ji$7DUDYc7*Hjg;-&aHM%3 zXO6cz$$G};Uqh+iY7Wpme>PHG4cu(q;xyskNLs$^uRRMfEg?8Cj~aE-ajM%CXkx0F z>C?g3tIA#9sBQOpe`J+04{q7^TqhFk^F1jFtk4JDRO*`d-fx`GYHb=&(JiaM1b?Y^ 
zO3Kj3sj76ieol|N$;>j@t#tKj=@*gP+mv}KwlTcPYgR$+)2(gk)2JNE=jSauPq!$< z<|?Sb%W)wS)b>b6i{8!x!^!xIdU3{CJFVnTcw0j{M%DUCF=_>eYYEUWnA-|B(+KYL z_W_`JI&&u^@t0})@DH^1LDuT0s3dMpCHIbYBgOT4Zh_4yHbSqRbtIKndeT4Q*Jg91 z@>rO!^t-G~*AIW;FQ$3J=b;oGg8?CTa~qNCb>&cgp@e;?0AqA&paz~(%PYO+QBo4( zp?}ZdSMWx0iJm7HVNk9A#^9Osa#GPJ!_pYEW}($8>&2}fbr@&ygZ?${A7_9?X$(&5 z#~-hxdPQwCNEpf=^+WH-3`2LxrrBMTa}~qJC9S;VzhG!On^JLyW6WkF{8aAE$sM+( zxr8xLW(KIjI`Rm(24r3OJBk<3GF=G!uSP0-G&AY32mLm8q=#Xom&Pqv=1C{d3>1^ zAjsmV@XZ%BKq^eUfBpa8KvO8ob|F3hAjJv*yo2Bhl0)KUus{qA9m8jf)KnOGGTa6~4>3@J_VzkL|vYPl*uL+Ot*Q7W!f5rJw5+AsjP_IfL+-S*2p| zB7!FhjvkUTxQkGWGSg{X;h~dK>gAJivW?88Nu!3o>ySDaABn$rAYt086#27fbjPQS zhq>55ASvm*60qRdVOY9=bU^+{Pi#!OaZwENN;zy5?EztOHK-Q5;rCuiFl}BSc1YaQ zC-S{=KsGDz@Ji9O5W;XxE0xI|@3o6(2~i4b8Ii9VT;^G$*dRw(V?=br)D&q^XkeBX z+gl~+R@rVD-Hwv@7RHV?Bip5KMI)aV^&snt?H<$Nt=OPx#VxF&BGi?2A2+lNOYywNUGMeGL;|(=UjGDtLG0sN&LpGx;|U;xa13s z;W_|SPk^G}!M9_^pO zA3bt3-tca%^42sHeDtfcC0S3w3H1ny!Bxpa=*k?XRPpx9Bb-gx1J9Yvx)4J(8cG+q z(iCPZ9dsf3#QVyZgD_MW#G#qgV)olu$59&3(PzQfw@%4uZ~<5J=ABvdY43(Qnp{;G zHg3>@T#>DbTuhFl3)fb3TFqdh)V2aq7!;&JOHseTWukvA7}(iGUq;v-{2J0iHSNHq z;+)h!p6Ok^+Sp8-jgL($n6Qu47xyE`cFO5SdZR6;R!FET`tm#0D37z339Suxjpv+s z*=%2-N$N?X&0?x_uut3erF@aBGj;9$k9?3FlbDO{RQa1_qtxrh4!4#fjp4x~akvdTp@ zos?^Q&XE;3N93s4rHQGPrV7+au1$$aB6$hLy*Yz_kN$~dweb9PcB!eYVQTGjFuJP> zZCEwBtb>TIgIO^qAzq@Bv-qud_ZD-2W<_at&ml-gv`tPt$@DF5`HlA zM>DmmMkpv&Zm-8)Y#0bLQf4MpD4_-7M8eu6rh(tL8dq8onHs#R9J~dGd2IaXXMC~h z91pKhnQa%Fsn29nAA1;x(%oC zhca~qQDJaMf?wFrl-Pj;e$bZMYmMF!Y3Lv&Sb?Sjn#!NVx&NDyc^$b4uYyo2OmERa zRz;yDGd@JTykzFLe|Wk-y7#3x`6$wt$zR8r48mdUvfbeL+4D|Z``~7$PrE@qc7rZe zVsIoIbCwzjLZ@_M1*bD{HaYn();Z1-q*-I{tEnTZ(}Zmk&%MXSNBX>o| z-u*RNkAyKC-Srp7c-=@5f)xMWg>o2WWl}j6j9=8+D8;T z>0*0q#;qw8%U8i;6s0fu#I*%(g*@@a2Er@@nyI}{=@W{Z-;`=wN4N~>6Xrh&z#g}l zN1g5}0-#(nHUTv_rl2{yUZ;h#t&Fd?tY!7L%ClY)>uH-Ny2ET$lW$S)IQiN79H)D^ zb&0AXYkupy0~w8)*>Sj_p9}4L?lGTq%VG|2p`nWGhnM^!g|j-|O{%9Q%swOq63|*W zw$(N_laI}`ilB+o!a-wl?er~;;3+)$_akSQ!8YO_&-e*SI7n^(QQ;X0ZE`{4f!gAl 
z5$d+9CKVNonM!NO_frREICIAxOv)wm>}-k?iRisM`R7;=lyo|E_YR~FpS&PS`Lg0f zl-ON<0S%Uix8J%#yZdkCz4YNhcec<|7*P(JsM#>-L>+tYg_71q9~70FAc^6KW5jql zw!crdgVLH1G_eET=|SEc977;)ezVC|{PJZfra|}@rD;0s&@61mTEBJtILllg{%{vN zfhb&lq0yChaLhnJ-Qb62MB7`>M;|_ceHKZAeeh@#8tbrK!ArP6oXIhMK;dhEJTY`@ z0Tq>MIe0`7tGv)N*F0IGYSJv0vN?Az8g+4K9S!pW2~9F4W(_U_T=jCZrzuZ3*|__T zONp_UWmyePv8C~rckc?Xji;Z5OEqg zC*Um)i;Wh4TEwqReQdVVbUKT^2>Tpi6z_^-uF*adUFug4i@JhzpWT^Sk&E>CyP2?H zWf6x}ehuTs6wvzCnTU&gYzT029Nz19(In1WC z`(1IGmi!O%2AR|BjQa4Q0~u)kM%}?xQyjWuQ16^Gp++;`vr7!k--UZWM*~7Zl|ceO@I3`OpaRhD;YoCuo5IC0uHx>9 z478hu@H|e0Zlo)Zj@01#;8BDs@991xe~^9uG2}UXLM(m7fa}AMwX*tjioBeV&Q8Gx zSq$6wZFkRBK`cMI>R(@W@+lo2t)L+4q-negWRLWZBz*|%=W4v62JrmzNuOtA*x)QE z5L%=OH#@KMdB%Jp^r?0tE}5-*6oP`-lO7Sf)0)n*e<{HA=&qhLR)oD8-+V}Z4=md) z+k9lKf64DB2hAT)UaCP~di?-V3~JBH7itYyk~L6hrnxM%?RKntqd`=!b|e7eFnAcu z3*V;g{xr7TSTm$}DY%~SMpl>m{Sj!We+WfxSEor?YeiAxYUy25pn(?T()E>ByP^c@ zipwvWrhIK((R((VU+;@LmOnDu)ZXB3YArzzin!Z^0;PyJWnlfflo|q8(QY;o1*5CO z##hnkO{uynTMdk`~DOC#1 zdiYxQoy}=@7(ke#A8$YZZVtk4wo$8x28&I;cY3Ro-|kW=*yiiHgCLZeAr)UtVx>Tu z|LvL0hq|1-jC0I4x#>&QZCfrVB=zT!nR|~Uz`9%~2 znl{uZ{VEszW`Fad^q_HB!K9*|U-stK%?~;g?&&+12A}Rq$z($Bzuk^2X(Y=hF?-dQ ztc3DsQKI;qhWIV`99Q#R3xnU0AvY!i*BECj-z9l74|%O=V@nlv|qqC^r^-~C?E zGW%c|uYgnfJ(gjsTm_cIqcv*mYM{+i+&@F@+69ZQOK&u#v4oxUSQJ=tvqQ3W=*m;| z>SkBi8LYb-qRY7Sthh*0%3XAC%$z1rhOJzuX=PkTOa=DlocZUpE#KxVNH5)_4n=T( zGi3YrH7e~sPNYVBd~Grcq#CF~rN{p9Zza-Ntnwfma@TB)=3g36*0lSZg#ixEjFe%+ zX=&LDZ5zqculZ`=RYc^ln(~;nN|Qh6gN=!6f9-N2h+3NWbIxYud&;4SX*tWf5slk4 z{q@@l71UAZgj~*6edXb57fBUxvAS7s(RI=X868JM0+^DCn2yC>;v%S;qPOjB>YVsz(Zx9a>>BK&M zIQK>7_n)4ud0X5YM}^i*keH{ehLsiy9@NvOpsFeQjdI6anLGvVbBw_*fU1TzdVS$i z*4j7z!I5RF#rSz|8ibi$;qE{4`aqWYik7QB5U&F5C*;TO_x+gtzPGpzNt!7~nsBT7)Ckc(K~%uv&{{6A`mmBJVAk-{s~52Vu|HbCH7_W1~ZCX^RflOakGg=jo2Z z<*s;5-J+2@^LRDZ-7EV&Pq+FTErw@pfFqvx^i%E7Fx#^n(E`m2(c>K-O5`M`Yek9el zzTGs5qD6*G;y#~xu3>qWuO?-amKYtvRA}I9z#UspEeM;wOERYeot_n_EUMJf$4_u?E!6X~?q)tPoZb^_;8Y_Ox2h1m<+Le-fsRd|T8db<8#$bqez 
zua^Z|>h%zdnuU^ww$#-dZ9NTM`FN+!IlLkz*FqWb!x^Z|C{KyGjZ+>G;;7Mb@LY|H zc+Gp`L((Dw7pnDlHNm&;SfHedhx*kad$I^uGz{`0BYelq0yEUHpNKSkvj$|dpvY3{7*YGyhXA^LP0&wOw9oNoC=QoVx1<2Dne8qqZL zm>nFh5DX(-RnQwvHCZQwn^#Z=E!SPVlaRJ78Bo@}!!9dRt^qZy?-*`Pt4WSmgucJv zV1yFkcjlEM^uz-;b#Q7ZCP@Lk)m}uPX={R4B=56k7WNh11BN~0T*vr@!!ow^B0hOR zQ)4)&(e%>bNNL%bm<&8H{*l_L7s0$2GUgX2Vd;=4d9Dm2v3TaL+;L>{K7h7 zV#k?xDPm(NDE31$ z<}|X)pEY6myjK+^gaIMk&Yj2~F0rSKemNqlsVm4c|N7mp_C*L01s;GNx#D-*&gk!qQr}^?_r@q!8fuXw!)fA7xkd} zb>vHvdx~H$5qqAWrow7}+8zBM65-JOt5z za=T6f7MK`XJuQog8kIEboPdhcaVJeHy)5z7EBLK5NRr()E|#K0L0N^JD@pUA^Czb` zbUZ_558y+vqAGeyHCbrvOvLD67Ph}06959VzQ_|>RrXQAqE+AQ(-AaKdxoWaF8hdt z{O3W@b^*o#-f1VuU>YMV03ELF7zkCN4Q&b#prz%3Nne0lSbRo@@ z^ihv%oIl~Qyl6Q;a#$*jOC%x0_;eis*)J7=f@Ct*)xF5 zo}u~@-I}2|$b%5L7>@+Z?4o+1r&v6ceIy+vroK&jCQ<4q&45HP2wCol4hVm3pZtjf zHz1D7oyaSKJ~T{Gx}7ONLA)D5k(%%`WswrDyzX*rn}i}}TB4^y#@mAwPzoC)`?rYv zHgx|trUN#mu*VzUV~8TnJM2Qh*ZM5B{x&y>5An`(M7=Z*Q>TdiH@j*2=moNuOtvpz z+G`@~-`%~+AgPKgke@XiRPgndh@bp*-HRsh;HTtz@-y_uhb%7ylVOTqG0#u?Vn5c5 zEp*XRo|8hcgG^$#{$O9CJ&NE;TrfRpSnLmes&MO{m=N%zc`}gb!eQ7odl$oy1%PI} z#AIxx%oRVy&{O~9xnK4$EY>(eQj}!HKIV$Fz*H=-=Kn)N0D6u`(;iO|VraI4fu_W` z;b5{7;Lyx4za}DU#+U7}=H0dAS#YJJ&g2!P@Htu-AL&w=-)*%P9h2{wR|@?Ff9~)b z^+e_3Hetq7W%ls{!?<6&Y$Z;NNB41pvrv)|MET6AZXFXJeFqbFW5@i5WGzl?bP+~? 
z*&_puH;wKv2)9T_d+P`bLvJFqX#j&xa*-;0nGBbQf0DC>o~=J_Wmtf*2SZQr?{i~X z9-IbRH8{iy?<0v9Ir1?$66+igy|yDQ5J~A9sFX@Pe<*kCY8+MwH?I z`P}zfQ6l^AO8ehZ=l^ZR;R%uu4;BK*=?W9t|0{+-at(MQZ(CtG=EJFNaFMlKCMXu30(gJUqj5+ z`GM|!keqcj;FKTa_qq;{*dHRXAq157hlB@kL#8%yAm2AgfU|*rDKX@FLlp=HL8ddv zAWLCHe@DcDeB2}fl7#=0+#<05c3=VqM*O3bkr@9X4nO|)q0hU;Gye{L8ZN*NH8Id@mP-u;Fmb8YuorjLrW&ndip8CN%_qp982r w1WEnz9^$&s1hkp_3#lPJQ~!HI7WYYjA7>z!`?f%npAh2%rB@vD|Lau$2O)#1n*aa+ diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index daf75f8e132cb..30b8947900f87 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -11,7 +11,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-7.3.3-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-7.4.1-all.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionSha256Sum=c9490e938b221daf0094982288e4038deed954a3f12fb54cbf270ddf4e37d879 +distributionSha256Sum=a9a7b7baba105f6557c9dcf9c3c6e8f7e57e6b49889c5f1d133f015d0727e4be From d8a1ba691204976e1b0e4ffc8e62b08a22e63692 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Tue, 29 Mar 2022 12:24:37 -0400 Subject: [PATCH 019/653] [CVE-2020-36518] Update jackson-databind to 2.13.2.2 (#2599) Signed-off-by: Andriy Redko --- buildSrc/build.gradle | 2 +- buildSrc/version.properties | 1 + distribution/tools/upgrade-cli/build.gradle | 2 +- .../upgrade-cli/licenses/jackson-databind-2.13.2.2.jar.sha1 | 1 + .../tools/upgrade-cli/licenses/jackson-databind-2.13.2.jar.sha1 | 1 - libs/dissect/build.gradle | 2 +- modules/ingest-geoip/build.gradle | 2 +- .../ingest-geoip/licenses/jackson-databind-2.13.2.2.jar.sha1 | 1 + modules/ingest-geoip/licenses/jackson-databind-2.13.2.jar.sha1 | 1 - plugins/discovery-ec2/build.gradle | 2 +- .../discovery-ec2/licenses/jackson-databind-2.13.2.2.jar.sha1 | 1 + 
plugins/discovery-ec2/licenses/jackson-databind-2.13.2.jar.sha1 | 1 - plugins/repository-azure/build.gradle | 2 +- .../licenses/jackson-databind-2.13.2.2.jar.sha1 | 1 + .../repository-azure/licenses/jackson-databind-2.13.2.jar.sha1 | 1 - plugins/repository-hdfs/build.gradle | 2 +- .../repository-hdfs/licenses/jackson-databind-2.13.2.2.jar.sha1 | 1 + .../repository-hdfs/licenses/jackson-databind-2.13.2.jar.sha1 | 1 - plugins/repository-s3/build.gradle | 2 +- .../repository-s3/licenses/jackson-databind-2.13.2.2.jar.sha1 | 1 + plugins/repository-s3/licenses/jackson-databind-2.13.2.jar.sha1 | 1 - qa/os/build.gradle | 2 +- qa/wildfly/build.gradle | 2 +- test/fixtures/hdfs-fixture/build.gradle | 2 +- 24 files changed, 18 insertions(+), 17 deletions(-) create mode 100644 distribution/tools/upgrade-cli/licenses/jackson-databind-2.13.2.2.jar.sha1 delete mode 100644 distribution/tools/upgrade-cli/licenses/jackson-databind-2.13.2.jar.sha1 create mode 100644 modules/ingest-geoip/licenses/jackson-databind-2.13.2.2.jar.sha1 delete mode 100644 modules/ingest-geoip/licenses/jackson-databind-2.13.2.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/jackson-databind-2.13.2.2.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/jackson-databind-2.13.2.jar.sha1 create mode 100644 plugins/repository-azure/licenses/jackson-databind-2.13.2.2.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/jackson-databind-2.13.2.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/jackson-databind-2.13.2.2.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/jackson-databind-2.13.2.jar.sha1 create mode 100644 plugins/repository-s3/licenses/jackson-databind-2.13.2.2.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/jackson-databind-2.13.2.jar.sha1 diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 1ec66b582aed9..cc7742a0d4390 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -116,7 +116,7 @@ dependencies { api 
'com.avast.gradle:gradle-docker-compose-plugin:0.14.12' api 'org.apache.maven:maven-model:3.6.2' api 'com.networknt:json-schema-validator:1.0.68' - api "com.fasterxml.jackson.core:jackson-databind:${props.getProperty('jackson')}" + api "com.fasterxml.jackson.core:jackson-databind:${props.getProperty('jackson_databind')}" testFixturesApi "junit:junit:${props.getProperty('junit')}" testFixturesApi "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${props.getProperty('randomizedrunner')}" diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 34934d63a8975..41d8aa41ac631 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -10,6 +10,7 @@ bundled_jdk = 17.0.2+8 spatial4j = 0.7 jts = 1.15.0 jackson = 2.13.2 +jackson_databind = 2.13.2.2 snakeyaml = 1.26 icu4j = 70.1 supercsv = 2.4.0 diff --git a/distribution/tools/upgrade-cli/build.gradle b/distribution/tools/upgrade-cli/build.gradle index 0e1996f3d68fa..d29c808562168 100644 --- a/distribution/tools/upgrade-cli/build.gradle +++ b/distribution/tools/upgrade-cli/build.gradle @@ -15,7 +15,7 @@ dependencies { compileOnly project(":server") compileOnly project(":libs:opensearch-cli") implementation "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" - implementation "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}" + implementation "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" implementation "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" testImplementation project(":test:framework") testImplementation 'com.google.jimfs:jimfs:1.2' diff --git a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.13.2.2.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.13.2.2.jar.sha1 new file mode 100644 index 0000000000000..9d9266300feef --- /dev/null +++ b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.13.2.2.jar.sha1 @@ -0,0 +1 @@ 
+ffeb635597d093509f33e1e94274d14be610f933 \ No newline at end of file diff --git a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.13.2.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.13.2.jar.sha1 deleted file mode 100644 index 5d356f3fd045f..0000000000000 --- a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.13.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -926e48c451166a291f1ce6c6276d9abbefa7c00f \ No newline at end of file diff --git a/libs/dissect/build.gradle b/libs/dissect/build.gradle index 0f0b8407e7e6b..47f7970ea5ac0 100644 --- a/libs/dissect/build.gradle +++ b/libs/dissect/build.gradle @@ -34,7 +34,7 @@ dependencies { } testImplementation "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" testImplementation "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" - testImplementation "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}" + testImplementation "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" } tasks.named('forbiddenApisMain').configure { diff --git a/modules/ingest-geoip/build.gradle b/modules/ingest-geoip/build.gradle index f78dc49e9fb8a..b1d5afbe68a17 100644 --- a/modules/ingest-geoip/build.gradle +++ b/modules/ingest-geoip/build.gradle @@ -42,7 +42,7 @@ dependencies { api('com.maxmind.geoip2:geoip2:2.16.1') // geoip2 dependencies: api("com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}") - api("com.fasterxml.jackson.core:jackson-databind:${versions.jackson}") + api("com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}") api('com.maxmind.db:maxmind-db:2.0.0') testImplementation 'org.elasticsearch:geolite2-databases:20191119' diff --git a/modules/ingest-geoip/licenses/jackson-databind-2.13.2.2.jar.sha1 b/modules/ingest-geoip/licenses/jackson-databind-2.13.2.2.jar.sha1 new file mode 100644 index 0000000000000..9d9266300feef --- /dev/null +++ b/modules/ingest-geoip/licenses/jackson-databind-2.13.2.2.jar.sha1 
@@ -0,0 +1 @@ +ffeb635597d093509f33e1e94274d14be610f933 \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/jackson-databind-2.13.2.jar.sha1 b/modules/ingest-geoip/licenses/jackson-databind-2.13.2.jar.sha1 deleted file mode 100644 index 5d356f3fd045f..0000000000000 --- a/modules/ingest-geoip/licenses/jackson-databind-2.13.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -926e48c451166a291f1ce6c6276d9abbefa7c00f \ No newline at end of file diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle index 7998e0861c7b1..0e096958538a4 100644 --- a/plugins/discovery-ec2/build.gradle +++ b/plugins/discovery-ec2/build.gradle @@ -50,7 +50,7 @@ dependencies { api "commons-logging:commons-logging:${versions.commonslogging}" api "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}" api "commons-codec:commons-codec:${versions.commonscodec}" - api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}" + api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" api "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" } diff --git a/plugins/discovery-ec2/licenses/jackson-databind-2.13.2.2.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-databind-2.13.2.2.jar.sha1 new file mode 100644 index 0000000000000..9d9266300feef --- /dev/null +++ b/plugins/discovery-ec2/licenses/jackson-databind-2.13.2.2.jar.sha1 @@ -0,0 +1 @@ +ffeb635597d093509f33e1e94274d14be610f933 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-databind-2.13.2.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-databind-2.13.2.jar.sha1 deleted file mode 100644 index 5d356f3fd045f..0000000000000 --- a/plugins/discovery-ec2/licenses/jackson-databind-2.13.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -926e48c451166a291f1ce6c6276d9abbefa7c00f \ No newline at end of file diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 60fb99f459454..a491e766eb7c7 100644 --- 
a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -62,7 +62,7 @@ dependencies { api 'io.projectreactor.netty:reactor-netty-http:1.0.16' api "org.slf4j:slf4j-api:${versions.slf4j}" api "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" - api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}" + api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" api "com.fasterxml.jackson.datatype:jackson-datatype-jsr310:${versions.jackson}" api "com.fasterxml.jackson.dataformat:jackson-dataformat-xml:${versions.jackson}" api "com.fasterxml.jackson.module:jackson-module-jaxb-annotations:${versions.jackson}" diff --git a/plugins/repository-azure/licenses/jackson-databind-2.13.2.2.jar.sha1 b/plugins/repository-azure/licenses/jackson-databind-2.13.2.2.jar.sha1 new file mode 100644 index 0000000000000..9d9266300feef --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-databind-2.13.2.2.jar.sha1 @@ -0,0 +1 @@ +ffeb635597d093509f33e1e94274d14be610f933 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-databind-2.13.2.jar.sha1 b/plugins/repository-azure/licenses/jackson-databind-2.13.2.jar.sha1 deleted file mode 100644 index 5d356f3fd045f..0000000000000 --- a/plugins/repository-azure/licenses/jackson-databind-2.13.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -926e48c451166a291f1ce6c6276d9abbefa7c00f \ No newline at end of file diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 19f58bf48366d..d17a4060b9ab6 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -64,7 +64,7 @@ dependencies { api 'org.apache.htrace:htrace-core4:4.2.0-incubating' api "org.apache.logging.log4j:log4j-core:${versions.log4j}" api 'org.apache.avro:avro:1.10.2' - api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}" + api 
"com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" api 'com.google.code.gson:gson:2.9.0' runtimeOnly 'com.google.guava:guava:30.1.1-jre' api 'com.google.protobuf:protobuf-java:3.19.3' diff --git a/plugins/repository-hdfs/licenses/jackson-databind-2.13.2.2.jar.sha1 b/plugins/repository-hdfs/licenses/jackson-databind-2.13.2.2.jar.sha1 new file mode 100644 index 0000000000000..9d9266300feef --- /dev/null +++ b/plugins/repository-hdfs/licenses/jackson-databind-2.13.2.2.jar.sha1 @@ -0,0 +1 @@ +ffeb635597d093509f33e1e94274d14be610f933 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/jackson-databind-2.13.2.jar.sha1 b/plugins/repository-hdfs/licenses/jackson-databind-2.13.2.jar.sha1 deleted file mode 100644 index 5d356f3fd045f..0000000000000 --- a/plugins/repository-hdfs/licenses/jackson-databind-2.13.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -926e48c451166a291f1ce6c6276d9abbefa7c00f \ No newline at end of file diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index c5939958c816a..072683e3bd5e5 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -58,7 +58,7 @@ dependencies { api "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}" api "commons-codec:commons-codec:${versions.commonscodec}" api "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" - api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}" + api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" api "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" api "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${versions.jackson}" api "joda-time:joda-time:${versions.joda}" diff --git a/plugins/repository-s3/licenses/jackson-databind-2.13.2.2.jar.sha1 b/plugins/repository-s3/licenses/jackson-databind-2.13.2.2.jar.sha1 new file mode 100644 index 0000000000000..9d9266300feef --- /dev/null +++ 
b/plugins/repository-s3/licenses/jackson-databind-2.13.2.2.jar.sha1 @@ -0,0 +1 @@ +ffeb635597d093509f33e1e94274d14be610f933 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jackson-databind-2.13.2.jar.sha1 b/plugins/repository-s3/licenses/jackson-databind-2.13.2.jar.sha1 deleted file mode 100644 index 5d356f3fd045f..0000000000000 --- a/plugins/repository-s3/licenses/jackson-databind-2.13.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -926e48c451166a291f1ce6c6276d9abbefa7c00f \ No newline at end of file diff --git a/qa/os/build.gradle b/qa/os/build.gradle index 038e3d16745c3..92c5e4f154ad8 100644 --- a/qa/os/build.gradle +++ b/qa/os/build.gradle @@ -50,7 +50,7 @@ dependencies { testImplementation "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" testImplementation "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" - testImplementation "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}" + testImplementation "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" } tasks.named('forbiddenApisTest').configure { diff --git a/qa/wildfly/build.gradle b/qa/wildfly/build.gradle index 7cb08a9de6f08..b7a5089451672 100644 --- a/qa/wildfly/build.gradle +++ b/qa/wildfly/build.gradle @@ -50,7 +50,7 @@ dependencies { } api "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" api "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" - api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}" + api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" api "com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:${versions.jackson}" api "com.fasterxml.jackson.jaxrs:jackson-jaxrs-base:${versions.jackson}" api "com.fasterxml.jackson.module:jackson-module-jaxb-annotations:${versions.jackson}" diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index d1040acd03aa7..c56cc6d196b63 100644 --- 
a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -41,6 +41,6 @@ dependencies { api 'com.google.code.gson:gson:2.9.0' api "org.bouncycastle:bcpkix-jdk15on:${versions.bouncycastle}" api "com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:${versions.jackson}" - api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}" + api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" api 'net.minidev:json-smart:2.4.8' } From bcaa06bc0ffe8d350e80b0da53af5bf2ba9f99d0 Mon Sep 17 00:00:00 2001 From: Suraj Singh <79435743+dreamer-89@users.noreply.github.com> Date: Tue, 29 Mar 2022 09:27:28 -0700 Subject: [PATCH 020/653] Add mapping method back referenced in other repos (#2636) Signed-off-by: Suraj Singh --- .../admin/indices/create/CreateIndexRequest.java | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java index 7f1f516d13a04..26ff4f1da3ba4 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java @@ -245,14 +245,22 @@ public CreateIndexRequest mapping(String mapping) { return this; } + /** + * Adds mapping that will be added when the index gets created. + * + * @param source The mapping source + * @param xContentType The content type of the source + */ + public CreateIndexRequest mapping(String source, XContentType xContentType) { + return mapping(new BytesArray(source), xContentType); + } + /** * Adds mapping that will be added when the index gets created. 
* * @param source The mapping source * @param xContentType the content type of the mapping source - * @deprecated types are being removed */ - @Deprecated private CreateIndexRequest mapping(BytesReference source, XContentType xContentType) { Objects.requireNonNull(xContentType); Map mappingAsMap = XContentHelper.convertToMap(source, false, xContentType).v2(); From fb5cebbb9b47b35944432d5e15210f0327d32111 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 29 Mar 2022 10:46:13 -0700 Subject: [PATCH 021/653] Bump reactor-netty from 1.0.16 to 1.0.17 in /plugins/repository-azure (#2613) * Bump reactor-netty from 1.0.16 to 1.0.17 in /plugins/repository-azure Bumps [reactor-netty](https://github.com/reactor/reactor-netty) from 1.0.16 to 1.0.17. - [Release notes](https://github.com/reactor/reactor-netty/releases) - [Commits](https://github.com/reactor/reactor-netty/compare/v1.0.16...v1.0.17) --- updated-dependencies: - dependency-name: io.projectreactor.netty:reactor-netty dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- plugins/repository-azure/build.gradle | 2 +- plugins/repository-azure/licenses/reactor-netty-1.0.16.jar.sha1 | 1 - plugins/repository-azure/licenses/reactor-netty-1.0.17.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-azure/licenses/reactor-netty-1.0.16.jar.sha1 create mode 100644 plugins/repository-azure/licenses/reactor-netty-1.0.17.jar.sha1 diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index a491e766eb7c7..628b5f7c58c04 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -57,7 +57,7 @@ dependencies { api 'com.azure:azure-storage-blob:12.14.4' api 'org.reactivestreams:reactive-streams:1.0.3' api 'io.projectreactor:reactor-core:3.4.15' - api 'io.projectreactor.netty:reactor-netty:1.0.16' + api 'io.projectreactor.netty:reactor-netty:1.0.17' api 'io.projectreactor.netty:reactor-netty-core:1.0.16' api 'io.projectreactor.netty:reactor-netty-http:1.0.16' api "org.slf4j:slf4j-api:${versions.slf4j}" diff --git a/plugins/repository-azure/licenses/reactor-netty-1.0.16.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-1.0.16.jar.sha1 deleted file mode 100644 index 582380e449a1d..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-1.0.16.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d90829f6127966b0c35c4a3e8e23ca9ed29cd8a5 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-1.0.17.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-1.0.17.jar.sha1 new file mode 100644 index 0000000000000..a1f6aa3686692 --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-1.0.17.jar.sha1 @@ -0,0 +1 @@ +7720beb4f58a4379e6294d62766d2e9e1bfaf646 \ No newline at end of file From 
ec4fe7066b80b684c719630bf9609bb4bedd5a90 Mon Sep 17 00:00:00 2001 From: Mohit Godwani <81609427+mgodwan@users.noreply.github.com> Date: Tue, 29 Mar 2022 23:30:12 +0530 Subject: [PATCH 022/653] Make discovered_master field optional on the client to support compatibility for opensearch client with odfe (#2641) Signed-off-by: Mohit Godwani --- .../cluster/health/ClusterHealthResponse.java | 4 ++-- .../health/ClusterHealthResponsesTests.java | 23 ++++++++++++++++++- 2 files changed, 24 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java index d9094e307fff1..841231c971eaa 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java @@ -90,7 +90,7 @@ public class ClusterHealthResponse extends ActionResponse implements StatusToXCo // ClusterStateHealth fields int numberOfNodes = (int) parsedObjects[i++]; int numberOfDataNodes = (int) parsedObjects[i++]; - boolean hasDiscoveredMaster = (boolean) parsedObjects[i++]; + boolean hasDiscoveredMaster = Boolean.TRUE.equals(parsedObjects[i++]); int activeShards = (int) parsedObjects[i++]; int relocatingShards = (int) parsedObjects[i++]; int activePrimaryShards = (int) parsedObjects[i++]; @@ -151,7 +151,7 @@ public class ClusterHealthResponse extends ActionResponse implements StatusToXCo // ClusterStateHealth fields PARSER.declareInt(constructorArg(), new ParseField(NUMBER_OF_NODES)); PARSER.declareInt(constructorArg(), new ParseField(NUMBER_OF_DATA_NODES)); - PARSER.declareBoolean(constructorArg(), new ParseField(DISCOVERED_MASTER)); + PARSER.declareBoolean(optionalConstructorArg(), new ParseField(DISCOVERED_MASTER)); PARSER.declareInt(constructorArg(), new ParseField(ACTIVE_SHARDS)); PARSER.declareInt(constructorArg(), 
new ParseField(RELOCATING_SHARDS)); PARSER.declareInt(constructorArg(), new ParseField(ACTIVE_PRIMARY_SHARDS)); diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponsesTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponsesTests.java index decad9d6f840e..5af15396dbefa 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponsesTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponsesTests.java @@ -228,7 +228,7 @@ public void testParseFromXContentWithDiscoveredMasterField() throws IOException NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, "{\"cluster_name\":\"535799904437:7-1-3-node\",\"status\":\"green\"," - + "\"timed_out\":false,\"number_of_nodes\":6,\"number_of_data_nodes\":3,\"discovered_master\":false," + + "\"timed_out\":false,\"number_of_nodes\":6,\"number_of_data_nodes\":3,\"discovered_master\":true," + "\"active_primary_shards\":4,\"active_shards\":5,\"relocating_shards\":0,\"initializing_shards\":0," + "\"unassigned_shards\":0,\"delayed_unassigned_shards\":0,\"number_of_pending_tasks\":0," + "\"number_of_in_flight_fetch\":0,\"task_max_waiting_in_queue_millis\":0," @@ -236,6 +236,27 @@ public void testParseFromXContentWithDiscoveredMasterField() throws IOException ) ) { + ClusterHealthResponse clusterHealth = ClusterHealthResponse.fromXContent(parser); + assertNotNull(clusterHealth); + assertThat(clusterHealth.getClusterName(), Matchers.equalTo("535799904437:7-1-3-node")); + assertThat(clusterHealth.getNumberOfNodes(), Matchers.equalTo(6)); + assertThat(clusterHealth.hasDiscoveredMaster(), Matchers.equalTo(true)); + } + } + + public void testParseFromXContentWithoutDiscoveredMasterField() throws IOException { + try ( + XContentParser parser = JsonXContent.jsonXContent.createParser( + NamedXContentRegistry.EMPTY, + 
DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + "{\"cluster_name\":\"535799904437:7-1-3-node\",\"status\":\"green\"," + + "\"timed_out\":false,\"number_of_nodes\":6,\"number_of_data_nodes\":3," + + "\"active_primary_shards\":4,\"active_shards\":5,\"relocating_shards\":0,\"initializing_shards\":0," + + "\"unassigned_shards\":0,\"delayed_unassigned_shards\":0,\"number_of_pending_tasks\":0," + + "\"number_of_in_flight_fetch\":0,\"task_max_waiting_in_queue_millis\":0," + + "\"active_shards_percent_as_number\":100}" + ) + ) { ClusterHealthResponse clusterHealth = ClusterHealthResponse.fromXContent(parser); assertNotNull(clusterHealth); assertThat(clusterHealth.getClusterName(), Matchers.equalTo("535799904437:7-1-3-node")); From 8ea246e70ee53b0bbd668435e63b7c8061576bce Mon Sep 17 00:00:00 2001 From: Owais Kazi Date: Tue, 29 Mar 2022 18:26:04 -0700 Subject: [PATCH 023/653] Changed JAVA_HOME to jdk-17 (#2656) Signed-off-by: Owais Kazi --- jenkins/jenkinsfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/jenkins/jenkinsfile b/jenkins/jenkinsfile index 113cb27c4a610..96973fceea765 100644 --- a/jenkins/jenkinsfile +++ b/jenkins/jenkinsfile @@ -3,7 +3,7 @@ pipeline { docker { label 'AL2-X64' /* See - https://github.com/opensearch-project/opensearch-build/blob/main/docker/ci/dockerfiles/build.ubuntu18.opensearch.x64.dockerfile + https://hub.docker.com/layers/ci-runner/opensearchstaging/ci-runner/ci-runner-ubuntu1804-build-v1/images/sha256-2c7bb2780bc08cd4e7e3c382ac53db414754dabd52f9b70e1c7e344dfb9a0e5e?context=explore for docker image */ image 'opensearchstaging/ci-runner:ci-runner-ubuntu1804-build-v1' @@ -16,7 +16,7 @@ pipeline { JAVA14_HOME="/opt/java/openjdk-14" JAVA17_HOME="/opt/java/openjdk-17" JAVA8_HOME="/opt/java/openjdk-8" - JAVA_HOME="/opt/java/openjdk-14" + JAVA_HOME="/opt/java/openjdk-17" } stages { From 65cc56e754e4a854963663ead5d06ea6e975d1eb Mon Sep 17 00:00:00 2001 From: Kunal Kotwani Date: Tue, 29 Mar 2022 20:09:32 -0700 Subject: 
[PATCH 024/653] Gradle check retry (#2638) * Add retry plugin support for Test implementations Signed-off-by: Kunal Kotwani * Update test retry parameters Signed-off-by: Kunal Kotwani * Remove CI environment check for test retries Signed-off-by: Kunal Kotwani * Update retry count for tests Signed-off-by: Kunal Kotwani --- build.gradle | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/build.gradle b/build.gradle index be5766f327e0d..bfa435cb4812c 100644 --- a/build.gradle +++ b/build.gradle @@ -49,6 +49,7 @@ plugins { id 'opensearch.docker-support' id 'opensearch.global-build-info' id "com.diffplug.spotless" version "6.3.0" apply false + id "org.gradle.test-retry" version "1.3.1" apply false } apply from: 'gradle/build-complete.gradle' @@ -232,7 +233,7 @@ allprojects { tasks.withType(JavaCompile).configureEach { JavaCompile compile -> // See please https://bugs.openjdk.java.net/browse/JDK-8209058 if (BuildParams.runtimeJavaVersion > JavaVersion.VERSION_11) { - compile.options.compilerArgs << '-Werror' + compile.options.compilerArgs << '-Werror' } compile.options.compilerArgs << '-Xlint:auxiliaryclass' compile.options.compilerArgs << '-Xlint:cast' @@ -386,6 +387,18 @@ gradle.projectsEvaluated { } } +// test retry configuration +subprojects { + apply plugin: "org.gradle.test-retry" + tasks.withType(Test).configureEach { + retry { + failOnPassedAfterRetry = false + maxRetries = 3 + maxFailures = 10 + } + } +} + // eclipse configuration allprojects { apply plugin: 'eclipse' @@ -445,9 +458,9 @@ allprojects { tasks.named('eclipse') { dependsOn 'cleanEclipse', 'copyEclipseSettings' } afterEvaluate { - tasks.findByName("eclipseJdt")?.configure { - dependsOn 'copyEclipseSettings' - } + tasks.findByName("eclipseJdt")?.configure { + dependsOn 'copyEclipseSettings' + } } } From b5b0cd1b3aaf2b20dc3f357cae10600c04f8d4bf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 30 Mar 2022 
13:52:54 -0400 Subject: [PATCH 025/653] Bump jboss-annotations-api_1.2_spec in /qa/wildfly (#2615) Bumps [jboss-annotations-api_1.2_spec](https://github.com/jboss/jboss-annotations-api_spec) from 1.0.0.Final to 1.0.2.Final. - [Release notes](https://github.com/jboss/jboss-annotations-api_spec/releases) - [Commits](https://github.com/jboss/jboss-annotations-api_spec/compare/jboss-annotations-api_1.1_spec-1.0.0.Final...jboss-annotations-api_1.2_spec-1.0.2.Final) --- updated-dependencies: - dependency-name: org.jboss.spec.javax.annotation:jboss-annotations-api_1.2_spec dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- qa/wildfly/build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/qa/wildfly/build.gradle b/qa/wildfly/build.gradle index b7a5089451672..0e1c566bd2b52 100644 --- a/qa/wildfly/build.gradle +++ b/qa/wildfly/build.gradle @@ -40,7 +40,7 @@ testFixtures.useFixture() dependencies { providedCompile 'javax.enterprise:cdi-api:1.2' - providedCompile 'org.jboss.spec.javax.annotation:jboss-annotations-api_1.2_spec:1.0.0.Final' + providedCompile 'org.jboss.spec.javax.annotation:jboss-annotations-api_1.2_spec:1.0.2.Final' providedCompile 'org.jboss.spec.javax.ws.rs:jboss-jaxrs-api_2.0_spec:1.0.0.Final' api('org.jboss.resteasy:resteasy-jackson2-provider:3.0.19.Final') { exclude module: 'jackson-annotations' From e051a426d2d0b7e986bd81feb82ec7cb1baf674f Mon Sep 17 00:00:00 2001 From: Suraj Singh <79435743+dreamer-89@users.noreply.github.com> Date: Wed, 30 Mar 2022 21:33:17 -0700 Subject: [PATCH 026/653] Add 1.3.2 to main causing gradle check failures (#2679) Signed-off-by: Suraj Singh --- .ci/bwcVersions | 1 + server/src/main/java/org/opensearch/Version.java | 1 + 2 files changed, 2 insertions(+) diff --git a/.ci/bwcVersions b/.ci/bwcVersions index ddc36af48d674..de840b910ada2 100644 --- 
a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -38,4 +38,5 @@ BWC_VERSION: - "1.2.5" - "1.3.0" - "1.3.1" + - "1.3.2" - "1.4.0" diff --git a/server/src/main/java/org/opensearch/Version.java b/server/src/main/java/org/opensearch/Version.java index f74e529c442bb..eb6a80d37d83d 100644 --- a/server/src/main/java/org/opensearch/Version.java +++ b/server/src/main/java/org/opensearch/Version.java @@ -80,6 +80,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_1_2_5 = new Version(1020599, org.apache.lucene.util.Version.LUCENE_8_10_1); public static final Version V_1_3_0 = new Version(1030099, org.apache.lucene.util.Version.LUCENE_8_10_1); public static final Version V_1_3_1 = new Version(1030199, org.apache.lucene.util.Version.LUCENE_8_10_1); + public static final Version V_1_3_2 = new Version(1030299, org.apache.lucene.util.Version.LUCENE_8_10_1); public static final Version V_1_4_0 = new Version(1040099, org.apache.lucene.util.Version.LUCENE_8_10_1); public static final Version V_2_0_0 = new Version(2000099, org.apache.lucene.util.Version.LUCENE_9_1_0); public static final Version CURRENT = V_2_0_0; From ff7805e6cadcd6879abb9f273763c9825aa058ac Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Thu, 31 Mar 2022 17:41:12 -0400 Subject: [PATCH 027/653] Update to Gradle 7.4.2 (#2688) Signed-off-by: Andriy Redko --- gradle/wrapper/gradle-wrapper.jar | Bin 59821 -> 59536 bytes gradle/wrapper/gradle-wrapper.properties | 4 ++-- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar index 41d9927a4d4fb3f96a785543079b8df6723c946b..7454180f2ae8848c63b8b4dea2cb829da983f2fa 100644 GIT binary patch delta 8722 zcmY*;Wn2_c*XJ;R(j_4+E#1=H-QC^YIm8gsFf@+D&?(ZAlF}t5odeR+{krb6yU*TF z|2X&D{M`@d*32TNOe20l5=0ho#^2I~pbD~q^aFzN{Rm#3zYeiL5N6aRiR|+XoxRvM znZSLLlAJDh@2J2?#n2A?qar%tzN-5NQO zL&|F{nGiQyzNJ+bM$Y`n=Lx^3wTG^o2bGB@cwr1eb+6c-1tN=U+Db;bc~eJ!hwM{SbI=#g?$!PjDB+) 
zPgU_2EIxocr*EOJG52-~!gml&|D|C2OQ3Y(zAhL}iae4-Ut0F*!z!VEdfw8#`LAi# zhJ_EM*~;S|FMV6y%-SduHjPOI3cFM(GpH|HES<}*=vqY+64%dJYc|k?n6Br7)D#~# zEqO(xepfaf2F{>{E2`xb=AO%A<7RtUq6kU_Iu0m?@0K(+<}u3gVw5fy=Y4CC*{IE3 zLP3YBJ7x+U(os5=&NT%gKi23bbaZ`@;%ln)wp4GpDUT$J8NtFDHJzIe_-t}{!HAsh zJ4<^WovY};)9IKAskSebdQiXv$y5}THuJZ}ouoElIZRui=6lrupV|_Jz=9^&;@HwL;J#@23k?A;k`0Bgf;ioO>W`IQ+4? z7A)eKoY4%+g%=w;=Vm8}H>@U*=*AWNtPqgWRqib#5RTGA@Q=43FrQn3J`GkTUV5yp0U`EOTqjfp+-9;0F8!dMEwwcK%(6`8sDD^aR04 zd6O5vh|Xk?&3dy4f|1QK&Ulf{h6Iq;d-&*ti#Ck>wZFG;GHwc?b;X~eBITx49>2d8 z4HcK&1&DvEGT6kXdzAm4oO8%c}8OBt~8H956_;YP-ss*uMf==a+%w~F>Qkm7r)IAuxuoX}h92$gHqbFUun#8m zWHdy`Zrm#=Pa98x8cO0vd@Tgkr*lm0{dky+Gocr0P8y%HGEI#c3qLqIRc`Oq_C%*; zG+QTr(#Q|yHKv6R@!DmLlwJQ3FAB)Yor-I4zyDyqM4yp5n2TrQH>gRt*Zw0+WI-Sj`EgmYHh=t9! zF6lz^xpqGGpo6!5`sc0a^FVhy_Uxq|@~(1@IIzV)nTpY9sY`CV!?8e&bB8=M&sYEb z2i}fvKdhp9Hs68Y-!QJ<=wE(iQ5+49tqt;Rh|jhYrI5VW-mIz|UY{h8E=rC5sh#DU z?wGgk-Tn!I?+Zer7pHlF_Z^!Kd1qkS3&lv#%s6-<5Y%jQL${cge5=G5Ab?D&|9$Y~ zf%rJC2+=2vg;y0-SJb3<@3%}BO$T$C66q$L_H33a`VUbgW~N(4B=v5(<=My|#|J7q z*Ox4wL4kbJd_~EjLTABSu4U7Jk#`y(6O*U6(k6XxM}CtGZB(H@3~kh*zaGRXM}Iwp zQ%xFk2>@wiZrVCV_G4G~v;NebCQ%T7{SDyPpSv&dT@Cn)Mx@IK*IdNrj{*4pkV4wv z)y0J538h>cpB7iPSzA~x24T`{dzNkpvGIqvt1Dvdq@o-`B=$hkczX8$yFMhsWNK-X zxr$kR$tMD0@W)Vxe1^t9qVmsg&K^F@u84)(n2dttIEAZFN6VD$&tskpG%SI7whGL3 z)DeRiwe&?8m7U{G`oW8!SCi*dM>oYL%UKQnKxV_0RXAEBQg1kStExGEUVwLJ0orGGwb7uv+kPDl7_E2*iD|J*=8A@;XCvwq0aw5oJYN*Yh&o=l} z2z8YKb-fIAH5spql4eXqp*)o2*b>#1@DSt?zZi{GPj0gH&Nm+EI<3^z0w%YTEV4xw zI6$+=Faa|Y4o5i0zm5lOg|&tmnJ806DBovU@Ll6XsA;NRrTK~t*AAJIAS=v-UZ%Pr z$oddI@NRir&erzCwq|)ciJemr-E061j{0Vc@Ys7K(mW|JYj*$+i1Q8XlIK8T?TYS(AXu$`2U zQ@fHxc=AVHl_}cRZQ)w0anMEoqRKKIvS^`<-aMf*FM`NsG&Uowneo+Ji$7DUDYc7*Hjg;-&aHM%3 zXO6cz$$G};Uqh+iY7Wpme>PHG4cu(q;xyskNLs$^uRRMfEg?8Cj~aE-ajM%CXkx0F z>C?g3tIA#9sBQOpe`J+04{q7^TqhFk^F1jFtk4JDRO*`d-fx`GYHb=&(JiaM1b?Y^ zO3Kj3sj76ieol|N$;>j@t#tKj=@*gP+mv}KwlTcPYgR$+)2(gk)2JNE=jSauPq!$< z<|?Sb%W)wS)b>b6i{8!x!^!xIdU3{CJFVnTcw0j{M%DUCF=_>eYYEUWnA-|B(+KYL 
z_W_`JI&&u^@t0})@DH^1LDuT0s3dMpCHIbYBgOT4Zh_4yHbSqRbtIKndeT4Q*Jg91 z@>rO!^t-G~*AIW;FQ$3J=b;oGg8?CTa~qNCb>&cgp@e;?0AqA&paz~(%PYO+QBo4( zp?}ZdSMWx0iJm7HVNk9A#^9Osa#GPJ!_pYEW}($8>&2}fbr@&ygZ?${A7_9?X$(&5 z#~-hxdPQwCNEpf=^+WH-3`2LxrrBMTa}~qJC9S;VzhG!On^JLyW6WkF{8aAE$sM+( zxr8xLW(KIjI`Rm(24r3OJBk<3GF=G!uSP0-G&AY32mLm8q=#Xom&Pqv=1C{d3>1^ zAjsmV@XZ%BKq^eUfBpa8KvO8ob|F3hAjJv*yo2Bhl0)KUus{qA9m8jf)KnOGGTa6~4>3@J_VzkL|vYPl*uL+Ot*Q7W!f5rJw5+AsjP_IfL+-S*2p| zB7!FhjvkUTxQkGWGSg{X;h~dK>gAJivW?88Nu!3o>ySDaABn$rAYt086#27fbjPQS zhq>55ASvm*60qRdVOY9=bU^+{Pi#!OaZwENN;zy5?EztOHK-Q5;rCuiFl}BSc1YaQ zC-S{=KsGDz@Ji9O5W;XxE0xI|@3o6(2~i4b8Ii9VT;^G$*dRw(V?=br)D&q^XkeBX z+gl~+R@rVD-Hwv@7RHV?Bip5KMI)aV^&snt?H<$Nt=OPx#VxF&BGi?2A2+lNOYywNUGMeGL;|(=UjGDtLG0sN&LpGx;|U;xa13s z;W_|SPk^G}!M9_^pO zA3bt3-tca%^42sHeDtfcC0S3w3H1ny!Bxpa=*k?XRPpx9Bb-gx1J9Yvx)4J(8cG+q z(iCPZ9dsf3#QVyZgD_MW#G#qgV)olu$59&3(PzQfw@%4uZ~<5J=ABvdY43(Qnp{;G zHg3>@T#>DbTuhFl3)fb3TFqdh)V2aq7!;&JOHseTWukvA7}(iGUq;v-{2J0iHSNHq z;+)h!p6Ok^+Sp8-jgL($n6Qu47xyE`cFO5SdZR6;R!FET`tm#0D37z339Suxjpv+s z*=%2-N$N?X&0?x_uut3erF@aBGj;9$k9?3FlbDO{RQa1_qtxrh4!4#fjp4x~akvdTp@ zos?^Q&XE;3N93s4rHQGPrV7+au1$$aB6$hLy*Yz_kN$~dweb9PcB!eYVQTGjFuJP> zZCEwBtb>TIgIO^qAzq@Bv-qud_ZD-2W<_at&ml-gv`tPt$@DF5`HlA zM>DmmMkpv&Zm-8)Y#0bLQf4MpD4_-7M8eu6rh(tL8dq8onHs#R9J~dGd2IaXXMC~h z91pKhnQa%Fsn29nAA1;x(%oC zhca~qQDJaMf?wFrl-Pj;e$bZMYmMF!Y3Lv&Sb?Sjn#!NVx&NDyc^$b4uYyo2OmERa zRz;yDGd@JTykzFLe|Wk-y7#3x`6$wt$zR8r48mdUvfbeL+4D|Z``~7$PrE@qc7rZe zVsIoIbCwzjLZ@_M1*bD{HaYn();Z1-q*-I{tEnTZ(}Zmk&%MXSNBX>o| z-u*RNkAyKC-Srp7c-=@5f)xMWg>o2WWl}j6j9=8+D8;T z>0*0q#;qw8%U8i;6s0fu#I*%(g*@@a2Er@@nyI}{=@W{Z-;`=wN4N~>6Xrh&z#g}l zN1g5}0-#(nHUTv_rl2{yUZ;h#t&Fd?tY!7L%ClY)>uH-Ny2ET$lW$S)IQiN79H)D^ zb&0AXYkupy0~w8)*>Sj_p9}4L?lGTq%VG|2p`nWGhnM^!g|j-|O{%9Q%swOq63|*W zw$(N_laI}`ilB+o!a-wl?er~;;3+)$_akSQ!8YO_&-e*SI7n^(QQ;X0ZE`{4f!gAl z5$d+9CKVNonM!NO_frREICIAxOv)wm>}-k?iRisM`R7;=lyo|E_YR~FpS&PS`Lg0f zl-ON<0S%Uix8J%#yZdkCz4YNhcec<|7*P(JsM#>-L>+tYg_71q9~70FAc^6KW5jql 
zw!crdgVLH1G_eET=|SEc977;)ezVC|{PJZfra|}@rD;0s&@61mTEBJtILllg{%{vN zfhb&lq0yChaLhnJ-Qb62MB7`>M;|_ceHKZAeeh@#8tbrK!ArP6oXIhMK;dhEJTY`@ z0Tq>MIe0`7tGv)N*F0IGYSJv0vN?Az8g+4K9S!pW2~9F4W(_U_T=jCZrzuZ3*|__T zONp_UWmyePv8C~rckc?Xji;Z5OEqg zC*Um)i;Wh4TEwqReQdVVbUKT^2>Tpi6z_^-uF*adUFug4i@JhzpWT^Sk&E>CyP2?H zWf6x}ehuTs6wvzCnTU&gYzT029Nz19(In1WC z`(1IGmi!O%2AR|BjQa4Q0~u)kM%}?xQyjWuQ16^Gp++;`vr7!k--UZWM*~7Zl|ceO@I3`OpaRhD;YoCuo5IC0uHx>9 z478hu@H|e0Zlo)Zj@01#;8BDs@991xe~^9uG2}UXLM(m7fa}AMwX*tjioBeV&Q8Gx zSq$6wZFkRBK`cMI>R(@W@+lo2t)L+4q-negWRLWZBz*|%=W4v62JrmzNuOtA*x)QE z5L%=OH#@KMdB%Jp^r?0tE}5-*6oP`-lO7Sf)0)n*e<{HA=&qhLR)oD8-+V}Z4=md) z+k9lKf64DB2hAT)UaCP~di?-V3~JBH7itYyk~L6hrnxM%?RKntqd`=!b|e7eFnAcu z3*V;g{xr7TSTm$}DY%~SMpl>m{Sj!We+WfxSEor?YeiAxYUy25pn(?T()E>ByP^c@ zipwvWrhIK((R((VU+;@LmOnDu)ZXB3YArzzin!Z^0;PyJWnlfflo|q8(QY;o1*5CO z##hnkO{uynTMdk`~DOC#1 zdiYxQoy}=@7(ke#A8$YZZVtk4wo$8x28&I;cY3Ro-|kW=*yiiHgCLZeAr)UtVx>Tu z|LvL0hq|1-jC0I4x#>&QZCfrVB=zT!nR|~Uz`9%~2 znl{uZ{VEszW`Fad^q_HB!K9*|U-stK%?~;g?&&+12A}Rq$z($Bzuk^2X(Y=hF?-dQ ztc3DsQKI;qhWIV`99Q#R3xnU0AvY!i*BECj-z9l74|%O=V@nlv|qqC^r^-~C?E zGW%c|uYgnfJ(gjsTm_cIqcv*mYM{+i+&@F@+69ZQOK&u#v4oxUSQJ=tvqQ3W=*m;| z>SkBi8LYb-qRY7Sthh*0%3XAC%$z1rhOJzuX=PkTOa=DlocZUpE#KxVNH5)_4n=T( zGi3YrH7e~sPNYVBd~Grcq#CF~rN{p9Zza-Ntnwfma@TB)=3g36*0lSZg#ixEjFe%+ zX=&LDZ5zqculZ`=RYc^ln(~;nN|Qh6gN=!6f9-N2h+3NWbIxYud&;4SX*tWf5slk4 z{q@@l71UAZgj~*6edXb57fBUxvAS7s(RI=X868JM0+^DCn2yC>;v%S;qPOjB>YVsz(Zx9a>>BK&M zIQK>7_n)4ud0X5YM}^i*keH{ehLsiy9@NvOpsFeQjdI6anLGvVbBw_*fU1TzdVS$i z*4j7z!I5RF#rSz|8ibi$;qE{4`aqWYik7QB5U&F5C*;TO_x+gtzPGpzNt!7~nsBT7)Ckc(K~%uv&{{6A`mmBJVAk-{s~52Vu|HbCH7_W1~ZCX^RflOakGg=jo2Z z<*s;5-J+2@^LRDZ-7EV&Pq+FTErw@pfFqvx^i%E7Fx#^n(E`m2(c>K-O5`M`Yek9el zzTGs5qD6*G;y#~xu3>qWuO?-amKYtvRA}I9z#UspEeM;wOERYeot_n_EUMJf$4_u?E!6X~?q)tPoZb^_;8Y_Ox2h1m<+Le-fsRd|T8db<8#$bqez zua^Z|>h%zdnuU^ww$#-dZ9NTM`FN+!IlLkz*FqWb!x^Z|C{KyGjZ+>G;;7Mb@LY|H zc+Gp`L((Dw7pnDlHNm&;SfHedhx*kad$I^uGz{`0BYelq0yEUHpNKSkvj$|dpvY3{7*YGyhXA^LP0&wOw9oNoC=QoVx1<2Dne8qqZL 
zm>nFh5DX(-RnQwvHCZQwn^#Z=E!SPVlaRJ78Bo@}!!9dRt^qZy?-*`Pt4WSmgucJv zV1yFkcjlEM^uz-;b#Q7ZCP@Lk)m}uPX={R4B=56k7WNh11BN~0T*vr@!!ow^B0hOR zQ)4)&(e%>bNNL%bm<&8H{*l_L7s0$2GUgX2Vd;=4d9Dm2v3TaL+;L>{K7h7 zV#k?xDPm(NDE31$ z<}|X)pEY6myjK+^gaIMk&Yj2~F0rSKemNqlsVm4c|N7mp_C*L01s;GNx#D-*&gk!qQr}^?_r@q!8fuXw!)fA7xkd} zb>vHvdx~H$5qqAWrow7}+8zBM65-JOt5z za=T6f7MK`XJuQog8kIEboPdhcaVJeHy)5z7EBLK5NRr()E|#K0L0N^JD@pUA^Czb` zbUZ_558y+vqAGeyHCbrvOvLD67Ph}06959VzQ_|>RrXQAqE+AQ(-AaKdxoWaF8hdt z{O3W@b^*o#-f1VuU>YMV03ELF7zkCN4Q&b#prz%3Nne0lSbRo@@ z^ihv%oIl~Qyl6Q;a#$*jOC%x0_;eis*)J7=f@Ct*)xF5 zo}u~@-I}2|$b%5L7>@+Z?4o+1r&v6ceIy+vroK&jCQ<4q&45HP2wCol4hVm3pZtjf zHz1D7oyaSKJ~T{Gx}7ONLA)D5k(%%`WswrDyzX*rn}i}}TB4^y#@mAwPzoC)`?rYv zHgx|trUN#mu*VzUV~8TnJM2Qh*ZM5B{x&y>5An`(M7=Z*Q>TdiH@j*2=moNuOtvpz z+G`@~-`%~+AgPKgke@XiRPgndh@bp*-HRsh;HTtz@-y_uhb%7ylVOTqG0#u?Vn5c5 zEp*XRo|8hcgG^$#{$O9CJ&NE;TrfRpSnLmes&MO{m=N%zc`}gb!eQ7odl$oy1%PI} z#AIxx%oRVy&{O~9xnK4$EY>(eQj}!HKIV$Fz*H=-=Kn)N0D6u`(;iO|VraI4fu_W` z;b5{7;Lyx4za}DU#+U7}=H0dAS#YJJ&g2!P@Htu-AL&w=-)*%P9h2{wR|@?Ff9~)b z^+e_3Hetq7W%ls{!?<6&Y$Z;NNB41pvrv)|MET6AZXFXJeFqbFW5@i5WGzl?bP+~? 
z*&_puH;wKv2)9T_d+P`bLvJFqX#j&xa*-;0nGBbQf0DC>o~=J_Wmtf*2SZQr?{i~X z9-IbRH8{iy?<0v9Ir1?$66+igy|yDQ5J~A9sFX@Pe<*kCY8+MwH?I z`P}zfQ6l^AO8ehZ=l^ZR;R%uu4;BK*=?W9t|0{+-at(MQZ(CtG=EJFNaFMlKCMXu30(gJUqj5+ z`GM|!keqcj;FKTa_qq;{*dHRXAq157hlB@kL#8%yAm2AgfU|*rDKX@FLlp=HL8ddv zAWLCHe@DcDeB2}fl7#=0+#<05c3=VqM*O3bkr@9X4nO|)q0hU;Gye{L8ZN*NH8Id@mP-u;Fmb8YuorjLrW&ndip8CN%_qp982r w1WEnz9^$&s1hkp_3#lPJQ~!HI7WYYjA7>z!`?f%npAh2%rB@vD|Lau$2O)#1n*aa+ delta 8958 zcmY+KWl$VIlZIh&f(Hri?gR<$?iyT!TL`X;1^2~W7YVSq1qtqM!JWlDxLm%}UESUM zndj}Uny%^UnjhVhFb!8V3s(a#fIy>`VW15{5nuy;_V&a5O#0S&!a4dSkUMz_VHu3S zGA@p9Q$T|Sj}tYGWdjH;Mpp8m&yu&YURcrt{K;R|kM~(*{v%QwrBJIUF+K1kX5ZmF zty3i{d`y0;DgE+de>vN@yYqFPe1Ud{!&G*Q?iUc^V=|H%4~2|N zW+DM)W!`b&V2mQ0Y4u_)uB=P@-2`v|Wm{>CxER1P^ z>c}ZPZ)xxdOCDu59{X^~2id7+6l6x)U}C4Em?H~F`uOxS1?}xMxTV|5@}PlN%Cg$( zwY6c}r60=z5ZA1L zTMe;84rLtYvcm?M(H~ZqU;6F7Evo{P7!LGcdwO|qf1w+)MsnvK5^c@Uzj<{ zUoej1>95tuSvDJ|5K6k%&UF*uE6kBn47QJw^yE&#G;u^Z9oYWrK(+oL97hBsUMc_^ z;-lmxebwlB`Er_kXp2$`&o+rPJAN<`WX3ws2K{q@qUp}XTfV{t%KrsZ5vM!Q#4{V& zq>iO$MCiLq#%wXj%`W$_%FRg_WR*quv65TdHhdpV&jlq<=K^K`&!Kl5mA6p4n~p3u zWE{20^hYpn1M}}VmSHBXl1*-)2MP=0_k)EPr#>EoZukiXFDz?Di1I>2@Z^P$pvaF+ zN+qUy63jek2m59;YG)`r^F3-O)0RDIXPhf)XOOdkmu`3SMMSW(g+`Ajt{=h1dt~ks ztrhhP|L4G%5x79N#kwAHh5N){@{fzE7n&%dnisCm65Za<8r_hKvfx4Bg*`%-*-Mvn zFvn~)VP@}1sAyD+B{{8l{EjD10Av&Mz9^Xff*t`lU=q=S#(|>ls520;n3<}X#pyh& z*{CJf7$*&~!9jMnw_D~ikUKJ2+UnXmN6qak{xx%W;BKuXt7@ky!LPI1qk?gDwG@@o zkY+BkIie>{{q==5)kXw(*t#I?__Kwi>`=+s?Gq6X+vtSsaAO&Tf+Bl$vKnzc&%BHM z=loWOQq~n}>l=EL(5&6((ESsQC3^@4jlO5Od{qN#sWV)vqXw}aA>*uvwZopNN(|-T zRTF%5Y_k1R$;(d-)n;hWex{;7b6KgdAVE@&0pd(*qDzBO#YZV%kh%pYt1`hnQ(Fa& zYiDrOTDqk5M7hzp9kI2h!PxNnuJ&xl*zF8sx6!67bA49R1bmUF5bpK&&{eI0U~cH}PM z3aW1$lRb|ItkG5~_eBNu$|I|vYIdAA9a!pVq<+UTx*M}fG`23zxXp&E=FfnY- zEzKj;Cu_s4v>leO7M2-mE(UzKHL4c$c`3dS*19OpLV^4NI*hWWnJQ9lvzP4c;c?do zqrcsKT*i~eIHl0D3r4N{)+RsB6XhrC^;sp2cf_Eq#6*CV;t8v=V!ISe>>9kPgh}NI 
z=1UZutslxcT$Ad;_P^;Oouoa(cs!Ctpvi>%aQ+Zp=1d|h{W9Wmf7JWxa(~<#tSZ?C%wu4_5F!fc!<@PIBeJ)Nr^$bB6!_Gic_7}c3J{QI~Gg5g5jTp9}V6KYgrgaX>pJt}7$!wOht&KO|+z{Iw@YL|@~D zMww}+lG}rm2^peNx>58ME||ZQxFQeVSX8iogHLq_vXb`>RnoEKaTWBF-$JD#Q4BMv zt2(2Qb*x-?ur1Y(NsW8AdtX0#rDB?O(Vs4_xA(u-o!-tBG03OI!pQD+2UytbL5>lG z*(F)KacHqMa4?dxa(Vcrw>IIAeB$3cx#;;5r2X;HE8|}eYdAgCw#tpXNy7C3w1q`9 zGxZ6;@1G%8shz9e+!K2MO*{_RjO}Jo6eL3{TSZ>nY7)Qs`Dhi5><@oh0r)gT7H-?3 zLDsd^@m%JvrS8sta5`QiZNs^*GT}Hiy^zjK2^Ni%`Z|ma)D2 zuyumbvw$M8$haCTI~6M%d4+P)uX%u{Sfg4Al+F7c6;O-*)DKI7E8izSOKB#FcV{M+ zEvY0FBkq!$J0EW$Cxl}3{JwV^ki-T?q6C30Y5e&p@8Rd?$ST-Ghn*-`tB{k54W<>F z5I)TFpUC!E9298=sk>m#FI4sUDy_!8?51FqqW!9LN1(zuDnB3$!pEUjL>N>RNgAG~-9Xm|1lqHseW(%v&6K(DZ3Pano(1-Qe?3%J&>0`~w^Q-p&@ zg@HjvhJk?*hpF7$9P|gkzz`zBz_5Z!C4_-%fCcAgiSilzFQef!@amHDrW!YZS@?7C zs2Y9~>yqO+rkih?kXztzvnB^6W=f52*iyuZPv$c42$WK7>PHb z6%MYIr5D32KPdwL1hJf{_#jn?`k(taW?mwmZVvrr=y~fNcV$`}v(8};o9AjOJumS4 z`889O91^pkF+|@$d9wVoZ3;^j;^sUs&Ubo_qD&MTL%O z&*SE0ujG~zm;?x)8TLC&ft))nyI zcg44@*Q{cYT+qGrA=In_X{NNCD+B0w#;@g)jvBU;_8od6U>;7HIo@F*=g8CQUo(u^ z3r4FJ7#<@)MXO&5+DgKE&^>^`r!loe7CWE*1k0*0wLFzSOV8jvlX~WOQ?$1v zk$Or}!;ix0g78^6W;+<=J>z@CBs!<<)HvF(Ls-&`matpesJ5kkjC)6nGB@b{ii6-Uoho$BT%iJgugTOeZ$5Xo4D7Pd< zC*LJh5V@2#5%aBZCgzlQi3@<_!VfiL07ywc)ZbwKPfcR|ElQoS(8x|a7#IR}7#Io= zwg4$8S{egr-NffD)Fg&X9bJSoM25pF&%hf>(T&9bI}=#dPQyNYz;ZZ7EZ=u1n701SWKkZ9n(-qU ztN`sdWL1uxQ1mKS@x11;O|@^AD9!NeoPx}?EKIr!2>1Qq4gjfGU)tr6?Z5l7JAS3j zZeq{vG{rb%DFE4%$szK}d2UzB{4>L?Tv+NAlE*&Nq6g+XauaSI+N2Y8PJLw+aNg1p zbxr|hI8wcMP&&+(Cu|%+Jq|r>+BHk@{AvfBXKiVldN)@}TBS0LdIpnANCVE26WL-} zV}HJ^?m&$Rkq;Zf*i-hoasnpJVyTH__dbGWrB_R55d*>pTyl6(?$EO@>RCmTX1Hzr zT2)rOng?D4FfZ_C49hjMV*UonG2DlG$^+k=Y%|?Dqae4}JOU=8=fgY4Uh!pa9eEqf zFX&WLPu!jArN*^(>|H>dj~g`ONZhaaD%h_HHrHkk%d~TR_RrX{&eM#P@3x=S^%_6h zh=A)A{id16$zEFq@-D7La;kTuE!oopx^9{uA3y<}9 z^bQ@U<&pJV6kq7LRF47&!UAvgkBx=)KS_X!NY28^gQr27P=gKh0+E>$aCx&^vj2uc}ycsfSEP zedhTgUwPx%?;+dESs!g1z}5q9EC+fol}tAH9#fhZQ?q1GjyIaR@}lGCSpM-014T~l zEwriqt~ftwz=@2tn$xP&-rJt?nn5sy8sJ5Roy;pavj@O+tm}d_qmAlvhG(&k>(arz 
z;e|SiTr+0<&6(-An0*4{7akwUk~Yf4M!!YKj^swp9WOa%al`%R>V7mi z+5+UodFAaPdi4(8_FO&O!Ymb#@yxkuVMrog(7gkj$G@FLA#ENMxG)4f<}S%Fn?Up$+C%{02AgMKa^ z4SFGWp6U>{Q6VRJV}yjxXT*e`1XaX}(dW1F&RNhpTzvCtzuu;LMhMfJ2LBEy?{^GHG!OF!! zDvs64TG)?MX&9NCE#H3(M0K>O>`ca0WT2YR>PTe&tn?~0FV!MRtdb@v?MAUG&Ef7v zW%7>H(;Mm)RJkt18GXv!&np z?RUxOrCfs;m{fBz5MVlq59idhov21di5>WXWD-594L-X5;|@kyWi@N+(jLuh=o+5l zGGTi~)nflP_G}Yg5Pi%pl88U4+^*ihDoMP&zA*^xJE_X*Ah!jODrijCqQ^{=&hD7& z^)qv3;cu?olaT3pc{)Kcy9jA2E8I)#Kn8qO>70SQ5P8YSCN=_+_&)qg)OYBg|-k^d3*@jRAeB?;yd-O1A0wJ z?K*RDm|wE<(PBz~+C%2CTtzCTUohxP2*1kE8Of~{KRAvMrO_}NN&@P7SUO{;zx0iK z@or9R8ydYOFZf(cHASCAatL%;62IL27~SmASr(7F&NMr+#gNw@z1VM z_ALFwo3)SoANEwRerBdRV`>y`t72#aF2ConmWQp(Xy|msN9$yxhZ1jAQ67lq{vbC5 zujj|MlGo`6Bfn0TfKgi(k=gq0`K~W+X(@GzYlPI4g0M;owH3yG14rhK>lG8lS{`!K z+Nc@glT-DGz?Ym?v#Hq|_mEdPAlHH5jZuh*6glq!+>Lk$S%ED2@+ea6CE@&1-9a?s znglt|fmIK}fg<9@XgHe4*q!aO<-;Xj$T?IzB-{&2`#eA6rdtCi80mpP&vw(Uytxu$#YzNI_cB>LS zmim>ys;ir;*Dzbr22ZDxO2s;671&J0U<9(n1yj)J zHFNz=ufPcQVEG+ePjB<5C;=H0{>Mi*xD>hQq8`Vi7TjJ$V04$`h3EZGL|}a07oQdR z?{cR(z+d>arn^AUug&voOzzi$ZqaS)blz-z3zr;10x;oP2)|Cyb^WtN2*wNn`YX!Y z+$Pji<7|!XyMCEw4so}xXLU)p)BA~2fl>y2Tt}o9*BPm?AXA8UE8a;>rOgyCwZBFa zyl42y`bc3}+hiZL_|L_LY29vVerM+BVE@YxK>TGm@dHi@Uw*7AIq?QA9?THL603J% zIBJ4y3n8OFzsOI;NH%DZ!MDwMl<#$)d9eVVeqVl(5ZX$PPbt*p_(_9VSXhaUPa9Qu z7)q4vqYKX7ieVSjOmVEbLj4VYtnDpe*0Y&+>0dS^bJ<8s*eHq3tjRAw^+Mu4W^-E= z4;&namG4G;3pVDyPkUw#0kWEO1;HI6M51(1<0|*pa(I!sj}F^)avrE`ShVMKBz}nE zzKgOPMSEp6M>hJzyTHHcjV%W*;Tdb}1xJjCP#=iQuBk_Eho6yCRVp&e!}4IBJ&?ksVc&u#g3+G$oNlJ?mWfADjeBS-Ph3`DKk-~Z70XugH8sq2eba@4 zIC1H_J$`9b$K`J)sGX3d!&>OmC@@rx1TL~NinQOYy72Q_+^&Mg>Ku(fTgaXdr$p_V z#gav1o{k~c>#)u3r@~6v^o)Lf=C{rAlL@!s457pq)pO;Cojx7U{urO4cvXP|E>+dV zmr2?!-5)tk-&*ap^D^2x7NG6nOop2zNFQ9v8-EZ{WCz-h36C)<^|f{V#R_WE^@(T0+d-at5hXX{U?zak*ac-XnyINo+yBD~~3O1I=a z99|CI>502&s-Qi5bv>^2#cQ%ut<4d7KgQ^kE|=%6#VlGiY8$rdJUH{sra;P~cyb_i zeX(kS%w0C?mjhJl9TZp8RS;N~y3(EXEz13oPhOSE4WaTljGkVXWd~|#)vsG6_76I)Kb z8ro?;{j^lxNsaxE-cfP;g(e;mhh3)&ba}li?woV2#7ByioiD>s%L_D;?#;C#z;a(N 
z-_WY<=SH42m9bFQ>Nb z@4K$@4l8pD7AKxCR>t0%`Qoy9=hA?<<^Vcj8;-E+oBe3ReW1`el8np8E$k{LgFQ}2 z2t8a`wOXFdJ9!5$&mEfD1CnJ)TB+RJih88-Zos9@HZ# zL#{qfbF0ARTXkR@G{lwlOH~nnL)1jcyu!qv2`57S&%oKz0}r{~l9U_UHaJ5!8#nrs z?2FrL`mxnzu&{bweD&62)ilz*?pYIvt`T!XFVVA78})p1YEy7 z8fK#s?b~Yo$n7&_a?EBdXH-_W)Z44?!;DFx6pZ?~RArtBI*Qm4~6nX6Z_T*i$bQPE;Qz?DAPstpGSqr-AJ zo%m9cA`oDDm?&dTaoh_>@F>a?!y4qt_;NGN9Z<%SS;fX-cSu|>+Pba22`CRb#|HZa z;{)yHE>M-pc1C0mrnT~80!u&dvVTYFV8xTQ#g;6{c<9d!FDqU%TK5T6h*w*p980D~ zUyCb`y3{-?(mJFP)0*-Nt;mI$-gc4VQumh|rs&j_^R{sgTPF`1Xja2YWstsKFuQ(d zmZMxV$p$|qQUXchu&8%J(9|)B?`~rIx&)LqDS>ob5%gTeTP#Sbny#y*rnJ&?(l=!( zoV~}LJ1DPLnF8oyM(2ScrQ0{Q4m4-BWnS4wilgCW-~~;}pw=&<+HggRD_3c@3RQIr z9+-%!%}u_{`YS=&>h%kPO3ce}>y!d-zqiniNR-b5r97u;+K6HA2tS>Z#cV{+eFI`* zd8RMGAUtX1KWfPV;q<-5JAykS+2sY$2~UX+4461a(%{P#{rwFPu0xpIuYlbgD{C7C z=U{FUarVTYX6ZUq3wE@G^QT4H2Re;n$Fz9cJ>hABl)9T8pozqbA1)H-%1=WKm^QMu zjnUZ&Pu>q+X&6Co*y#@pxc-4waKMInEPGmE_>3@Ym3S*dedSradmc5mlJn`i0vMW6 zhBnGQD^Z;&S0lnS0curqDO@({J7kTtRE+Ra?nl^HP9<)W&C>~`!258f$XDbyQOQXG zP8hhySnarOpgu8xv8@WlXnm(Uk~)_3$Sg0vTbU3 z{W!5B(L3{Yy3K5PN<@jEarAtja`}@KYva&zFRF*s+_%jIXh$T(S=an8?=Ry3H*NRqWgsM`&!#|@kf1>=4q%bFw7^Rhz!z5I zyI^zU8_R1WN9`88Z=n>pIZQ`Ixr~_9G%Q}@A7rd#*%y7G zXl^Id=^ZL?Rx}}gWXCqzj9C6;x(~mAH|$JteXa1MH<6UQig@!Hf~t}B%tP0I|H&;y zO6N0}svOa1a^PyP9N5?4W6VF%=Bj{qHUgc8@siw4bafT=UPFSoQqKgyUX>sXTBZ=x zOh^Ad!{kOM9v{%5y}`-8u*T&C7Vq6mD%GR}UeU(*epO&qgC-CkD;%=l)ZuinSzHM` z{@`j&_vC6dDe{Yb9k@1zeV_K6!l(@=6ucoI=R^cH=6{i71%4W3$J-?<8Qn#$-DMtA z6Qqi)t?4ifrt%3jSA#6ji#{f(($KBL-iQh-xrC||3U3lq`9>r)>X%oLvtimuHW-)} zy}>9~|M>w4eES`g7;iBM%Se5-OP%1U6gNWp3AZqT8C6OlFFfQ$|7LL;tBV)(qlp4K zruar^K8FnJN3@_}B;G`a~H`t|3+6d>q3#`ctTkE-D^1#d9NalQ04lH*qUW2!V zhk7#z8OwHhSl8w14;KctfO8ubZJ4$dEdpXE78wABz=n5*=q9ex3S}`e7x~~V-jmHOhtX2*n+pBslo3uosdE7xABK=V#-t{1Hd~?i z{i~%Bw6NYF+F$aK$M`r#xe=NxhA5=p%i7!$);sd>Q}#`G?Q~fygrMXmZw?0#5#17W}6Tj+&kFexG{!mYl5FoA99}3G9l;3lVQ^ z48^~gsVppE*x91WheqI(A%F0Z#$#1UJP1R12Mj9r)y(A?a+iquX+d8WD4WAQJ_!oq z9rTISr7bPd(GTP57xm$}C}&kjMivi;zi^Y9g3&X0A;ovdJ?{%_wHgt%%9P&N4H z^XzV(uNA4 
zAP`hgP6BEN5`YXh|DF~6Pud?~gWfhUKoPX4>z|}0aocC&K+AoV%|SX*N!wGq3|y< zg4lP(04XIPmt6}$N!dTk+pZv>u;MTB{L4hp9uXk7>aS!6jqM2lVr%{)H3$O127TSZ z0x9hi0k-P?nWFdQ0K`pykqUIT&jD~B0tHP{ffS(}fZ(aW$oBWTSfHO!A^><6v Date: Thu, 31 Mar 2022 18:25:42 -0700 Subject: [PATCH 028/653] Centralize codes related to 'master_timeout' deprecation for eaiser removal - in CAT Nodes API (#2670) * Move parseDeprecatedMasterTimeoutParameter method into BaseRestHandler class to reduce duplication Signed-off-by: Tianli Feng * Add more comments to unit test Signed-off-by: Tianli Feng * Make log message key different Signed-off-by: Tianli Feng * Prohibit using 'master_timeout' and 'cluster_manager_timeout' parameter together Signed-off-by: Tianli Feng * Add separate unit tests for BaseRestHandler.parseDeprecatedMasterTimeoutParameter() Signed-off-by: Tianli Feng * Restore unit test for cat allocation api Signed-off-by: Tianli Feng * Adjust format by spotlessApply task Signed-off-by: Tianli Feng * Fix testBothParamsNotValid() by adding warning assertion Signed-off-by: Tianli Feng --- .../org/opensearch/rest/BaseRestHandler.java | 31 +++++ .../java/org/opensearch/rest/RestRequest.java | 27 ----- .../rest/action/cat/RestNodesAction.java | 20 +--- .../RenamedTimeoutRequestParameterTests.java | 113 ++++++++++++++++++ .../org/opensearch/rest/RestRequestTests.java | 37 ------ .../rest/action/cat/RestNodesActionTests.java | 18 --- 6 files changed, 145 insertions(+), 101 deletions(-) create mode 100644 server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java diff --git a/server/src/main/java/org/opensearch/rest/BaseRestHandler.java b/server/src/main/java/org/opensearch/rest/BaseRestHandler.java index 4ee209111bdcb..e0a62581447ac 100644 --- a/server/src/main/java/org/opensearch/rest/BaseRestHandler.java +++ b/server/src/main/java/org/opensearch/rest/BaseRestHandler.java @@ -36,9 +36,12 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.search.spell.LevenshteinDistance; 
import org.apache.lucene.util.CollectionUtil; +import org.opensearch.OpenSearchParseException; +import org.opensearch.action.support.master.MasterNodeRequest; import org.opensearch.client.node.NodeClient; import org.opensearch.common.CheckedConsumer; import org.opensearch.common.collect.Tuple; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.plugins.ActionPlugin; @@ -200,6 +203,34 @@ protected Set responseParams() { return Collections.emptySet(); } + /** + * Parse the deprecated request parameter 'master_timeout', and add deprecated log if the parameter is used. + * It also validates whether the two parameters 'master_timeout' and 'cluster_manager_timeout' are not assigned together. + * The method is temporarily added in 2.0 duing applying inclusive language. Remove the method along with MASTER_ROLE. + * @param mnr the action request + * @param request the REST request to handle + * @param logger the logger that logs deprecation notices + * @param logMsgKeyPrefix the key prefix of a deprecation message to avoid duplicate messages. + */ + public static void parseDeprecatedMasterTimeoutParameter( + MasterNodeRequest mnr, + RestRequest request, + DeprecationLogger logger, + String logMsgKeyPrefix + ) { + final String MASTER_TIMEOUT_DEPRECATED_MESSAGE = + "Deprecated parameter [master_timeout] used. To promote inclusive language, please use [cluster_manager_timeout] instead. 
It will be unsupported in a future major version."; + final String DUPLICATE_PARAMETER_ERROR_MESSAGE = + "Please only use one of the request parameters [master_timeout, cluster_manager_timeout]."; + if (request.hasParam("master_timeout")) { + logger.deprecate(logMsgKeyPrefix + "_master_timeout_parameter", MASTER_TIMEOUT_DEPRECATED_MESSAGE); + if (request.hasParam("cluster_manager_timeout")) { + throw new OpenSearchParseException(DUPLICATE_PARAMETER_ERROR_MESSAGE); + } + mnr.masterNodeTimeout(request.paramAsTime("master_timeout", mnr.masterNodeTimeout())); + } + } + public static class Wrapper extends BaseRestHandler { protected final BaseRestHandler delegate; diff --git a/server/src/main/java/org/opensearch/rest/RestRequest.java b/server/src/main/java/org/opensearch/rest/RestRequest.java index e04d8faa8af39..7d11da7e122cd 100644 --- a/server/src/main/java/org/opensearch/rest/RestRequest.java +++ b/server/src/main/java/org/opensearch/rest/RestRequest.java @@ -54,7 +54,6 @@ import java.io.IOException; import java.io.InputStream; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -579,32 +578,6 @@ public static XContentType parseContentType(List header) { throw new IllegalArgumentException("empty Content-Type header"); } - /** - * The method is only used to validate whether the values of the 2 request parameters "master_timeout" and "cluster_manager_timeout" is the same value or not. - * If the 2 values are not the same, throw an {@link OpenSearchParseException}. - * @param keys Names of the request parameters. - * @deprecated The method will be removed along with the request parameter "master_timeout". - */ - @Deprecated - public void validateParamValuesAreEqual(String... keys) { - // Track the last seen value and ensure that every subsequent value matches it. - // The value to be tracked is the non-empty values of the parameters with the key. 
- String lastSeenValue = null; - for (String key : keys) { - String value = param(key); - if (!Strings.isNullOrEmpty(value)) { - if (lastSeenValue == null || value.equals(lastSeenValue)) { - lastSeenValue = value; - } else { - throw new OpenSearchParseException( - "The values of the request parameters: {} are required to be equal, otherwise please only assign value to one of the request parameters.", - Arrays.toString(keys) - ); - } - } - } - } - public static class ContentTypeHeaderException extends RuntimeException { ContentTypeHeaderException(final IllegalArgumentException cause) { diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java index abc4b48b01cff..3052a9736f9a3 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java @@ -86,8 +86,6 @@ public class RestNodesAction extends AbstractCatAction { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestNodesAction.class); static final String LOCAL_DEPRECATED_MESSAGE = "Deprecated parameter [local] used. This parameter does not cause this API to act " + "locally, and should not be used. It will be unsupported in version 8.0."; - static final String MASTER_TIMEOUT_DEPRECATED_MESSAGE = - "Deprecated parameter [master_timeout] used. To promote inclusive language, please use [cluster_manager_timeout] instead. 
It will be unsupported in a future major version."; @Override public List routes() { @@ -113,7 +111,7 @@ public RestChannelConsumer doCatRequest(final RestRequest request, final NodeCli } clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); clusterStateRequest.masterNodeTimeout(request.paramAsTime("cluster_manager_timeout", clusterStateRequest.masterNodeTimeout())); - parseDeprecatedMasterTimeoutParameter(clusterStateRequest, request); + parseDeprecatedMasterTimeoutParameter(clusterStateRequest, request, deprecationLogger, getName()); final boolean fullId = request.paramAsBoolean("full_id", false); return channel -> client.admin().cluster().state(clusterStateRequest, new RestActionListener(channel) { @Override @@ -529,20 +527,4 @@ Table buildTable( private short calculatePercentage(long used, long max) { return max <= 0 ? 0 : (short) ((100d * used) / max); } - - /** - * Parse the deprecated request parameter 'master_timeout', and add deprecated log if the parameter is used. - * It also validates whether the value of 'master_timeout' is the same with 'cluster_manager_timeout'. - * Remove the method along with MASTER_ROLE. - * @deprecated As of 2.0, because promoting inclusive language. 
- */ - @Deprecated - private void parseDeprecatedMasterTimeoutParameter(ClusterStateRequest clusterStateRequest, RestRequest request) { - final String deprecatedTimeoutParam = "master_timeout"; - if (request.hasParam(deprecatedTimeoutParam)) { - deprecationLogger.deprecate("cat_nodes_master_timeout_parameter", MASTER_TIMEOUT_DEPRECATED_MESSAGE); - request.validateParamValuesAreEqual(deprecatedTimeoutParam, "cluster_manager_timeout"); - clusterStateRequest.masterNodeTimeout(request.paramAsTime(deprecatedTimeoutParam, clusterStateRequest.masterNodeTimeout())); - } - } } diff --git a/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java b/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java new file mode 100644 index 0000000000000..b96edad72350f --- /dev/null +++ b/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java @@ -0,0 +1,113 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action; + +import org.junit.After; +import org.opensearch.OpenSearchParseException; +import org.opensearch.action.support.master.MasterNodeRequest; +import org.opensearch.client.node.NodeClient; +import org.opensearch.common.logging.DeprecationLogger; +import org.opensearch.common.settings.Settings; +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.action.cat.RestNodesAction; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.rest.FakeRestRequest; +import org.opensearch.threadpool.TestThreadPool; + +import static org.hamcrest.Matchers.containsString; + +/** + * As of 2.0, the request parameter 'master_timeout' in all applicable REST APIs is deprecated, + * and alternative parameter 'cluster_manager_timeout' is added. 
+ * The tests are used to validate the behavior about the renamed request parameter. + * Remove the test after removing MASTER_ROLE and 'master_timeout'. + */ +public class RenamedTimeoutRequestParameterTests extends OpenSearchTestCase { + private final TestThreadPool threadPool = new TestThreadPool(RenamedTimeoutRequestParameterTests.class.getName()); + private final NodeClient client = new NodeClient(Settings.EMPTY, threadPool); + private final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RenamedTimeoutRequestParameterTests.class); + + private static final String DUPLICATE_PARAMETER_ERROR_MESSAGE = + "Please only use one of the request parameters [master_timeout, cluster_manager_timeout]."; + private static final String MASTER_TIMEOUT_DEPRECATED_MESSAGE = + "Deprecated parameter [master_timeout] used. To promote inclusive language, please use [cluster_manager_timeout] instead. It will be unsupported in a future major version."; + + @After + public void terminateThreadPool() { + terminate(threadPool); + } + + public void testNoWarningsForNewParam() { + BaseRestHandler.parseDeprecatedMasterTimeoutParameter( + getMasterNodeRequest(), + getRestRequestWithNewParam(), + deprecationLogger, + "test" + ); + } + + public void testDeprecationWarningForOldParam() { + BaseRestHandler.parseDeprecatedMasterTimeoutParameter( + getMasterNodeRequest(), + getRestRequestWithDeprecatedParam(), + deprecationLogger, + "test" + ); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testBothParamsNotValid() { + Exception e = assertThrows( + OpenSearchParseException.class, + () -> BaseRestHandler.parseDeprecatedMasterTimeoutParameter( + getMasterNodeRequest(), + getRestRequestWithBothParams(), + deprecationLogger, + "test" + ) + ); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testCatAllocation() { + RestNodesAction action = new 
RestNodesAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.doCatRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + private MasterNodeRequest getMasterNodeRequest() { + return new MasterNodeRequest() { + @Override + public ActionRequestValidationException validate() { + return null; + } + }; + } + + private FakeRestRequest getRestRequestWithBothParams() { + FakeRestRequest request = new FakeRestRequest(); + request.params().put("cluster_manager_timeout", "1h"); + request.params().put("master_timeout", "3s"); + return request; + } + + private FakeRestRequest getRestRequestWithDeprecatedParam() { + FakeRestRequest request = new FakeRestRequest(); + request.params().put("master_timeout", "3s"); + return request; + } + + private FakeRestRequest getRestRequestWithNewParam() { + FakeRestRequest request = new FakeRestRequest(); + request.params().put("cluster_manager_timeout", "2m"); + return request; + } +} diff --git a/server/src/test/java/org/opensearch/rest/RestRequestTests.java b/server/src/test/java/org/opensearch/rest/RestRequestTests.java index d5a915b42cf87..7abc53e4ca610 100644 --- a/server/src/test/java/org/opensearch/rest/RestRequestTests.java +++ b/server/src/test/java/org/opensearch/rest/RestRequestTests.java @@ -34,7 +34,6 @@ import org.opensearch.OpenSearchParseException; import org.opensearch.common.CheckedConsumer; -import org.opensearch.common.Strings; import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.collect.MapBuilder; @@ -51,13 +50,11 @@ import java.util.Collections; import java.util.HashMap; import java.util.List; -import java.util.Locale; import java.util.Map; import java.util.concurrent.atomic.AtomicReference; import static java.util.Collections.emptyMap; import static 
java.util.Collections.singletonMap; -import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.mockito.Mockito.mock; @@ -283,40 +280,6 @@ public void testRequiredContent() { assertEquals("unknown content type", e.getMessage()); } - /* - * The test is added in 2.0 when the request parameter "cluster_manager_timeout" is introduced. - * Remove the test along with the removal of the non-inclusive terminology "master_timeout". - */ - public void testValidateParamValuesAreEqualWhenTheyAreEqual() { - FakeRestRequest request = new FakeRestRequest(); - String valueForKey1 = randomFrom("value1", "", null); - String valueForKey2 = "value1"; - request.params().put("key1", valueForKey1); - request.params().put("key2", valueForKey2); - request.validateParamValuesAreEqual("key1", "key2"); - assertTrue( - String.format( - Locale.ROOT, - "The 2 values should be equal, or having 1 null/empty value. Value of key1: %s. Value of key2: %s", - valueForKey1, - valueForKey2 - ), - Strings.isNullOrEmpty(valueForKey1) || valueForKey1.equals(valueForKey2) - ); - } - - /* - * The test is added in 2.0 when the request parameter "cluster_manager_timeout" is introduced. - * Remove the test along with the removal of the non-inclusive terminology "master_timeout". 
- */ - public void testValidateParamValuesAreEqualWhenTheyAreNotEqual() { - FakeRestRequest request = new FakeRestRequest(); - request.params().put("key1", "value1"); - request.params().put("key2", "value2"); - Exception e = assertThrows(OpenSearchParseException.class, () -> request.validateParamValuesAreEqual("key1", "key2")); - assertThat(e.getMessage(), containsString("The values of the request parameters: [key1, key2] are required to be equal")); - } - private static RestRequest contentRestRequest(String content, Map params) { Map> headers = new HashMap<>(); headers.put("Content-Type", Collections.singletonList("application/json")); diff --git a/server/src/test/java/org/opensearch/rest/action/cat/RestNodesActionTests.java b/server/src/test/java/org/opensearch/rest/action/cat/RestNodesActionTests.java index 9293d40605f42..593ad2907797e 100644 --- a/server/src/test/java/org/opensearch/rest/action/cat/RestNodesActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/cat/RestNodesActionTests.java @@ -32,7 +32,6 @@ package org.opensearch.rest.action.cat; -import org.opensearch.OpenSearchParseException; import org.opensearch.Version; import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse; import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; @@ -52,7 +51,6 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; -import static org.hamcrest.CoreMatchers.containsString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -91,20 +89,4 @@ public void testCatNodesWithLocalDeprecationWarning() { terminate(threadPool); } - - /** - * Validate both cluster_manager_timeout and its predecessor can be parsed correctly. - * Remove the test along with MASTER_ROLE. It's added in version 2.0.0. 
- */ - public void testCatNodesWithClusterManagerTimeout() { - TestThreadPool threadPool = new TestThreadPool(RestNodesActionTests.class.getName()); - NodeClient client = new NodeClient(Settings.EMPTY, threadPool); - FakeRestRequest request = new FakeRestRequest(); - request.params().put("cluster_manager_timeout", randomFrom("1h", "2m")); - request.params().put("master_timeout", "3s"); - Exception e = assertThrows(OpenSearchParseException.class, () -> action.doCatRequest(request, client)); - assertThat(e.getMessage(), containsString("[master_timeout, cluster_manager_timeout] are required to be equal")); - assertWarnings(RestNodesAction.MASTER_TIMEOUT_DEPRECATED_MESSAGE); - terminate(threadPool); - } } From 86eb24d27fa016d2a47b026a1270e5abd59ddd11 Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Thu, 31 Mar 2022 23:08:19 -0500 Subject: [PATCH 029/653] [Remove] types from rest-api-spec endpoints (#2689) Removes types from rest-api-spec endpoints Signed-off-by: Nicholas Walter Knize --- .../client/RestHighLevelClientTests.java | 1 - .../resources/rest-api-spec/api/bulk.json | 17 ------ .../resources/rest-api-spec/api/create.json | 26 --------- .../resources/rest-api-spec/api/delete.json | 25 --------- .../rest-api-spec/api/exists_source.json | 25 --------- .../resources/rest-api-spec/api/explain.json | 26 --------- .../api/indices.exists_type.json | 55 ------------------- .../rest-api-spec/api/indices.stats.json | 4 -- .../api/indices.validate_query.json | 22 -------- .../resources/rest-api-spec/api/update.json | 25 --------- 10 files changed, 226 deletions(-) delete mode 100644 rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_type.json diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java index 73ce7d1b2b794..7766fa76d5cfe 100644 --- 
a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java @@ -841,7 +841,6 @@ public void testApiNamingConventions() throws Exception { "create", "get_script_context", "get_script_languages", - "indices.exists_type", "indices.get_upgrade", "indices.put_alias", "render_search_template", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json b/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json index f7c0d69805caf..bb066cd131480 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json @@ -26,23 +26,6 @@ "description":"Default index for items which don't provide one" } } - }, - { - "path":"/{index}/{type}/_bulk", - "methods":[ - "POST", - "PUT" - ], - "parts":{ - "index":{ - "type":"string", - "description":"Default index for items which don't provide one" - }, - "type":{ - "type":"string", - "description":"Default document type for items which don't provide one" - } - } } ] }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/create.json b/rest-api-spec/src/main/resources/rest-api-spec/api/create.json index 171f3da44d36d..767af84b82258 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/create.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/create.json @@ -23,32 +23,6 @@ "description":"The name of the index" } } - }, - { - "path":"/{index}/{type}/{id}/_create", - "methods":[ - "PUT", - "POST" - ], - "parts":{ - "id":{ - "type":"string", - "description":"Document ID" - }, - "index":{ - "type":"string", - "description":"The name of the index" - }, - "type":{ - "type":"string", - "description":"The type of the document", - "deprecated":true - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"Specifying types in urls has been deprecated" - } } ] }, diff --git 
a/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json index 0d82bca9d4173..76dceb455627f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json @@ -22,31 +22,6 @@ "description":"The name of the index" } } - }, - { - "path":"/{index}/{type}/{id}", - "methods":[ - "DELETE" - ], - "parts":{ - "id":{ - "type":"string", - "description":"The document ID" - }, - "index":{ - "type":"string", - "description":"The name of the index" - }, - "type":{ - "type":"string", - "description":"The type of the document", - "deprecated":true - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"Specifying types in urls has been deprecated" - } } ] }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/exists_source.json b/rest-api-spec/src/main/resources/rest-api-spec/api/exists_source.json index 143ee406025ce..bdbf818fb5d81 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/exists_source.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/exists_source.json @@ -22,31 +22,6 @@ "description":"The name of the index" } } - }, - { - "path":"/{index}/{type}/{id}/_source", - "methods":[ - "HEAD" - ], - "parts":{ - "id":{ - "type":"string", - "description":"The document ID" - }, - "index":{ - "type":"string", - "description":"The name of the index" - }, - "type":{ - "type":"string", - "description":"The type of the document; deprecated and optional starting with 7.0", - "deprecated":true - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"Specifying types in urls has been deprecated" - } } ] }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/explain.json b/rest-api-spec/src/main/resources/rest-api-spec/api/explain.json index c7c393a6a1cba..7f630f7666f30 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/explain.json +++ 
b/rest-api-spec/src/main/resources/rest-api-spec/api/explain.json @@ -23,32 +23,6 @@ "description":"The name of the index" } } - }, - { - "path":"/{index}/{type}/{id}/_explain", - "methods":[ - "GET", - "POST" - ], - "parts":{ - "id":{ - "type":"string", - "description":"The document ID" - }, - "index":{ - "type":"string", - "description":"The name of the index" - }, - "type":{ - "type":"string", - "description":"The type of the document", - "deprecated":true - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"Specifying types in urls has been deprecated" - } } ] }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_type.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_type.json deleted file mode 100644 index c854d0e8fd841..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_type.json +++ /dev/null @@ -1,55 +0,0 @@ -{ - "indices.exists_type":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-types-exists.html", - "description":"Returns information about whether a particular document type exists. (DEPRECATED)" - }, - "stability":"stable", - "url":{ - "paths":[ - { - "path":"/{index}/_mapping/{type}", - "methods":[ - "HEAD" - ], - "parts":{ - "index":{ - "type":"list", - "description":"A comma-separated list of index names; use `_all` to check the types across all indices" - }, - "type":{ - "type":"list", - "description":"A comma-separated list of document types to check" - } - } - } - ] - }, - "params":{ - "ignore_unavailable":{ - "type":"boolean", - "description":"Whether specified concrete indices should be ignored when unavailable (missing or closed)" - }, - "allow_no_indices":{ - "type":"boolean", - "description":"Whether to ignore if a wildcard indices expression resolves into no concrete indices. 
(This includes `_all` string or when no indices have been specified)" - }, - "expand_wildcards":{ - "type":"enum", - "options":[ - "open", - "closed", - "hidden", - "none", - "all" - ], - "default":"open", - "description":"Whether to expand wildcard expression to concrete indices that are open, closed or both." - }, - "local":{ - "type":"boolean", - "description":"Return local information, do not retrieve the state from master node (default: false)" - } - } - } -} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.stats.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.stats.json index 0a8960f2f9e89..382bb9efde0ff 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.stats.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.stats.json @@ -118,10 +118,6 @@ ], "default":"indices" }, - "types":{ - "type":"list", - "description":"A comma-separated list of document types for the `indexing` index metric" - }, "include_segment_file_sizes":{ "type":"boolean", "description":"Whether to report the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested)", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.validate_query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.validate_query.json index 3becec003a9e6..cc0386ee3b972 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.validate_query.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.validate_query.json @@ -26,28 +26,6 @@ "description":"A comma-separated list of index names to restrict the operation; use `_all` or empty string to perform the operation on all indices" } } - }, - { - "path":"/{index}/{type}/_validate/query", - "methods":[ - "GET", - "POST" - ], - "parts":{ - "index":{ - "type":"list", - "description":"A comma-separated list of index names to restrict the operation; use `_all` or empty string to perform the 
operation on all indices" - }, - "type":{ - "type":"list", - "description":"A comma-separated list of document types to restrict the operation; leave empty to perform the operation on all types", - "deprecated":true - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"Specifying types in urls has been deprecated" - } } ] }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/update.json b/rest-api-spec/src/main/resources/rest-api-spec/api/update.json index 81bc101600aeb..c8d1ed435756b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/update.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/update.json @@ -22,31 +22,6 @@ "description":"The name of the index" } } - }, - { - "path":"/{index}/{type}/{id}/_update", - "methods":[ - "POST" - ], - "parts":{ - "id":{ - "type":"string", - "description":"Document ID" - }, - "index":{ - "type":"string", - "description":"The name of the index" - }, - "type":{ - "type":"string", - "description":"The type of the document", - "deprecated":true - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"Specifying types in urls has been deprecated" - } } ] }, From d7d41085f81a5e7eab6e31849f8bfc2b5cfc61d3 Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Fri, 1 Apr 2022 13:40:42 -0700 Subject: [PATCH 030/653] Add Shadow jar publication to lang-painless module. (#2681) * Add Shadow jar publication to lang-painless module. This change creates a shadow jar for asm dependencies so that they do not conflict with direct asm dependencies from log4j AL2 patch. Signed-off-by: Marc Handalian * Remove security.manager systemProperty that is not required. Signed-off-by: Marc Handalian * Add explicit task dependency for publishing to maven local required by gradle. Signed-off-by: Marc Handalian * Move asm dependencies back to api scope. 
Signed-off-by: Marc Handalian --- modules/lang-painless/build.gradle | 30 ++++++++++++++++++- .../licenses/asm-analysis-7.2.jar.sha1 | 1 - .../licenses/asm-analysis-9.2.jar.sha1 | 1 + 3 files changed, 30 insertions(+), 2 deletions(-) delete mode 100644 modules/lang-painless/licenses/asm-analysis-7.2.jar.sha1 create mode 100644 modules/lang-painless/licenses/asm-analysis-9.2.jar.sha1 diff --git a/modules/lang-painless/build.gradle b/modules/lang-painless/build.gradle index c524f9a7e2f2c..ffbb3e60eb5ab 100644 --- a/modules/lang-painless/build.gradle +++ b/modules/lang-painless/build.gradle @@ -29,8 +29,11 @@ */ import org.opensearch.gradle.testclusters.DefaultTestClustersTask; +import com.github.jengelman.gradle.plugins.shadow.ShadowBasePlugin + apply plugin: 'opensearch.validate-rest-spec' apply plugin: 'opensearch.yaml-rest-test' +apply plugin: 'com.github.johnrengelman.shadow' opensearchplugin { description 'An easy, safe and fast scripting language for OpenSearch' @@ -49,11 +52,36 @@ dependencies { api 'org.ow2.asm:asm-util:9.2' api 'org.ow2.asm:asm-tree:9.2' api 'org.ow2.asm:asm-commons:9.2' - api 'org.ow2.asm:asm-analysis:7.2' + api 'org.ow2.asm:asm-analysis:9.2' api 'org.ow2.asm:asm:9.2' api project('spi') } +test { + doFirst { + test.classpath -= project.files(project.tasks.named('shadowJar')) + test.classpath -= project.configurations.getByName(ShadowBasePlugin.CONFIGURATION_NAME) + test.classpath += project.extensions.getByType(SourceSetContainer).getByName(SourceSet.MAIN_SOURCE_SET_NAME).runtimeClasspath + } +} + +shadowJar { + classifier = null + relocate 'org.objectweb', 'org.opensearch.repackage.org.objectweb' + dependencies { + include(dependency('org.ow2.asm:asm:9.2')) + include(dependency('org.ow2.asm:asm-util:9.2')) + include(dependency('org.ow2.asm:asm-tree:9.2')) + include(dependency('org.ow2.asm:asm-commons:9.2')) + include(dependency('org.ow2.asm:asm-analysis:9.2')) + } +} + +tasks.validateNebulaPom.dependsOn 
tasks.generatePomFileForShadowPublication +tasks.validateShadowPom.dependsOn tasks.generatePomFileForNebulaPublication +tasks.publishNebulaPublicationToMavenLocal.dependsOn tasks.generatePomFileForShadowPublication +tasks.publishShadowPublicationToMavenLocal.dependsOn tasks.generatePomFileForNebulaPublication + tasks.named("dependencyLicenses").configure { mapping from: /asm-.*/, to: 'asm' } diff --git a/modules/lang-painless/licenses/asm-analysis-7.2.jar.sha1 b/modules/lang-painless/licenses/asm-analysis-7.2.jar.sha1 deleted file mode 100644 index 849b5e0bfa671..0000000000000 --- a/modules/lang-painless/licenses/asm-analysis-7.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b6e6abe057f23630113f4167c34bda7086691258 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-analysis-9.2.jar.sha1 b/modules/lang-painless/licenses/asm-analysis-9.2.jar.sha1 new file mode 100644 index 0000000000000..b93483a24da5d --- /dev/null +++ b/modules/lang-painless/licenses/asm-analysis-9.2.jar.sha1 @@ -0,0 +1 @@ +7487dd756daf96cab9986e44b9d7bcb796a61c10 \ No newline at end of file From 78465b4a08d51b8016ebfa757ca24e97606bdc84 Mon Sep 17 00:00:00 2001 From: Tianli Feng Date: Fri, 1 Apr 2022 16:08:44 -0700 Subject: [PATCH 031/653] Add request parameter 'cluster_manager_timeout' as the alternative for 'master_timeout', and deprecate 'master_timeout' - in CAT APIs (#2557) Apply the change of CAT Nodes API in PR #2435 to other applicable CAT APIs. - Deprecate the request parameter `master_timeout` that used in many CAT APIs. - Add alternative new request parameter `cluster_manager_timeout`. - Add unit tests. 
Signed-off-by: Tianli Feng --- .../rest-api-spec/api/cat.allocation.json | 10 +- .../api/cat.cluster_manager.json | 10 +- .../rest-api-spec/api/cat.indices.json | 10 +- .../rest-api-spec/api/cat.nodeattrs.json | 10 +- .../rest-api-spec/api/cat.pending_tasks.json | 10 +- .../rest-api-spec/api/cat.plugins.json | 10 +- .../rest-api-spec/api/cat.repositories.json | 10 +- .../rest-api-spec/api/cat.segments.json | 12 ++ .../rest-api-spec/api/cat.shards.json | 10 +- .../rest-api-spec/api/cat.snapshots.json | 10 +- .../rest-api-spec/api/cat.templates.json | 10 +- .../rest-api-spec/api/cat.thread_pool.json | 10 +- .../rest/action/cat/RestAllocationAction.java | 6 +- .../rest/action/cat/RestIndicesAction.java | 126 +++++++++++------- .../rest/action/cat/RestMasterAction.java | 6 +- .../rest/action/cat/RestNodeAttrsAction.java | 6 +- .../cat/RestPendingClusterTasksAction.java | 8 +- .../rest/action/cat/RestPluginsAction.java | 6 +- .../action/cat/RestRepositoriesAction.java | 10 +- .../rest/action/cat/RestSegmentsAction.java | 8 +- .../rest/action/cat/RestShardsAction.java | 6 +- .../rest/action/cat/RestSnapshotAction.java | 8 +- .../rest/action/cat/RestTemplatesAction.java | 8 +- .../rest/action/cat/RestThreadPoolAction.java | 6 +- .../RenamedTimeoutRequestParameterTests.java | 97 ++++++++++++++ 25 files changed, 347 insertions(+), 76 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.allocation.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.allocation.json index 7b3dc70b03c38..717c1c49808f6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.allocation.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.allocation.json @@ -55,7 +55,15 @@ }, "master_timeout":{ "type":"time", - "description":"Explicit operation timeout for connection to master node" + "description":"Explicit operation timeout for connection to master node", + "deprecated":{ + "version":"2.0.0", + "description":"To promote inclusive 
language, use 'cluster_manager_timeout' instead." + } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to cluster-manager node" }, "h":{ "type":"list", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.cluster_manager.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.cluster_manager.json index c1084825546bf..cd96038ad0693 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.cluster_manager.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.cluster_manager.json @@ -36,7 +36,15 @@ }, "master_timeout":{ "type":"time", - "description":"Explicit operation timeout for connection to master node" + "description":"Explicit operation timeout for connection to master node", + "deprecated":{ + "version":"2.0.0", + "description":"To promote inclusive language, use 'cluster_manager_timeout' instead." + } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to cluster-manager node" }, "h":{ "type":"list", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.indices.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.indices.json index a92189134f88f..2491ab309531d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.indices.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.indices.json @@ -55,7 +55,15 @@ }, "master_timeout":{ "type":"time", - "description":"Explicit operation timeout for connection to master node" + "description":"Explicit operation timeout for connection to master node", + "deprecated":{ + "version":"2.0.0", + "description":"To promote inclusive language, use 'cluster_manager_timeout' instead." 
+ } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to cluster-manager node" }, "h":{ "type":"list", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodeattrs.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodeattrs.json index e688e23cab089..c8afa4cb17039 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodeattrs.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodeattrs.json @@ -26,7 +26,15 @@ }, "master_timeout":{ "type":"time", - "description":"Explicit operation timeout for connection to master node" + "description":"Explicit operation timeout for connection to master node", + "deprecated":{ + "version":"2.0.0", + "description":"To promote inclusive language, use 'cluster_manager_timeout' instead." + } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to cluster-manager node" }, "h":{ "type":"list", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.pending_tasks.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.pending_tasks.json index 36fa33be495cd..9c0edf8c53d90 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.pending_tasks.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.pending_tasks.json @@ -26,7 +26,15 @@ }, "master_timeout":{ "type":"time", - "description":"Explicit operation timeout for connection to master node" + "description":"Explicit operation timeout for connection to master node", + "deprecated":{ + "version":"2.0.0", + "description":"To promote inclusive language, use 'cluster_manager_timeout' instead." 
+ } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to cluster-manager node" }, "h":{ "type":"list", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.plugins.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.plugins.json index d5346c6d9e7b4..0b5b39b01ee58 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.plugins.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.plugins.json @@ -26,7 +26,15 @@ }, "master_timeout":{ "type":"time", - "description":"Explicit operation timeout for connection to master node" + "description":"Explicit operation timeout for connection to master node", + "deprecated":{ + "version":"2.0.0", + "description":"To promote inclusive language, use 'cluster_manager_timeout' instead." + } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to cluster-manager node" }, "h":{ "type":"list", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.repositories.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.repositories.json index 84d9965907ff3..58960709a99bb 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.repositories.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.repositories.json @@ -27,7 +27,15 @@ }, "master_timeout":{ "type":"time", - "description":"Explicit operation timeout for connection to master node" + "description":"Explicit operation timeout for connection to master node", + "deprecated":{ + "version":"2.0.0", + "description":"To promote inclusive language, use 'cluster_manager_timeout' instead." 
+ } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to cluster-manager node" }, "h":{ "type":"list", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.segments.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.segments.json index 472ef7fd22eee..5107353c7b14f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.segments.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.segments.json @@ -49,6 +49,18 @@ "pb" ] }, + "master_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to master node", + "deprecated":{ + "version":"2.0.0", + "description":"To promote inclusive language, use 'cluster_manager_timeout' instead." + } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to cluster-manager node" + }, "h":{ "type":"list", "description":"Comma-separated list of column names to display" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.shards.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.shards.json index a13c0f6bf6d4a..fab381a098e3f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.shards.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.shards.json @@ -55,7 +55,15 @@ }, "master_timeout":{ "type":"time", - "description":"Explicit operation timeout for connection to master node" + "description":"Explicit operation timeout for connection to master node", + "deprecated":{ + "version":"2.0.0", + "description":"To promote inclusive language, use 'cluster_manager_timeout' instead." 
+ } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to cluster-manager node" }, "h":{ "type":"list", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.snapshots.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.snapshots.json index 757c2cfbe7dc6..1320207abfe75 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.snapshots.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.snapshots.json @@ -39,7 +39,15 @@ }, "master_timeout":{ "type":"time", - "description":"Explicit operation timeout for connection to master node" + "description":"Explicit operation timeout for connection to master node", + "deprecated":{ + "version":"2.0.0", + "description":"To promote inclusive language, use 'cluster_manager_timeout' instead." + } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to cluster-manager node" }, "h":{ "type":"list", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.templates.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.templates.json index 53fc872b5dae2..d45593b7bb2c8 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.templates.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.templates.json @@ -38,7 +38,15 @@ }, "master_timeout":{ "type":"time", - "description":"Explicit operation timeout for connection to master node" + "description":"Explicit operation timeout for connection to master node", + "deprecated":{ + "version":"2.0.0", + "description":"To promote inclusive language, use 'cluster_manager_timeout' instead." 
+ } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to cluster-manager node" }, "h":{ "type":"list", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.thread_pool.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.thread_pool.json index 710c297dbbe75..1165703490d1a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.thread_pool.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.thread_pool.json @@ -54,7 +54,15 @@ }, "master_timeout":{ "type":"time", - "description":"Explicit operation timeout for connection to master node" + "description":"Explicit operation timeout for connection to master node", + "deprecated":{ + "version":"2.0.0", + "description":"To promote inclusive language, use 'cluster_manager_timeout' instead." + } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to cluster-manager node" }, "h":{ "type":"list", diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestAllocationAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestAllocationAction.java index a74d64d1530c5..eb03b9e25a294 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestAllocationAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestAllocationAction.java @@ -44,6 +44,7 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.Strings; import org.opensearch.common.Table; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.rest.RestRequest; import org.opensearch.rest.RestResponse; @@ -58,6 +59,8 @@ public class RestAllocationAction extends AbstractCatAction { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestAllocationAction.class); + @Override public List routes() { return unmodifiableList(asList(new 
Route(GET, "/_cat/allocation"), new Route(GET, "/_cat/allocation/{nodes}"))); @@ -79,7 +82,8 @@ public RestChannelConsumer doCatRequest(final RestRequest request, final NodeCli final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); clusterStateRequest.clear().routingTable(true); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); - clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout())); + clusterStateRequest.masterNodeTimeout(request.paramAsTime("cluster_manager_timeout", clusterStateRequest.masterNodeTimeout())); + parseDeprecatedMasterTimeoutParameter(clusterStateRequest, request, deprecationLogger, getName()); return channel -> client.admin().cluster().state(clusterStateRequest, new RestActionListener(channel) { @Override diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java index 2c0eef6a8fdb8..a26b57aab0636 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java @@ -32,6 +32,7 @@ package org.opensearch.rest.action.cat; +import org.opensearch.OpenSearchParseException; import org.opensearch.action.ActionListener; import org.opensearch.action.ActionResponse; import org.opensearch.action.admin.cluster.health.ClusterHealthRequest; @@ -52,6 +53,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Strings; import org.opensearch.common.Table; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Settings; import org.opensearch.common.time.DateFormatter; import org.opensearch.common.unit.TimeValue; @@ -82,6 +84,11 @@ public class RestIndicesAction extends AbstractCatAction { private static final DateFormatter STRICT_DATE_TIME_FORMATTER = 
DateFormatter.forPattern("strict_date_time"); + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestIndicesAction.class); + private static final String MASTER_TIMEOUT_DEPRECATED_MESSAGE = + "Deprecated parameter [master_timeout] used. To promote inclusive language, please use [cluster_manager_timeout] instead. It will be unsupported in a future major version."; + private static final String DUPLICATE_PARAMETER_ERROR_MESSAGE = + "Please only use one of the request parameters [master_timeout, cluster_manager_timeout]."; @Override public List routes() { @@ -109,7 +116,16 @@ public RestChannelConsumer doCatRequest(final RestRequest request, final NodeCli final String[] indices = Strings.splitStringByCommaToArray(request.param("index")); final IndicesOptions indicesOptions = IndicesOptions.fromRequest(request, IndicesOptions.strictExpand()); final boolean local = request.paramAsBoolean("local", false); - final TimeValue masterNodeTimeout = request.paramAsTime("master_timeout", DEFAULT_MASTER_NODE_TIMEOUT); + TimeValue clusterManagerTimeout = request.paramAsTime("cluster_manager_timeout", DEFAULT_MASTER_NODE_TIMEOUT); + // Remove the if condition and statements inside after removing MASTER_ROLE. 
+ if (request.hasParam("master_timeout")) { + deprecationLogger.deprecate("cat_indices_master_timeout_parameter", MASTER_TIMEOUT_DEPRECATED_MESSAGE); + if (request.hasParam("cluster_manager_timeout")) { + throw new OpenSearchParseException(DUPLICATE_PARAMETER_ERROR_MESSAGE); + } + clusterManagerTimeout = request.paramAsTime("master_timeout", DEFAULT_MASTER_NODE_TIMEOUT); + } + final TimeValue clusterManagerNodeTimeout = clusterManagerTimeout; final boolean includeUnloadedSegments = request.paramAsBoolean("include_unloaded_segments", false); return channel -> { @@ -120,56 +136,66 @@ public RestResponse buildResponse(final Table table) throws Exception { } }); - sendGetSettingsRequest(indices, indicesOptions, local, masterNodeTimeout, client, new ActionListener() { - @Override - public void onResponse(final GetSettingsResponse getSettingsResponse) { - final GroupedActionListener groupedListener = createGroupedListener(request, 4, listener); - groupedListener.onResponse(getSettingsResponse); - - // The list of indices that will be returned is determined by the indices returned from the Get Settings call. - // All the other requests just provide additional detail, and wildcards may be resolved differently depending on the - // type of request in the presence of security plugins (looking at you, ClusterHealthRequest), so - // force the IndicesOptions for all the sub-requests to be as inclusive as possible. - final IndicesOptions subRequestIndicesOptions = IndicesOptions.lenientExpandHidden(); - - // Indices that were successfully resolved during the get settings request might be deleted when the subsequent cluster - // state, cluster health and indices stats requests execute. We have to distinguish two cases: - // 1) the deleted index was explicitly passed as parameter to the /_cat/indices request. In this case we want the - // subsequent requests to fail. - // 2) the deleted index was resolved as part of a wildcard or _all. 
In this case, we want the subsequent requests not to - // fail on the deleted index (as we want to ignore wildcards that cannot be resolved). - // This behavior can be ensured by letting the cluster state, cluster health and indices stats requests re-resolve the - // index names with the same indices options that we used for the initial cluster state request (strictExpand). - sendIndicesStatsRequest( - indices, - subRequestIndicesOptions, - includeUnloadedSegments, - client, - ActionListener.wrap(groupedListener::onResponse, groupedListener::onFailure) - ); - sendClusterStateRequest( - indices, - subRequestIndicesOptions, - local, - masterNodeTimeout, - client, - ActionListener.wrap(groupedListener::onResponse, groupedListener::onFailure) - ); - sendClusterHealthRequest( - indices, - subRequestIndicesOptions, - local, - masterNodeTimeout, - client, - ActionListener.wrap(groupedListener::onResponse, groupedListener::onFailure) - ); - } - - @Override - public void onFailure(final Exception e) { - listener.onFailure(e); + sendGetSettingsRequest( + indices, + indicesOptions, + local, + clusterManagerNodeTimeout, + client, + new ActionListener() { + @Override + public void onResponse(final GetSettingsResponse getSettingsResponse) { + final GroupedActionListener groupedListener = createGroupedListener(request, 4, listener); + groupedListener.onResponse(getSettingsResponse); + + // The list of indices that will be returned is determined by the indices returned from the Get Settings call. + // All the other requests just provide additional detail, and wildcards may be resolved differently depending on the + // type of request in the presence of security plugins (looking at you, ClusterHealthRequest), so + // force the IndicesOptions for all the sub-requests to be as inclusive as possible. 
+ final IndicesOptions subRequestIndicesOptions = IndicesOptions.lenientExpandHidden(); + + // Indices that were successfully resolved during the get settings request might be deleted when the subsequent + // cluster + // state, cluster health and indices stats requests execute. We have to distinguish two cases: + // 1) the deleted index was explicitly passed as parameter to the /_cat/indices request. In this case we want the + // subsequent requests to fail. + // 2) the deleted index was resolved as part of a wildcard or _all. In this case, we want the subsequent requests + // not to + // fail on the deleted index (as we want to ignore wildcards that cannot be resolved). + // This behavior can be ensured by letting the cluster state, cluster health and indices stats requests re-resolve + // the + // index names with the same indices options that we used for the initial cluster state request (strictExpand). + sendIndicesStatsRequest( + indices, + subRequestIndicesOptions, + includeUnloadedSegments, + client, + ActionListener.wrap(groupedListener::onResponse, groupedListener::onFailure) + ); + sendClusterStateRequest( + indices, + subRequestIndicesOptions, + local, + clusterManagerNodeTimeout, + client, + ActionListener.wrap(groupedListener::onResponse, groupedListener::onFailure) + ); + sendClusterHealthRequest( + indices, + subRequestIndicesOptions, + local, + clusterManagerNodeTimeout, + client, + ActionListener.wrap(groupedListener::onResponse, groupedListener::onFailure) + ); + } + + @Override + public void onFailure(final Exception e) { + listener.onFailure(e); + } } - }); + ); }; } diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestMasterAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestMasterAction.java index 1219b419122c6..4bcb16c741ecf 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestMasterAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestMasterAction.java @@ -38,6 +38,7 @@ 
import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.Table; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.rest.RestRequest; import org.opensearch.rest.RestResponse; import org.opensearch.rest.action.RestResponseListener; @@ -49,6 +50,8 @@ public class RestMasterAction extends AbstractCatAction { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestMasterAction.class); + @Override public List replacedRoutes() { // The deprecated path will be removed in a future major version. @@ -70,7 +73,8 @@ public RestChannelConsumer doCatRequest(final RestRequest request, final NodeCli final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); clusterStateRequest.clear().nodes(true); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); - clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout())); + clusterStateRequest.masterNodeTimeout(request.paramAsTime("cluster_manager_timeout", clusterStateRequest.masterNodeTimeout())); + parseDeprecatedMasterTimeoutParameter(clusterStateRequest, request, deprecationLogger, getName()); return channel -> client.admin().cluster().state(clusterStateRequest, new RestResponseListener(channel) { @Override diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestNodeAttrsAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestNodeAttrsAction.java index d6fc27a2713db..6ea36267d39ed 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestNodeAttrsAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestNodeAttrsAction.java @@ -42,6 +42,7 @@ import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.Strings; import org.opensearch.common.Table; +import org.opensearch.common.logging.DeprecationLogger; import 
org.opensearch.monitor.process.ProcessInfo; import org.opensearch.rest.RestRequest; import org.opensearch.rest.RestResponse; @@ -56,6 +57,8 @@ public class RestNodeAttrsAction extends AbstractCatAction { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestNodeAttrsAction.class); + @Override public List routes() { return singletonList(new Route(GET, "/_cat/nodeattrs")); @@ -76,7 +79,8 @@ public RestChannelConsumer doCatRequest(final RestRequest request, final NodeCli final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); clusterStateRequest.clear().nodes(true); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); - clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout())); + clusterStateRequest.masterNodeTimeout(request.paramAsTime("cluster_manager_timeout", clusterStateRequest.masterNodeTimeout())); + parseDeprecatedMasterTimeoutParameter(clusterStateRequest, request, deprecationLogger, getName()); return channel -> client.admin().cluster().state(clusterStateRequest, new RestActionListener(channel) { @Override diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestPendingClusterTasksAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestPendingClusterTasksAction.java index dbc1b21fcf0ab..7e261510ee742 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestPendingClusterTasksAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestPendingClusterTasksAction.java @@ -37,6 +37,7 @@ import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.service.PendingClusterTask; import org.opensearch.common.Table; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.rest.RestRequest; import org.opensearch.rest.RestResponse; import org.opensearch.rest.action.RestResponseListener; @@ -48,6 +49,8 @@ public class 
RestPendingClusterTasksAction extends AbstractCatAction { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestPendingClusterTasksAction.class); + @Override public List routes() { return singletonList(new Route(GET, "/_cat/pending_tasks")); @@ -66,7 +69,10 @@ protected void documentation(StringBuilder sb) { @Override public RestChannelConsumer doCatRequest(final RestRequest request, final NodeClient client) { PendingClusterTasksRequest pendingClusterTasksRequest = new PendingClusterTasksRequest(); - pendingClusterTasksRequest.masterNodeTimeout(request.paramAsTime("master_timeout", pendingClusterTasksRequest.masterNodeTimeout())); + pendingClusterTasksRequest.masterNodeTimeout( + request.paramAsTime("cluster_manager_timeout", pendingClusterTasksRequest.masterNodeTimeout()) + ); + parseDeprecatedMasterTimeoutParameter(pendingClusterTasksRequest, request, deprecationLogger, getName()); pendingClusterTasksRequest.local(request.paramAsBoolean("local", pendingClusterTasksRequest.local())); return channel -> client.admin() .cluster() diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestPluginsAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestPluginsAction.java index 79cac0f906c74..8975c8ab4b3de 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestPluginsAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestPluginsAction.java @@ -42,6 +42,7 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.Table; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.plugins.PluginInfo; import org.opensearch.rest.RestRequest; import org.opensearch.rest.RestResponse; @@ -55,6 +56,8 @@ public class RestPluginsAction extends AbstractCatAction { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestPluginsAction.class); + @Override public 
List routes() { return singletonList(new Route(GET, "/_cat/plugins")); @@ -75,7 +78,8 @@ public RestChannelConsumer doCatRequest(final RestRequest request, final NodeCli final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); clusterStateRequest.clear().nodes(true); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); - clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout())); + clusterStateRequest.masterNodeTimeout(request.paramAsTime("cluster_manager_timeout", clusterStateRequest.masterNodeTimeout())); + parseDeprecatedMasterTimeoutParameter(clusterStateRequest, request, deprecationLogger, getName()); return channel -> client.admin().cluster().state(clusterStateRequest, new RestActionListener(channel) { @Override diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestRepositoriesAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestRepositoriesAction.java index fec3996506fca..76c71bee791ef 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestRepositoriesAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestRepositoriesAction.java @@ -37,6 +37,7 @@ import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.common.Table; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.rest.RestRequest; import org.opensearch.rest.RestResponse; import org.opensearch.rest.action.RestResponseListener; @@ -51,16 +52,21 @@ */ public class RestRepositoriesAction extends AbstractCatAction { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestRepositoriesAction.class); + @Override public List routes() { return singletonList(new Route(GET, "/_cat/repositories")); } @Override - protected RestChannelConsumer doCatRequest(RestRequest request, NodeClient client) { + public 
RestChannelConsumer doCatRequest(RestRequest request, NodeClient client) { GetRepositoriesRequest getRepositoriesRequest = new GetRepositoriesRequest(); getRepositoriesRequest.local(request.paramAsBoolean("local", getRepositoriesRequest.local())); - getRepositoriesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getRepositoriesRequest.masterNodeTimeout())); + getRepositoriesRequest.masterNodeTimeout( + request.paramAsTime("cluster_manager_timeout", getRepositoriesRequest.masterNodeTimeout()) + ); + parseDeprecatedMasterTimeoutParameter(getRepositoriesRequest, request, deprecationLogger, getName()); return channel -> client.admin() .cluster() diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestSegmentsAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestSegmentsAction.java index 8d9d1937bdf56..3a31fdab125b1 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestSegmentsAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestSegmentsAction.java @@ -43,6 +43,7 @@ import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.Strings; import org.opensearch.common.Table; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.index.engine.Segment; import org.opensearch.rest.RestRequest; import org.opensearch.rest.RestResponse; @@ -58,6 +59,8 @@ public class RestSegmentsAction extends AbstractCatAction { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestSegmentsAction.class); + @Override public List routes() { return unmodifiableList(asList(new Route(GET, "/_cat/segments"), new Route(GET, "/_cat/segments/{index}"))); @@ -74,12 +77,13 @@ public boolean allowSystemIndexAccessByDefault() { } @Override - protected RestChannelConsumer doCatRequest(final RestRequest request, final NodeClient client) { + public RestChannelConsumer doCatRequest(final RestRequest request, final NodeClient client) { final String[] 
indices = Strings.splitStringByCommaToArray(request.param("index")); final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); - clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout())); + clusterStateRequest.masterNodeTimeout(request.paramAsTime("cluster_manager_timeout", clusterStateRequest.masterNodeTimeout())); + parseDeprecatedMasterTimeoutParameter(clusterStateRequest, request, deprecationLogger, getName()); clusterStateRequest.clear().nodes(true).routingTable(true).indices(indices); return channel -> client.admin().cluster().state(clusterStateRequest, new RestActionListener(channel) { diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java index f9aa1a5554e9e..32e2ca8481f83 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java @@ -43,6 +43,7 @@ import org.opensearch.cluster.routing.UnassignedInfo; import org.opensearch.common.Strings; import org.opensearch.common.Table; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.unit.TimeValue; import org.opensearch.index.cache.query.QueryCacheStats; import org.opensearch.index.engine.CommitStats; @@ -75,6 +76,8 @@ public class RestShardsAction extends AbstractCatAction { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestShardsAction.class); + @Override public List routes() { return unmodifiableList(asList(new Route(GET, "/_cat/shards"), new Route(GET, "/_cat/shards/{index}"))); @@ -101,7 +104,8 @@ public RestChannelConsumer doCatRequest(final RestRequest request, final NodeCli final String[] indices = Strings.splitStringByCommaToArray(request.param("index")); 
final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); - clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout())); + clusterStateRequest.masterNodeTimeout(request.paramAsTime("cluster_manager_timeout", clusterStateRequest.masterNodeTimeout())); + parseDeprecatedMasterTimeoutParameter(clusterStateRequest, request, deprecationLogger, getName()); clusterStateRequest.clear().nodes(true).routingTable(true).indices(indices); return channel -> client.admin().cluster().state(clusterStateRequest, new RestActionListener(channel) { @Override diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestSnapshotAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestSnapshotAction.java index 55e20bc59aeac..5adb9228d9869 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestSnapshotAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestSnapshotAction.java @@ -36,6 +36,7 @@ import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; import org.opensearch.client.node.NodeClient; import org.opensearch.common.Table; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.time.DateFormatter; import org.opensearch.common.unit.TimeValue; import org.opensearch.rest.RestRequest; @@ -58,6 +59,8 @@ */ public class RestSnapshotAction extends AbstractCatAction { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestSnapshotAction.class); + @Override public List routes() { return unmodifiableList(asList(new Route(GET, "/_cat/snapshots"), new Route(GET, "/_cat/snapshots/{repository}"))); @@ -69,13 +72,14 @@ public String getName() { } @Override - protected RestChannelConsumer doCatRequest(final RestRequest request, NodeClient client) { + public RestChannelConsumer doCatRequest(final 
RestRequest request, NodeClient client) { GetSnapshotsRequest getSnapshotsRequest = new GetSnapshotsRequest().repository(request.param("repository")) .snapshots(new String[] { GetSnapshotsRequest.ALL_SNAPSHOTS }); getSnapshotsRequest.ignoreUnavailable(request.paramAsBoolean("ignore_unavailable", getSnapshotsRequest.ignoreUnavailable())); - getSnapshotsRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getSnapshotsRequest.masterNodeTimeout())); + getSnapshotsRequest.masterNodeTimeout(request.paramAsTime("cluster_manager_timeout", getSnapshotsRequest.masterNodeTimeout())); + parseDeprecatedMasterTimeoutParameter(getSnapshotsRequest, request, deprecationLogger, getName()); return channel -> client.admin() .cluster() diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestTemplatesAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestTemplatesAction.java index fcead3a34e8a9..bedc57453cb9b 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestTemplatesAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestTemplatesAction.java @@ -40,6 +40,7 @@ import org.opensearch.cluster.metadata.ComposableIndexTemplate; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.common.Table; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.regex.Regex; import org.opensearch.rest.RestRequest; import org.opensearch.rest.RestResponse; @@ -54,6 +55,8 @@ public class RestTemplatesAction extends AbstractCatAction { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestTemplatesAction.class); + @Override public List routes() { return unmodifiableList(asList(new Route(GET, "/_cat/templates"), new Route(GET, "/_cat/templates/{name}"))); @@ -70,12 +73,13 @@ protected void documentation(StringBuilder sb) { } @Override - protected RestChannelConsumer doCatRequest(final RestRequest request, NodeClient client) { + public 
RestChannelConsumer doCatRequest(final RestRequest request, NodeClient client) { final String matchPattern = request.hasParam("name") ? request.param("name") : null; final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); clusterStateRequest.clear().metadata(true); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); - clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout())); + clusterStateRequest.masterNodeTimeout(request.paramAsTime("cluster_manager_timeout", clusterStateRequest.masterNodeTimeout())); + parseDeprecatedMasterTimeoutParameter(clusterStateRequest, request, deprecationLogger, getName()); return channel -> client.admin().cluster().state(clusterStateRequest, new RestResponseListener(channel) { @Override diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestThreadPoolAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestThreadPoolAction.java index 1198fe0bdcba3..2f43a3a66d01b 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestThreadPoolAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestThreadPoolAction.java @@ -44,6 +44,7 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.Table; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.regex.Regex; import org.opensearch.monitor.process.ProcessInfo; import org.opensearch.rest.RestRequest; @@ -68,6 +69,8 @@ public class RestThreadPoolAction extends AbstractCatAction { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestThreadPoolAction.class); + @Override public List routes() { return unmodifiableList(asList(new Route(GET, "/_cat/thread_pool"), new Route(GET, "/_cat/thread_pool/{thread_pool_patterns}"))); @@ -89,7 +92,8 @@ public RestChannelConsumer doCatRequest(final 
RestRequest request, final NodeCli final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); clusterStateRequest.clear().nodes(true); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); - clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout())); + clusterStateRequest.masterNodeTimeout(request.paramAsTime("cluster_manager_timeout", clusterStateRequest.masterNodeTimeout())); + parseDeprecatedMasterTimeoutParameter(clusterStateRequest, request, deprecationLogger, getName()); return channel -> client.admin().cluster().state(clusterStateRequest, new RestActionListener(channel) { @Override diff --git a/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java b/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java index b96edad72350f..66b6697c70966 100644 --- a/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java +++ b/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java @@ -14,6 +14,19 @@ import org.opensearch.client.node.NodeClient; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Settings; +import org.opensearch.rest.action.cat.RestAllocationAction; +import org.opensearch.rest.action.cat.RestRepositoriesAction; +import org.opensearch.rest.action.cat.RestThreadPoolAction; +import org.opensearch.rest.action.cat.RestMasterAction; +import org.opensearch.rest.action.cat.RestShardsAction; +import org.opensearch.rest.action.cat.RestPluginsAction; +import org.opensearch.rest.action.cat.RestNodeAttrsAction; +import org.opensearch.rest.action.cat.RestNodesAction; +import org.opensearch.rest.action.cat.RestIndicesAction; +import org.opensearch.rest.action.cat.RestTemplatesAction; +import org.opensearch.rest.action.cat.RestPendingClusterTasksAction; +import org.opensearch.rest.action.cat.RestSegmentsAction; 
+import org.opensearch.rest.action.cat.RestSnapshotAction; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.action.cat.RestNodesAction; import org.opensearch.test.OpenSearchTestCase; @@ -77,12 +90,96 @@ public void testBothParamsNotValid() { } public void testCatAllocation() { + RestAllocationAction action = new RestAllocationAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.doCatRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testCatIndices() { + RestIndicesAction action = new RestIndicesAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.doCatRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testCatClusterManager() { + RestMasterAction action = new RestMasterAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.doCatRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testCatNodeattrs() { + RestNodeAttrsAction action = new RestNodeAttrsAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.doCatRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testCatNodes() { RestNodesAction action = new RestNodesAction(); Exception e = assertThrows(OpenSearchParseException.class, () -> action.doCatRequest(getRestRequestWithBothParams(), client)); assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); 
assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); } + public void testCatPendingTasks() { + RestPendingClusterTasksAction action = new RestPendingClusterTasksAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.doCatRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testCatPlugins() { + RestPluginsAction action = new RestPluginsAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.doCatRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testCatRepositories() { + RestRepositoriesAction action = new RestRepositoriesAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.doCatRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testCatShards() { + RestShardsAction action = new RestShardsAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.doCatRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testCatSnapshots() { + RestSnapshotAction action = new RestSnapshotAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.doCatRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testCatTemplates() { + RestTemplatesAction action = new RestTemplatesAction(); + Exception e = 
assertThrows(OpenSearchParseException.class, () -> action.doCatRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testCatThreadPool() { + RestThreadPoolAction action = new RestThreadPoolAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.doCatRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testCatSegments() { + RestSegmentsAction action = new RestSegmentsAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.doCatRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + private MasterNodeRequest getMasterNodeRequest() { return new MasterNodeRequest() { @Override From 6c195abc593466b0a29a4a708ca109f98b6dbb83 Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Sat, 2 Apr 2022 14:40:25 -0500 Subject: [PATCH 032/653] [Main] Bump Version 2.0 to 2.1 (#2715) Bumps from version 2.0 to 2.1 Signed-off-by: Nicholas Walter Knize --- .ci/bwcVersions | 2 +- buildSrc/version.properties | 2 +- server/src/main/java/org/opensearch/Version.java | 4 ++-- .../src/main/java/org/opensearch/test/VersionUtils.java | 2 +- .../test/java/org/opensearch/test/VersionUtilsTests.java | 6 ++++-- 5 files changed, 9 insertions(+), 7 deletions(-) diff --git a/.ci/bwcVersions b/.ci/bwcVersions index de840b910ada2..7b317d170d54c 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -39,4 +39,4 @@ BWC_VERSION: - "1.3.0" - "1.3.1" - "1.3.2" - - "1.4.0" + - "2.0.0" diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 41d8aa41ac631..4c09afd961c20 100644 --- a/buildSrc/version.properties +++ 
b/buildSrc/version.properties @@ -1,4 +1,4 @@ -opensearch = 2.0.0 +opensearch = 2.1.0 lucene = 9.1.0 bundled_jdk_vendor = adoptium diff --git a/server/src/main/java/org/opensearch/Version.java b/server/src/main/java/org/opensearch/Version.java index eb6a80d37d83d..cd0bf7b8d8174 100644 --- a/server/src/main/java/org/opensearch/Version.java +++ b/server/src/main/java/org/opensearch/Version.java @@ -81,9 +81,9 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_1_3_0 = new Version(1030099, org.apache.lucene.util.Version.LUCENE_8_10_1); public static final Version V_1_3_1 = new Version(1030199, org.apache.lucene.util.Version.LUCENE_8_10_1); public static final Version V_1_3_2 = new Version(1030299, org.apache.lucene.util.Version.LUCENE_8_10_1); - public static final Version V_1_4_0 = new Version(1040099, org.apache.lucene.util.Version.LUCENE_8_10_1); public static final Version V_2_0_0 = new Version(2000099, org.apache.lucene.util.Version.LUCENE_9_1_0); - public static final Version CURRENT = V_2_0_0; + public static final Version V_2_1_0 = new Version(2010099, org.apache.lucene.util.Version.LUCENE_9_1_0); + public static final Version CURRENT = V_2_1_0; public static Version readVersion(StreamInput in) throws IOException { return fromId(in.readVInt()); diff --git a/test/framework/src/main/java/org/opensearch/test/VersionUtils.java b/test/framework/src/main/java/org/opensearch/test/VersionUtils.java index 1e6807189f947..0f8525285fd08 100644 --- a/test/framework/src/main/java/org/opensearch/test/VersionUtils.java +++ b/test/framework/src/main/java/org/opensearch/test/VersionUtils.java @@ -89,7 +89,7 @@ static Tuple, List> resolveReleasedVersions(Version curre stableVersions = previousMajor; // remove current moveLastToUnreleased(currentMajor, unreleasedVersions); - } else if (current.major != 1 && current.major != 2) { + } else if (current.major != 1) { // on a stable or release branch, ie N.x stableVersions = currentMajor; 
// remove the next maintenance bugfix diff --git a/test/framework/src/test/java/org/opensearch/test/VersionUtilsTests.java b/test/framework/src/test/java/org/opensearch/test/VersionUtilsTests.java index 7d8fb4a318621..de1f650aff20c 100644 --- a/test/framework/src/test/java/org/opensearch/test/VersionUtilsTests.java +++ b/test/framework/src/test/java/org/opensearch/test/VersionUtilsTests.java @@ -284,7 +284,6 @@ public void testResolveReleasedVersionsAtNewMinorBranchIn2x() { Arrays.asList( TestNewMinorBranchIn6x.V_1_6_0, TestNewMinorBranchIn6x.V_1_6_1, - TestNewMinorBranchIn6x.V_1_6_2, TestNewMinorBranchIn6x.V_2_0_0, TestNewMinorBranchIn6x.V_2_0_1, TestNewMinorBranchIn6x.V_2_1_0, @@ -292,7 +291,10 @@ ) ) ); - assertThat(unreleased, equalTo(Arrays.asList(TestNewMinorBranchIn6x.V_2_1_2, TestNewMinorBranchIn6x.V_2_2_0))); + assertThat( + unreleased, + equalTo(Arrays.asList(TestNewMinorBranchIn6x.V_1_6_2, TestNewMinorBranchIn6x.V_2_1_2, TestNewMinorBranchIn6x.V_2_2_0)) + ); } /** From 6a2a33d1872850b04562164c39621698cb99d7b8 Mon Sep 17 00:00:00 2001 From: Tianli Feng Date: Mon, 4 Apr 2022 08:22:22 -0700 Subject: [PATCH 033/653] Make Rest-High-Level tests allow deprecation warning temporarily, during deprecation of request parameter 'master_timeout' (#2702) Temporarily build rest client with setStrictDeprecationMode(false) to allow deprecation warning in HLRC tests while master_timeout parameter is being refactored.
Signed-off-by: Tianli Feng --- .../java/org/opensearch/test/rest/OpenSearchRestTestCase.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java b/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java index 9624a9d3d0554..f976b3619102a 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java @@ -824,7 +824,8 @@ protected String getProtocol() { protected RestClient buildClient(Settings settings, HttpHost[] hosts) throws IOException { RestClientBuilder builder = RestClient.builder(hosts); configureClient(builder, settings); - builder.setStrictDeprecationMode(true); + // TODO: set the method argument to 'true' after PR https://github.com/opensearch-project/OpenSearch/pull/2683 merged. + builder.setStrictDeprecationMode(false); return builder.build(); } From d848c632603a31631ea0a12382c52cce36adacae Mon Sep 17 00:00:00 2001 From: Wenjun Ruan Date: Mon, 4 Apr 2022 23:34:03 +0800 Subject: [PATCH 034/653] Optimize Node, remove duplicate Settings (#2703) Removes duplicate Settings Signed-off-by: ruanwenjun --- .../opensearch/index/get/ShardGetService.java | 27 ++++++++----------- .../main/java/org/opensearch/node/Node.java | 6 +---- 2 files changed, 12 insertions(+), 21 deletions(-) diff --git a/server/src/main/java/org/opensearch/index/get/ShardGetService.java b/server/src/main/java/org/opensearch/index/get/ShardGetService.java index e63d80336bc7a..6aa10bc450b51 100644 --- a/server/src/main/java/org/opensearch/index/get/ShardGetService.java +++ b/server/src/main/java/org/opensearch/index/get/ShardGetService.java @@ -216,25 +216,20 @@ private GetResult innerGet( fetchSourceContext = normalizeFetchSourceContent(fetchSourceContext, gFields); Term uidTerm = new Term(IdFieldMapper.NAME, Uid.encodeId(id)); - Engine.GetResult get = 
indexShard.get( - new Engine.Get(realtime, true, id, uidTerm).version(version) - .versionType(versionType) - .setIfSeqNo(ifSeqNo) - .setIfPrimaryTerm(ifPrimaryTerm) - ); - if (get.exists() == false) { - get.close(); - } - - if (get == null || get.exists() == false) { - return new GetResult(shardId.getIndexName(), id, UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM, -1, false, null, null, null); - } - try { + try ( + Engine.GetResult get = indexShard.get( + new Engine.Get(realtime, true, id, uidTerm).version(version) + .versionType(versionType) + .setIfSeqNo(ifSeqNo) + .setIfPrimaryTerm(ifPrimaryTerm) + ) + ) { + if (get == null || get.exists() == false) { + return new GetResult(shardId.getIndexName(), id, UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM, -1, false, null, null, null); + } // break between having loaded it from translog (so we only have _source), and having a document to load return innerGetLoadFromStoredFields(id, gFields, fetchSourceContext, get, mapperService); - } finally { - get.close(); } } diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 6368e097f6a14..73c2f221bc7cc 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -348,11 +348,7 @@ protected Node( Settings tmpSettings = Settings.builder() .put(initialEnvironment.settings()) .put(Client.CLIENT_TYPE_SETTING_S.getKey(), CLIENT_TYPE) - .build(); - - // Enabling shard indexing backpressure node-attribute - tmpSettings = Settings.builder() - .put(tmpSettings) + // Enabling shard indexing backpressure node-attribute .put(NODE_ATTRIBUTES.getKey() + SHARD_INDEXING_PRESSURE_ENABLED_ATTRIBUTE_KEY, "true") .build(); From f9ca90eb88ad25edd99b9770659bd66ff56969c9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Apr 2022 09:25:16 -0700 Subject: [PATCH 035/653] Bump proto-google-common-protos from 1.16.0 to 
2.8.0 in /plugins/repository-gcs (#2738) * Bump proto-google-common-protos in /plugins/repository-gcs Bumps [proto-google-common-protos](https://github.com/googleapis/java-iam) from 1.16.0 to 2.8.0. - [Release notes](https://github.com/googleapis/java-iam/releases) - [Changelog](https://github.com/googleapis/java-iam/blob/main/CHANGELOG.md) - [Commits](https://github.com/googleapis/java-iam/commits) --- updated-dependencies: - dependency-name: com.google.api.grpc:proto-google-common-protos dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- plugins/repository-gcs/build.gradle | 2 +- .../licenses/proto-google-common-protos-1.16.0.jar.sha1 | 1 - .../licenses/proto-google-common-protos-2.8.0.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-gcs/licenses/proto-google-common-protos-1.16.0.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/proto-google-common-protos-2.8.0.jar.sha1 diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index 2cfbd76394bcb..6e577d058ff67 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -64,7 +64,7 @@ dependencies { api 'com.google.protobuf:protobuf-java-util:3.19.3' api 'com.google.protobuf:protobuf-java:3.19.3' api 'com.google.code.gson:gson:2.9.0' - api 'com.google.api.grpc:proto-google-common-protos:1.16.0' + api 'com.google.api.grpc:proto-google-common-protos:2.8.0' api 'com.google.api.grpc:proto-google-iam-v1:0.12.0' api 'com.google.cloud:google-cloud-core-http:1.93.3' api 'com.google.auth:google-auth-library-credentials:0.20.0' diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-1.16.0.jar.sha1 
b/plugins/repository-gcs/licenses/proto-google-common-protos-1.16.0.jar.sha1 deleted file mode 100644 index 7762b7a3ebdc3..0000000000000 --- a/plugins/repository-gcs/licenses/proto-google-common-protos-1.16.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2c5f022ea3b8e8df6a619c4cd8faf9af86022daa \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-2.8.0.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-common-protos-2.8.0.jar.sha1 new file mode 100644 index 0000000000000..3f14d9e59c9e9 --- /dev/null +++ b/plugins/repository-gcs/licenses/proto-google-common-protos-2.8.0.jar.sha1 @@ -0,0 +1 @@ +8adcbc3c5c3b1b7af1cf1e8a25af26a516d62a4c \ No newline at end of file From baaab58cccbb4aff5d51041b5470b797d3abd793 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Apr 2022 11:18:28 -0700 Subject: [PATCH 036/653] Bump json-smart from 2.4.7 to 2.4.8 in /plugins/repository-hdfs (#2735) * Bump json-smart from 2.4.7 to 2.4.8 in /plugins/repository-hdfs Bumps [json-smart](https://github.com/netplex/json-smart-v2) from 2.4.7 to 2.4.8. - [Release notes](https://github.com/netplex/json-smart-v2/releases) - [Commits](https://github.com/netplex/json-smart-v2/commits/2.4.8) --- updated-dependencies: - dependency-name: net.minidev:json-smart dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- plugins/repository-hdfs/build.gradle | 2 +- plugins/repository-hdfs/licenses/json-smart-2.4.7.jar.sha1 | 1 - plugins/repository-hdfs/licenses/json-smart-2.4.8.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-hdfs/licenses/json-smart-2.4.7.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/json-smart-2.4.8.jar.sha1 diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index d17a4060b9ab6..1787a380b933b 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -80,7 +80,7 @@ dependencies { api 'javax.servlet:servlet-api:2.5' api "org.slf4j:slf4j-api:${versions.slf4j}" api "org.apache.logging.log4j:log4j-slf4j-impl:${versions.log4j}" - api 'net.minidev:json-smart:2.4.7' + api 'net.minidev:json-smart:2.4.8' api 'org.apache.zookeeper:zookeeper:3.7.0' api "io.netty:netty-all:${versions.netty}" implementation 'com.fasterxml.woodstox:woodstox-core:6.2.8' diff --git a/plugins/repository-hdfs/licenses/json-smart-2.4.7.jar.sha1 b/plugins/repository-hdfs/licenses/json-smart-2.4.7.jar.sha1 deleted file mode 100644 index 16f9a4431485a..0000000000000 --- a/plugins/repository-hdfs/licenses/json-smart-2.4.7.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8d7f4c1530c07c54930935f3da85f48b83b3c109 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/json-smart-2.4.8.jar.sha1 b/plugins/repository-hdfs/licenses/json-smart-2.4.8.jar.sha1 new file mode 100644 index 0000000000000..7a3710cf1b364 --- /dev/null +++ b/plugins/repository-hdfs/licenses/json-smart-2.4.8.jar.sha1 @@ -0,0 +1 @@ +7c62f5f72ab05eb54d40e2abf0360a2fe9ea477f \ No newline at end of file From fb168b43ad619207e568811fd7cfb07d635dc138 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Apr 2022 11:19:19 -0700 Subject: [PATCH 037/653] Bump antlr4 from 4.5.3 to 4.9.3 in /modules/lang-painless (#2733) Bumps [antlr4](https://github.com/antlr/antlr4) from 4.5.3 to 4.9.3. - [Release notes](https://github.com/antlr/antlr4/releases) - [Changelog](https://github.com/antlr/antlr4/blob/master/CHANGES.txt) - [Commits](https://github.com/antlr/antlr4/compare/4.5.3...4.9.3) --- updated-dependencies: - dependency-name: org.antlr:antlr4 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- modules/lang-painless/build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/lang-painless/build.gradle b/modules/lang-painless/build.gradle index ffbb3e60eb5ab..069158fb678ef 100644 --- a/modules/lang-painless/build.gradle +++ b/modules/lang-painless/build.gradle @@ -155,7 +155,7 @@ configurations { } dependencies { - regenerate 'org.antlr:antlr4:4.5.3' + regenerate 'org.antlr:antlr4:4.9.3' } String grammarPath = 'src/main/antlr' From 34221c2cbff8364f1003a100e4d0ca31f1a3e3b5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Apr 2022 11:19:36 -0700 Subject: [PATCH 038/653] Bump jboss-jaxrs-api_2.0_spec in /qa/wildfly (#2737) Bumps [jboss-jaxrs-api_2.0_spec](https://github.com/jboss/jboss-jaxrs-api_spec) from 1.0.0.Final to 1.0.1.Final. - [Release notes](https://github.com/jboss/jboss-jaxrs-api_spec/releases) - [Commits](https://github.com/jboss/jboss-jaxrs-api_spec/compare/jboss-jaxrs-api_1.1_spec-1.0.0.Final...jboss-jaxrs-api_1.1_spec-1.0.1.Final) --- updated-dependencies: - dependency-name: org.jboss.spec.javax.ws.rs:jboss-jaxrs-api_2.0_spec dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- qa/wildfly/build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/qa/wildfly/build.gradle b/qa/wildfly/build.gradle index 0e1c566bd2b52..9abaa2a83033f 100644 --- a/qa/wildfly/build.gradle +++ b/qa/wildfly/build.gradle @@ -41,7 +41,7 @@ testFixtures.useFixture() dependencies { providedCompile 'javax.enterprise:cdi-api:1.2' providedCompile 'org.jboss.spec.javax.annotation:jboss-annotations-api_1.2_spec:1.0.2.Final' - providedCompile 'org.jboss.spec.javax.ws.rs:jboss-jaxrs-api_2.0_spec:1.0.0.Final' + providedCompile 'org.jboss.spec.javax.ws.rs:jboss-jaxrs-api_2.0_spec:1.0.1.Final' api('org.jboss.resteasy:resteasy-jackson2-provider:3.0.19.Final') { exclude module: 'jackson-annotations' exclude module: 'jackson-core' From c90a8132881df9d2bd065af682d85fa48dbb9148 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Apr 2022 11:21:15 -0700 Subject: [PATCH 039/653] Bump com.diffplug.spotless from 6.3.0 to 6.4.1 (#2731) Bumps com.diffplug.spotless from 6.3.0 to 6.4.1. --- updated-dependencies: - dependency-name: com.diffplug.spotless dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.gradle b/build.gradle index bfa435cb4812c..487f20c7f6ccd 100644 --- a/build.gradle +++ b/build.gradle @@ -48,7 +48,7 @@ plugins { id 'lifecycle-base' id 'opensearch.docker-support' id 'opensearch.global-build-info' - id "com.diffplug.spotless" version "6.3.0" apply false + id "com.diffplug.spotless" version "6.4.1" apply false id "org.gradle.test-retry" version "1.3.1" apply false } From 719c92a5051a777dc4bfc44127c6ca118d659cb0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Apr 2022 11:21:51 -0700 Subject: [PATCH 040/653] Bump gradle-docker-compose-plugin from 0.14.12 to 0.15.2 in /buildSrc (#2732) Bumps [gradle-docker-compose-plugin](https://github.com/avast/gradle-docker-compose-plugin) from 0.14.12 to 0.15.2. - [Release notes](https://github.com/avast/gradle-docker-compose-plugin/releases) - [Commits](https://github.com/avast/gradle-docker-compose-plugin/compare/0.14.12...0.15.2) --- updated-dependencies: - dependency-name: com.avast.gradle:gradle-docker-compose-plugin dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- buildSrc/build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index cc7742a0d4390..18f8738bbba71 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -113,7 +113,7 @@ dependencies { api "net.java.dev.jna:jna:5.10.0" api 'gradle.plugin.com.github.johnrengelman:shadow:7.1.2' api 'de.thetaphi:forbiddenapis:3.3' - api 'com.avast.gradle:gradle-docker-compose-plugin:0.14.12' + api 'com.avast.gradle:gradle-docker-compose-plugin:0.15.2' api 'org.apache.maven:maven-model:3.6.2' api 'com.networknt:json-schema-validator:1.0.68' api "com.fasterxml.jackson.core:jackson-databind:${props.getProperty('jackson_databind')}" From 2d716ad9e2cd997eaa1ad8273cbf91a0691deb6d Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Mon, 4 Apr 2022 14:01:04 -0500 Subject: [PATCH 041/653] [Version] Don't spoof major for 3.0+ clusters (#2722) Changes version comparison logic to only translate major version when comparing with legacy 7x versions. This is needed beginning in 2.0 so that when running 2.0+ versions in bwc mode for 3.0+ upgrades, node versions no longer have to translate major version or spoof to legacy versions. 
Signed-off-by: Nicholas Walter Knize --- .../src/main/java/org/opensearch/Version.java | 42 ++++++++++--------- .../org/opensearch/LegacyESVersionTests.java | 4 +- .../java/org/opensearch/VersionTests.java | 20 +++++++++ 3 files changed, 45 insertions(+), 21 deletions(-) diff --git a/server/src/main/java/org/opensearch/Version.java b/server/src/main/java/org/opensearch/Version.java index cd0bf7b8d8174..7ccf4449436cd 100644 --- a/server/src/main/java/org/opensearch/Version.java +++ b/server/src/main/java/org/opensearch/Version.java @@ -279,11 +279,16 @@ public boolean onOrBefore(Version version) { return version.id >= id; } - // LegacyESVersion major 7 is equivalent to Version major 1 public int compareMajor(Version other) { - int m = major == 1 ? 7 : major == 2 ? 8 : major; - int om = other.major == 1 ? 7 : other.major == 2 ? 8 : other.major; - return Integer.compare(m, om); + // comparing Legacy 7x for bwc + // todo: remove the following when removing legacy support in 3.0.0 + if (major == 7 || other.major == 7) { + // opensearch v1.x and v2.x need major translation to compare w/ legacy versions + int m = major == 1 ? 7 : major == 2 ? 8 : major; + int om = other.major == 1 ? 7 : other.major == 2 ? 
8 : other.major; + return Integer.compare(m, om); + } + return Integer.compare(major, other.major); } @Override @@ -339,12 +344,9 @@ protected Version computeMinCompatVersion() { } else if (major == 6) { // force the minimum compatibility for version 6 to 5.6 since we don't reference version 5 anymore return LegacyESVersion.fromId(5060099); - } - /* - * TODO - uncomment this logic from OpenSearch version 3 onwards - * - else if (major >= 3) { + } else if (major >= 3 && major < 5) { // all major versions from 3 onwards are compatible with last minor series of the previous major + // todo: remove 5 check when removing LegacyESVersionTests Version bwcVersion = null; for (int i = DeclaredVersionsHolder.DECLARED_VERSIONS.size() - 1; i >= 0; i--) { @@ -358,7 +360,6 @@ else if (major >= 3) { } return bwcVersion == null ? this : bwcVersion; } - */ return Version.min(this, fromId(maskId((int) major * 1000000 + 0 * 10000 + 99))); } @@ -396,6 +397,10 @@ private Version computeMinIndexCompatVersion() { bwcMajor = major - 1; } final int bwcMinor = 0; + if (major == 3) { + return Version.min(this, fromId((bwcMajor * 1000000 + bwcMinor * 10000 + 99) ^ MASK)); + } + // todo remove below when LegacyESVersion is removed in 3.0 return Version.min(this, fromId((bwcMajor * 1000000 + bwcMinor * 10000 + 99))); } @@ -409,16 +414,15 @@ public boolean isCompatible(Version version) { // OpenSearch version 2 is the functional equivalent of predecessor unreleased version "8" // todo refactor this logic after removing deprecated features int a = major; - if (major == 1) { - a = 7; - } else if (major == 2) { - a = 8; - } int b = version.major; - if (version.major == 1) { - b = 7; - } else if (version.major == 2) { - b = 8; + + if (a == 7 || b == 7) { + if (major <= 2) { + a += 6; // for legacy compatibility up to version 2.x (to compare minCompat) + } + if (version.major <= 2) { + b += 6; // for legacy compatibility up to version 2.x (to compare minCompat) + } } assert compatible == false || 
Math.max(a, b) - Math.min(a, b) <= 1; diff --git a/server/src/test/java/org/opensearch/LegacyESVersionTests.java b/server/src/test/java/org/opensearch/LegacyESVersionTests.java index 8fb3636dd8b2c..d59f5e38a4ed7 100644 --- a/server/src/test/java/org/opensearch/LegacyESVersionTests.java +++ b/server/src/test/java/org/opensearch/LegacyESVersionTests.java @@ -63,8 +63,8 @@ public void testVersionComparison() { // compare opensearch version to LegacyESVersion assertThat(Version.V_1_0_0.compareMajor(LegacyESVersion.V_7_0_0), is(0)); - assertThat(Version.V_1_0_0.compareMajor(LegacyESVersion.fromString("6.3.0")), is(1)); - assertThat(LegacyESVersion.fromString("6.3.0").compareMajor(Version.V_1_0_0), is(-1)); + assertThat(Version.V_2_0_0.compareMajor(LegacyESVersion.fromString("7.3.0")), is(1)); + assertThat(LegacyESVersion.fromString("7.3.0").compareMajor(Version.V_2_0_0), is(-1)); } public void testMin() { diff --git a/server/src/test/java/org/opensearch/VersionTests.java b/server/src/test/java/org/opensearch/VersionTests.java index beff71eceab0d..c2566e83dd9b6 100644 --- a/server/src/test/java/org/opensearch/VersionTests.java +++ b/server/src/test/java/org/opensearch/VersionTests.java @@ -245,6 +245,26 @@ public void testOpenSearchMinIndexCompatVersion() { assertEquals(expected.revision, actual.revision); } + /** test first version of opensearch compatibility that does not support legacy versions */ + public void testOpenSearchPreLegacyRemoval() { + Version opensearchVersion = Version.fromString("3.0.0"); + int opensearchMajor = opensearchVersion.major; + List candidates = VersionUtils.allOpenSearchVersions(); + Version expectedMinIndexCompat = VersionUtils.getFirstVersionOfMajor(candidates, opensearchMajor - 1); + Version actualMinIndexCompat = opensearchVersion.minimumIndexCompatibilityVersion(); + + Version expectedMinCompat = VersionUtils.lastFirstReleasedMinorFromMajor(VersionUtils.allOpenSearchVersions(), opensearchMajor - 1); + Version actualMinCompat = 
opensearchVersion.minimumCompatibilityVersion(); + // since some legacy versions still support build (alpha, beta, RC) we check major minor revision only + assertEquals(expectedMinIndexCompat.major, actualMinIndexCompat.major); + assertEquals(expectedMinIndexCompat.minor, actualMinIndexCompat.minor); + assertEquals(expectedMinIndexCompat.revision, actualMinIndexCompat.revision); + + assertEquals(expectedMinCompat.major, actualMinCompat.major); + assertEquals(expectedMinCompat.minor, actualMinCompat.minor); + assertEquals(expectedMinCompat.revision, actualMinCompat.revision); + } + public void testToString() { assertEquals("2.0.0-beta1", Version.fromString("2.0.0-beta1").toString()); assertEquals("5.0.0-alpha1", Version.fromId(5000001).toString()); From 7d23b180f77821266f0acd779a9db637739ee0b9 Mon Sep 17 00:00:00 2001 From: Tianli Feng Date: Mon, 4 Apr 2022 13:47:34 -0700 Subject: [PATCH 042/653] Add request parameter 'cluster_manager_timeout' and deprecate 'master_timeout' - in Cluster APIs (#2658) - Deprecate the request parameter `master_timeout` that used in Cluster APIs which have got the parameter. - Add alternative new request parameter `cluster_manager_timeout`. - Add unit tests. 
Signed-off-by: Tianli Feng --- .../api/cluster.get_settings.json | 10 ++- .../rest-api-spec/api/cluster.health.json | 10 ++- .../api/cluster.pending_tasks.json | 10 ++- .../api/cluster.put_settings.json | 10 ++- .../rest-api-spec/api/cluster.reroute.json | 10 ++- .../rest-api-spec/api/cluster.state.json | 10 ++- .../cluster/RestClusterGetSettingsAction.java | 6 +- .../cluster/RestClusterHealthAction.java | 6 +- .../cluster/RestClusterRerouteAction.java | 5 +- .../admin/cluster/RestClusterStateAction.java | 5 +- .../RestClusterUpdateSettingsAction.java | 6 +- .../RestPendingClusterTasksAction.java | 8 +- .../RenamedTimeoutRequestParameterTests.java | 88 ++++++++++++++++++- .../reroute/ClusterRerouteRequestTests.java | 2 +- .../cluster/RestClusterHealthActionTests.java | 6 +- 15 files changed, 172 insertions(+), 20 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_settings.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_settings.json index 6f91fbbedf5de..c60230dbc43b3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_settings.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_settings.json @@ -22,7 +22,15 @@ }, "master_timeout":{ "type":"time", - "description":"Explicit operation timeout for connection to master node" + "description":"Explicit operation timeout for connection to master node", + "deprecated":{ + "version":"2.0.0", + "description":"To promote inclusive language, use 'cluster_manager_timeout' instead." 
+ } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to cluster-manager node" }, "timeout":{ "type":"time", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.health.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.health.json index 894b141f2f3b3..b3fc958891dfe 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.health.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.health.json @@ -56,7 +56,15 @@ }, "master_timeout":{ "type":"time", - "description":"Explicit operation timeout for connection to master node" + "description":"Explicit operation timeout for connection to master node", + "deprecated":{ + "version":"2.0.0", + "description":"To promote inclusive language, use 'cluster_manager_timeout' instead." + } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to cluster-manager node" }, "timeout":{ "type":"time", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.pending_tasks.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.pending_tasks.json index d940adf9aef5d..22cfbac7ff447 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.pending_tasks.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.pending_tasks.json @@ -22,7 +22,15 @@ }, "master_timeout":{ "type":"time", - "description":"Specify timeout for connection to master" + "description":"Specify timeout for connection to master", + "deprecated":{ + "version":"2.0.0", + "description":"To promote inclusive language, use 'cluster_manager_timeout' instead." 
+ } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Specify timeout for connection to cluster-manager node" } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_settings.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_settings.json index f6b9a0863380e..1e36acc51544d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_settings.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_settings.json @@ -22,7 +22,15 @@ }, "master_timeout":{ "type":"time", - "description":"Explicit operation timeout for connection to master node" + "description":"Explicit operation timeout for connection to master node", + "deprecated":{ + "version":"2.0.0", + "description":"To promote inclusive language, use 'cluster_manager_timeout' instead." + } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to cluster-manager node" }, "timeout":{ "type":"time", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.reroute.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.reroute.json index bcf2704110664..285da40dd0245 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.reroute.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.reroute.json @@ -44,7 +44,15 @@ }, "master_timeout":{ "type":"time", - "description":"Explicit operation timeout for connection to master node" + "description":"Explicit operation timeout for connection to master node", + "deprecated":{ + "version":"2.0.0", + "description":"To promote inclusive language, use 'cluster_manager_timeout' instead." 
+ } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to cluster-manager node" }, "timeout":{ "type":"time", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.state.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.state.json index c17e5b073e361..b43ab901785bd 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.state.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.state.json @@ -71,7 +71,15 @@ }, "master_timeout":{ "type":"time", - "description":"Specify timeout for connection to master" + "description":"Specify timeout for connection to master", + "deprecated":{ + "version":"2.0.0", + "description":"To promote inclusive language, use 'cluster_manager_timeout' instead." + } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Specify timeout for connection to cluster-manager node" }, "flat_settings":{ "type":"boolean", diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java index 003f1bec11d87..1238cd2a818d5 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java @@ -38,6 +38,7 @@ import org.opensearch.client.Requests; import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.ClusterState; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsFilter; @@ -59,6 +60,8 @@ public class RestClusterGetSettingsAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = 
DeprecationLogger.getLogger(RestClusterGetSettingsAction.class); + private final Settings settings; private final ClusterSettings clusterSettings; private final SettingsFilter settingsFilter; @@ -84,7 +87,8 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC ClusterStateRequest clusterStateRequest = Requests.clusterStateRequest().routingTable(false).nodes(false); final boolean renderDefaults = request.paramAsBoolean("include_defaults", false); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); - clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout())); + clusterStateRequest.masterNodeTimeout(request.paramAsTime("cluster_manager_timeout", clusterStateRequest.masterNodeTimeout())); + parseDeprecatedMasterTimeoutParameter(clusterStateRequest, request, deprecationLogger, getName()); return channel -> client.admin().cluster().state(clusterStateRequest, new RestBuilderListener(channel) { @Override public RestResponse buildResponse(ClusterStateResponse response, XContentBuilder builder) throws Exception { diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterHealthAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterHealthAction.java index 8b782e4399e73..877e387a15ec3 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterHealthAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterHealthAction.java @@ -39,6 +39,7 @@ import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.common.Priority; import org.opensearch.common.Strings; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; import org.opensearch.rest.action.RestStatusToXContentListener; @@ -56,6 +57,8 @@ public class RestClusterHealthAction extends 
BaseRestHandler { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestClusterHealthAction.class); + @Override public List routes() { return unmodifiableList(asList(new Route(GET, "/_cluster/health"), new Route(GET, "/_cluster/health/{index}"))); @@ -81,7 +84,8 @@ public static ClusterHealthRequest fromRequest(final RestRequest request) { final ClusterHealthRequest clusterHealthRequest = clusterHealthRequest(Strings.splitStringByCommaToArray(request.param("index"))); clusterHealthRequest.indicesOptions(IndicesOptions.fromRequest(request, clusterHealthRequest.indicesOptions())); clusterHealthRequest.local(request.paramAsBoolean("local", clusterHealthRequest.local())); - clusterHealthRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterHealthRequest.masterNodeTimeout())); + clusterHealthRequest.masterNodeTimeout(request.paramAsTime("cluster_manager_timeout", clusterHealthRequest.masterNodeTimeout())); + parseDeprecatedMasterTimeoutParameter(clusterHealthRequest, request, deprecationLogger, "cluster_health"); clusterHealthRequest.timeout(request.paramAsTime("timeout", clusterHealthRequest.timeout())); String waitForStatus = request.param("wait_for_status"); if (waitForStatus != null) { diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterRerouteAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterRerouteAction.java index f519da109ba09..9c0e09b7629e0 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterRerouteAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterRerouteAction.java @@ -80,7 +80,7 @@ public RestClusterRerouteAction(SettingsFilter settingsFilter) { } // TODO: Remove the DeprecationLogger after removing MASTER_ROLE. - // It's used to log deprecation when request parameter 'metric' contains 'master_node'. 
+ // It's used to log deprecation when request parameter 'metric' contains 'master_node', or request parameter 'master_timeout' is used. private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestClusterRerouteAction.class); private static final String DEPRECATED_MESSAGE_MASTER_NODE = "Deprecated value [master_node] used for parameter [metric]. To promote inclusive language, please use [cluster_manager_node] instead. It will be unsupported in a future major version."; @@ -143,7 +143,8 @@ public static ClusterRerouteRequest createRequest(RestRequest request) throws IO clusterRerouteRequest.explain(request.paramAsBoolean("explain", clusterRerouteRequest.explain())); clusterRerouteRequest.timeout(request.paramAsTime("timeout", clusterRerouteRequest.timeout())); clusterRerouteRequest.setRetryFailed(request.paramAsBoolean("retry_failed", clusterRerouteRequest.isRetryFailed())); - clusterRerouteRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterRerouteRequest.masterNodeTimeout())); + clusterRerouteRequest.masterNodeTimeout(request.paramAsTime("cluster_manager_timeout", clusterRerouteRequest.masterNodeTimeout())); + parseDeprecatedMasterTimeoutParameter(clusterRerouteRequest, request, deprecationLogger, "cluster_reroute"); request.applyContentParser(parser -> PARSER.parse(parser, clusterRerouteRequest, null)); return clusterRerouteRequest; } diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterStateAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterStateAction.java index 32aa055c18300..7f18a19b5cd54 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterStateAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterStateAction.java @@ -73,7 +73,7 @@ public RestClusterStateAction(SettingsFilter settingsFilter) { } // TODO: Remove the DeprecationLogger after removing MASTER_ROLE. 
- // It's used to log deprecation when request parameter 'metric' contains 'master_node'. + // It's used to log deprecation when request parameter 'metric' contains 'master_node', or request parameter 'master_timeout' is used. private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestClusterStateAction.class); private static final String DEPRECATED_MESSAGE_MASTER_NODE = "Deprecated value [master_node] used for parameter [metric]. To promote inclusive language, please use [cluster_manager_node] instead. It will be unsupported in a future major version."; @@ -104,7 +104,8 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC final ClusterStateRequest clusterStateRequest = Requests.clusterStateRequest(); clusterStateRequest.indicesOptions(IndicesOptions.fromRequest(request, clusterStateRequest.indicesOptions())); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); - clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout())); + clusterStateRequest.masterNodeTimeout(request.paramAsTime("cluster_manager_timeout", clusterStateRequest.masterNodeTimeout())); + parseDeprecatedMasterTimeoutParameter(clusterStateRequest, request, deprecationLogger, getName()); if (request.hasParam("wait_for_metadata_version")) { clusterStateRequest.waitForMetadataVersion(request.paramAsLong("wait_for_metadata_version", 0)); } diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterUpdateSettingsAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterUpdateSettingsAction.java index bbe1bba70926f..c05cdc752b5f7 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterUpdateSettingsAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterUpdateSettingsAction.java @@ -35,6 +35,7 @@ import 
org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.opensearch.client.Requests; import org.opensearch.client.node.NodeClient; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentParser; import org.opensearch.rest.BaseRestHandler; @@ -51,6 +52,8 @@ public class RestClusterUpdateSettingsAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestClusterUpdateSettingsAction.class); + private static final String PERSISTENT = "persistent"; private static final String TRANSIENT = "transient"; @@ -69,8 +72,9 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC final ClusterUpdateSettingsRequest clusterUpdateSettingsRequest = Requests.clusterUpdateSettingsRequest(); clusterUpdateSettingsRequest.timeout(request.paramAsTime("timeout", clusterUpdateSettingsRequest.timeout())); clusterUpdateSettingsRequest.masterNodeTimeout( - request.paramAsTime("master_timeout", clusterUpdateSettingsRequest.masterNodeTimeout()) + request.paramAsTime("cluster_manager_timeout", clusterUpdateSettingsRequest.masterNodeTimeout()) ); + parseDeprecatedMasterTimeoutParameter(clusterUpdateSettingsRequest, request, deprecationLogger, getName()); Map source; try (XContentParser parser = request.contentParser()) { source = parser.map(); diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestPendingClusterTasksAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestPendingClusterTasksAction.java index de80054a9afb2..155adc8cc7e19 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestPendingClusterTasksAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestPendingClusterTasksAction.java @@ -34,6 +34,7 @@ import org.opensearch.action.admin.cluster.tasks.PendingClusterTasksRequest; 
import org.opensearch.client.node.NodeClient; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; import org.opensearch.rest.action.RestToXContentListener; @@ -46,6 +47,8 @@ public class RestPendingClusterTasksAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestPendingClusterTasksAction.class); + @Override public List routes() { return singletonList(new Route(GET, "/_cluster/pending_tasks")); @@ -59,7 +62,10 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { PendingClusterTasksRequest pendingClusterTasksRequest = new PendingClusterTasksRequest(); - pendingClusterTasksRequest.masterNodeTimeout(request.paramAsTime("master_timeout", pendingClusterTasksRequest.masterNodeTimeout())); + pendingClusterTasksRequest.masterNodeTimeout( + request.paramAsTime("cluster_manager_timeout", pendingClusterTasksRequest.masterNodeTimeout()) + ); + parseDeprecatedMasterTimeoutParameter(pendingClusterTasksRequest, request, deprecationLogger, getName()); pendingClusterTasksRequest.local(request.paramAsBoolean("local", pendingClusterTasksRequest.local())); return channel -> client.admin().cluster().pendingClusterTasks(pendingClusterTasksRequest, new RestToXContentListener<>(channel)); } diff --git a/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java b/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java index 66b6697c70966..8bc9afc152382 100644 --- a/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java +++ b/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java @@ -12,8 +12,18 @@ import org.opensearch.OpenSearchParseException; import org.opensearch.action.support.master.MasterNodeRequest; import 
org.opensearch.client.node.NodeClient; +import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Settings; +import org.opensearch.common.settings.SettingsFilter; +import org.opensearch.common.xcontent.NamedXContentRegistry; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.action.admin.cluster.RestClusterGetSettingsAction; +import org.opensearch.rest.action.admin.cluster.RestClusterHealthAction; +import org.opensearch.rest.action.admin.cluster.RestClusterRerouteAction; +import org.opensearch.rest.action.admin.cluster.RestClusterStateAction; +import org.opensearch.rest.action.admin.cluster.RestClusterUpdateSettingsAction; import org.opensearch.rest.action.cat.RestAllocationAction; import org.opensearch.rest.action.cat.RestRepositoriesAction; import org.opensearch.rest.action.cat.RestThreadPoolAction; @@ -27,12 +37,13 @@ import org.opensearch.rest.action.cat.RestPendingClusterTasksAction; import org.opensearch.rest.action.cat.RestSegmentsAction; import org.opensearch.rest.action.cat.RestSnapshotAction; -import org.opensearch.rest.BaseRestHandler; -import org.opensearch.rest.action.cat.RestNodesAction; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.rest.FakeRestRequest; import org.opensearch.threadpool.TestThreadPool; +import java.io.IOException; +import java.util.Collections; + import static org.hamcrest.Matchers.containsString; /** @@ -180,6 +191,68 @@ public void testCatSegments() { assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); } + public void testClusterHealth() { + Exception e = assertThrows( + OpenSearchParseException.class, + () -> RestClusterHealthAction.fromRequest(getRestRequestWithBodyWithBothParams()) + ); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void 
testClusterReroute() throws IOException { + final SettingsFilter filter = new SettingsFilter(Collections.singleton("foo.filtered")); + RestClusterRerouteAction action = new RestClusterRerouteAction(filter); + Exception e = assertThrows( + OpenSearchParseException.class, + () -> action.prepareRequest(getRestRequestWithBodyWithBothParams(), client) + ); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testClusterState() throws IOException { + final SettingsFilter filter = new SettingsFilter(Collections.singleton("foo.filtered")); + RestClusterStateAction action = new RestClusterStateAction(filter); + Exception e = assertThrows( + OpenSearchParseException.class, + () -> action.prepareRequest(getRestRequestWithBodyWithBothParams(), client) + ); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testClusterGetSettings() throws IOException { + final SettingsFilter filter = new SettingsFilter(Collections.singleton("foo.filtered")); + RestClusterGetSettingsAction action = new RestClusterGetSettingsAction(null, null, filter); + Exception e = assertThrows( + OpenSearchParseException.class, + () -> action.prepareRequest(getRestRequestWithBodyWithBothParams(), client) + ); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testClusterUpdateSettings() throws IOException { + RestClusterUpdateSettingsAction action = new RestClusterUpdateSettingsAction(); + Exception e = assertThrows( + OpenSearchParseException.class, + () -> action.prepareRequest(getRestRequestWithBodyWithBothParams(), client) + ); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testClusterPendingTasks() 
{ + RestPendingClusterTasksAction action = new RestPendingClusterTasksAction(); + Exception e = assertThrows( + OpenSearchParseException.class, + () -> action.prepareRequest(getRestRequestWithBodyWithBothParams(), client) + ); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + private MasterNodeRequest getMasterNodeRequest() { return new MasterNodeRequest() { @Override @@ -207,4 +280,15 @@ private FakeRestRequest getRestRequestWithNewParam() { request.params().put("cluster_manager_timeout", "2m"); return request; } + + private FakeRestRequest getRestRequestWithBodyWithBothParams() { + FakeRestRequest request = getFakeRestRequestWithBody(); + request.params().put("cluster_manager_timeout", "2m"); + request.params().put("master_timeout", "3s"); + return request; + } + + private FakeRestRequest getFakeRestRequestWithBody() { + return new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withContent(new BytesArray("{}"), XContentType.JSON).build(); + } } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java index 3b7f2ff7f7ae2..6f62883ff436c 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java @@ -232,7 +232,7 @@ private RestRequest toRestRequest(ClusterRerouteRequest original) throws IOExcep params.put("retry_failed", Boolean.toString(original.isRetryFailed())); } if (false == original.masterNodeTimeout().equals(MasterNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT) || randomBoolean()) { - params.put("master_timeout", original.masterNodeTimeout().toString()); + params.put("cluster_manager_timeout", original.masterNodeTimeout().toString()); } if (original.getCommands() != null) 
{ hasBody = true; diff --git a/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestClusterHealthActionTests.java b/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestClusterHealthActionTests.java index 4f065653b44a6..8334a1e88190a 100644 --- a/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestClusterHealthActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestClusterHealthActionTests.java @@ -52,7 +52,7 @@ public void testFromRequest() { Map params = new HashMap<>(); String index = "index"; boolean local = randomBoolean(); - String masterTimeout = randomTimeValue(); + String clusterManagerTimeout = randomTimeValue(); String timeout = randomTimeValue(); ClusterHealthStatus waitForStatus = randomFrom(ClusterHealthStatus.values()); boolean waitForNoRelocatingShards = randomBoolean(); @@ -63,7 +63,7 @@ public void testFromRequest() { params.put("index", index); params.put("local", String.valueOf(local)); - params.put("master_timeout", masterTimeout); + params.put("cluster_manager_timeout", clusterManagerTimeout); params.put("timeout", timeout); params.put("wait_for_status", waitForStatus.name()); if (waitForNoRelocatingShards || randomBoolean()) { @@ -81,7 +81,7 @@ public void testFromRequest() { assertThat(clusterHealthRequest.indices().length, equalTo(1)); assertThat(clusterHealthRequest.indices()[0], equalTo(index)); assertThat(clusterHealthRequest.local(), equalTo(local)); - assertThat(clusterHealthRequest.masterNodeTimeout(), equalTo(TimeValue.parseTimeValue(masterTimeout, "test"))); + assertThat(clusterHealthRequest.masterNodeTimeout(), equalTo(TimeValue.parseTimeValue(clusterManagerTimeout, "test"))); assertThat(clusterHealthRequest.timeout(), equalTo(TimeValue.parseTimeValue(timeout, "test"))); assertThat(clusterHealthRequest.waitForStatus(), equalTo(waitForStatus)); assertThat(clusterHealthRequest.waitForNoRelocatingShards(), equalTo(waitForNoRelocatingShards)); From 
cb238aae616d6a0fd8f82e128a1f94c8e4e8b1f7 Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Mon, 4 Apr 2022 17:40:31 -0500 Subject: [PATCH 043/653] Bump version to 3.0.0 (#2556) Bump version to 3.0.0 Signed-off-by: Nicholas Walter Knize --- .ci/bwcVersions | 1 + .../java/org/opensearch/gradle/Version.java | 2 +- .../DistributionDownloadPluginTests.java | 10 +- .../org/opensearch/gradle/VersionTests.java | 2 +- buildSrc/version.properties | 2 +- .../client/core/MainResponseTests.java | 3 +- ...ncatenateGraphTokenFilterFactoryTests.java | 59 ---- .../common/EdgeNGramTokenizerTests.java | 31 +- .../common/SynonymsAnalysisTests.java | 16 +- ...DelimiterGraphTokenFilterFactoryTests.java | 33 -- .../AnalysisPhoneticFactoryTests.java | 6 +- .../src/main/java/org/opensearch/Version.java | 15 +- .../metadata/MetadataCreateIndexService.java | 23 +- .../opensearch/transport/InboundDecoder.java | 6 +- .../transport/TransportHandshaker.java | 7 +- .../org/opensearch/LegacyESVersionTests.java | 294 ------------------ .../java/org/opensearch/VersionTests.java | 17 +- .../health/ClusterHealthRequestTests.java | 82 ----- .../health/ClusterHealthResponsesTests.java | 65 ---- .../ingest/SimulateProcessorResultTests.java | 17 - .../action/main/MainResponseTests.java | 2 +- .../action/support/IndicesOptionsTests.java | 12 +- .../coordination/JoinTaskExecutorTests.java | 98 +----- .../metadata/AutoExpandReplicasTests.java | 13 +- .../metadata/DataStreamTemplateTests.java | 22 -- .../MetadataCreateIndexServiceTests.java | 9 - .../common/lucene/uid/VersionsTests.java | 4 +- .../org/opensearch/env/NodeMetadataTests.java | 22 -- .../opensearch/index/IndexSettingsTests.java | 8 +- .../index/analysis/PreBuiltAnalyzerTests.java | 8 +- .../index/mapper/RootObjectMapperTests.java | 32 -- .../indices/analysis/AnalysisModuleTests.java | 29 -- .../opensearch/ingest/IngestStatsTests.java | 19 -- .../PersistentTasksCustomMetadataTests.java | 4 +- .../action/document/RestIndexActionTests.java | 9 - 
.../aggregations/bucket/GeoHashGridTests.java | 35 --- .../aggregations/bucket/GeoTileGridTests.java | 35 --- .../GeoTileGridValuesSourceBuilderTests.java | 39 --- .../metrics/InternalScriptedMetricTests.java | 45 --- .../transport/InboundDecoderTests.java | 86 ++--- .../org/opensearch/test/VersionUtils.java | 12 +- .../AbstractSimpleTransportTestCase.java | 37 +-- .../opensearch/test/VersionUtilsTests.java | 19 +- .../rest/yaml/section/SkipSectionTests.java | 26 +- 44 files changed, 135 insertions(+), 1181 deletions(-) delete mode 100644 server/src/test/java/org/opensearch/LegacyESVersionTests.java diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 7b317d170d54c..8503dd364c8d8 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -40,3 +40,4 @@ BWC_VERSION: - "1.3.1" - "1.3.2" - "2.0.0" + - "2.1.0" diff --git a/buildSrc/src/main/java/org/opensearch/gradle/Version.java b/buildSrc/src/main/java/org/opensearch/gradle/Version.java index 3012488381729..4c184f908e5d2 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/Version.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/Version.java @@ -77,7 +77,7 @@ public Version(int major, int minor, int revision) { // currently snapshot is not taken into account int id = major * 10000000 + minor * 100000 + revision * 1000; // identify if new OpenSearch version 1 - this.id = major == 1 || major == 2 ? id ^ MASK : id; + this.id = major == 1 || major == 2 || major == 3 ? 
id ^ MASK : id; } private static int parseSuffixNumber(String substring) { diff --git a/buildSrc/src/test/java/org/opensearch/gradle/DistributionDownloadPluginTests.java b/buildSrc/src/test/java/org/opensearch/gradle/DistributionDownloadPluginTests.java index 446c94acc7ad4..d7798ef5040bb 100644 --- a/buildSrc/src/test/java/org/opensearch/gradle/DistributionDownloadPluginTests.java +++ b/buildSrc/src/test/java/org/opensearch/gradle/DistributionDownloadPluginTests.java @@ -53,11 +53,11 @@ public class DistributionDownloadPluginTests extends GradleUnitTestCase { private static Project packagesProject; private static Project bwcProject; - private static final Version BWC_MAJOR_VERSION = Version.fromString("5.0.0"); - private static final Version BWC_MINOR_VERSION = Version.fromString("4.1.0"); - private static final Version BWC_STAGED_VERSION = Version.fromString("4.0.0"); - private static final Version BWC_BUGFIX_VERSION = Version.fromString("4.0.1"); - private static final Version BWC_MAINTENANCE_VERSION = Version.fromString("3.90.1"); + private static final Version BWC_MAJOR_VERSION = Version.fromString("6.0.0"); + private static final Version BWC_MINOR_VERSION = Version.fromString("5.1.0"); + private static final Version BWC_STAGED_VERSION = Version.fromString("5.0.0"); + private static final Version BWC_BUGFIX_VERSION = Version.fromString("5.0.1"); + private static final Version BWC_MAINTENANCE_VERSION = Version.fromString("4.90.1"); private static final BwcVersions BWC_MINOR = new BwcVersions( new TreeSet<>(Arrays.asList(BWC_BUGFIX_VERSION, BWC_MINOR_VERSION, BWC_MAJOR_VERSION)), BWC_MAJOR_VERSION diff --git a/buildSrc/src/test/java/org/opensearch/gradle/VersionTests.java b/buildSrc/src/test/java/org/opensearch/gradle/VersionTests.java index a9f32886f7927..b6abe5662e474 100644 --- a/buildSrc/src/test/java/org/opensearch/gradle/VersionTests.java +++ b/buildSrc/src/test/java/org/opensearch/gradle/VersionTests.java @@ -65,7 +65,7 @@ public void 
testRelaxedVersionParsing() { public void testCompareWithStringVersions() { // 1.10.2 is now rebased to OpenSearch version; so this needs to report - assertTrue("OpenSearch 1.10.20 is not interpreted as after Legacy 3.0.0", Version.fromString("1.10.20").after("3.0.0")); + assertTrue("OpenSearch 1.10.20 is not interpreted as after Legacy 6.0.0", Version.fromString("1.10.20").after("6.0.0")); assertTrue( "7.0.0-alpha1 should be equal to 7.0.0-alpha1", Version.fromString("7.0.0-alpha1").equals(Version.fromString("7.0.0-alpha1")) diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 4c09afd961c20..84ee06cafba2d 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,4 +1,4 @@ -opensearch = 2.1.0 +opensearch = 3.0.0 lucene = 9.1.0 bundled_jdk_vendor = adoptium diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/core/MainResponseTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/core/MainResponseTests.java index cabb125a739b7..cd759aa62eaf1 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/core/MainResponseTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/core/MainResponseTests.java @@ -33,7 +33,6 @@ package org.opensearch.client.core; import org.opensearch.Build; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.client.AbstractResponseTestCase; import org.opensearch.cluster.ClusterName; @@ -53,7 +52,7 @@ protected org.opensearch.action.main.MainResponse createServerTestInstance(XCont ClusterName clusterName = new ClusterName(randomAlphaOfLength(10)); String nodeName = randomAlphaOfLength(10); final String date = new Date(randomNonNegativeLong()).toString(); - Version version = VersionUtils.randomVersionBetween(random(), LegacyESVersion.V_7_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, Version.CURRENT); Build build = new Build( 
Build.Type.UNKNOWN, randomAlphaOfLength(8), diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/ConcatenateGraphTokenFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/ConcatenateGraphTokenFilterFactoryTests.java index 509010e209088..eaf571e7469d6 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/ConcatenateGraphTokenFilterFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/ConcatenateGraphTokenFilterFactoryTests.java @@ -13,10 +13,7 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.core.WhitespaceTokenizer; -import org.apache.lucene.analysis.miscellaneous.ConcatenateGraphFilter; import org.apache.lucene.util.automaton.TooComplexToDeterminizeException; -import org.opensearch.LegacyESVersion; -import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.env.Environment; import org.opensearch.index.analysis.AnalysisTestsHelper; @@ -24,7 +21,6 @@ import org.opensearch.index.analysis.TokenFilterFactory; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.OpenSearchTokenStreamTestCase; -import org.opensearch.test.VersionUtils; import java.io.IOException; import java.io.StringReader; @@ -62,61 +58,6 @@ public void testTokenizerCustomizedSeparator() throws IOException { assertTokenStreamContents(tokenFilter.create(tokenizer), new String[] { "PowerShot+Is+AweSome" }); } - public void testOldLuceneVersionSeparator() throws IOException { - OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings( - Settings.builder() - .put( - IndexMetadata.SETTING_VERSION_CREATED, - VersionUtils.randomVersionBetween(random(), LegacyESVersion.V_7_0_0, LegacyESVersion.V_7_5_2) - ) - .put(Environment.PATH_HOME_SETTING.getKey(), 
createTempDir().toString()) - .put("index.analysis.filter.my_concatenate_graph.type", "concatenate_graph") - .put("index.analysis.filter.my_concatenate_graph.token_separator", "+") // this will be ignored - .build(), - new CommonAnalysisPlugin() - ); - - TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_concatenate_graph"); - String source = "PowerShot Is AweSome"; - Tokenizer tokenizer = new WhitespaceTokenizer(); - tokenizer.setReader(new StringReader(source)); - - // earlier Lucene version will only use Lucene's default separator - assertTokenStreamContents( - tokenFilter.create(tokenizer), - new String[] { - "PowerShot" - + ConcatenateGraphFilter.DEFAULT_TOKEN_SEPARATOR - + "Is" - + ConcatenateGraphFilter.DEFAULT_TOKEN_SEPARATOR - + "AweSome" } - ); - } - - public void testOldLuceneVersionNoSeparator() throws IOException { - OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings( - Settings.builder() - .put( - IndexMetadata.SETTING_VERSION_CREATED, - VersionUtils.randomVersionBetween(random(), LegacyESVersion.V_7_0_0, LegacyESVersion.V_7_5_2) - ) - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - .put("index.analysis.filter.my_concatenate_graph.type", "concatenate_graph") - .put("index.analysis.filter.my_concatenate_graph.token_separator", "+") // this will be ignored - .put("index.analysis.filter.my_concatenate_graph.preserve_separator", "false") - .build(), - new CommonAnalysisPlugin() - ); - - TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_concatenate_graph"); - String source = "PowerShot Is AweSome"; - Tokenizer tokenizer = new WhitespaceTokenizer(); - tokenizer.setReader(new StringReader(source)); - - // earlier Lucene version will not add separator if preserve_separator is false - assertTokenStreamContents(tokenFilter.create(tokenizer), new String[] { "PowerShotIsAweSome" }); - } - public void testTokenizerEmptySeparator() throws IOException { 
OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings( Settings.builder() diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/EdgeNGramTokenizerTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/EdgeNGramTokenizerTests.java index 07ac0d69428af..e77f895d05661 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/EdgeNGramTokenizerTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/EdgeNGramTokenizerTests.java @@ -33,7 +33,6 @@ package org.opensearch.analysis.common; import org.apache.lucene.analysis.Tokenizer; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; @@ -68,34 +67,6 @@ private IndexAnalyzers buildAnalyzers(Version version, String tokenizer) throws public void testPreConfiguredTokenizer() throws IOException { - // Before 7.3 we return ngrams of length 1 only - { - Version version = VersionUtils.randomVersionBetween( - random(), - LegacyESVersion.fromString("7.0.0"), - VersionUtils.getPreviousVersion(LegacyESVersion.fromString("7.3.0")) - ); - try (IndexAnalyzers indexAnalyzers = buildAnalyzers(version, "edge_ngram")) { - NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer"); - assertNotNull(analyzer); - assertAnalyzesTo(analyzer, "test", new String[] { "t" }); - } - } - - // Check deprecated name as well - { - Version version = VersionUtils.randomVersionBetween( - random(), - LegacyESVersion.fromString("7.0.0"), - VersionUtils.getPreviousVersion(LegacyESVersion.fromString("7.3.0")) - ); - try (IndexAnalyzers indexAnalyzers = buildAnalyzers(version, "edgeNGram")) { - NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer"); - assertNotNull(analyzer); - assertAnalyzesTo(analyzer, "test", new String[] { "t" }); - } - } - // Afterwards, we return ngrams of length 1 and 2, to 
match the default factory settings { try (IndexAnalyzers indexAnalyzers = buildAnalyzers(Version.CURRENT, "edge_ngram")) { @@ -109,7 +80,7 @@ public void testPreConfiguredTokenizer() throws IOException { { try ( IndexAnalyzers indexAnalyzers = buildAnalyzers( - VersionUtils.randomVersionBetween(random(), LegacyESVersion.fromString("7.3.0"), Version.CURRENT), + VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, Version.CURRENT), "edgeNGram" ) ) { diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/SynonymsAnalysisTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/SynonymsAnalysisTests.java index 8094e24b9adc8..99e1c90808f41 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/SynonymsAnalysisTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/SynonymsAnalysisTests.java @@ -37,7 +37,6 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.core.KeywordTokenizer; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; @@ -231,10 +230,7 @@ public void testChainedSynonymFilters() throws IOException { public void testShingleFilters() { Settings settings = Settings.builder() - .put( - IndexMetadata.SETTING_VERSION_CREATED, - VersionUtils.randomVersionBetween(random(), LegacyESVersion.V_7_0_0, Version.CURRENT) - ) + .put(IndexMetadata.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, Version.CURRENT)) .put("path.home", createTempDir().toString()) .put("index.analysis.filter.synonyms.type", "synonym") .putList("index.analysis.filter.synonyms.synonyms", "programmer, developer") @@ -293,10 +289,7 @@ public void testPreconfiguredTokenFilters() throws IOException { ); Settings settings = 
Settings.builder() - .put( - IndexMetadata.SETTING_VERSION_CREATED, - VersionUtils.randomVersionBetween(random(), LegacyESVersion.V_7_0_0, Version.CURRENT) - ) + .put(IndexMetadata.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, Version.CURRENT)) .put("path.home", createTempDir().toString()) .build(); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); @@ -320,10 +313,7 @@ public void testPreconfiguredTokenFilters() throws IOException { public void testDisallowedTokenFilters() throws IOException { Settings settings = Settings.builder() - .put( - IndexMetadata.SETTING_VERSION_CREATED, - VersionUtils.randomVersionBetween(random(), LegacyESVersion.V_7_0_0, Version.CURRENT) - ) + .put(IndexMetadata.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, Version.CURRENT)) .put("path.home", createTempDir().toString()) .putList("common_words", "a", "b") .put("output_unigrams", "true") diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/WordDelimiterGraphTokenFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/WordDelimiterGraphTokenFilterFactoryTests.java index 6129971a69e18..102182f381128 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/WordDelimiterGraphTokenFilterFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/WordDelimiterGraphTokenFilterFactoryTests.java @@ -33,7 +33,6 @@ import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.core.WhitespaceTokenizer; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; @@ -47,7 +46,6 @@ import org.opensearch.indices.analysis.AnalysisModule; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.IndexSettingsModule; -import 
org.opensearch.test.VersionUtils; import java.io.IOException; import java.io.StringReader; @@ -202,38 +200,7 @@ public void testIgnoreKeywords() throws IOException { } public void testPreconfiguredFilter() throws IOException { - // Before 7.3 we don't adjust offsets - { - Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build(); - Settings indexSettings = Settings.builder() - .put( - IndexMetadata.SETTING_VERSION_CREATED, - VersionUtils.randomVersionBetween( - random(), - LegacyESVersion.V_7_0_0, - VersionUtils.getPreviousVersion(LegacyESVersion.V_7_3_0) - ) - ) - .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard") - .putList("index.analysis.analyzer.my_analyzer.filter", "word_delimiter_graph") - .build(); - IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings); - - try ( - IndexAnalyzers indexAnalyzers = new AnalysisModule( - TestEnvironment.newEnvironment(settings), - Collections.singletonList(new CommonAnalysisPlugin()) - ).getAnalysisRegistry().build(idxSettings) - ) { - - NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer"); - assertNotNull(analyzer); - assertAnalyzesTo(analyzer, "h100", new String[] { "h", "100" }, new int[] { 0, 0 }, new int[] { 4, 4 }); - - } - } - // Afger 7.3 we do adjust offsets { Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build(); Settings indexSettings = Settings.builder() diff --git a/plugins/analysis-phonetic/src/test/java/org/opensearch/index/analysis/AnalysisPhoneticFactoryTests.java b/plugins/analysis-phonetic/src/test/java/org/opensearch/index/analysis/AnalysisPhoneticFactoryTests.java index 0ef8d22f37335..19bc27f6e616d 100644 --- a/plugins/analysis-phonetic/src/test/java/org/opensearch/index/analysis/AnalysisPhoneticFactoryTests.java +++ b/plugins/analysis-phonetic/src/test/java/org/opensearch/index/analysis/AnalysisPhoneticFactoryTests.java @@ 
-32,7 +32,6 @@ package org.opensearch.index.analysis; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; @@ -65,10 +64,7 @@ public void testDisallowedWithSynonyms() throws IOException { AnalysisPhoneticPlugin plugin = new AnalysisPhoneticPlugin(); Settings settings = Settings.builder() - .put( - IndexMetadata.SETTING_VERSION_CREATED, - VersionUtils.randomVersionBetween(random(), LegacyESVersion.V_7_0_0, Version.CURRENT) - ) + .put(IndexMetadata.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, Version.CURRENT)) .put("path.home", createTempDir().toString()) .build(); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); diff --git a/server/src/main/java/org/opensearch/Version.java b/server/src/main/java/org/opensearch/Version.java index 7ccf4449436cd..e68305df20869 100644 --- a/server/src/main/java/org/opensearch/Version.java +++ b/server/src/main/java/org/opensearch/Version.java @@ -83,7 +83,8 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_1_3_2 = new Version(1030299, org.apache.lucene.util.Version.LUCENE_8_10_1); public static final Version V_2_0_0 = new Version(2000099, org.apache.lucene.util.Version.LUCENE_9_1_0); public static final Version V_2_1_0 = new Version(2010099, org.apache.lucene.util.Version.LUCENE_9_1_0); - public static final Version CURRENT = V_2_1_0; + public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_1_0); + public static final Version CURRENT = V_3_0_0; public static Version readVersion(StreamInput in) throws IOException { return fromId(in.readVInt()); @@ -261,6 +262,8 @@ private static Version fromStringSlow(String version) { this.revision = (byte) ((id / 100) % 100); this.build = (byte) (id % 100); this.luceneVersion = Objects.requireNonNull(luceneVersion); + 
this.minCompatVersion = null; + this.minIndexCompatVersion = null; } public boolean after(Version version) { @@ -282,7 +285,7 @@ public boolean onOrBefore(Version version) { public int compareMajor(Version other) { // comparing Legacy 7x for bwc // todo: remove the following when removing legacy support in 3.0.0 - if (major == 7 || other.major == 7) { + if (major == 7 || other.major == 7 || major == 6 || other.major == 6) { // opensearch v1.x and v2.x need major translation to compare w/ legacy versions int m = major == 1 ? 7 : major == 2 ? 8 : major; int om = other.major == 1 ? 7 : other.major == 2 ? 8 : other.major; @@ -313,11 +316,11 @@ static class DeclaredVersionsHolder { // lazy initialized because we don't yet have the declared versions ready when instantiating the cached Version // instances - private Version minCompatVersion; + protected Version minCompatVersion = null; // lazy initialized because we don't yet have the declared versions ready when instantiating the cached Version // instances - private Version minIndexCompatVersion; + protected Version minIndexCompatVersion = null; /** * Returns the minimum compatible version based on the current @@ -411,12 +414,12 @@ public boolean isCompatible(Version version) { boolean compatible = onOrAfter(version.minimumCompatibilityVersion()) && version.onOrAfter(minimumCompatibilityVersion()); // OpenSearch version 1 is the functional equivalent of predecessor version 7 - // OpenSearch version 2 is the functional equivalent of predecessor unreleased version "8" + // OpenSearch version 2 is the functional equivalent of predecessor version 8 // todo refactor this logic after removing deprecated features int a = major; int b = version.major; - if (a == 7 || b == 7) { + if (a == 7 || b == 7 || a == 6 || b == 6) { if (major <= 2) { a += 6; // for legacy compatibility up to version 2.x (to compare minCompat) } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java 
b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index cb76b7217624f..244ad4e6eda76 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -851,13 +851,7 @@ static Settings aggregateIndexSettings( indexSettingsBuilder.put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), createdVersion); } if (INDEX_NUMBER_OF_SHARDS_SETTING.exists(indexSettingsBuilder) == false) { - final int numberOfShards; - if (INDEX_NUMBER_OF_SHARDS_SETTING.exists(settings)) { - numberOfShards = INDEX_NUMBER_OF_SHARDS_SETTING.get(settings); - } else { - numberOfShards = getNumberOfShards(indexSettingsBuilder); - } - indexSettingsBuilder.put(SETTING_NUMBER_OF_SHARDS, numberOfShards); + indexSettingsBuilder.put(SETTING_NUMBER_OF_SHARDS, INDEX_NUMBER_OF_SHARDS_SETTING.get(settings)); } if (INDEX_NUMBER_OF_REPLICAS_SETTING.exists(indexSettingsBuilder) == false) { indexSettingsBuilder.put(SETTING_NUMBER_OF_REPLICAS, INDEX_NUMBER_OF_REPLICAS_SETTING.get(settings)); @@ -915,21 +909,6 @@ public static void validateStoreTypeSettings(Settings settings) { } } - static int getNumberOfShards(final Settings.Builder indexSettingsBuilder) { - // TODO: this logic can be removed when the current major version is 8 - assert Version.CURRENT.major == 1 || Version.CURRENT.major == 2; - final int numberOfShards; - final Version indexVersionCreated = Version.fromId( - Integer.parseInt(indexSettingsBuilder.get(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey())) - ); - if (indexVersionCreated.before(LegacyESVersion.V_7_0_0)) { - numberOfShards = 5; - } else { - numberOfShards = INDEX_NUMBER_OF_SHARDS_SETTING.getDefault(Settings.EMPTY); - } - return numberOfShards; - } - /** * Calculates the number of routing shards based on the configured value in indexSettings or if recovering from another index * it will return the value configured for 
that index. diff --git a/server/src/main/java/org/opensearch/transport/InboundDecoder.java b/server/src/main/java/org/opensearch/transport/InboundDecoder.java index 9cfb4a79161e7..342a076774896 100644 --- a/server/src/main/java/org/opensearch/transport/InboundDecoder.java +++ b/server/src/main/java/org/opensearch/transport/InboundDecoder.java @@ -55,6 +55,8 @@ public class InboundDecoder implements Releasable { private int bytesConsumed = 0; private boolean isClosed = false; + private static Version V_4_0_0 = Version.fromId(4000099 ^ Version.MASK); + public InboundDecoder(Version version, PageCacheRecycler recycler) { this.version = version; this.recycler = recycler; @@ -217,8 +219,8 @@ static IllegalStateException ensureVersionCompatibility(Version remoteVersion, V // handshake. This looks odd but it's required to establish the connection correctly we check for real compatibility // once the connection is established final Version compatibilityVersion = isHandshake ? currentVersion.minimumCompatibilityVersion() : currentVersion; - if ((currentVersion.onOrAfter(Version.V_2_0_0) && remoteVersion.equals(Version.fromId(6079999))) == false - && remoteVersion.isCompatible(compatibilityVersion) == false) { + boolean v3x = currentVersion.onOrAfter(Version.V_3_0_0) && currentVersion.before(V_4_0_0); + if ((v3x && remoteVersion.equals(Version.fromId(7099999)) == false) && remoteVersion.isCompatible(compatibilityVersion) == false) { final Version minCompatibilityVersion = isHandshake ? compatibilityVersion : compatibilityVersion.minimumCompatibilityVersion(); String msg = "Received " + (isHandshake ? 
"handshake " : "") + "message from unsupported version: ["; return new IllegalStateException(msg + remoteVersion + "] minimal compatible version is: [" + minCompatibilityVersion + "]"); diff --git a/server/src/main/java/org/opensearch/transport/TransportHandshaker.java b/server/src/main/java/org/opensearch/transport/TransportHandshaker.java index c85a8eebd8fbd..1b6a2580fcf77 100644 --- a/server/src/main/java/org/opensearch/transport/TransportHandshaker.java +++ b/server/src/main/java/org/opensearch/transport/TransportHandshaker.java @@ -63,9 +63,6 @@ final class TransportHandshaker { private final ThreadPool threadPool; private final HandshakeRequestSender handshakeRequestSender; - // @todo remove in 3.0.0 - static final Version V_3_0_0 = Version.fromId(3000099 ^ Version.MASK); - TransportHandshaker(Version version, ThreadPool threadPool, HandshakeRequestSender handshakeRequestSender) { this.version = version; this.threadPool = threadPool; @@ -95,7 +92,7 @@ void sendHandshake(long requestId, DiscoveryNode node, TcpChannel channel, TimeV // Sending only BC version to ElasticSearch node provide easy deprecation path for this BC version logic // in OpenSearch 2.0.0. minCompatVersion = Version.fromId(6079999); - } else if (version.onOrAfter(Version.V_2_0_0)) { + } else if (version.before(Version.V_3_0_0)) { minCompatVersion = Version.fromId(7099999); } handshakeRequestSender.sendRequest(node, channel, requestId, minCompatVersion); @@ -134,7 +131,7 @@ void handleHandshake(TransportChannel channel, long requestId, StreamInput strea // 1. if remote node is 7.x, then StreamInput version would be 6.8.0 // 2. if remote node is 6.8 then it would be 5.6.0 // 3. 
if remote node is OpenSearch 1.x then it would be 6.7.99 - if ((this.version.onOrAfter(Version.V_1_0_0) && this.version.before(V_3_0_0)) + if ((this.version.onOrAfter(Version.V_1_0_0) && this.version.before(Version.V_3_0_0)) && (stream.getVersion().equals(LegacyESVersion.fromId(6080099)) || stream.getVersion().equals(Version.fromId(5060099)))) { // send 7.10.2 in response to ensure compatibility w/ Legacy 7.10.x nodes for rolling upgrade support channel.sendResponse(new HandshakeResponse(LegacyESVersion.V_7_10_2)); diff --git a/server/src/test/java/org/opensearch/LegacyESVersionTests.java b/server/src/test/java/org/opensearch/LegacyESVersionTests.java deleted file mode 100644 index d59f5e38a4ed7..0000000000000 --- a/server/src/test/java/org/opensearch/LegacyESVersionTests.java +++ /dev/null @@ -1,294 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch; - -import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.common.settings.Settings; -import org.opensearch.test.OpenSearchTestCase; -import org.opensearch.test.VersionUtils; - -import java.lang.reflect.Modifier; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; - -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.lessThan; -import static org.hamcrest.Matchers.sameInstance; -import static org.opensearch.LegacyESVersion.V_7_0_0; -import static org.opensearch.test.VersionUtils.randomLegacyVersion; -import static org.opensearch.VersionTests.isCompatible; - -/** - * tests LegacyESVersion utilities. 
- * note: legacy version compatibility is already tested by e predecessor - */ -public class LegacyESVersionTests extends OpenSearchTestCase { - - public void testVersionComparison() { - Version V_6_8_15 = LegacyESVersion.fromString("6.8.15"); - assertThat(V_6_8_15.before(V_7_0_0), is(true)); - assertThat(V_6_8_15.before(V_6_8_15), is(false)); - assertThat(V_7_0_0.before(V_6_8_15), is(false)); - - assertThat(V_6_8_15.onOrBefore(V_7_0_0), is(true)); - assertThat(V_6_8_15.onOrBefore(V_6_8_15), is(true)); - assertThat(V_7_0_0.onOrBefore(V_6_8_15), is(false)); - - assertThat(V_6_8_15.after(V_7_0_0), is(false)); - assertThat(V_6_8_15.after(V_6_8_15), is(false)); - assertThat(V_7_0_0.after(V_6_8_15), is(true)); - - assertThat(V_6_8_15.onOrAfter(V_7_0_0), is(false)); - assertThat(V_6_8_15.onOrAfter(V_6_8_15), is(true)); - assertThat(V_7_0_0.onOrAfter(V_6_8_15), is(true)); - - assertTrue(LegacyESVersion.fromString("5.0.0-alpha2").onOrAfter(LegacyESVersion.fromString("5.0.0-alpha1"))); - assertTrue(LegacyESVersion.fromString("5.0.0").onOrAfter(LegacyESVersion.fromString("5.0.0-beta2"))); - assertTrue(LegacyESVersion.fromString("5.0.0-rc1").onOrAfter(LegacyESVersion.fromString("5.0.0-beta24"))); - assertTrue(LegacyESVersion.fromString("5.0.0-alpha24").before(LegacyESVersion.fromString("5.0.0-beta0"))); - - assertThat(V_6_8_15, is(lessThan(V_7_0_0))); - assertThat(V_7_0_0, is(greaterThan(V_6_8_15))); - - // compare opensearch version to LegacyESVersion - assertThat(Version.V_1_0_0.compareMajor(LegacyESVersion.V_7_0_0), is(0)); - assertThat(Version.V_2_0_0.compareMajor(LegacyESVersion.fromString("7.3.0")), is(1)); - assertThat(LegacyESVersion.fromString("7.3.0").compareMajor(Version.V_2_0_0), is(-1)); - } - - public void testMin() { - assertEquals(VersionUtils.getPreviousVersion(), LegacyESVersion.min(Version.CURRENT, VersionUtils.getPreviousVersion())); - assertEquals(LegacyESVersion.fromString("7.0.1"), LegacyESVersion.min(LegacyESVersion.fromString("7.0.1"), 
Version.CURRENT)); - Version legacyVersion = VersionUtils.randomLegacyVersion(random()); - Version opensearchVersion = VersionUtils.randomOpenSearchVersion(random()); - assertEquals(legacyVersion, Version.min(opensearchVersion, legacyVersion)); - } - - public void testMax() { - assertEquals(Version.CURRENT, Version.max(Version.CURRENT, VersionUtils.randomLegacyVersion(random()))); - assertEquals(Version.CURRENT, Version.max(LegacyESVersion.fromString("1.0.1"), Version.CURRENT)); - Version legacyVersion = VersionUtils.randomOpenSearchVersion(random()); - Version opensearchVersion = VersionUtils.randomLegacyVersion(random()); - assertEquals(legacyVersion, Version.max(opensearchVersion, legacyVersion)); - } - - public void testMinimumIndexCompatibilityVersion() { - assertEquals(LegacyESVersion.fromId(5000099), LegacyESVersion.fromId(6000026).minimumIndexCompatibilityVersion()); - assertEquals(LegacyESVersion.fromId(2000099), LegacyESVersion.fromId(5000099).minimumIndexCompatibilityVersion()); - assertEquals(LegacyESVersion.fromId(2000099), LegacyESVersion.fromId(5010000).minimumIndexCompatibilityVersion()); - assertEquals(LegacyESVersion.fromId(2000099), LegacyESVersion.fromId(5000001).minimumIndexCompatibilityVersion()); - } - - public void testVersionFromString() { - final int iters = scaledRandomIntBetween(100, 1000); - for (int i = 0; i < iters; i++) { - LegacyESVersion version = randomLegacyVersion(random()); - assertThat(LegacyESVersion.fromString(version.toString()), sameInstance(version)); - } - } - - public void testTooLongVersionFromString() { - Exception e = expectThrows(IllegalArgumentException.class, () -> LegacyESVersion.fromString("1.0.0.1.3")); - assertThat(e.getMessage(), containsString("needs to contain major, minor, and revision")); - } - - public void testTooShortVersionFromString() { - Exception e = expectThrows(IllegalArgumentException.class, () -> LegacyESVersion.fromString("1.0")); - assertThat(e.getMessage(), containsString("needs to contain 
major, minor, and revision")); - } - - public void testWrongVersionFromString() { - Exception e = expectThrows(IllegalArgumentException.class, () -> LegacyESVersion.fromString("WRONG.VERSION")); - assertThat(e.getMessage(), containsString("needs to contain major, minor, and revision")); - } - - public void testVersionNoPresentInSettings() { - Exception e = expectThrows(IllegalStateException.class, () -> LegacyESVersion.indexCreated(Settings.builder().build())); - assertThat(e.getMessage(), containsString("[index.version.created] is not present")); - } - - public void testIndexCreatedVersion() { - // an actual index has a IndexMetadata.SETTING_INDEX_UUID - final LegacyESVersion version = (LegacyESVersion) LegacyESVersion.fromId(6000026); - assertEquals( - version, - LegacyESVersion.indexCreated( - Settings.builder().put(IndexMetadata.SETTING_INDEX_UUID, "foo").put(IndexMetadata.SETTING_VERSION_CREATED, version).build() - ) - ); - } - - public void testMinCompatVersion() { - Version major = LegacyESVersion.fromString("6.8.0"); - assertThat(LegacyESVersion.fromString("1.0.0").minimumCompatibilityVersion(), equalTo(major)); - assertThat(LegacyESVersion.fromString("1.2.0").minimumCompatibilityVersion(), equalTo(major)); - assertThat(LegacyESVersion.fromString("1.3.0").minimumCompatibilityVersion(), equalTo(major)); - - Version major5x = LegacyESVersion.fromString("5.0.0"); - assertThat(LegacyESVersion.fromString("5.0.0").minimumCompatibilityVersion(), equalTo(major5x)); - assertThat(LegacyESVersion.fromString("5.2.0").minimumCompatibilityVersion(), equalTo(major5x)); - assertThat(LegacyESVersion.fromString("5.3.0").minimumCompatibilityVersion(), equalTo(major5x)); - - Version major56x = LegacyESVersion.fromString("5.6.0"); - assertThat(LegacyESVersion.fromString("6.5.0").minimumCompatibilityVersion(), equalTo(major56x)); - assertThat(LegacyESVersion.fromString("6.3.1").minimumCompatibilityVersion(), equalTo(major56x)); - - // from 7.0 on we are supporting the latest 
minor of the previous major... this might fail once we add a new version ie. 5.x is - // released since we need to bump the supported minor in Version#minimumCompatibilityVersion() - Version lastVersion = LegacyESVersion.fromString("6.8.0"); // TODO: remove this once min compat version is a constant instead of - // method - assertEquals(lastVersion.major, LegacyESVersion.V_7_0_0.minimumCompatibilityVersion().major); - assertEquals( - "did you miss to bump the minor in Version#minimumCompatibilityVersion()", - lastVersion.minor, - LegacyESVersion.V_7_0_0.minimumCompatibilityVersion().minor - ); - assertEquals(0, LegacyESVersion.V_7_0_0.minimumCompatibilityVersion().revision); - } - - public void testToString() { - // with 2.0.beta we lowercase - assertEquals("2.0.0-beta1", LegacyESVersion.fromString("2.0.0-beta1").toString()); - assertEquals("5.0.0-alpha1", LegacyESVersion.fromId(5000001).toString()); - assertEquals("2.3.0", LegacyESVersion.fromString("2.3.0").toString()); - assertEquals("0.90.0.Beta1", LegacyESVersion.fromString("0.90.0.Beta1").toString()); - assertEquals("1.0.0.Beta1", LegacyESVersion.fromString("1.0.0.Beta1").toString()); - assertEquals("2.0.0-beta1", LegacyESVersion.fromString("2.0.0-beta1").toString()); - assertEquals("5.0.0-beta1", LegacyESVersion.fromString("5.0.0-beta1").toString()); - assertEquals("5.0.0-alpha1", LegacyESVersion.fromString("5.0.0-alpha1").toString()); - } - - public void testIsRc() { - assertTrue(LegacyESVersion.fromString("2.0.0-rc1").isRC()); - assertTrue(LegacyESVersion.fromString("1.0.0.RC1").isRC()); - - for (int i = 0; i < 25; i++) { - assertEquals(LegacyESVersion.fromString("5.0.0-rc" + i).id, LegacyESVersion.fromId(5000000 + i + 50).id); - assertEquals("5.0.0-rc" + i, LegacyESVersion.fromId(5000000 + i + 50).toString()); - - // legacy RC versioning - assertEquals(LegacyESVersion.fromString("1.0.0.RC" + i).id, LegacyESVersion.fromId(1000000 + i + 50).id); - assertEquals("1.0.0.RC" + i, LegacyESVersion.fromId(1000000 
+ i + 50).toString()); - } - } - - public void testIsBeta() { - assertTrue(LegacyESVersion.fromString("2.0.0-beta1").isBeta()); - assertTrue(LegacyESVersion.fromString("1.0.0.Beta1").isBeta()); - assertTrue(LegacyESVersion.fromString("0.90.0.Beta1").isBeta()); - - for (int i = 0; i < 25; i++) { - assertEquals(LegacyESVersion.fromString("5.0.0-beta" + i).id, LegacyESVersion.fromId(5000000 + i + 25).id); - assertEquals("5.0.0-beta" + i, LegacyESVersion.fromId(5000000 + i + 25).toString()); - } - } - - public void testIsAlpha() { - assertTrue(new LegacyESVersion(5000001, org.apache.lucene.util.Version.LUCENE_8_0_0).isAlpha()); - assertFalse(new LegacyESVersion(4000002, org.apache.lucene.util.Version.LUCENE_8_0_0).isAlpha()); - assertTrue(new LegacyESVersion(4000002, org.apache.lucene.util.Version.LUCENE_8_0_0).isBeta()); - assertTrue(LegacyESVersion.fromString("5.0.0-alpha14").isAlpha()); - assertEquals(5000014, LegacyESVersion.fromString("5.0.0-alpha14").id); - assertTrue(LegacyESVersion.fromId(5000015).isAlpha()); - - for (int i = 0; i < 25; i++) { - assertEquals(LegacyESVersion.fromString("5.0.0-alpha" + i).id, LegacyESVersion.fromId(5000000 + i).id); - assertEquals("5.0.0-alpha" + i, LegacyESVersion.fromId(5000000 + i).toString()); - } - } - - public void testParseVersion() { - final int iters = scaledRandomIntBetween(100, 1000); - for (int i = 0; i < iters; i++) { - LegacyESVersion version = randomLegacyVersion(random()); - LegacyESVersion parsedVersion = (LegacyESVersion) LegacyESVersion.fromString(version.toString()); - assertEquals(version, parsedVersion); - } - - expectThrows(IllegalArgumentException.class, () -> { LegacyESVersion.fromString("5.0.0-alph2"); }); - assertEquals(LegacyESVersion.fromString("2.0.0-SNAPSHOT"), LegacyESVersion.fromId(2000099)); - expectThrows(IllegalArgumentException.class, () -> { LegacyESVersion.fromString("5.0.0-SNAPSHOT"); }); - } - - public void testAllVersionsMatchId() throws Exception { - final Set releasedVersions = new 
HashSet<>(VersionUtils.allReleasedVersions()); - final Set unreleasedVersions = new HashSet<>(VersionUtils.allUnreleasedVersions()); - Map maxBranchVersions = new HashMap<>(); - for (java.lang.reflect.Field field : Version.class.getFields()) { - if (field.getName().matches("_ID")) { - assertTrue(field.getName() + " should be static", Modifier.isStatic(field.getModifiers())); - assertTrue(field.getName() + " should be final", Modifier.isFinal(field.getModifiers())); - int versionId = (Integer) field.get(Version.class); - - String constantName = field.getName().substring(0, field.getName().indexOf("_ID")); - java.lang.reflect.Field versionConstant = Version.class.getField(constantName); - assertTrue(constantName + " should be static", Modifier.isStatic(versionConstant.getModifiers())); - assertTrue(constantName + " should be final", Modifier.isFinal(versionConstant.getModifiers())); - - Version v = (Version) versionConstant.get(null); - logger.debug("Checking {}", v); - if (field.getName().endsWith("_UNRELEASED")) { - assertTrue(unreleasedVersions.contains(v)); - } else { - assertTrue(releasedVersions.contains(v)); - } - assertEquals("Version id " + field.getName() + " does not point to " + constantName, v, Version.fromId(versionId)); - assertEquals("Version " + constantName + " does not have correct id", versionId, v.id); - if (v.major >= 2) { - String number = v.toString(); - if (v.isBeta()) { - number = number.replace("-beta", "_beta"); - } else if (v.isRC()) { - number = number.replace("-rc", "_rc"); - } else if (v.isAlpha()) { - number = number.replace("-alpha", "_alpha"); - } - assertEquals("V_" + number.replace('.', '_'), constantName); - } else { - assertEquals("V_" + v.toString().replace('.', '_'), constantName); - } - - // only the latest version for a branch should be a snapshot (ie unreleased) - String branchName = "" + v.major + "." 
+ v.minor; - Version maxBranchVersion = maxBranchVersions.get(branchName); - if (maxBranchVersion == null) { - maxBranchVersions.put(branchName, v); - } else if (v.after(maxBranchVersion)) { - if (v == Version.CURRENT) { - // Current is weird - it counts as released even though it shouldn't. - continue; - } - assertFalse( - "Version " + maxBranchVersion + " cannot be a snapshot because version " + v + " exists", - VersionUtils.allUnreleasedVersions().contains(maxBranchVersion) - ); - maxBranchVersions.put(branchName, v); - } - } - } - } - - public void testIsCompatible() { - assertTrue(isCompatible(LegacyESVersion.fromString("6.8.0"), LegacyESVersion.V_7_0_0)); - assertFalse(isCompatible(LegacyESVersion.fromString("6.6.0"), LegacyESVersion.V_7_0_0)); - assertFalse(isCompatible(LegacyESVersion.fromString("6.7.0"), LegacyESVersion.V_7_0_0)); - - assertFalse(isCompatible(LegacyESVersion.fromId(5000099), LegacyESVersion.fromString("6.0.0"))); - assertFalse(isCompatible(LegacyESVersion.fromId(5000099), LegacyESVersion.fromString("7.0.0"))); - - Version a = randomLegacyVersion(random()); - Version b = randomLegacyVersion(random()); - assertThat(a.isCompatible(b), equalTo(b.isCompatible(a))); - } -} diff --git a/server/src/test/java/org/opensearch/VersionTests.java b/server/src/test/java/org/opensearch/VersionTests.java index c2566e83dd9b6..5b3213ded1c02 100644 --- a/server/src/test/java/org/opensearch/VersionTests.java +++ b/server/src/test/java/org/opensearch/VersionTests.java @@ -213,11 +213,11 @@ public void testOpenSearchMinCompatVersion() { int opensearchMajor = opensearchVersion.major; int major = opensearchMajor - 1; if (opensearchMajor == 1) { - major = 7; + major = 6; } else if (opensearchMajor == 2) { - major = 8; + major = 7; } - assertEquals(VersionUtils.lastFirstReleasedMinorFromMajor(candidates, major - 1), opensearchVersion.minimumCompatibilityVersion()); + assertEquals(VersionUtils.lastFirstReleasedMinorFromMajor(candidates, major), 
opensearchVersion.minimumCompatibilityVersion()); } /** test opensearch min index compatibility */ @@ -230,14 +230,7 @@ public void testOpenSearchMinIndexCompatVersion() { // opensearch 3.x minCompat is 1.{last minor version}.0 // until 3.0 is staged the following line will only return legacy versions List candidates = opensearchVersion.major >= 3 ? VersionUtils.allOpenSearchVersions() : VersionUtils.allLegacyVersions(); - int opensearchMajor = opensearchVersion.major; - int major = opensearchMajor - 1; - if (opensearchMajor == 1) { - major = 7; - } else if (opensearchMajor == 2) { - major = 8; - } - Version expected = VersionUtils.getFirstVersionOfMajor(candidates, major - 1); + Version expected = VersionUtils.getFirstVersionOfMajor(candidates, opensearchVersion.major - 1); Version actual = opensearchVersion.minimumIndexCompatibilityVersion(); // since some legacy versions still support build (alpha, beta, RC) we check major minor revision only assertEquals(expected.major, actual.major); @@ -433,7 +426,7 @@ public void testIsCompatible() { } else { currentOrNextMajorVersion = currentMajorVersion; } - final Version lastMinorFromPreviousMajor = VersionUtils.allReleasedVersions() + final Version lastMinorFromPreviousMajor = VersionUtils.allOpenSearchVersions() .stream() .filter(v -> v.major == (currentOrNextMajorVersion.major == 1 ? 
7 : currentOrNextMajorVersion.major - 1)) .max(Version::compareTo) diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequestTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequestTests.java index c84279a0782c3..2576823578630 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequestTests.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.cluster.health; -import org.opensearch.LegacyESVersion; import org.opensearch.action.support.IndicesOptions; import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.common.Priority; @@ -40,12 +39,9 @@ import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.test.OpenSearchTestCase; -import org.opensearch.test.VersionUtils; import java.util.Locale; -import static org.opensearch.test.VersionUtils.getPreviousVersion; -import static org.opensearch.test.VersionUtils.randomVersionBetween; import static org.hamcrest.core.IsEqual.equalTo; public class ClusterHealthRequestTests extends OpenSearchTestCase { @@ -74,84 +70,6 @@ public void testRequestReturnsHiddenIndicesByDefault() { assertTrue(defaultRequest.indicesOptions().expandWildcardsHidden()); } - public void testBwcSerialization() throws Exception { - for (int runs = 0; runs < randomIntBetween(5, 20); runs++) { - // Generate a random cluster health request in version < 7.2.0 and serializes it - final BytesStreamOutput out = new BytesStreamOutput(); - out.setVersion(randomVersionBetween(random(), VersionUtils.getFirstVersion(), getPreviousVersion(LegacyESVersion.V_7_2_0))); - - final ClusterHealthRequest expected = randomRequest(); - { - expected.getParentTask().writeTo(out); - out.writeTimeValue(expected.masterNodeTimeout()); - 
out.writeBoolean(expected.local()); - if (expected.indices() == null) { - out.writeVInt(0); - } else { - out.writeVInt(expected.indices().length); - for (String index : expected.indices()) { - out.writeString(index); - } - } - out.writeTimeValue(expected.timeout()); - if (expected.waitForStatus() == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - out.writeByte(expected.waitForStatus().value()); - } - out.writeBoolean(expected.waitForNoRelocatingShards()); - expected.waitForActiveShards().writeTo(out); - out.writeString(expected.waitForNodes()); - if (expected.waitForEvents() == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - Priority.writeTo(expected.waitForEvents(), out); - } - out.writeBoolean(expected.waitForNoInitializingShards()); - } - - // Deserialize and check the cluster health request - final StreamInput in = out.bytes().streamInput(); - in.setVersion(out.getVersion()); - final ClusterHealthRequest actual = new ClusterHealthRequest(in); - - assertThat(actual.waitForStatus(), equalTo(expected.waitForStatus())); - assertThat(actual.waitForNodes(), equalTo(expected.waitForNodes())); - assertThat(actual.waitForNoInitializingShards(), equalTo(expected.waitForNoInitializingShards())); - assertThat(actual.waitForNoRelocatingShards(), equalTo(expected.waitForNoRelocatingShards())); - assertThat(actual.waitForActiveShards(), equalTo(expected.waitForActiveShards())); - assertThat(actual.waitForEvents(), equalTo(expected.waitForEvents())); - assertIndicesEquals(actual.indices(), expected.indices()); - assertThat(actual.indicesOptions(), equalTo(IndicesOptions.lenientExpandOpen())); - } - - for (int runs = 0; runs < randomIntBetween(5, 20); runs++) { - // Generate a random cluster health request in current version - final ClusterHealthRequest expected = randomRequest(); - - // Serialize to node in version < 7.2.0 - final BytesStreamOutput out = new BytesStreamOutput(); - out.setVersion(randomVersionBetween(random(), 
VersionUtils.getFirstVersion(), getPreviousVersion(LegacyESVersion.V_7_2_0))); - expected.writeTo(out); - - // Deserialize and check the cluster health request - final StreamInput in = out.bytes().streamInput(); - in.setVersion(out.getVersion()); - final ClusterHealthRequest actual = new ClusterHealthRequest(in); - - assertThat(actual.waitForStatus(), equalTo(expected.waitForStatus())); - assertThat(actual.waitForNodes(), equalTo(expected.waitForNodes())); - assertThat(actual.waitForNoInitializingShards(), equalTo(expected.waitForNoInitializingShards())); - assertThat(actual.waitForNoRelocatingShards(), equalTo(expected.waitForNoRelocatingShards())); - assertThat(actual.waitForActiveShards(), equalTo(expected.waitForActiveShards())); - assertThat(actual.waitForEvents(), equalTo(expected.waitForEvents())); - assertIndicesEquals(actual.indices(), expected.indices()); - assertThat(actual.indicesOptions(), equalTo(IndicesOptions.lenientExpandOpen())); - } - } - private ClusterHealthRequest randomRequest() { ClusterHealthRequest request = new ClusterHealthRequest(); request.waitForStatus(randomFrom(ClusterHealthStatus.values())); diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponsesTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponsesTests.java index 5af15396dbefa..3db20fd3404a7 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponsesTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponsesTests.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.cluster.health; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; @@ -58,7 +57,6 @@ import org.opensearch.test.OpenSearchTestCase; import org.hamcrest.Matchers; -import org.opensearch.test.VersionUtils; import java.io.IOException; import 
java.util.Collections; @@ -149,69 +147,6 @@ private void assertClusterHealth(ClusterHealthResponse clusterHealth) { assertThat(clusterHealth.hasDiscoveredMaster(), Matchers.equalTo(clusterStateHealth.hasDiscoveredMaster())); } - public void testVersionCompatibleSerialization() throws IOException { - boolean hasDiscoveredMaster = false; - int indicesSize = randomInt(20); - Map indices = new HashMap<>(indicesSize); - if ("indices".equals(level) || "shards".equals(level)) { - for (int i = 0; i < indicesSize; i++) { - String indexName = randomAlphaOfLengthBetween(1, 5) + i; - indices.put(indexName, ClusterIndexHealthTests.randomIndexHealth(indexName, level)); - } - } - ClusterStateHealth stateHealth = new ClusterStateHealth( - randomInt(100), - randomInt(100), - randomInt(100), - randomInt(100), - randomInt(100), - randomInt(100), - randomInt(100), - hasDiscoveredMaster, - randomDoubleBetween(0d, 100d, true), - randomFrom(ClusterHealthStatus.values()), - indices - ); - // Create the Cluster Health Response object with discovered master as false, - // to verify serialization puts default value for the field - ClusterHealthResponse clusterHealth = new ClusterHealthResponse( - "test-cluster", - randomInt(100), - randomInt(100), - randomInt(100), - TimeValue.timeValueMillis(randomInt(10000)), - randomBoolean(), - stateHealth - ); - - BytesStreamOutput out_lt_1_0 = new BytesStreamOutput(); - Version old_version = VersionUtils.randomVersionBetween(random(), LegacyESVersion.V_7_0_0, LegacyESVersion.V_7_8_0); - out_lt_1_0.setVersion(old_version); - clusterHealth.writeTo(out_lt_1_0); - - BytesStreamOutput out_gt_1_0 = new BytesStreamOutput(); - Version new_version = VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, Version.CURRENT); - out_gt_1_0.setVersion(new_version); - clusterHealth.writeTo(out_gt_1_0); - - // The serialized output byte stream will not be same; and different by a boolean field "discovered_master" - assertNotEquals(out_lt_1_0.size(), 
out_gt_1_0.size()); - assertThat(out_gt_1_0.size() - out_lt_1_0.size(), Matchers.equalTo(1)); - - // Input stream constructed from Version 6_8 or less will not have field "discovered_master"; - // hence fallback to default as no value retained - StreamInput in_lt_6_8 = out_lt_1_0.bytes().streamInput(); - in_lt_6_8.setVersion(old_version); - clusterHealth = ClusterHealthResponse.readResponseFrom(in_lt_6_8); - assertThat(clusterHealth.hasDiscoveredMaster(), Matchers.equalTo(true)); - - // Input stream constructed from Version 7_0 and above will have field "discovered_master"; hence value will be retained - StreamInput in_gt_7_0 = out_gt_1_0.bytes().streamInput(); - in_gt_7_0.setVersion(new_version); - clusterHealth = ClusterHealthResponse.readResponseFrom(in_gt_7_0); - assertThat(clusterHealth.hasDiscoveredMaster(), Matchers.equalTo(hasDiscoveredMaster)); - } - ClusterHealthResponse maybeSerialize(ClusterHealthResponse clusterHealth) throws IOException { if (randomBoolean()) { BytesStreamOutput out = new BytesStreamOutput(); diff --git a/server/src/test/java/org/opensearch/action/ingest/SimulateProcessorResultTests.java b/server/src/test/java/org/opensearch/action/ingest/SimulateProcessorResultTests.java index 6fa043761882e..b75c9893eda4b 100644 --- a/server/src/test/java/org/opensearch/action/ingest/SimulateProcessorResultTests.java +++ b/server/src/test/java/org/opensearch/action/ingest/SimulateProcessorResultTests.java @@ -32,14 +32,12 @@ package org.opensearch.action.ingest; -import org.opensearch.LegacyESVersion; import org.opensearch.common.collect.Tuple; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.xcontent.XContentParser; import org.opensearch.ingest.IngestDocument; import org.opensearch.test.AbstractXContentTestCase; -import org.opensearch.test.VersionUtils; import java.io.IOException; import java.util.StringJoiner; @@ -85,21 +83,6 @@ public void testSerialization() 
throws IOException { } } - public void testBWCDescription() throws IOException { - boolean isSuccessful = randomBoolean(); - boolean isIgnoredException = randomBoolean(); - boolean hasCondition = randomBoolean(); - SimulateProcessorResult simulateProcessorResult = createTestInstance(isSuccessful, isIgnoredException, hasCondition); - - BytesStreamOutput out = new BytesStreamOutput(); - out.setVersion(VersionUtils.getPreviousVersion(LegacyESVersion.V_7_9_0)); - simulateProcessorResult.writeTo(out); - StreamInput in = out.bytes().streamInput(); - in.setVersion(VersionUtils.getPreviousVersion(LegacyESVersion.V_7_9_0)); - SimulateProcessorResult otherSimulateProcessorResult = new SimulateProcessorResult(in); - assertNull(otherSimulateProcessorResult.getDescription()); - } - static SimulateProcessorResult createTestInstance(boolean isSuccessful, boolean isIgnoredException, boolean hasCondition) { String type = randomAlphaOfLengthBetween(1, 10); String processorTag = randomAlphaOfLengthBetween(1, 10); diff --git a/server/src/test/java/org/opensearch/action/main/MainResponseTests.java b/server/src/test/java/org/opensearch/action/main/MainResponseTests.java index b333118c4e070..6e2dbe4399410 100644 --- a/server/src/test/java/org/opensearch/action/main/MainResponseTests.java +++ b/server/src/test/java/org/opensearch/action/main/MainResponseTests.java @@ -58,7 +58,7 @@ protected MainResponse createTestInstance() { ClusterName clusterName = new ClusterName(randomAlphaOfLength(10)); String nodeName = randomAlphaOfLength(10); final String date = new Date(randomNonNegativeLong()).toString(); - Version version = VersionUtils.randomVersionBetween(random(), LegacyESVersion.V_7_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, Version.CURRENT); Build build = new Build( Build.Type.UNKNOWN, randomAlphaOfLength(8), diff --git a/server/src/test/java/org/opensearch/action/support/IndicesOptionsTests.java 
b/server/src/test/java/org/opensearch/action/support/IndicesOptionsTests.java index 21ce9c29c6a03..1f037d2d58e11 100644 --- a/server/src/test/java/org/opensearch/action/support/IndicesOptionsTests.java +++ b/server/src/test/java/org/opensearch/action/support/IndicesOptionsTests.java @@ -32,7 +32,6 @@ package org.opensearch.action.support; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.support.IndicesOptions.Option; import org.opensearch.action.support.IndicesOptions.WildcardStates; @@ -60,14 +59,13 @@ import static org.opensearch.test.VersionUtils.randomVersionBetween; import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.Matchers.is; public class IndicesOptionsTests extends OpenSearchTestCase { public void testSerialization() throws Exception { int iterations = randomIntBetween(5, 20); for (int i = 0; i < iterations; i++) { - Version version = randomVersionBetween(random(), LegacyESVersion.V_7_0_0, null); + Version version = randomVersionBetween(random(), Version.V_1_0_0, null); IndicesOptions indicesOptions = IndicesOptions.fromOptions( randomBoolean(), randomBoolean(), @@ -92,15 +90,9 @@ public void testSerialization() throws Exception { assertThat(indicesOptions2.allowNoIndices(), equalTo(indicesOptions.allowNoIndices())); assertThat(indicesOptions2.expandWildcardsOpen(), equalTo(indicesOptions.expandWildcardsOpen())); assertThat(indicesOptions2.expandWildcardsClosed(), equalTo(indicesOptions.expandWildcardsClosed())); - if (version.before(LegacyESVersion.V_7_7_0)) { - assertThat(indicesOptions2.expandWildcardsHidden(), is(true)); - } else { - assertThat(indicesOptions2.expandWildcardsHidden(), equalTo(indicesOptions.expandWildcardsHidden())); - } - + assertThat(indicesOptions2.expandWildcardsHidden(), equalTo(indicesOptions.expandWildcardsHidden())); assertThat(indicesOptions2.forbidClosedIndices(), equalTo(indicesOptions.forbidClosedIndices())); 
assertThat(indicesOptions2.allowAliasesToMultipleIndices(), equalTo(indicesOptions.allowAliasesToMultipleIndices())); - assertEquals(indicesOptions2.ignoreAliases(), indicesOptions.ignoreAliases()); } } diff --git a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java index a019235c99743..49ef48cd1e9c6 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java @@ -47,20 +47,17 @@ import org.opensearch.common.settings.Settings; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; -import org.opensearch.transport.TransportService; -import java.util.HashMap; import java.util.HashSet; -import java.util.Map; +import static org.opensearch.test.VersionUtils.allVersions; import static org.opensearch.test.VersionUtils.maxCompatibleVersion; import static org.opensearch.test.VersionUtils.randomCompatibleVersion; -import static org.opensearch.test.VersionUtils.randomVersion; +import static org.opensearch.test.VersionUtils.randomOpenSearchVersion; import static org.opensearch.test.VersionUtils.randomVersionBetween; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.mockito.Mockito.any; -import static org.mockito.Mockito.anyBoolean; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -99,7 +96,7 @@ public void testPreventJoinClusterWithUnsupportedIndices() { public void testPreventJoinClusterWithUnsupportedNodeVersions() { DiscoveryNodes.Builder builder = DiscoveryNodes.builder(); - final Version version = randomVersion(random()); + final Version version = randomOpenSearchVersion(random()); builder.add(new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), version)); builder.add(new 
DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), randomCompatibleVersion(random(), version))); DiscoveryNodes nodes = builder.build(); @@ -117,14 +114,19 @@ public void testPreventJoinClusterWithUnsupportedNodeVersions() { }); } - if (minNodeVersion.onOrAfter(LegacyESVersion.V_7_0_0)) { + if (minNodeVersion.onOrAfter(LegacyESVersion.V_7_0_0) && minNodeVersion.before(Version.V_3_0_0)) { Version oldMajor = minNodeVersion.minimumCompatibilityVersion(); expectThrows(IllegalStateException.class, () -> JoinTaskExecutor.ensureMajorVersionBarrier(oldMajor, minNodeVersion)); } - final Version minGoodVersion = maxNodeVersion.compareMajor(minNodeVersion) == 0 ? - // we have to stick with the same major - minNodeVersion : maxNodeVersion.minimumCompatibilityVersion(); + final Version minGoodVersion; + if (maxNodeVersion.compareMajor(minNodeVersion) == 0) { + // we have to stick with the same major + minGoodVersion = minNodeVersion; + } else { + Version minCompatVersion = maxNodeVersion.minimumCompatibilityVersion(); + minGoodVersion = minCompatVersion.before(allVersions().get(0)) ? allVersions().get(0) : minCompatVersion; + } final Version justGood = randomVersionBetween(random(), minGoodVersion, maxCompatibleVersion(minNodeVersion)); if (randomBoolean()) { @@ -196,80 +198,4 @@ public void testUpdatesNodeWithNewRoles() throws Exception { assertThat(result.resultingState.getNodes().get(actualNode.getId()).getRoles(), equalTo(actualNode.getRoles())); } - - public void testUpdatesNodeWithOpenSearchVersionForExistingAndNewNodes() throws Exception { - // During the upgrade from Elasticsearch, OpenSearch node send their version as 7.10.2 to Elasticsearch master - // in order to successfully join the cluster. But as soon as OpenSearch node becomes the master, cluster state - // should show the OpenSearch nodes version as 1.x. As the cluster state was carry forwarded from ES master, - // version in DiscoveryNode is stale 7.10.2. 
- final AllocationService allocationService = mock(AllocationService.class); - when(allocationService.adaptAutoExpandReplicas(any())).then(invocationOnMock -> invocationOnMock.getArguments()[0]); - when(allocationService.disassociateDeadNodes(any(), anyBoolean(), any())).then( - invocationOnMock -> invocationOnMock.getArguments()[0] - ); - final RerouteService rerouteService = (reason, priority, listener) -> listener.onResponse(null); - Map channelVersions = new HashMap<>(); - String node_1 = UUIDs.base64UUID(); // OpenSearch node running BWC version - String node_2 = UUIDs.base64UUID(); // OpenSearch node running BWC version - String node_3 = UUIDs.base64UUID(); // OpenSearch node running BWC version, sending new join request and no active channel - String node_4 = UUIDs.base64UUID(); // ES node 7.10.2 - String node_5 = UUIDs.base64UUID(); // ES node 7.10.2 in cluster but missing channel version - String node_6 = UUIDs.base64UUID(); // ES node 7.9.0 - String node_7 = UUIDs.base64UUID(); // ES node 7.9.0 in cluster but missing channel version - channelVersions.put(node_1, Version.CURRENT); - channelVersions.put(node_2, Version.CURRENT); - channelVersions.put(node_4, LegacyESVersion.V_7_10_2); - channelVersions.put(node_6, LegacyESVersion.V_7_10_0); - - final TransportService transportService = mock(TransportService.class); - when(transportService.getChannelVersion(any())).thenReturn(channelVersions); - DiscoveryNodes.Builder nodes = new DiscoveryNodes.Builder().localNodeId(node_1); - nodes.add(new DiscoveryNode(node_1, buildNewFakeTransportAddress(), LegacyESVersion.V_7_10_2)); - nodes.add(new DiscoveryNode(node_2, buildNewFakeTransportAddress(), LegacyESVersion.V_7_10_2)); - nodes.add(new DiscoveryNode(node_3, buildNewFakeTransportAddress(), LegacyESVersion.V_7_10_2)); - nodes.add(new DiscoveryNode(node_4, buildNewFakeTransportAddress(), LegacyESVersion.V_7_10_2)); - nodes.add(new DiscoveryNode(node_5, buildNewFakeTransportAddress(), LegacyESVersion.V_7_10_2)); - 
nodes.add(new DiscoveryNode(node_6, buildNewFakeTransportAddress(), LegacyESVersion.V_7_10_1)); - nodes.add(new DiscoveryNode(node_7, buildNewFakeTransportAddress(), LegacyESVersion.V_7_10_0)); - final ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).nodes(nodes).build(); - final JoinTaskExecutor joinTaskExecutor = new JoinTaskExecutor( - Settings.EMPTY, - allocationService, - logger, - rerouteService, - transportService - ); - final DiscoveryNode existing_node_3 = clusterState.nodes().get(node_3); - final DiscoveryNode node_3_new_join = new DiscoveryNode( - existing_node_3.getName(), - existing_node_3.getId(), - existing_node_3.getEphemeralId(), - existing_node_3.getHostName(), - existing_node_3.getHostAddress(), - existing_node_3.getAddress(), - existing_node_3.getAttributes(), - existing_node_3.getRoles(), - Version.CURRENT - ); - - final ClusterStateTaskExecutor.ClusterTasksResult result = joinTaskExecutor.execute( - clusterState, - List.of( - new JoinTaskExecutor.Task(node_3_new_join, "test"), - JoinTaskExecutor.newBecomeMasterTask(), - JoinTaskExecutor.newFinishElectionTask() - ) - ); - final ClusterStateTaskExecutor.TaskResult taskResult = result.executionResults.values().iterator().next(); - assertTrue(taskResult.isSuccess()); - DiscoveryNodes resultNodes = result.resultingState.getNodes(); - assertEquals(Version.CURRENT, resultNodes.get(node_1).getVersion()); - assertEquals(Version.CURRENT, resultNodes.get(node_2).getVersion()); - assertEquals(Version.CURRENT, resultNodes.get(node_3).getVersion()); // 7.10.2 in old state but sent new join and processed - assertEquals(LegacyESVersion.V_7_10_2, resultNodes.get(node_4).getVersion()); - assertFalse(resultNodes.nodeExists(node_5)); // 7.10.2 node without active channel will be removed and should rejoin - assertEquals(LegacyESVersion.V_7_10_0, resultNodes.get(node_6).getVersion()); - // 7.9.0 node without active channel but shouldn't get removed - assertEquals(LegacyESVersion.V_7_10_0, 
resultNodes.get(node_7).getVersion()); - } } diff --git a/server/src/test/java/org/opensearch/cluster/metadata/AutoExpandReplicasTests.java b/server/src/test/java/org/opensearch/cluster/metadata/AutoExpandReplicasTests.java index d6c62e4bb0903..aafd507aef7cd 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/AutoExpandReplicasTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/AutoExpandReplicasTests.java @@ -31,7 +31,6 @@ package org.opensearch.cluster.metadata; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.admin.cluster.reroute.ClusterRerouteRequest; import org.opensearch.action.admin.indices.create.CreateIndexRequest; @@ -244,7 +243,7 @@ public void testOnlyAutoExpandAllocationFilteringAfterAllNodesUpgraded() { try { List allNodes = new ArrayList<>(); DiscoveryNode oldNode = createNode( - VersionUtils.randomVersionBetween(random(), LegacyESVersion.V_7_0_0, LegacyESVersion.V_7_5_1), + VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, Version.V_1_2_1), DiscoveryNodeRole.CLUSTER_MANAGER_ROLE, DiscoveryNodeRole.DATA_ROLE ); // local node is the master @@ -266,11 +265,11 @@ public void testOnlyAutoExpandAllocationFilteringAfterAllNodesUpgraded() { state = cluster.reroute(state, new ClusterRerouteRequest()); } - DiscoveryNode newNode = createNode( - LegacyESVersion.V_7_6_0, - DiscoveryNodeRole.CLUSTER_MANAGER_ROLE, - DiscoveryNodeRole.DATA_ROLE - ); // local node is the cluster_manager + DiscoveryNode newNode = createNode(Version.V_1_3_0, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE, DiscoveryNodeRole.DATA_ROLE); // local + // node + // is + // the + // cluster_manager state = cluster.addNodes(state, Collections.singletonList(newNode)); diff --git a/server/src/test/java/org/opensearch/cluster/metadata/DataStreamTemplateTests.java b/server/src/test/java/org/opensearch/cluster/metadata/DataStreamTemplateTests.java index 23afc23c80279..7f8a32ac84ee5 100644 --- 
a/server/src/test/java/org/opensearch/cluster/metadata/DataStreamTemplateTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/DataStreamTemplateTests.java @@ -8,19 +8,13 @@ package org.opensearch.cluster.metadata; -import org.opensearch.Version; import org.opensearch.cluster.metadata.ComposableIndexTemplate.DataStreamTemplate; -import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.Writeable; import org.opensearch.common.xcontent.XContentParser; import org.opensearch.test.AbstractSerializingTestCase; -import org.opensearch.test.VersionUtils; import java.io.IOException; -import static org.hamcrest.Matchers.equalTo; - public class DataStreamTemplateTests extends AbstractSerializingTestCase { @Override @@ -38,20 +32,4 @@ protected DataStreamTemplate createTestInstance() { return new DataStreamTemplate(new DataStream.TimestampField("timestamp_" + randomAlphaOfLength(5))); } - public void testBackwardCompatibleSerialization() throws Exception { - Version version = VersionUtils.getPreviousVersion(Version.V_1_0_0); - BytesStreamOutput out = new BytesStreamOutput(); - out.setVersion(version); - - DataStreamTemplate outTemplate = new DataStreamTemplate(); - outTemplate.writeTo(out); - assertThat(out.size(), equalTo(0)); - - StreamInput in = out.bytes().streamInput(); - in.setVersion(version); - DataStreamTemplate inTemplate = new DataStreamTemplate(in); - - assertThat(inTemplate, equalTo(outTemplate)); - } - } diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java index 5bea69c5bbd66..89550b491500d 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -36,7 +36,6 @@ import 
org.hamcrest.Matchers; import org.junit.Before; import org.opensearch.ExceptionsHelper; -import org.opensearch.LegacyESVersion; import org.opensearch.ResourceAlreadyExistsException; import org.opensearch.Version; import org.opensearch.action.admin.indices.alias.Alias; @@ -204,14 +203,6 @@ public static boolean isSplitable(int source, int target) { return source * x == target; } - public void testNumberOfShards() { - { - final Version versionCreated = VersionUtils.randomVersionBetween(random(), LegacyESVersion.V_7_0_0, Version.CURRENT); - final Settings.Builder indexSettingsBuilder = Settings.builder().put(SETTING_VERSION_CREATED, versionCreated); - assertThat(MetadataCreateIndexService.getNumberOfShards(indexSettingsBuilder), equalTo(1)); - } - } - public void testValidateShrinkIndex() { int numShards = randomIntBetween(2, 42); ClusterState state = createClusterState( diff --git a/server/src/test/java/org/opensearch/common/lucene/uid/VersionsTests.java b/server/src/test/java/org/opensearch/common/lucene/uid/VersionsTests.java index 4c2a500bb5cd3..dace484f80c2b 100644 --- a/server/src/test/java/org/opensearch/common/lucene/uid/VersionsTests.java +++ b/server/src/test/java/org/opensearch/common/lucene/uid/VersionsTests.java @@ -216,8 +216,8 @@ public void testCacheFilterReader() throws Exception { public void testLuceneVersionOnUnknownVersions() { // between two known versions, should use the lucene version of the previous version - Version version = LegacyESVersion.fromString("7.10.50"); - assertEquals(VersionUtils.getPreviousVersion(Version.fromString("7.10.3")).luceneVersion, version.luceneVersion); + Version version = Version.fromString("1.1.50"); + assertEquals(VersionUtils.getPreviousVersion(Version.fromString("1.1.3")).luceneVersion, version.luceneVersion); // too old version, major should be the oldest supported lucene version minus 1 version = LegacyESVersion.fromString("5.2.1"); diff --git a/server/src/test/java/org/opensearch/env/NodeMetadataTests.java 
b/server/src/test/java/org/opensearch/env/NodeMetadataTests.java index cd3b9c6205220..92ec33d7e78e0 100644 --- a/server/src/test/java/org/opensearch/env/NodeMetadataTests.java +++ b/server/src/test/java/org/opensearch/env/NodeMetadataTests.java @@ -31,23 +31,17 @@ package org.opensearch.env; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.common.collect.Tuple; -import org.opensearch.gateway.MetadataStateFormat; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.EqualsHashCodeTestUtils; import org.opensearch.test.VersionUtils; -import java.io.IOException; -import java.io.InputStream; -import java.nio.file.Files; import java.nio.file.Path; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.startsWith; public class NodeMetadataTests extends OpenSearchTestCase { @@ -77,22 +71,6 @@ public void testEqualsHashcodeSerialization() { }); } - public void testReadsFormatWithoutVersion() throws IOException { - // the behaviour tested here is only appropriate if the current version is compatible with versions 7 and earlier - assertTrue(Version.CURRENT.minimumIndexCompatibilityVersion().onOrBefore(LegacyESVersion.V_7_0_0)); - // when the current version is incompatible with version 7, the behaviour should change to reject files like the given resource - // which do not have the version field - - final Path tempDir = createTempDir(); - final Path stateDir = Files.createDirectory(tempDir.resolve(MetadataStateFormat.STATE_DIR_NAME)); - final InputStream resource = this.getClass().getResourceAsStream("testReadsFormatWithoutVersion.binary"); - assertThat(resource, notNullValue()); - Files.copy(resource, stateDir.resolve(NodeMetadata.FORMAT.getStateFileName(between(0, Integer.MAX_VALUE)))); - final NodeMetadata nodeMetadata = 
NodeMetadata.FORMAT.loadLatestState(logger, xContentRegistry(), tempDir); - assertThat(nodeMetadata.nodeId(), equalTo("y6VUVMSaStO4Tz-B5BxcOw")); - assertThat(nodeMetadata.nodeVersion(), equalTo(Version.V_EMPTY)); - } - public void testUpgradesLegitimateVersions() { final String nodeId = randomAlphaOfLength(10); final NodeMetadata nodeMetadata = new NodeMetadata( diff --git a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java index d67534bbfbddf..71433673eef5a 100644 --- a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java +++ b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java @@ -32,7 +32,6 @@ package org.opensearch.index; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.AbstractScopedSettings; @@ -723,7 +722,7 @@ public void testUpdateSoftDeletesFails() { public void testSoftDeletesDefaultSetting() { // enabled by default on 7.0+ or later { - Version createdVersion = VersionUtils.randomVersionBetween(random(), LegacyESVersion.V_7_0_0, Version.CURRENT); + Version createdVersion = VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, Version.CURRENT); Settings settings = Settings.builder().put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), createdVersion).build(); assertTrue(IndexSettings.INDEX_SOFT_DELETES_SETTING.get(settings)); } @@ -731,10 +730,7 @@ public void testSoftDeletesDefaultSetting() { public void testIgnoreTranslogRetentionSettingsIfSoftDeletesEnabled() { Settings.Builder settings = Settings.builder() - .put( - IndexMetadata.SETTING_VERSION_CREATED, - VersionUtils.randomVersionBetween(random(), LegacyESVersion.V_7_4_0, Version.CURRENT) - ); + .put(IndexMetadata.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, Version.CURRENT)); if (randomBoolean()) { 
settings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), randomPositiveTimeValue()); } diff --git a/server/src/test/java/org/opensearch/index/analysis/PreBuiltAnalyzerTests.java b/server/src/test/java/org/opensearch/index/analysis/PreBuiltAnalyzerTests.java index d8e0a4ea3bc2e..38736b26ea1ba 100644 --- a/server/src/test/java/org/opensearch/index/analysis/PreBuiltAnalyzerTests.java +++ b/server/src/test/java/org/opensearch/index/analysis/PreBuiltAnalyzerTests.java @@ -88,7 +88,13 @@ public void testThatInstancesAreCachedAndReused() { assertSame(PreBuiltAnalyzers.STANDARD.getAnalyzer(v), PreBuiltAnalyzers.STANDARD.getAnalyzer(v)); assertNotSame( PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.CURRENT), - PreBuiltAnalyzers.STANDARD.getAnalyzer(VersionUtils.randomPreviousCompatibleVersion(random(), Version.CURRENT)) + PreBuiltAnalyzers.STANDARD.getAnalyzer( + VersionUtils.randomVersionBetween( + random(), + Version.CURRENT.minimumIndexCompatibilityVersion(), + VersionUtils.getPreviousVersion(Version.CURRENT) + ) + ) ); // Same Lucene version should be cached: diff --git a/server/src/test/java/org/opensearch/index/mapper/RootObjectMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/RootObjectMapperTests.java index bb92a4d6d49cf..0d0e6324f8959 100644 --- a/server/src/test/java/org/opensearch/index/mapper/RootObjectMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/RootObjectMapperTests.java @@ -32,12 +32,8 @@ package org.opensearch.index.mapper; -import org.opensearch.LegacyESVersion; -import org.opensearch.Version; -import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Strings; import org.opensearch.common.compress.CompressedXContent; -import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.index.mapper.MapperService.MergeReason; @@ -46,7 +42,6 @@ import 
java.io.IOException; import java.util.Arrays; -import static org.opensearch.test.VersionUtils.randomVersionBetween; import static org.hamcrest.Matchers.containsString; public class RootObjectMapperTests extends OpenSearchSingleNodeTestCase { @@ -483,31 +478,4 @@ public void testIllegalDynamicTemplateNoMappingType() throws Exception { protected boolean forbidPrivateIndexSettings() { return false; } - - public void testIllegalDynamicTemplatePre7Dot7Index() throws Exception { - XContentBuilder mapping = XContentFactory.jsonBuilder(); - mapping.startObject(); - { - mapping.startObject("type"); - mapping.startArray("dynamic_templates"); - { - mapping.startObject(); - mapping.startObject("my_template"); - mapping.field("match_mapping_type", "string"); - mapping.startObject("mapping"); - mapping.field("type", "string"); - mapping.endObject(); - mapping.endObject(); - mapping.endObject(); - } - mapping.endArray(); - mapping.endObject(); - } - mapping.endObject(); - Version createdVersion = randomVersionBetween(random(), LegacyESVersion.V_7_0_0, LegacyESVersion.V_7_6_0); - Settings indexSettings = Settings.builder().put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), createdVersion).build(); - MapperService mapperService = createIndex("test", indexSettings).mapperService(); - DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(Strings.toString(mapping)), MergeReason.MAPPING_UPDATE); - assertThat(mapper.mappingSource().toString(), containsString("\"type\":\"string\"")); - } } diff --git a/server/src/test/java/org/opensearch/indices/analysis/AnalysisModuleTests.java b/server/src/test/java/org/opensearch/indices/analysis/AnalysisModuleTests.java index efec81e803f1c..5db837c2314a6 100644 --- a/server/src/test/java/org/opensearch/indices/analysis/AnalysisModuleTests.java +++ b/server/src/test/java/org/opensearch/indices/analysis/AnalysisModuleTests.java @@ -42,7 +42,6 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import 
org.apache.lucene.store.Directory; import org.apache.lucene.store.NIOFSDirectory; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.io.Streams; @@ -213,34 +212,6 @@ public void testUnderscoreInAnalyzerName() throws IOException { } } - public void testStandardFilterBWC() throws IOException { - // standard tokenfilter should have been removed entirely in the 7x line. However, a - // cacheing bug meant that it was still possible to create indexes using a standard - // filter until 7.6 - { - Version version = VersionUtils.randomVersionBetween(random(), LegacyESVersion.V_7_6_0, Version.CURRENT); - final Settings settings = Settings.builder() - .put("index.analysis.analyzer.my_standard.tokenizer", "standard") - .put("index.analysis.analyzer.my_standard.filter", "standard") - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - .put(IndexMetadata.SETTING_VERSION_CREATED, version) - .build(); - IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> getIndexAnalyzers(settings)); - assertThat(exc.getMessage(), equalTo("The [standard] token filter has been removed.")); - } - { - Version version = VersionUtils.randomVersionBetween(random(), LegacyESVersion.V_7_0_0, LegacyESVersion.V_7_5_2); - final Settings settings = Settings.builder() - .put("index.analysis.analyzer.my_standard.tokenizer", "standard") - .put("index.analysis.analyzer.my_standard.filter", "standard") - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - .put(IndexMetadata.SETTING_VERSION_CREATED, version) - .build(); - getIndexAnalyzers(settings); - assertWarnings("The [standard] token filter is deprecated and will be removed in a future version."); - } - } - /** * Tests that plugins can register pre-configured char filters that vary in behavior based on OpenSearch version, Lucene version, * and that do not vary based on version at all. 
diff --git a/server/src/test/java/org/opensearch/ingest/IngestStatsTests.java b/server/src/test/java/org/opensearch/ingest/IngestStatsTests.java index 0486c9f29f86e..b5c74f0ee5d16 100644 --- a/server/src/test/java/org/opensearch/ingest/IngestStatsTests.java +++ b/server/src/test/java/org/opensearch/ingest/IngestStatsTests.java @@ -32,12 +32,10 @@ package org.opensearch.ingest; -import org.opensearch.LegacyESVersion; import org.opensearch.common.collect.MapBuilder; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.test.OpenSearchTestCase; -import org.opensearch.test.VersionUtils; import java.io.IOException; import java.util.Collections; @@ -58,23 +56,6 @@ public void testSerialization() throws IOException { assertIngestStats(ingestStats, serializedStats, true, true); } - public void testBWCIngestProcessorTypeStats() throws IOException { - IngestStats.Stats totalStats = new IngestStats.Stats(50, 100, 200, 300); - List pipelineStats = createPipelineStats(); - Map> processorStats = createProcessorStats(pipelineStats); - IngestStats expectedIngestStats = new IngestStats(totalStats, pipelineStats, processorStats); - - // legacy output logic - BytesStreamOutput out = new BytesStreamOutput(); - out.setVersion(VersionUtils.getPreviousVersion(LegacyESVersion.V_7_6_0)); - expectedIngestStats.writeTo(out); - - StreamInput in = out.bytes().streamInput(); - in.setVersion(VersionUtils.getPreviousVersion(LegacyESVersion.V_7_6_0)); - IngestStats serializedStats = new IngestStats(in); - assertIngestStats(expectedIngestStats, serializedStats, true, false); - } - private List createPipelineStats() { IngestStats.PipelineStat pipeline1Stats = new IngestStats.PipelineStat("pipeline1", new IngestStats.Stats(3, 3, 3, 3)); IngestStats.PipelineStat pipeline2Stats = new IngestStats.PipelineStat("pipeline2", new IngestStats.Stats(47, 97, 197, 297)); diff --git 
a/server/src/test/java/org/opensearch/persistent/PersistentTasksCustomMetadataTests.java b/server/src/test/java/org/opensearch/persistent/PersistentTasksCustomMetadataTests.java index 873176f5d42be..96b33153ccf31 100644 --- a/server/src/test/java/org/opensearch/persistent/PersistentTasksCustomMetadataTests.java +++ b/server/src/test/java/org/opensearch/persistent/PersistentTasksCustomMetadataTests.java @@ -64,6 +64,7 @@ import org.opensearch.persistent.TestPersistentTasksPlugin.TestParams; import org.opensearch.persistent.TestPersistentTasksPlugin.TestPersistentTasksExecutor; import org.opensearch.test.AbstractDiffableSerializationTestCase; +import org.opensearch.test.VersionUtils; import java.io.IOException; import java.util.ArrayList; @@ -79,7 +80,6 @@ import static org.opensearch.persistent.PersistentTasksExecutor.NO_NODE_FOUND; import static org.opensearch.test.VersionUtils.allReleasedVersions; import static org.opensearch.test.VersionUtils.compatibleFutureVersion; -import static org.opensearch.test.VersionUtils.getPreviousVersion; import static org.opensearch.test.VersionUtils.randomVersionBetween; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; @@ -281,7 +281,7 @@ public void testMinVersionSerialization() throws IOException { PersistentTasksCustomMetadata.Builder tasks = PersistentTasksCustomMetadata.builder(); Version minVersion = allReleasedVersions().stream().filter(Version::isRelease).findFirst().orElseThrow(NoSuchElementException::new); - final Version streamVersion = randomVersionBetween(random(), minVersion, getPreviousVersion(Version.CURRENT)); + final Version streamVersion = randomVersionBetween(random(), minVersion, VersionUtils.getPreviousVersion(Version.CURRENT)); tasks.addTask( "test_compatible_version", TestPersistentTasksExecutor.NAME, diff --git a/server/src/test/java/org/opensearch/rest/action/document/RestIndexActionTests.java 
b/server/src/test/java/org/opensearch/rest/action/document/RestIndexActionTests.java index 5a1d43ff5dd04..85e5497975888 100644 --- a/server/src/test/java/org/opensearch/rest/action/document/RestIndexActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/document/RestIndexActionTests.java @@ -33,7 +33,6 @@ package org.opensearch.rest.action.document; import org.apache.lucene.util.SetOnce; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.DocWriteRequest; import org.opensearch.action.index.IndexRequest; @@ -46,7 +45,6 @@ import org.opensearch.rest.RestRequest; import org.opensearch.rest.action.document.RestIndexAction.AutoIdHandler; import org.opensearch.rest.action.document.RestIndexAction.CreateHandler; -import org.opensearch.test.VersionUtils; import org.opensearch.test.rest.FakeRestRequest; import org.opensearch.test.rest.RestActionTestCase; import org.junit.Before; @@ -96,13 +94,6 @@ public void testAutoIdDefaultsToOptypeCreate() { checkAutoIdOpType(Version.CURRENT, DocWriteRequest.OpType.CREATE); } - public void testAutoIdDefaultsToOptypeIndexForOlderVersions() { - checkAutoIdOpType( - VersionUtils.randomVersionBetween(random(), null, VersionUtils.getPreviousVersion(LegacyESVersion.V_7_5_0)), - DocWriteRequest.OpType.INDEX - ); - } - private void checkAutoIdOpType(Version minClusterVersion, DocWriteRequest.OpType expectedOpType) { SetOnce executeCalled = new SetOnce<>(); verifyingClient.setExecuteVerifier((actionType, request) -> { diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/GeoHashGridTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/GeoHashGridTests.java index b85761b709105..5e230a445ec98 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/GeoHashGridTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/GeoHashGridTests.java @@ -32,23 +32,10 @@ package 
org.opensearch.search.aggregations.bucket; -import org.opensearch.LegacyESVersion; -import org.opensearch.Version; -import org.opensearch.common.geo.GeoBoundingBox; import org.opensearch.common.geo.GeoBoundingBoxTests; -import org.opensearch.common.geo.GeoPoint; -import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.common.io.stream.NamedWriteableAwareStreamInput; -import org.opensearch.common.io.stream.NamedWriteableRegistry; -import org.opensearch.common.io.stream.StreamInput; import org.opensearch.search.aggregations.BaseAggregationTestCase; import org.opensearch.search.aggregations.bucket.geogrid.GeoGridAggregationBuilder; import org.opensearch.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder; -import org.opensearch.test.VersionUtils; - -import java.util.Collections; - -import static org.hamcrest.Matchers.equalTo; public class GeoHashGridTests extends BaseAggregationTestCase { @@ -72,26 +59,4 @@ protected GeoHashGridAggregationBuilder createTestAggregatorBuilder() { } return factory; } - - public void testSerializationPreBounds() throws Exception { - Version noBoundsSupportVersion = VersionUtils.randomVersionBetween(random(), LegacyESVersion.V_7_0_0, LegacyESVersion.V_7_5_0); - GeoHashGridAggregationBuilder builder = createTestAggregatorBuilder(); - try (BytesStreamOutput output = new BytesStreamOutput()) { - output.setVersion(LegacyESVersion.V_7_6_0); - builder.writeTo(output); - try ( - StreamInput in = new NamedWriteableAwareStreamInput( - output.bytes().streamInput(), - new NamedWriteableRegistry(Collections.emptyList()) - ) - ) { - in.setVersion(noBoundsSupportVersion); - GeoHashGridAggregationBuilder readBuilder = new GeoHashGridAggregationBuilder(in); - assertThat( - readBuilder.geoBoundingBox(), - equalTo(new GeoBoundingBox(new GeoPoint(Double.NaN, Double.NaN), new GeoPoint(Double.NaN, Double.NaN))) - ); - } - } - } } diff --git 
a/server/src/test/java/org/opensearch/search/aggregations/bucket/GeoTileGridTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/GeoTileGridTests.java index 3564ea337f741..d54667fb4f1a6 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/GeoTileGridTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/GeoTileGridTests.java @@ -32,24 +32,11 @@ package org.opensearch.search.aggregations.bucket; -import org.opensearch.LegacyESVersion; -import org.opensearch.Version; -import org.opensearch.common.geo.GeoBoundingBox; import org.opensearch.common.geo.GeoBoundingBoxTests; -import org.opensearch.common.geo.GeoPoint; -import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.common.io.stream.NamedWriteableAwareStreamInput; -import org.opensearch.common.io.stream.NamedWriteableRegistry; -import org.opensearch.common.io.stream.StreamInput; import org.opensearch.search.aggregations.BaseAggregationTestCase; import org.opensearch.search.aggregations.bucket.geogrid.GeoGridAggregationBuilder; import org.opensearch.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; import org.opensearch.search.aggregations.bucket.geogrid.GeoTileUtils; -import org.opensearch.test.VersionUtils; - -import java.util.Collections; - -import static org.hamcrest.Matchers.equalTo; public class GeoTileGridTests extends BaseAggregationTestCase { @@ -72,26 +59,4 @@ protected GeoTileGridAggregationBuilder createTestAggregatorBuilder() { } return factory; } - - public void testSerializationPreBounds() throws Exception { - Version noBoundsSupportVersion = VersionUtils.randomVersionBetween(random(), LegacyESVersion.V_7_0_0, LegacyESVersion.V_7_5_0); - GeoTileGridAggregationBuilder builder = createTestAggregatorBuilder(); - try (BytesStreamOutput output = new BytesStreamOutput()) { - output.setVersion(LegacyESVersion.V_7_6_0); - builder.writeTo(output); - try ( - StreamInput in = new 
NamedWriteableAwareStreamInput( - output.bytes().streamInput(), - new NamedWriteableRegistry(Collections.emptyList()) - ) - ) { - in.setVersion(noBoundsSupportVersion); - GeoTileGridAggregationBuilder readBuilder = new GeoTileGridAggregationBuilder(in); - assertThat( - readBuilder.geoBoundingBox(), - equalTo(new GeoBoundingBox(new GeoPoint(Double.NaN, Double.NaN), new GeoPoint(Double.NaN, Double.NaN))) - ); - } - } - } } diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilderTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilderTests.java index 58199741b1ee2..2b1700676f549 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilderTests.java @@ -32,22 +32,7 @@ package org.opensearch.search.aggregations.bucket.composite; -import org.opensearch.LegacyESVersion; -import org.opensearch.Version; -import org.opensearch.common.geo.GeoBoundingBox; -import org.opensearch.common.geo.GeoBoundingBoxTests; -import org.opensearch.common.geo.GeoPoint; -import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.common.io.stream.NamedWriteableAwareStreamInput; -import org.opensearch.common.io.stream.NamedWriteableRegistry; -import org.opensearch.common.io.stream.StreamInput; import org.opensearch.test.OpenSearchTestCase; -import org.opensearch.test.VersionUtils; - -import java.io.IOException; -import java.util.Collections; - -import static org.hamcrest.Matchers.equalTo; public class GeoTileGridValuesSourceBuilderTests extends OpenSearchTestCase { @@ -56,28 +41,4 @@ public void testSetFormat() { expectThrows(IllegalArgumentException.class, () -> builder.format("format")); } - public void testBWCBounds() throws IOException { - Version noBoundsSupportVersion = 
VersionUtils.randomVersionBetween(random(), LegacyESVersion.V_7_0_0, LegacyESVersion.V_7_5_0); - GeoTileGridValuesSourceBuilder builder = new GeoTileGridValuesSourceBuilder("name"); - if (randomBoolean()) { - builder.geoBoundingBox(GeoBoundingBoxTests.randomBBox()); - } - try (BytesStreamOutput output = new BytesStreamOutput()) { - output.setVersion(LegacyESVersion.V_7_6_0); - builder.writeTo(output); - try ( - StreamInput in = new NamedWriteableAwareStreamInput( - output.bytes().streamInput(), - new NamedWriteableRegistry(Collections.emptyList()) - ) - ) { - in.setVersion(noBoundsSupportVersion); - GeoTileGridValuesSourceBuilder readBuilder = new GeoTileGridValuesSourceBuilder(in); - assertThat( - readBuilder.geoBoundingBox(), - equalTo(new GeoBoundingBox(new GeoPoint(Double.NaN, Double.NaN), new GeoPoint(Double.NaN, Double.NaN))) - ); - } - } - } } diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/InternalScriptedMetricTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/InternalScriptedMetricTests.java index 69c53d1a526e8..fdb59591cba36 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/InternalScriptedMetricTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/InternalScriptedMetricTests.java @@ -32,7 +32,6 @@ package org.opensearch.search.aggregations.metrics; -import org.opensearch.LegacyESVersion; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.settings.Settings; import org.opensearch.script.MockScriptEngine; @@ -42,12 +41,10 @@ import org.opensearch.script.ScriptService; import org.opensearch.script.ScriptType; import org.opensearch.search.aggregations.Aggregation.CommonFields; -import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.InternalAggregation.ReduceContext; import org.opensearch.search.aggregations.ParsedAggregation; import 
org.opensearch.search.aggregations.pipeline.PipelineAggregator.PipelineTree; import org.opensearch.test.InternalAggregationTestCase; -import org.opensearch.test.VersionUtils; import java.io.IOException; import java.util.ArrayList; @@ -60,7 +57,6 @@ import java.util.function.Supplier; import static java.util.Collections.singletonList; -import static org.hamcrest.Matchers.equalTo; public class InternalScriptedMetricTests extends InternalAggregationTestCase { @@ -276,45 +272,4 @@ protected InternalScriptedMetric mutateInstance(InternalScriptedMetric instance) } return new InternalScriptedMetric(name, aggregationsList, reduceScript, metadata); } - - public void testOldSerialization() throws IOException { - // A single element list looks like a fully reduced agg - InternalScriptedMetric original = new InternalScriptedMetric( - "test", - org.opensearch.common.collect.List.of("foo"), - new Script("test"), - null - ); - original.mergePipelineTreeForBWCSerialization(PipelineTree.EMPTY); - InternalScriptedMetric roundTripped = (InternalScriptedMetric) copyNamedWriteable( - original, - getNamedWriteableRegistry(), - InternalAggregation.class, - VersionUtils.randomVersionBetween(random(), LegacyESVersion.V_7_0_0, VersionUtils.getPreviousVersion(LegacyESVersion.V_7_8_0)) - ); - assertThat(roundTripped, equalTo(original)); - - // A multi-element list looks like a non-reduced agg - InternalScriptedMetric unreduced = new InternalScriptedMetric( - "test", - org.opensearch.common.collect.List.of("foo", "bar"), - new Script("test"), - null - ); - unreduced.mergePipelineTreeForBWCSerialization(PipelineTree.EMPTY); - Exception e = expectThrows( - IllegalArgumentException.class, - () -> copyNamedWriteable( - unreduced, - getNamedWriteableRegistry(), - InternalAggregation.class, - VersionUtils.randomVersionBetween( - random(), - LegacyESVersion.V_7_0_0, - VersionUtils.getPreviousVersion(LegacyESVersion.V_7_8_0) - ) - ) - ); - assertThat(e.getMessage(), equalTo("scripted_metric doesn't 
support cross cluster search until 7.8.0")); - } } diff --git a/server/src/test/java/org/opensearch/transport/InboundDecoderTests.java b/server/src/test/java/org/opensearch/transport/InboundDecoderTests.java index 9c78d039984e1..bcc7fe0ccac94 100644 --- a/server/src/test/java/org/opensearch/transport/InboundDecoderTests.java +++ b/server/src/test/java/org/opensearch/transport/InboundDecoderTests.java @@ -32,7 +32,6 @@ package org.opensearch.transport; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.bytes.ReleasableBytesReference; @@ -135,63 +134,18 @@ public void testDecode() throws IOException { assertEquals(InboundDecoder.END_CONTENT, endMarker); } - public void testDecodePreHeaderSizeVariableInt() throws IOException { - // TODO: Can delete test on 9.0 - boolean isCompressed = randomBoolean(); - String action = "test-request"; - long requestId = randomNonNegativeLong(); - final Version preHeaderVariableInt = LegacyESVersion.V_7_5_0; - final String contentValue = randomAlphaOfLength(100); - final OutboundMessage message = new OutboundMessage.Request( - threadContext, - new String[0], - new TestRequest(contentValue), - preHeaderVariableInt, - action, - requestId, - true, - isCompressed - ); - - final BytesReference totalBytes = message.serialize(new BytesStreamOutput()); - int partialHeaderSize = TcpHeader.headerSize(preHeaderVariableInt); - - InboundDecoder decoder = new InboundDecoder(Version.CURRENT, PageCacheRecycler.NON_RECYCLING_INSTANCE); - final ArrayList fragments = new ArrayList<>(); - final ReleasableBytesReference releasable1 = ReleasableBytesReference.wrap(totalBytes); - int bytesConsumed = decoder.decode(releasable1, fragments::add); - assertEquals(partialHeaderSize, bytesConsumed); - assertEquals(1, releasable1.refCount()); - - final Header header = (Header) fragments.get(0); - assertEquals(requestId, header.getRequestId()); - 
assertEquals(preHeaderVariableInt, header.getVersion()); - assertEquals(isCompressed, header.isCompressed()); - assertTrue(header.isHandshake()); - assertTrue(header.isRequest()); - assertTrue(header.needsToReadVariableHeader()); - fragments.clear(); - - final BytesReference bytes2 = totalBytes.slice(bytesConsumed, totalBytes.length() - bytesConsumed); - final ReleasableBytesReference releasable2 = ReleasableBytesReference.wrap(bytes2); - int bytesConsumed2 = decoder.decode(releasable2, fragments::add); - assertEquals(2, fragments.size()); - assertEquals(InboundDecoder.END_CONTENT, fragments.get(fragments.size() - 1)); - assertEquals(totalBytes.length() - bytesConsumed, bytesConsumed2); - } - public void testDecodeHandshakeCompatibility() throws IOException { String action = "test-request"; long requestId = randomNonNegativeLong(); final String headerKey = randomAlphaOfLength(10); final String headerValue = randomAlphaOfLength(20); threadContext.putHeader(headerKey, headerValue); - Version handshakeCompat = Version.CURRENT.minimumCompatibilityVersion().minimumCompatibilityVersion(); + Version handshakeCompatVersion = Version.CURRENT.minimumCompatibilityVersion().minimumCompatibilityVersion(); OutboundMessage message = new OutboundMessage.Request( threadContext, new String[0], new TestRequest(randomAlphaOfLength(100)), - handshakeCompat, + handshakeCompatVersion, action, requestId, true, @@ -199,7 +153,10 @@ public void testDecodeHandshakeCompatibility() throws IOException { ); final BytesReference bytes = message.serialize(new BytesStreamOutput()); - int totalHeaderSize = TcpHeader.headerSize(handshakeCompat); + int totalHeaderSize = TcpHeader.headerSize(handshakeCompatVersion); + if (handshakeCompatVersion.onOrAfter(TcpHeader.VERSION_WITH_HEADER_SIZE)) { + totalHeaderSize += bytes.getInt(TcpHeader.VARIABLE_HEADER_SIZE_POSITION); + } InboundDecoder decoder = new InboundDecoder(Version.CURRENT, PageCacheRecycler.NON_RECYCLING_INSTANCE); final ArrayList fragments = 
new ArrayList<>(); @@ -210,12 +167,10 @@ public void testDecodeHandshakeCompatibility() throws IOException { final Header header = (Header) fragments.get(0); assertEquals(requestId, header.getRequestId()); - assertEquals(handshakeCompat, header.getVersion()); + assertEquals(handshakeCompatVersion, header.getVersion()); assertFalse(header.isCompressed()); assertTrue(header.isHandshake()); assertTrue(header.isRequest()); - // TODO: On 9.0 this will be true because all compatible versions with contain the variable header int - assertTrue(header.needsToReadVariableHeader()); fragments.clear(); } @@ -306,12 +261,12 @@ public void testCompressedDecodeHandshakeCompatibility() throws IOException { final String headerKey = randomAlphaOfLength(10); final String headerValue = randomAlphaOfLength(20); threadContext.putHeader(headerKey, headerValue); - Version handshakeCompat = Version.CURRENT.minimumCompatibilityVersion().minimumCompatibilityVersion(); + Version handshakeCompatVersion = Version.CURRENT.minimumCompatibilityVersion().minimumCompatibilityVersion(); OutboundMessage message = new OutboundMessage.Request( threadContext, new String[0], new TestRequest(randomAlphaOfLength(100)), - handshakeCompat, + handshakeCompatVersion, action, requestId, true, @@ -319,7 +274,10 @@ public void testCompressedDecodeHandshakeCompatibility() throws IOException { ); final BytesReference bytes = message.serialize(new BytesStreamOutput()); - int totalHeaderSize = TcpHeader.headerSize(handshakeCompat); + int totalHeaderSize = TcpHeader.headerSize(handshakeCompatVersion); + if (handshakeCompatVersion.onOrAfter(TcpHeader.VERSION_WITH_HEADER_SIZE)) { + totalHeaderSize += bytes.getInt(TcpHeader.VARIABLE_HEADER_SIZE_POSITION); + } InboundDecoder decoder = new InboundDecoder(Version.CURRENT, PageCacheRecycler.NON_RECYCLING_INSTANCE); final ArrayList fragments = new ArrayList<>(); @@ -330,12 +288,10 @@ public void testCompressedDecodeHandshakeCompatibility() throws IOException { final Header 
header = (Header) fragments.get(0); assertEquals(requestId, header.getRequestId()); - assertEquals(handshakeCompat, header.getVersion()); + assertEquals(handshakeCompatVersion, header.getVersion()); assertTrue(header.isCompressed()); assertTrue(header.isHandshake()); assertTrue(header.isRequest()); - // TODO: On 9.0 this will be true because all compatible versions with contain the variable header int - assertTrue(header.needsToReadVariableHeader()); fragments.clear(); } @@ -372,25 +328,25 @@ public void testEnsureVersionCompatibility() throws IOException { ); assertNull(ise); - final Version version = Version.fromString("7.0.0"); - ise = InboundDecoder.ensureVersionCompatibility(Version.fromString("6.0.0"), version, true); + final Version version = Version.V_3_0_0; + ise = InboundDecoder.ensureVersionCompatibility(Version.V_2_0_0, version, true); assertNull(ise); - ise = InboundDecoder.ensureVersionCompatibility(Version.fromString("6.0.0"), version, false); + ise = InboundDecoder.ensureVersionCompatibility(Version.V_1_0_0, version, false); assertEquals( - "Received message from unsupported version: [6.0.0] minimal compatible version is: [" + "Received message from unsupported version: [1.0.0] minimal compatible version is: [" + version.minimumCompatibilityVersion() + "]", ise.getMessage() ); // For handshake we are compatible with N-2 - ise = InboundDecoder.ensureVersionCompatibility(Version.fromString("6.8.0"), version, true); + ise = InboundDecoder.ensureVersionCompatibility(Version.fromString("2.1.0"), version, true); assertNull(ise); - ise = InboundDecoder.ensureVersionCompatibility(Version.fromString("5.6.0"), version, false); + ise = InboundDecoder.ensureVersionCompatibility(Version.fromString("1.3.0"), version, false); assertEquals( - "Received message from unsupported version: [5.6.0] minimal compatible version is: [" + "Received message from unsupported version: [1.3.0] minimal compatible version is: [" + version.minimumCompatibilityVersion() + "]", 
ise.getMessage() diff --git a/test/framework/src/main/java/org/opensearch/test/VersionUtils.java b/test/framework/src/main/java/org/opensearch/test/VersionUtils.java index 0f8525285fd08..5989dfa7898fd 100644 --- a/test/framework/src/main/java/org/opensearch/test/VersionUtils.java +++ b/test/framework/src/main/java/org/opensearch/test/VersionUtils.java @@ -108,9 +108,11 @@ static Tuple, List> resolveReleasedVersions(Version curre List lastMinorLine = stableVersions.get(stableVersions.size() - 1); if (lastMinorLine.get(lastMinorLine.size() - 1) instanceof LegacyESVersion == false) { // if the last minor line is Legacy there are no more staged releases; do nothing + // otherwise the last minor line is (by definition) staged and unreleased Version lastMinor = moveLastToUnreleased(stableVersions, unreleasedVersions); + // no more staged legacy bugfixes so skip; if (lastMinor instanceof LegacyESVersion == false && lastMinor.revision == 0) { - // no more staged legacy versions + // this is not a legacy version; remove the staged bugfix if (stableVersions.get(stableVersions.size() - 1).size() == 1) { // a minor is being staged, which is also unreleased moveLastToUnreleased(stableVersions, unreleasedVersions); @@ -210,11 +212,11 @@ public static List allLegacyVersions() { } /** - * Get the released version before {@code version}. + * Get the version before {@code version}. */ public static Version getPreviousVersion(Version version) { - for (int i = RELEASED_VERSIONS.size() - 1; i >= 0; i--) { - Version v = RELEASED_VERSIONS.get(i); + for (int i = ALL_VERSIONS.size() - 1; i >= 0; i--) { + Version v = ALL_VERSIONS.get(i); if (v.before(version)) { return v; } @@ -223,7 +225,7 @@ public static Version getPreviousVersion(Version version) { } /** - * Get the released version before {@link Version#CURRENT}. + * Get the version before {@link Version#CURRENT}. 
*/ public static Version getPreviousVersion() { Version version = getPreviousVersion(Version.CURRENT); diff --git a/test/framework/src/main/java/org/opensearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/opensearch/transport/AbstractSimpleTransportTestCase.java index ec88cd0201db5..e4b98124ea441 100644 --- a/test/framework/src/main/java/org/opensearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/opensearch/transport/AbstractSimpleTransportTestCase.java @@ -107,7 +107,6 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; -import static org.opensearch.transport.TransportHandshaker.V_3_0_0; import static org.opensearch.transport.TransportService.NOOP_TRANSPORT_INTERCEPTOR; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; @@ -2227,11 +2226,7 @@ public void testHandshakeUpdatesVersion() throws IOException { TransportRequestOptions.Type.STATE ); try (Transport.Connection connection = serviceA.openConnection(node, builder.build())) { - // OpenSearch [1.0:3.0) in bwc mode should only "upgrade" to Legacy v7.10.2 - assertEquals( - connection.getVersion(), - version.onOrAfter(Version.V_1_0_0) && version.before(V_3_0_0) ? 
LegacyESVersion.V_7_10_2 : version - ); + assertEquals(version, connection.getVersion()); } } } @@ -2276,9 +2271,7 @@ public void testTcpHandshake() { PlainActionFuture future = PlainActionFuture.newFuture(); serviceA.getOriginalTransport().openConnection(node, connectionProfile, future); try (Transport.Connection connection = future.actionGet()) { - // OpenSearch sends a handshake version spoofed as Legacy version 7_10_2 - // todo change for OpenSearch 3.0.0 when Legacy compatibility is removed - assertEquals(LegacyESVersion.V_7_10_2, connection.getVersion()); + assertEquals(connection.getVersion(), connection.getVersion()); } } } @@ -2624,8 +2617,8 @@ public String executor() { TransportStats transportStats = serviceC.transport.getStats(); // we did a single round-trip to do the initial handshake assertEquals(1, transportStats.getRxCount()); assertEquals(1, transportStats.getTxCount()); - assertEquals(25, transportStats.getRxSize().getBytes()); - assertEquals(51, transportStats.getTxSize().getBytes()); + assertEquals(29, transportStats.getRxSize().getBytes()); + assertEquals(55, transportStats.getTxSize().getBytes()); }); serviceC.sendRequest( connection, @@ -2639,16 +2632,16 @@ public String executor() { TransportStats transportStats = serviceC.transport.getStats(); // request has ben send assertEquals(1, transportStats.getRxCount()); assertEquals(2, transportStats.getTxCount()); - assertEquals(25, transportStats.getRxSize().getBytes()); - assertEquals(111, transportStats.getTxSize().getBytes()); + assertEquals(29, transportStats.getRxSize().getBytes()); + assertEquals(115, transportStats.getTxSize().getBytes()); }); sendResponseLatch.countDown(); responseLatch.await(); stats = serviceC.transport.getStats(); // response has been received assertEquals(2, stats.getRxCount()); assertEquals(2, stats.getTxCount()); - assertEquals(50, stats.getRxSize().getBytes()); - assertEquals(111, stats.getTxSize().getBytes()); + assertEquals(54, stats.getRxSize().getBytes()); + 
assertEquals(115, stats.getTxSize().getBytes()); } finally { serviceC.close(); } @@ -2745,8 +2738,8 @@ public String executor() { TransportStats transportStats = serviceC.transport.getStats(); // request has been sent assertEquals(1, transportStats.getRxCount()); assertEquals(1, transportStats.getTxCount()); - assertEquals(25, transportStats.getRxSize().getBytes()); - assertEquals(51, transportStats.getTxSize().getBytes()); + assertEquals(29, transportStats.getRxSize().getBytes()); + assertEquals(55, transportStats.getTxSize().getBytes()); }); serviceC.sendRequest( connection, @@ -2760,8 +2753,8 @@ public String executor() { TransportStats transportStats = serviceC.transport.getStats(); // request has been sent assertEquals(1, transportStats.getRxCount()); assertEquals(2, transportStats.getTxCount()); - assertEquals(25, transportStats.getRxSize().getBytes()); - assertEquals(111, transportStats.getTxSize().getBytes()); + assertEquals(29, transportStats.getRxSize().getBytes()); + assertEquals(115, transportStats.getTxSize().getBytes()); }); sendResponseLatch.countDown(); responseLatch.await(); @@ -2773,10 +2766,10 @@ public String executor() { BytesStreamOutput streamOutput = new BytesStreamOutput(); exception.writeTo(streamOutput); String failedMessage = "Unexpected read bytes size. The transport exception that was received=" + exception; - // 53 bytes are the non-exception message bytes that have been received. It should include the initial + // 57 bytes are the non-exception message bytes that have been received. It should include the initial // handshake message and the header, version, etc bytes in the exception message. 
- assertEquals(failedMessage, 53 + streamOutput.bytes().length(), stats.getRxSize().getBytes()); - assertEquals(111, stats.getTxSize().getBytes()); + assertEquals(failedMessage, 57 + streamOutput.bytes().length(), stats.getRxSize().getBytes()); + assertEquals(115, stats.getTxSize().getBytes()); } finally { serviceC.close(); } diff --git a/test/framework/src/test/java/org/opensearch/test/VersionUtilsTests.java b/test/framework/src/test/java/org/opensearch/test/VersionUtilsTests.java index de1f650aff20c..d007547ba0918 100644 --- a/test/framework/src/test/java/org/opensearch/test/VersionUtilsTests.java +++ b/test/framework/src/test/java/org/opensearch/test/VersionUtilsTests.java @@ -31,7 +31,6 @@ package org.opensearch.test; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.common.Booleans; import org.opensearch.common.collect.Tuple; @@ -76,21 +75,21 @@ public void testRandomVersionBetween() { assertTrue(got.onOrBefore(Version.CURRENT)); // sub range - got = VersionUtils.randomVersionBetween(random(), LegacyESVersion.fromId(7000099), LegacyESVersion.fromId(7010099)); - assertTrue(got.onOrAfter(LegacyESVersion.fromId(7000099))); - assertTrue(got.onOrBefore(LegacyESVersion.fromId(7010099))); + got = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_2_1_0); + assertTrue(got.onOrAfter(Version.V_2_0_0)); + assertTrue(got.onOrBefore(Version.V_2_1_0)); // unbounded lower - got = VersionUtils.randomVersionBetween(random(), null, LegacyESVersion.fromId(7000099)); + got = VersionUtils.randomVersionBetween(random(), null, Version.V_2_0_0); assertTrue(got.onOrAfter(VersionUtils.getFirstVersion())); - assertTrue(got.onOrBefore(LegacyESVersion.fromId(7000099))); + assertTrue(got.onOrBefore(Version.V_2_0_0)); got = VersionUtils.randomVersionBetween(random(), null, VersionUtils.allReleasedVersions().get(0)); assertTrue(got.onOrAfter(VersionUtils.getFirstVersion())); 
assertTrue(got.onOrBefore(VersionUtils.allReleasedVersions().get(0))); // unbounded upper - got = VersionUtils.randomVersionBetween(random(), LegacyESVersion.fromId(7000099), null); - assertTrue(got.onOrAfter(LegacyESVersion.fromId(7000099))); + got = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, null); + assertTrue(got.onOrAfter(Version.V_2_0_0)); assertTrue(got.onOrBefore(Version.CURRENT)); got = VersionUtils.randomVersionBetween(random(), VersionUtils.getPreviousVersion(), null); assertTrue(got.onOrAfter(VersionUtils.getPreviousVersion())); @@ -101,8 +100,8 @@ public void testRandomVersionBetween() { assertEquals(got, VersionUtils.getFirstVersion()); got = VersionUtils.randomVersionBetween(random(), Version.CURRENT, Version.CURRENT); assertEquals(got, Version.CURRENT); - got = VersionUtils.randomVersionBetween(random(), LegacyESVersion.fromId(7000099), LegacyESVersion.fromId(7000099)); - assertEquals(got, LegacyESVersion.fromId(7000099)); + got = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_2_0_0); + assertEquals(got, Version.V_2_0_0); // implicit range of one got = VersionUtils.randomVersionBetween(random(), null, VersionUtils.getFirstVersion()); diff --git a/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/SkipSectionTests.java b/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/SkipSectionTests.java index 2735128a4583d..5cdeeb70c1950 100644 --- a/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/SkipSectionTests.java +++ b/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/SkipSectionTests.java @@ -48,21 +48,21 @@ public class SkipSectionTests extends AbstractClientYamlTestFragmentParserTestCase { public void testSkipMultiRange() { - SkipSection section = new SkipSection("6.0.0 - 6.1.0, 7.1.0 - 7.5.0", Collections.emptyList(), "foobar"); + SkipSection section = new SkipSection("1.0.0 - 1.1.0, 2.1.0 - 2.5.0", Collections.emptyList(), "foobar"); 
assertFalse(section.skip(Version.CURRENT)); - assertFalse(section.skip(Version.fromString("6.2.0"))); - assertFalse(section.skip(Version.fromString("7.0.0"))); - assertFalse(section.skip(Version.fromString("7.6.0"))); - - assertTrue(section.skip(Version.fromString("6.0.0"))); - assertTrue(section.skip(Version.fromString("6.1.0"))); - assertTrue(section.skip(Version.fromString("7.1.0"))); - assertTrue(section.skip(Version.fromString("7.5.0"))); - - section = new SkipSection("- 7.1.0, 7.2.0 - 7.5.0", Collections.emptyList(), "foobar"); - assertTrue(section.skip(Version.fromString("7.0.0"))); - assertTrue(section.skip(Version.fromString("7.3.0"))); + assertFalse(section.skip(Version.fromString("1.2.0"))); + assertFalse(section.skip(Version.fromString("2.0.0"))); + assertFalse(section.skip(Version.fromString("2.6.0"))); + + assertTrue(section.skip(Version.fromString("1.0.0"))); + assertTrue(section.skip(Version.fromString("1.1.0"))); + assertTrue(section.skip(Version.fromString("2.1.0"))); + assertTrue(section.skip(Version.fromString("2.5.0"))); + + section = new SkipSection("- 2.1.0, 2.2.0 - 2.5.0", Collections.emptyList(), "foobar"); + assertTrue(section.skip(Version.fromString("2.0.0"))); + assertTrue(section.skip(Version.fromString("2.3.0"))); } public void testSkip() { From fbb2725ec1cde01bac8c9d5eb70d844f918b4d6b Mon Sep 17 00:00:00 2001 From: Nicholas Walter Knize Date: Mon, 4 Apr 2022 18:06:29 -0500 Subject: [PATCH 044/653] [Mute] tasks.list/10_basic/tasks_list Muting test until bwc fix is in place. 
Signed-off-by: Nicholas Walter Knize --- .../main/resources/rest-api-spec/test/tasks.list/10_basic.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.list/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.list/10_basic.yml index d0385ac0125f4..b1f2e084295a8 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.list/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.list/10_basic.yml @@ -1,6 +1,8 @@ --- "tasks_list test": - skip: + version: "all" + reason: "AwaitsFix https://github.com/opensearch-project/OpenSearch/issues/2757" features: [arbitrary_key] - do: @@ -32,4 +34,3 @@ - is_true: tasks - match: { tasks.0.headers.X-Opaque-Id: "That is me" } - From b5d7805962da48ad556b788ef29d506a75492082 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Apr 2022 16:47:36 -0700 Subject: [PATCH 045/653] Bump azure-core-http-netty from 1.11.8 to 1.11.9 in /plugins/repository-azure (#2734) * Bump azure-core-http-netty in /plugins/repository-azure Bumps [azure-core-http-netty](https://github.com/Azure/azure-sdk-for-java) from 1.11.8 to 1.11.9. - [Release notes](https://github.com/Azure/azure-sdk-for-java/releases) - [Commits](https://github.com/Azure/azure-sdk-for-java/compare/azure-core-http-netty_1.11.8...azure-core-http-netty_1.11.9) --- updated-dependencies: - dependency-name: com.azure:azure-core-http-netty dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- plugins/repository-azure/build.gradle | 2 +- .../licenses/azure-core-http-netty-1.11.8.jar.sha1 | 1 - .../licenses/azure-core-http-netty-1.11.9.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-azure/licenses/azure-core-http-netty-1.11.8.jar.sha1 create mode 100644 plugins/repository-azure/licenses/azure-core-http-netty-1.11.9.jar.sha1 diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 628b5f7c58c04..648c045d97d01 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -46,7 +46,7 @@ opensearchplugin { dependencies { api 'com.azure:azure-core:1.26.0' api 'com.azure:azure-storage-common:12.15.0' - api 'com.azure:azure-core-http-netty:1.11.8' + api 'com.azure:azure-core-http-netty:1.11.9' api "io.netty:netty-codec-dns:${versions.netty}" api "io.netty:netty-codec-socks:${versions.netty}" api "io.netty:netty-codec-http2:${versions.netty}" diff --git a/plugins/repository-azure/licenses/azure-core-http-netty-1.11.8.jar.sha1 b/plugins/repository-azure/licenses/azure-core-http-netty-1.11.8.jar.sha1 deleted file mode 100644 index df7d7ae4ce285..0000000000000 --- a/plugins/repository-azure/licenses/azure-core-http-netty-1.11.8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0ea66d4531fb41cb3b5ab55e2e7b7f301e7f8503 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-core-http-netty-1.11.9.jar.sha1 b/plugins/repository-azure/licenses/azure-core-http-netty-1.11.9.jar.sha1 new file mode 100644 index 0000000000000..936a02dfba4d7 --- /dev/null +++ b/plugins/repository-azure/licenses/azure-core-http-netty-1.11.9.jar.sha1 @@ -0,0 +1 @@ +1d1f34b3e60db038f3913007a2706a820383dc26 \ No newline at end of file From 
365e07ce4be444a2f12938f442698f50782e80dc Mon Sep 17 00:00:00 2001 From: Raphael Lopez <85206537+raphlopez@users.noreply.github.com> Date: Mon, 4 Apr 2022 20:12:58 -0400 Subject: [PATCH 046/653] Remove endpoint_suffix dependency on account key (#2485) Signed-off-by: Raphael Lopez --- .../opensearch/repositories/azure/AzureStorageSettings.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageSettings.java b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageSettings.java index c9a031451bccd..4a9aa51334d0a 100644 --- a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageSettings.java +++ b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageSettings.java @@ -91,8 +91,7 @@ final class AzureStorageSettings { AZURE_CLIENT_PREFIX_KEY, "endpoint_suffix", key -> Setting.simpleString(key, Property.NodeScope), - () -> ACCOUNT_SETTING, - () -> KEY_SETTING + () -> ACCOUNT_SETTING ); // The overall operation timeout From 6103010e145cf1724c7603f1f1c4ec24e8985d40 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Apr 2022 17:40:08 -0700 Subject: [PATCH 047/653] Bump jcodings from 1.0.44 to 1.0.57 in /libs/grok (#2736) * Bump jcodings from 1.0.44 to 1.0.57 in /libs/grok Bumps [jcodings](https://github.com/jruby/jcodings) from 1.0.44 to 1.0.57. - [Release notes](https://github.com/jruby/jcodings/releases) - [Commits](https://github.com/jruby/jcodings/compare/jcodings-1.0.44...jcodings-1.0.57) --- updated-dependencies: - dependency-name: org.jruby.jcodings:jcodings dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Fix forbiddenApisCheck for libs/grok. 
This change removes the ignoreMissingClasses block of the thirdPartyAudit check. This class is not missing, so the check causes failures. Signed-off-by: Marc Handalian Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] Co-authored-by: Marc Handalian --- libs/grok/build.gradle | 6 +----- libs/grok/licenses/jcodings-1.0.44.jar.sha1 | 1 - libs/grok/licenses/jcodings-1.0.57.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 6 deletions(-) delete mode 100644 libs/grok/licenses/jcodings-1.0.44.jar.sha1 create mode 100644 libs/grok/licenses/jcodings-1.0.57.jar.sha1 diff --git a/libs/grok/build.gradle b/libs/grok/build.gradle index ce23406721fe6..e406a80ee1c91 100644 --- a/libs/grok/build.gradle +++ b/libs/grok/build.gradle @@ -31,7 +31,7 @@ dependencies { api 'org.jruby.joni:joni:2.1.41' // joni dependencies: - api 'org.jruby.jcodings:jcodings:1.0.44' + api 'org.jruby.jcodings:jcodings:1.0.57' testImplementation(project(":test:framework")) { exclude group: 'org.opensearch', module: 'opensearch-grok' @@ -41,7 +41,3 @@ dependencies { tasks.named('forbiddenApisMain').configure { replaceSignatureFiles 'jdk-signatures' } - -thirdPartyAudit.ignoreMissingClasses( - 'org.jcodings.unicode.UnicodeCodeRange' -) \ No newline at end of file diff --git a/libs/grok/licenses/jcodings-1.0.44.jar.sha1 b/libs/grok/licenses/jcodings-1.0.44.jar.sha1 deleted file mode 100644 index 4449009d3395e..0000000000000 --- a/libs/grok/licenses/jcodings-1.0.44.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a6884b2fd8fd9a56874db05afaa22435043a2e3e \ No newline at end of file diff --git a/libs/grok/licenses/jcodings-1.0.57.jar.sha1 b/libs/grok/licenses/jcodings-1.0.57.jar.sha1 new file mode 100644 index 0000000000000..1a703c2644787 --- /dev/null +++ b/libs/grok/licenses/jcodings-1.0.57.jar.sha1 @@ -0,0 +1 @@ +603a9ceac39cbf7f6f27fe18b2fded4714319b0a \ No newline at end of file From 21f9950806e0395e89bcfd70b76def9d4133b937 Mon Sep 17 00:00:00 2001 From: 
Marc Handalian Date: Mon, 4 Apr 2022 21:18:32 -0700 Subject: [PATCH 048/653] Update ThirdPartyAuditTask to check for and list pointless exclusions. (#2760) This change swaps the order of the task to first check for no pointless exclusions. If not caught first these will be thrown inside of the bogusExcludesCount block that logs a useless error message. Signed-off-by: Marc Handalian --- .../org/opensearch/gradle/precommit/ThirdPartyAuditTask.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java index 097710b3f1a6e..2a49ae05db1fb 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java @@ -237,6 +237,7 @@ public void runThirdPartyAudit() throws IOException { Set jdkJarHellClasses = runJdkJarHellCheck(); if (missingClassExcludes != null) { + assertNoPointlessExclusions("are not missing", missingClassExcludes, missingClasses); long bogousExcludesCount = Stream.concat(missingClassExcludes.stream(), violationsExcludes.stream()) .filter(each -> missingClasses.contains(each) == false) .filter(each -> violationsClasses.contains(each) == false) @@ -247,7 +248,6 @@ public void runThirdPartyAudit() throws IOException { "All excluded classes seem to have no issues. 
" + "This is sometimes an indication that the check silently failed" ); } - assertNoPointlessExclusions("are not missing", missingClassExcludes, missingClasses); missingClasses.removeAll(missingClassExcludes); } assertNoPointlessExclusions("have no violations", violationsExcludes, violationsClasses); From 406ee36b154b007f144b3316c4fe13db2e7c3ad2 Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Tue, 5 Apr 2022 06:25:45 -0500 Subject: [PATCH 049/653] Fix TaskInfo serialization and unmute task_list bwc test (#2766) Fixes the version check around resourceStats serialization and unmutes the failing bwc test. Signed-off-by: Nicholas Walter Knize --- .../main/resources/rest-api-spec/test/tasks.list/10_basic.yml | 2 -- server/src/main/java/org/opensearch/tasks/TaskInfo.java | 4 ++-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.list/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.list/10_basic.yml index b1f2e084295a8..fd6bb7f96eb9d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.list/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.list/10_basic.yml @@ -1,8 +1,6 @@ --- "tasks_list test": - skip: - version: "all" - reason: "AwaitsFix https://github.com/opensearch-project/OpenSearch/issues/2757" features: [arbitrary_key] - do: diff --git a/server/src/main/java/org/opensearch/tasks/TaskInfo.java b/server/src/main/java/org/opensearch/tasks/TaskInfo.java index e6ba94a71b61d..b6814282b5db4 100644 --- a/server/src/main/java/org/opensearch/tasks/TaskInfo.java +++ b/server/src/main/java/org/opensearch/tasks/TaskInfo.java @@ -142,7 +142,7 @@ public TaskInfo(StreamInput in) throws IOException { } parentTaskId = TaskId.readFromStream(in); headers = in.readMap(StreamInput::readString, StreamInput::readString); - if (in.getVersion().onOrAfter(Version.V_2_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_1_0)) { resourceStats = 
in.readOptionalWriteable(TaskResourceStats::new); } else { resourceStats = null; @@ -164,7 +164,7 @@ public void writeTo(StreamOutput out) throws IOException { } parentTaskId.writeTo(out); out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeString); - if (out.getVersion().onOrAfter(Version.V_2_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_1_0)) { out.writeOptionalWriteable(resourceStats); } } From 0b1f4a20697b486fb2c08af6b5e7c8732c4d6101 Mon Sep 17 00:00:00 2001 From: Wenjun Ruan Date: Tue, 5 Apr 2022 21:23:08 +0800 Subject: [PATCH 050/653] Rename `file` to `dir` in Environment (#2730) * Rename `file` to `dir` in Environment Signed-off-by: ruanwenjun * Fix compile error Signed-off-by: ruanwenjun * fix compile error Signed-off-by: ruanwenjun --- .../settings/AddFileKeyStoreCommand.java | 2 +- .../settings/AddStringKeyStoreCommand.java | 2 +- .../settings/CreateKeyStoreCommand.java | 6 +- .../RemoveSettingKeyStoreCommand.java | 2 +- .../opensearch/bootstrap/BootstrapTests.java | 2 +- .../settings/AddFileKeyStoreCommandTests.java | 6 +- .../AddStringKeyStoreCommandTests.java | 20 ++-- .../settings/CreateKeyStoreCommandTests.java | 10 +- .../settings/KeyStoreCommandTestCase.java | 4 +- .../common/settings/KeyStoreWrapperTests.java | 38 +++---- .../settings/UpgradeKeyStoreCommandTests.java | 8 +- .../plugins/InstallPluginCommand.java | 18 ++-- .../plugins/ListPluginsCommand.java | 10 +- .../plugins/RemovePluginCommand.java | 16 +-- .../plugins/InstallPluginCommandTests.java | 34 +++--- .../plugins/ListPluginsCommandTests.java | 18 ++-- .../plugins/RemovePluginCommandTests.java | 58 +++++----- .../org/opensearch/upgrade/TaskInput.java | 4 +- .../opensearch/upgrade/UpgradeCliTests.java | 8 +- .../upgrade/ValidateInputTaskTests.java | 2 +- ...enationCompoundWordTokenFilterFactory.java | 2 +- .../ingest/geoip/IngestGeoIpPlugin.java | 4 +- .../useragent/IngestUserAgentPlugin.java | 2 +- .../index/reindex/ReindexSslConfig.java | 2 +- 
.../IcuCollationTokenFilterFactory.java | 4 +- .../index/analysis/IcuTokenizerFactory.java | 2 +- .../ExampleCustomSettingsConfig.java | 2 +- .../hdfs/HdfsSecurityContext.java | 2 +- .../bootstrap/EvilSecurityTests.java | 14 +-- .../bootstrap/SpawnerNoBootstrapTests.java | 14 +-- .../action/admin/ReloadSecureSettingsIT.java | 10 +- .../opensearch/index/shard/IndexShardIT.java | 4 +- ...nsportNodesReloadSecureSettingsAction.java | 2 +- .../org/opensearch/bootstrap/Bootstrap.java | 10 +- .../org/opensearch/bootstrap/OpenSearch.java | 2 +- .../org/opensearch/bootstrap/Security.java | 24 ++--- .../org/opensearch/bootstrap/Spawner.java | 8 +- .../metadata/MetadataCreateIndexService.java | 2 +- .../common/logging/LogConfigurator.java | 2 +- .../common/settings/BaseKeyStoreCommand.java | 4 +- .../ChangeKeyStorePasswordCommand.java | 2 +- .../settings/HasPasswordKeyStoreCommand.java | 2 +- .../settings/UpgradeKeyStoreCommand.java | 2 +- .../java/org/opensearch/env/Environment.java | 101 +++++++++--------- .../org/opensearch/env/NodeEnvironment.java | 2 +- .../opensearch/index/analysis/Analysis.java | 4 +- .../indices/analysis/HunspellService.java | 4 +- .../node/InternalSettingsPreparer.java | 2 +- .../main/java/org/opensearch/node/Node.java | 24 ++--- .../MetadataRolloverServiceTests.java | 4 +- .../org/opensearch/env/EnvironmentTests.java | 12 +-- .../indices/analysis/AnalysisModuleTests.java | 2 +- .../node/InternalSettingsPreparerTests.java | 2 +- .../plugins/PluginsServiceTests.java | 2 +- .../blobstore/BlobStoreRepositoryTests.java | 2 +- .../test/AbstractBuilderTestCase.java | 2 +- .../opensearch/test/InternalTestCluster.java | 2 +- 57 files changed, 277 insertions(+), 278 deletions(-) diff --git a/distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/AddFileKeyStoreCommand.java b/distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/AddFileKeyStoreCommand.java index 56c04e019bd5d..b948be24350f4 100644 --- 
a/distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/AddFileKeyStoreCommand.java +++ b/distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/AddFileKeyStoreCommand.java @@ -95,7 +95,7 @@ protected void executeCommand(Terminal terminal, OptionSet options, Environment keyStore.setFile(setting, Files.readAllBytes(file)); } - keyStore.save(env.configFile(), getKeyStorePassword().getChars()); + keyStore.save(env.configDir(), getKeyStorePassword().getChars()); } @SuppressForbidden(reason = "file arg for cli") diff --git a/distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/AddStringKeyStoreCommand.java b/distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/AddStringKeyStoreCommand.java index 88ed9f74fb690..a8bc1dff8838f 100644 --- a/distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/AddStringKeyStoreCommand.java +++ b/distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/AddStringKeyStoreCommand.java @@ -121,7 +121,7 @@ protected void executeCommand(Terminal terminal, OptionSet options, Environment } } - keyStore.save(env.configFile(), getKeyStorePassword().getChars()); + keyStore.save(env.configDir(), getKeyStorePassword().getChars()); } } diff --git a/distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/CreateKeyStoreCommand.java b/distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/CreateKeyStoreCommand.java index b96dd46236b87..dbc5d897417ee 100644 --- a/distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/CreateKeyStoreCommand.java +++ b/distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/CreateKeyStoreCommand.java @@ -59,7 +59,7 @@ class CreateKeyStoreCommand extends KeyStoreAwareCommand { @Override protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { try (SecureString password = 
options.has(passwordOption) ? readPassword(terminal, true) : new SecureString(new char[0])) { - Path keystoreFile = KeyStoreWrapper.keystorePath(env.configFile()); + Path keystoreFile = KeyStoreWrapper.keystorePath(env.configDir()); if (Files.exists(keystoreFile)) { if (terminal.promptYesNo("An opensearch keystore already exists. Overwrite?", false) == false) { terminal.println("Exiting without creating keystore."); @@ -67,8 +67,8 @@ protected void execute(Terminal terminal, OptionSet options, Environment env) th } } KeyStoreWrapper keystore = KeyStoreWrapper.create(); - keystore.save(env.configFile(), password.getChars()); - terminal.println("Created opensearch keystore in " + KeyStoreWrapper.keystorePath(env.configFile())); + keystore.save(env.configDir(), password.getChars()); + terminal.println("Created opensearch keystore in " + KeyStoreWrapper.keystorePath(env.configDir())); } catch (SecurityException e) { throw new UserException(ExitCodes.IO_ERROR, "Error creating the opensearch keystore."); } diff --git a/distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/RemoveSettingKeyStoreCommand.java b/distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/RemoveSettingKeyStoreCommand.java index 54f0c9324f5c0..c57959117af15 100644 --- a/distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/RemoveSettingKeyStoreCommand.java +++ b/distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/RemoveSettingKeyStoreCommand.java @@ -66,6 +66,6 @@ protected void executeCommand(Terminal terminal, OptionSet options, Environment } keyStore.remove(setting); } - keyStore.save(env.configFile(), getKeyStorePassword().getChars()); + keyStore.save(env.configDir(), getKeyStorePassword().getChars()); } } diff --git a/distribution/tools/keystore-cli/src/test/java/org/opensearch/bootstrap/BootstrapTests.java b/distribution/tools/keystore-cli/src/test/java/org/opensearch/bootstrap/BootstrapTests.java 
index 5b280f210af03..58beba16820c6 100644 --- a/distribution/tools/keystore-cli/src/test/java/org/opensearch/bootstrap/BootstrapTests.java +++ b/distribution/tools/keystore-cli/src/test/java/org/opensearch/bootstrap/BootstrapTests.java @@ -71,7 +71,7 @@ public void setupEnv() throws IOException { } public void testLoadSecureSettings() throws Exception { - final Path configPath = env.configFile(); + final Path configPath = env.configDir(); final SecureString seed; try (KeyStoreWrapper keyStoreWrapper = KeyStoreWrapper.create()) { seed = KeyStoreWrapper.SEED_SETTING.get(Settings.builder().setSecureSettings(keyStoreWrapper).build()); diff --git a/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/AddFileKeyStoreCommandTests.java b/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/AddFileKeyStoreCommandTests.java index 4cbf54fd11bf0..b3cc7e10fdf8c 100644 --- a/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/AddFileKeyStoreCommandTests.java +++ b/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/AddFileKeyStoreCommandTests.java @@ -66,14 +66,14 @@ private Path createRandomFile() throws IOException { for (int i = 0; i < length; ++i) { bytes[i] = randomByte(); } - Path file = env.configFile().resolve(randomAlphaOfLength(16)); + Path file = env.configDir().resolve(randomAlphaOfLength(16)); Files.write(file, bytes); return file; } private void addFile(KeyStoreWrapper keystore, String setting, Path file, String password) throws Exception { keystore.setFile(setting, Files.readAllBytes(file)); - keystore.save(env.configFile(), password.toCharArray()); + keystore.save(env.configDir(), password.toCharArray()); } public void testMissingCreateWithEmptyPasswordWhenPrompted() throws Exception { @@ -95,7 +95,7 @@ public void testMissingNoCreate() throws Exception { terminal.addSecretInput(randomFrom("", "keystorepassword")); terminal.addTextInput("n"); // explicit no 
execute("foo"); - assertNull(KeyStoreWrapper.load(env.configFile())); + assertNull(KeyStoreWrapper.load(env.configDir())); } public void testOverwritePromptDefault() throws Exception { diff --git a/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/AddStringKeyStoreCommandTests.java b/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/AddStringKeyStoreCommandTests.java index b80e60925c2a6..059c74ed8971c 100644 --- a/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/AddStringKeyStoreCommandTests.java +++ b/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/AddStringKeyStoreCommandTests.java @@ -101,7 +101,7 @@ public void testMissingPromptCreateWithoutPasswordWithoutPromptIfForced() throws public void testMissingNoCreate() throws Exception { terminal.addTextInput("n"); // explicit no execute("foo"); - assertNull(KeyStoreWrapper.load(env.configFile())); + assertNull(KeyStoreWrapper.load(env.configDir())); } public void testOverwritePromptDefault() throws Exception { @@ -161,7 +161,7 @@ public void testForceNonExistent() throws Exception { public void testPromptForValue() throws Exception { String password = "keystorepassword"; - KeyStoreWrapper.create().save(env.configFile(), password.toCharArray()); + KeyStoreWrapper.create().save(env.configDir(), password.toCharArray()); terminal.addSecretInput(password); terminal.addSecretInput("secret value"); execute("foo"); @@ -170,7 +170,7 @@ public void testPromptForValue() throws Exception { public void testPromptForMultipleValues() throws Exception { final String password = "keystorepassword"; - KeyStoreWrapper.create().save(env.configFile(), password.toCharArray()); + KeyStoreWrapper.create().save(env.configDir(), password.toCharArray()); terminal.addSecretInput(password); terminal.addSecretInput("bar1"); terminal.addSecretInput("bar2"); @@ -183,7 +183,7 @@ public void testPromptForMultipleValues() throws Exception { 
public void testStdinShort() throws Exception { String password = "keystorepassword"; - KeyStoreWrapper.create().save(env.configFile(), password.toCharArray()); + KeyStoreWrapper.create().save(env.configDir(), password.toCharArray()); terminal.addSecretInput(password); setInput("secret value 1"); execute("-x", "foo"); @@ -192,7 +192,7 @@ public void testStdinShort() throws Exception { public void testStdinLong() throws Exception { String password = "keystorepassword"; - KeyStoreWrapper.create().save(env.configFile(), password.toCharArray()); + KeyStoreWrapper.create().save(env.configDir(), password.toCharArray()); terminal.addSecretInput(password); setInput("secret value 2"); execute("--stdin", "foo"); @@ -201,7 +201,7 @@ public void testStdinLong() throws Exception { public void testStdinNoInput() throws Exception { String password = "keystorepassword"; - KeyStoreWrapper.create().save(env.configFile(), password.toCharArray()); + KeyStoreWrapper.create().save(env.configDir(), password.toCharArray()); terminal.addSecretInput(password); setInput(""); execute("-x", "foo"); @@ -210,7 +210,7 @@ public void testStdinNoInput() throws Exception { public void testStdinInputWithLineBreaks() throws Exception { String password = "keystorepassword"; - KeyStoreWrapper.create().save(env.configFile(), password.toCharArray()); + KeyStoreWrapper.create().save(env.configDir(), password.toCharArray()); terminal.addSecretInput(password); setInput("Typedthisandhitenter\n"); execute("-x", "foo"); @@ -219,7 +219,7 @@ public void testStdinInputWithLineBreaks() throws Exception { public void testStdinInputWithCarriageReturn() throws Exception { String password = "keystorepassword"; - KeyStoreWrapper.create().save(env.configFile(), password.toCharArray()); + KeyStoreWrapper.create().save(env.configDir(), password.toCharArray()); terminal.addSecretInput(password); setInput("Typedthisandhitenter\r"); execute("-x", "foo"); @@ -228,7 +228,7 @@ public void testStdinInputWithCarriageReturn() 
throws Exception { public void testStdinWithMultipleValues() throws Exception { final String password = "keystorepassword"; - KeyStoreWrapper.create().save(env.configFile(), password.toCharArray()); + KeyStoreWrapper.create().save(env.configDir(), password.toCharArray()); terminal.addSecretInput(password); setInput("bar1\nbar2\nbar3"); execute(randomFrom("-x", "--stdin"), "foo1", "foo2", "foo3"); @@ -239,7 +239,7 @@ public void testStdinWithMultipleValues() throws Exception { public void testAddUtf8String() throws Exception { String password = "keystorepassword"; - KeyStoreWrapper.create().save(env.configFile(), password.toCharArray()); + KeyStoreWrapper.create().save(env.configDir(), password.toCharArray()); terminal.addSecretInput(password); final int stringSize = randomIntBetween(8, 16); try (CharArrayWriter secretChars = new CharArrayWriter(stringSize)) { diff --git a/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/CreateKeyStoreCommandTests.java b/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/CreateKeyStoreCommandTests.java index 7d07208de766e..11bfc26e2425c 100644 --- a/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/CreateKeyStoreCommandTests.java +++ b/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/CreateKeyStoreCommandTests.java @@ -67,7 +67,7 @@ public void testNotMatchingPasswords() throws Exception { public void testDefaultNotPromptForPassword() throws Exception { execute(); - Path configDir = env.configFile(); + Path configDir = env.configDir(); assertNotNull(KeyStoreWrapper.load(configDir)); } @@ -76,7 +76,7 @@ public void testPosix() throws Exception { terminal.addSecretInput(password); terminal.addSecretInput(password); execute(); - Path configDir = env.configFile(); + Path configDir = env.configDir(); assertNotNull(KeyStoreWrapper.load(configDir)); } @@ -86,13 +86,13 @@ public void testNotPosix() throws Exception { 
terminal.addSecretInput(password); env = setupEnv(false, fileSystems); execute(); - Path configDir = env.configFile(); + Path configDir = env.configDir(); assertNotNull(KeyStoreWrapper.load(configDir)); } public void testOverwrite() throws Exception { String password = randomFrom("", "keystorepassword"); - Path keystoreFile = KeyStoreWrapper.keystorePath(env.configFile()); + Path keystoreFile = KeyStoreWrapper.keystorePath(env.configDir()); byte[] content = "not a keystore".getBytes(StandardCharsets.UTF_8); Files.write(keystoreFile, content); @@ -108,6 +108,6 @@ public void testOverwrite() throws Exception { terminal.addSecretInput(password); terminal.addSecretInput(password); execute(); - assertNotNull(KeyStoreWrapper.load(env.configFile())); + assertNotNull(KeyStoreWrapper.load(env.configDir())); } } diff --git a/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/KeyStoreCommandTestCase.java b/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/KeyStoreCommandTestCase.java index aa31e07368fc2..32618923498ff 100644 --- a/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/KeyStoreCommandTestCase.java +++ b/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/KeyStoreCommandTestCase.java @@ -92,12 +92,12 @@ KeyStoreWrapper createKeystore(String password, String... 
settings) throws Excep for (int i = 0; i < settings.length; i += 2) { keystore.setString(settings[i], settings[i + 1].toCharArray()); } - keystore.save(env.configFile(), password.toCharArray()); + keystore.save(env.configDir(), password.toCharArray()); return keystore; } KeyStoreWrapper loadKeystore(String password) throws Exception { - KeyStoreWrapper keystore = KeyStoreWrapper.load(env.configFile()); + KeyStoreWrapper keystore = KeyStoreWrapper.load(env.configDir()); keystore.decrypt(password.toCharArray()); return keystore; } diff --git a/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/KeyStoreWrapperTests.java b/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/KeyStoreWrapperTests.java index 2688e7637c9ba..70046c567b00e 100644 --- a/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/KeyStoreWrapperTests.java +++ b/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/KeyStoreWrapperTests.java @@ -103,8 +103,8 @@ public void testFileSettingExhaustiveBytes() throws Exception { bytes[i] = (byte) i; } keystore.setFile("foo", bytes); - keystore.save(env.configFile(), new char[0]); - keystore = KeyStoreWrapper.load(env.configFile()); + keystore.save(env.configDir(), new char[0]); + keystore = KeyStoreWrapper.load(env.configDir()); keystore.decrypt(new char[0]); try (InputStream stream = keystore.getFile("foo")) { for (int i = 0; i < 256; ++i) { @@ -125,11 +125,11 @@ public void testCreate() throws Exception { public void testDecryptKeyStoreWithWrongPassword() throws Exception { KeyStoreWrapper keystore = KeyStoreWrapper.create(); - keystore.save(env.configFile(), new char[0]); - final KeyStoreWrapper loadedkeystore = KeyStoreWrapper.load(env.configFile()); + keystore.save(env.configDir(), new char[0]); + final KeyStoreWrapper loadedKeystore = KeyStoreWrapper.load(env.configDir()); final SecurityException exception = expectThrows( SecurityException.class, - () -> 
loadedkeystore.decrypt(new char[] { 'i', 'n', 'v', 'a', 'l', 'i', 'd' }) + () -> loadedKeystore.decrypt(new char[] { 'i', 'n', 'v', 'a', 'l', 'i', 'd' }) ); if (inFipsJvm()) { assertThat( @@ -183,17 +183,17 @@ public void testValueSHA256Digest() throws Exception { public void testUpgradeNoop() throws Exception { KeyStoreWrapper keystore = KeyStoreWrapper.create(); SecureString seed = keystore.getString(KeyStoreWrapper.SEED_SETTING.getKey()); - keystore.save(env.configFile(), new char[0]); + keystore.save(env.configDir(), new char[0]); // upgrade does not overwrite seed - KeyStoreWrapper.upgrade(keystore, env.configFile(), new char[0]); + KeyStoreWrapper.upgrade(keystore, env.configDir(), new char[0]); assertEquals(seed.toString(), keystore.getString(KeyStoreWrapper.SEED_SETTING.getKey()).toString()); - keystore = KeyStoreWrapper.load(env.configFile()); + keystore = KeyStoreWrapper.load(env.configDir()); keystore.decrypt(new char[0]); assertEquals(seed.toString(), keystore.getString(KeyStoreWrapper.SEED_SETTING.getKey()).toString()); } public void testFailWhenCannotConsumeSecretStream() throws Exception { - Path configDir = env.configFile(); + Path configDir = env.configDir(); NIOFSDirectory directory = new NIOFSDirectory(configDir); try (IndexOutput indexOutput = directory.createOutput("opensearch.keystore", IOContext.DEFAULT)) { CodecUtil.writeHeader(indexOutput, "opensearch.keystore", 3); @@ -221,7 +221,7 @@ public void testFailWhenCannotConsumeSecretStream() throws Exception { } public void testFailWhenCannotConsumeEncryptedBytesStream() throws Exception { - Path configDir = env.configFile(); + Path configDir = env.configDir(); NIOFSDirectory directory = new NIOFSDirectory(configDir); try (IndexOutput indexOutput = directory.createOutput("opensearch.keystore", IOContext.DEFAULT)) { CodecUtil.writeHeader(indexOutput, "opensearch.keystore", 3); @@ -250,7 +250,7 @@ public void testFailWhenCannotConsumeEncryptedBytesStream() throws Exception { } public void 
testFailWhenSecretStreamNotConsumed() throws Exception { - Path configDir = env.configFile(); + Path configDir = env.configDir(); NIOFSDirectory directory = new NIOFSDirectory(configDir); try (IndexOutput indexOutput = directory.createOutput("opensearch.keystore", IOContext.DEFAULT)) { CodecUtil.writeHeader(indexOutput, "opensearch.keystore", 3); @@ -277,7 +277,7 @@ public void testFailWhenSecretStreamNotConsumed() throws Exception { } public void testFailWhenEncryptedBytesStreamIsNotConsumed() throws Exception { - Path configDir = env.configFile(); + Path configDir = env.configDir(); NIOFSDirectory directory = new NIOFSDirectory(configDir); try (IndexOutput indexOutput = directory.createOutput("opensearch.keystore", IOContext.DEFAULT)) { CodecUtil.writeHeader(indexOutput, "opensearch.keystore", 3); @@ -343,11 +343,11 @@ private void possiblyAlterEncryptedBytes( public void testUpgradeAddsSeed() throws Exception { KeyStoreWrapper keystore = KeyStoreWrapper.create(); keystore.remove(KeyStoreWrapper.SEED_SETTING.getKey()); - keystore.save(env.configFile(), new char[0]); - KeyStoreWrapper.upgrade(keystore, env.configFile(), new char[0]); + keystore.save(env.configDir(), new char[0]); + KeyStoreWrapper.upgrade(keystore, env.configDir(), new char[0]); SecureString seed = keystore.getString(KeyStoreWrapper.SEED_SETTING.getKey()); assertNotNull(seed); - keystore = KeyStoreWrapper.load(env.configFile()); + keystore = KeyStoreWrapper.load(env.configDir()); keystore.decrypt(new char[0]); assertEquals(seed.toString(), keystore.getString(KeyStoreWrapper.SEED_SETTING.getKey()).toString()); } @@ -364,7 +364,7 @@ public void testIllegalSettingName() throws Exception { public void testBackcompatV1() throws Exception { assumeFalse("Can't run in a FIPS JVM as PBE is not available", inFipsJvm()); - Path configDir = env.configFile(); + Path configDir = env.configDir(); NIOFSDirectory directory = new NIOFSDirectory(configDir); try (IndexOutput output = 
EndiannessReverserUtil.createOutput(directory, "opensearch.keystore", IOContext.DEFAULT)) { CodecUtil.writeHeader(output, "opensearch.keystore", 1); @@ -395,7 +395,7 @@ public void testBackcompatV1() throws Exception { public void testBackcompatV2() throws Exception { assumeFalse("Can't run in a FIPS JVM as PBE is not available", inFipsJvm()); - Path configDir = env.configFile(); + Path configDir = env.configDir(); NIOFSDirectory directory = new NIOFSDirectory(configDir); byte[] fileBytes = new byte[20]; random().nextBytes(fileBytes); @@ -457,10 +457,10 @@ public void testStringAndFileDistinction() throws Exception { final Path temp = createTempDir(); Files.write(temp.resolve("file_setting"), "file_value".getBytes(StandardCharsets.UTF_8)); wrapper.setFile("file_setting", Files.readAllBytes(temp.resolve("file_setting"))); - wrapper.save(env.configFile(), new char[0]); + wrapper.save(env.configDir(), new char[0]); wrapper.close(); - final KeyStoreWrapper afterSave = KeyStoreWrapper.load(env.configFile()); + final KeyStoreWrapper afterSave = KeyStoreWrapper.load(env.configDir()); assertNotNull(afterSave); afterSave.decrypt(new char[0]); assertThat(afterSave.getSettingNames(), equalTo(new HashSet<>(Arrays.asList("keystore.seed", "string_setting", "file_setting")))); diff --git a/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/UpgradeKeyStoreCommandTests.java b/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/UpgradeKeyStoreCommandTests.java index 8dd855ae6cf49..0fda83282c1f9 100644 --- a/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/UpgradeKeyStoreCommandTests.java +++ b/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/UpgradeKeyStoreCommandTests.java @@ -63,7 +63,7 @@ protected Environment createEnv(final Map settings) { @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/468") public void testKeystoreUpgrade() throws Exception { 
- final Path keystore = KeyStoreWrapper.keystorePath(env.configFile()); + final Path keystore = KeyStoreWrapper.keystorePath(env.configDir()); try ( InputStream is = KeyStoreWrapperTests.class.getResourceAsStream("/format-v3-opensearch.keystore"); OutputStream os = Files.newOutputStream(keystore) @@ -74,12 +74,12 @@ public void testKeystoreUpgrade() throws Exception { os.write(buffer, 0, read); } } - try (KeyStoreWrapper beforeUpgrade = KeyStoreWrapper.load(env.configFile())) { + try (KeyStoreWrapper beforeUpgrade = KeyStoreWrapper.load(env.configDir())) { assertNotNull(beforeUpgrade); assertThat(beforeUpgrade.getFormatVersion(), equalTo(3)); } execute(); - try (KeyStoreWrapper afterUpgrade = KeyStoreWrapper.load(env.configFile())) { + try (KeyStoreWrapper afterUpgrade = KeyStoreWrapper.load(env.configDir())) { assertNotNull(afterUpgrade); assertThat(afterUpgrade.getFormatVersion(), equalTo(KeyStoreWrapper.FORMAT_VERSION)); afterUpgrade.decrypt(new char[0]); @@ -89,7 +89,7 @@ public void testKeystoreUpgrade() throws Exception { public void testKeystoreDoesNotExist() { final UserException e = expectThrows(UserException.class, this::execute); - assertThat(e, hasToString(containsString("keystore not found at [" + KeyStoreWrapper.keystorePath(env.configFile()) + "]"))); + assertThat(e, hasToString(containsString("keystore not found at [" + KeyStoreWrapper.keystorePath(env.configDir()) + "]"))); } } diff --git a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/InstallPluginCommand.java b/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/InstallPluginCommand.java index 8acf137043a92..86b44799eba68 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/InstallPluginCommand.java +++ b/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/InstallPluginCommand.java @@ -269,8 +269,8 @@ void execute(Terminal terminal, List pluginIds, boolean isBatch, Environ final List deleteOnFailure = new ArrayList<>(); 
deleteOnFailures.put(pluginId, deleteOnFailure); - final Path pluginZip = download(terminal, pluginId, env.tmpFile(), isBatch); - final Path extractedZip = unzip(pluginZip, env.pluginsFile()); + final Path pluginZip = download(terminal, pluginId, env.tmpDir(), isBatch); + final Path extractedZip = unzip(pluginZip, env.pluginsDir()); deleteOnFailure.add(extractedZip); final PluginInfo pluginInfo = installPlugin(terminal, isBatch, extractedZip, env, deleteOnFailure); terminal.println("-> Installed " + pluginInfo.getName() + " with folder name " + pluginInfo.getTargetFolderName()); @@ -815,14 +815,14 @@ private PluginInfo loadPluginInfo(Terminal terminal, Path pluginRoot, Environmen PluginsService.verifyCompatibility(info); // checking for existing version of the plugin - verifyPluginName(env.pluginsFile(), info.getName()); + verifyPluginName(env.pluginsDir(), info.getName()); - PluginsService.checkForFailedPluginRemovals(env.pluginsFile()); + PluginsService.checkForFailedPluginRemovals(env.pluginsDir()); terminal.println(VERBOSE, info.toString()); // check for jar hell before any copying - jarHellCheck(info, pluginRoot, env.pluginsFile(), env.modulesFile()); + jarHellCheck(info, pluginRoot, env.pluginsDir(), env.modulesDir()); return info; } @@ -872,21 +872,21 @@ private PluginInfo installPlugin(Terminal terminal, boolean isBatch, Path tmpRoo Path policy = tmpRoot.resolve(PluginInfo.OPENSEARCH_PLUGIN_POLICY); final Set permissions; if (Files.exists(policy)) { - permissions = PluginSecurity.parsePermissions(policy, env.tmpFile()); + permissions = PluginSecurity.parsePermissions(policy, env.tmpDir()); } else { permissions = Collections.emptySet(); } PluginSecurity.confirmPolicyExceptions(terminal, permissions, isBatch); String targetFolderName = info.getTargetFolderName(); - final Path destination = env.pluginsFile().resolve(targetFolderName); + final Path destination = env.pluginsDir().resolve(targetFolderName); deleteOnFailure.add(destination); 
installPluginSupportFiles( info, tmpRoot, - env.binFile().resolve(targetFolderName), - env.configFile().resolve(targetFolderName), + env.binDir().resolve(targetFolderName), + env.configDir().resolve(targetFolderName), deleteOnFailure ); movePlugin(tmpRoot, destination); diff --git a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/ListPluginsCommand.java b/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/ListPluginsCommand.java index ecf702c4675de..d269603656114 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/ListPluginsCommand.java +++ b/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/ListPluginsCommand.java @@ -57,13 +57,13 @@ class ListPluginsCommand extends EnvironmentAwareCommand { @Override protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { - if (Files.exists(env.pluginsFile()) == false) { - throw new IOException("Plugins directory missing: " + env.pluginsFile()); + if (Files.exists(env.pluginsDir()) == false) { + throw new IOException("Plugins directory missing: " + env.pluginsDir()); } - terminal.println(Terminal.Verbosity.VERBOSE, "Plugins directory: " + env.pluginsFile()); + terminal.println(Terminal.Verbosity.VERBOSE, "Plugins directory: " + env.pluginsDir()); final List plugins = new ArrayList<>(); - try (DirectoryStream paths = Files.newDirectoryStream(env.pluginsFile())) { + try (DirectoryStream paths = Files.newDirectoryStream(env.pluginsDir())) { for (Path plugin : paths) { plugins.add(plugin); } @@ -75,7 +75,7 @@ protected void execute(Terminal terminal, OptionSet options, Environment env) th } private void printPlugin(Environment env, Terminal terminal, Path plugin, String prefix) throws IOException { - PluginInfo info = PluginInfo.readFromProperties(env.pluginsFile().resolve(plugin)); + PluginInfo info = PluginInfo.readFromProperties(env.pluginsDir().resolve(plugin)); terminal.println(Terminal.Verbosity.SILENT, 
prefix + info.getName()); terminal.println(Terminal.Verbosity.VERBOSE, info.toString(prefix)); if (info.getOpenSearchVersion().equals(Version.CURRENT) == false) { diff --git a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/RemovePluginCommand.java b/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/RemovePluginCommand.java index fb567e6609ba9..8fc98e5e14607 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/RemovePluginCommand.java +++ b/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/RemovePluginCommand.java @@ -99,7 +99,7 @@ void execute(Terminal terminal, Environment env, String pluginName, boolean purg // first make sure nothing extends this plugin List usedBy = new ArrayList<>(); - Set bundles = PluginsService.getPluginBundles(env.pluginsFile()); + Set bundles = PluginsService.getPluginBundles(env.pluginsDir()); for (PluginsService.Bundle bundle : bundles) { for (String extendedPlugin : bundle.plugin.getExtendedPlugins()) { if (extendedPlugin.equals(pluginName)) { @@ -114,9 +114,9 @@ void execute(Terminal terminal, Environment env, String pluginName, boolean purg ); } - Path pluginDir = env.pluginsFile().resolve(pluginName); - Path pluginConfigDir = env.configFile().resolve(pluginName); - Path removing = env.pluginsFile().resolve(".removing-" + pluginName); + Path pluginDir = env.pluginsDir().resolve(pluginName); + Path pluginConfigDir = env.configDir().resolve(pluginName); + Path removing = env.pluginsDir().resolve(".removing-" + pluginName); /* * If the plugin directory is not found with the plugin name, scan the list of all installed plugins @@ -124,9 +124,9 @@ void execute(Terminal terminal, Environment env, String pluginName, boolean purg */ if (!Files.exists(pluginDir)) { terminal.println("searching in other folders to find if plugin exists with custom folder name"); - pluginDir = PluginHelper.verifyIfPluginExists(env.pluginsFile(), pluginName); - pluginConfigDir = 
env.configFile().resolve(pluginDir.getFileName()); - removing = env.pluginsFile().resolve(".removing-" + pluginDir.getFileName()); + pluginDir = PluginHelper.verifyIfPluginExists(env.pluginsDir(), pluginName); + pluginConfigDir = env.configDir().resolve(pluginDir.getFileName()); + removing = env.pluginsDir().resolve(".removing-" + pluginDir.getFileName()); } terminal.println("-> removing [" + pluginName + "]..."); @@ -158,7 +158,7 @@ void execute(Terminal terminal, Environment env, String pluginName, boolean purg terminal.println(VERBOSE, "removing [" + pluginDir + "]"); } - final Path pluginBinDir = env.binFile().resolve(pluginName); + final Path pluginBinDir = env.binDir().resolve(pluginName); if (Files.exists(pluginBinDir)) { if (!Files.isDirectory(pluginBinDir)) { throw new UserException(ExitCodes.IO_ERROR, "bin dir for " + pluginName + " is not a directory"); diff --git a/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/InstallPluginCommandTests.java b/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/InstallPluginCommandTests.java index c1b4568759f4d..e4f477d78c16b 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/InstallPluginCommandTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/InstallPluginCommandTests.java @@ -317,7 +317,7 @@ void installPlugins(final List pluginUrls, final Path home, final Instal } void assertPlugin(String name, Path original, Environment env) throws IOException { - assertPluginInternal(name, env.pluginsFile(), original); + assertPluginInternal(name, env.pluginsDir(), original); assertConfigAndBin(name, original, env); assertInstallCleaned(env); } @@ -353,12 +353,12 @@ void assertPluginInternal(String name, Path pluginsFile, Path originalPlugin) th void assertConfigAndBin(String name, Path original, Environment env) throws IOException { if (Files.exists(original.resolve("bin"))) { - Path binDir = env.binFile().resolve(name); + Path 
binDir = env.binDir().resolve(name); assertTrue("bin dir exists", Files.exists(binDir)); assertTrue("bin is a dir", Files.isDirectory(binDir)); PosixFileAttributes binAttributes = null; if (isPosix) { - binAttributes = Files.readAttributes(env.binFile(), PosixFileAttributes.class); + binAttributes = Files.readAttributes(env.binDir(), PosixFileAttributes.class); } try (DirectoryStream stream = Files.newDirectoryStream(binDir)) { for (Path file : stream) { @@ -371,7 +371,7 @@ void assertConfigAndBin(String name, Path original, Environment env) throws IOEx } } if (Files.exists(original.resolve("config"))) { - Path configDir = env.configFile().resolve(name); + Path configDir = env.configDir().resolve(name); assertTrue("config dir exists", Files.exists(configDir)); assertTrue("config is a dir", Files.isDirectory(configDir)); @@ -379,7 +379,7 @@ void assertConfigAndBin(String name, Path original, Environment env) throws IOEx GroupPrincipal group = null; if (isPosix) { - PosixFileAttributes configAttributes = Files.getFileAttributeView(env.configFile(), PosixFileAttributeView.class) + PosixFileAttributes configAttributes = Files.getFileAttributeView(env.configDir(), PosixFileAttributeView.class) .readAttributes(); user = configAttributes.owner(); group = configAttributes.group(); @@ -408,7 +408,7 @@ void assertConfigAndBin(String name, Path original, Environment env) throws IOEx } void assertInstallCleaned(Environment env) throws IOException { - try (DirectoryStream stream = Files.newDirectoryStream(env.pluginsFile())) { + try (DirectoryStream stream = Files.newDirectoryStream(env.pluginsDir())) { for (Path file : stream) { if (file.getFileName().toString().startsWith(".installing")) { fail("Installation dir still exists, " + file); @@ -458,7 +458,7 @@ public void testTransaction() throws Exception { () -> installPlugins(Arrays.asList(pluginZip, pluginZip + "does-not-exist"), env.v1()) ); assertThat(e, hasToString(containsString("does-not-exist"))); - final Path 
fakeInstallPath = env.v2().pluginsFile().resolve("fake"); + final Path fakeInstallPath = env.v2().pluginsDir().resolve("fake"); // fake should have been removed when the file not found exception occurred assertFalse(Files.exists(fakeInstallPath)); assertInstallCleaned(env.v2()); @@ -468,7 +468,7 @@ public void testInstallFailsIfPreviouslyRemovedPluginFailed() throws Exception { Tuple env = createEnv(fs, temp); Path pluginDir = createPluginDir(temp); String pluginZip = createPluginUrl("fake", pluginDir); - final Path removing = env.v2().pluginsFile().resolve(".removing-failed"); + final Path removing = env.v2().pluginsDir().resolve(".removing-failed"); Files.createDirectory(removing); final IllegalStateException e = expectThrows(IllegalStateException.class, () -> installPlugin(pluginZip, env.v1())); final String expected = String.format( @@ -520,11 +520,11 @@ public void testPluginsDirReadOnly() throws Exception { assumeTrue("posix and filesystem", isPosix && isReal); Tuple env = createEnv(fs, temp); Path pluginDir = createPluginDir(temp); - try (PosixPermissionsResetter pluginsAttrs = new PosixPermissionsResetter(env.v2().pluginsFile())) { + try (PosixPermissionsResetter pluginsAttrs = new PosixPermissionsResetter(env.v2().pluginsDir())) { pluginsAttrs.setPermissions(new HashSet<>()); String pluginZip = createPluginUrl("fake", pluginDir); IOException e = expectThrows(IOException.class, () -> installPlugin(pluginZip, env.v1())); - assertTrue(e.getMessage(), e.getMessage().contains(env.v2().pluginsFile().toString())); + assertTrue(e.getMessage(), e.getMessage().contains(env.v2().pluginsDir().toString())); } assertInstallCleaned(env.v2()); } @@ -629,7 +629,7 @@ public void testBinConflict() throws Exception { Files.createFile(binDir.resolve("somescript")); String pluginZip = createPluginUrl("opensearch", pluginDir); FileAlreadyExistsException e = expectThrows(FileAlreadyExistsException.class, () -> installPlugin(pluginZip, env.v1())); - assertTrue(e.getMessage(), 
e.getMessage().contains(env.v2().binFile().resolve("opensearch").toString())); + assertTrue(e.getMessage(), e.getMessage().contains(env.v2().binDir().resolve("opensearch").toString())); assertInstallCleaned(env.v2()); } @@ -641,7 +641,7 @@ public void testBinPermissions() throws Exception { Files.createDirectory(binDir); Files.createFile(binDir.resolve("somescript")); String pluginZip = createPluginUrl("fake", pluginDir); - try (PosixPermissionsResetter binAttrs = new PosixPermissionsResetter(env.v2().binFile())) { + try (PosixPermissionsResetter binAttrs = new PosixPermissionsResetter(env.v2().binDir())) { Set perms = binAttrs.getCopyPermissions(); // make sure at least one execute perm is missing, so we know we forced it during installation perms.remove(PosixFilePermission.GROUP_EXECUTE); @@ -672,7 +672,7 @@ public void testPluginPermissions() throws Exception { installPlugin(pluginZip, env.v1()); assertPlugin("fake", pluginDir, env.v2()); - final Path fake = env.v2().pluginsFile().resolve("fake"); + final Path fake = env.v2().pluginsDir().resolve("fake"); final Path resources = fake.resolve("resources"); final Path platform = fake.resolve("platform"); final Path platformName = platform.resolve("linux-x64"); @@ -725,7 +725,7 @@ public void testConfig() throws Exception { public void testExistingConfig() throws Exception { Tuple env = createEnv(fs, temp); - Path envConfigDir = env.v2().configFile().resolve("fake"); + Path envConfigDir = env.v2().configDir().resolve("fake"); Files.createDirectories(envConfigDir); Files.write(envConfigDir.resolve("custom.yml"), "existing config".getBytes(StandardCharsets.UTF_8)); Path pluginDir = createPluginDir(temp); @@ -902,7 +902,7 @@ public void testPluginAlreadyInstalled() throws Exception { e.getMessage(), equalTo( "plugin directory [" - + env.v2().pluginsFile().resolve("fake") + + env.v2().pluginsDir().resolve("fake") + "] already exists; " + "if you need to update the plugin, uninstall it first using command 'remove fake'" 
) @@ -1493,7 +1493,7 @@ private void assertPolicyConfirmation(Tuple env, String plugi assertEquals("installation aborted by user", e.getMessage()); assertThat(terminal.getErrorOutput(), containsString("WARNING: " + warning)); - try (Stream fileStream = Files.list(env.v2().pluginsFile())) { + try (Stream fileStream = Files.list(env.v2().pluginsDir())) { assertThat(fileStream.collect(Collectors.toList()), empty()); } @@ -1506,7 +1506,7 @@ private void assertPolicyConfirmation(Tuple env, String plugi e = expectThrows(UserException.class, () -> installPlugin(pluginZip, env.v1())); assertEquals("installation aborted by user", e.getMessage()); assertThat(terminal.getErrorOutput(), containsString("WARNING: " + warning)); - try (Stream fileStream = Files.list(env.v2().pluginsFile())) { + try (Stream fileStream = Files.list(env.v2().pluginsDir())) { assertThat(fileStream.collect(Collectors.toList()), empty()); } } diff --git a/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/ListPluginsCommandTests.java b/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/ListPluginsCommandTests.java index 376e470159731..d84f36d818046 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/ListPluginsCommandTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/ListPluginsCommandTests.java @@ -111,7 +111,7 @@ private static void buildFakePlugin( final boolean hasNativeController ) throws IOException { PluginTestUtil.writePluginProperties( - env.pluginsFile().resolve(name), + env.pluginsDir().resolve(name), "description", description, "name", @@ -132,9 +132,9 @@ private static void buildFakePlugin( } public void testPluginsDirMissing() throws Exception { - Files.delete(env.pluginsFile()); + Files.delete(env.pluginsDir()); IOException e = expectThrows(IOException.class, () -> listPlugins(home)); - assertEquals("Plugins directory missing: " + env.pluginsFile(), e.getMessage()); + assertEquals("Plugins 
directory missing: " + env.pluginsDir(), e.getMessage()); } public void testNoPlugins() throws Exception { @@ -161,7 +161,7 @@ public void testPluginWithVerbose() throws Exception { MockTerminal terminal = listPlugins(home, params); assertEquals( buildMultiline( - "Plugins directory: " + env.pluginsFile(), + "Plugins directory: " + env.pluginsDir(), "fake_plugin", "- Plugin information:", "Name: fake_plugin", @@ -184,7 +184,7 @@ public void testPluginWithNativeController() throws Exception { MockTerminal terminal = listPlugins(home, params); assertEquals( buildMultiline( - "Plugins directory: " + env.pluginsFile(), + "Plugins directory: " + env.pluginsDir(), "fake_plugin1", "- Plugin information:", "Name: fake_plugin1", @@ -208,7 +208,7 @@ public void testPluginWithVerboseMultiplePlugins() throws Exception { MockTerminal terminal = listPlugins(home, params); assertEquals( buildMultiline( - "Plugins directory: " + env.pluginsFile(), + "Plugins directory: " + env.pluginsDir(), "fake_plugin1", "- Plugin information:", "Name: fake_plugin1", @@ -245,14 +245,14 @@ public void testPluginWithoutVerboseMultiplePlugins() throws Exception { } public void testPluginWithoutDescriptorFile() throws Exception { - final Path pluginDir = env.pluginsFile().resolve("fake1"); + final Path pluginDir = env.pluginsDir().resolve("fake1"); Files.createDirectories(pluginDir); NoSuchFileException e = expectThrows(NoSuchFileException.class, () -> listPlugins(home)); assertEquals(pluginDir.resolve(PluginInfo.OPENSEARCH_PLUGIN_PROPERTIES).toString(), e.getFile()); } public void testPluginWithWrongDescriptorFile() throws Exception { - final Path pluginDir = env.pluginsFile().resolve("fake1"); + final Path pluginDir = env.pluginsDir().resolve("fake1"); PluginTestUtil.writePluginProperties(pluginDir, "description", "fake desc"); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> listPlugins(home)); final Path descriptorPath = 
pluginDir.resolve(PluginInfo.OPENSEARCH_PLUGIN_PROPERTIES); @@ -261,7 +261,7 @@ public void testPluginWithWrongDescriptorFile() throws Exception { public void testExistingIncompatiblePlugin() throws Exception { PluginTestUtil.writePluginProperties( - env.pluginsFile().resolve("fake_plugin1"), + env.pluginsDir().resolve("fake_plugin1"), "description", "fake desc 1", "name", diff --git a/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/RemovePluginCommandTests.java b/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/RemovePluginCommandTests.java index 8f9aa27be7e84..ab23dfad75683 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/RemovePluginCommandTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/RemovePluginCommandTests.java @@ -93,11 +93,11 @@ public void setUp() throws Exception { } void createPlugin(String name, String... additionalProps) throws IOException { - createPlugin(env.pluginsFile(), name, Version.CURRENT, additionalProps); + createPlugin(env.pluginsDir(), name, Version.CURRENT, additionalProps); } void createPlugin(String name, Version version) throws IOException { - createPlugin(env.pluginsFile(), name, version); + createPlugin(env.pluginsDir(), name, version); } void createPlugin(Path path, String name, Version version, String... 
additionalProps) throws IOException { @@ -130,7 +130,7 @@ static MockTerminal removePlugin(String name, Path home, boolean purge) throws E } static void assertRemoveCleaned(Environment env) throws IOException { - try (DirectoryStream stream = Files.newDirectoryStream(env.pluginsFile())) { + try (DirectoryStream stream = Files.newDirectoryStream(env.pluginsDir())) { for (Path file : stream) { if (file.getFileName().toString().startsWith(".removing")) { fail("Removal dir still exists, " + file); @@ -147,23 +147,23 @@ public void testMissing() throws Exception { public void testBasic() throws Exception { createPlugin("fake"); - Files.createFile(env.pluginsFile().resolve("fake").resolve("plugin.jar")); - Files.createDirectory(env.pluginsFile().resolve("fake").resolve("subdir")); + Files.createFile(env.pluginsDir().resolve("fake").resolve("plugin.jar")); + Files.createDirectory(env.pluginsDir().resolve("fake").resolve("subdir")); createPlugin("other"); removePlugin("fake", home, randomBoolean()); - assertFalse(Files.exists(env.pluginsFile().resolve("fake"))); - assertTrue(Files.exists(env.pluginsFile().resolve("other"))); + assertFalse(Files.exists(env.pluginsDir().resolve("fake"))); + assertTrue(Files.exists(env.pluginsDir().resolve("other"))); assertRemoveCleaned(env); } public void testRemovePluginWithCustomFolderName() throws Exception { createPlugin("fake", "custom.foldername", "custom-folder"); - Files.createFile(env.pluginsFile().resolve("custom-folder").resolve("plugin.jar")); - Files.createDirectory(env.pluginsFile().resolve("custom-folder").resolve("subdir")); + Files.createFile(env.pluginsDir().resolve("custom-folder").resolve("plugin.jar")); + Files.createDirectory(env.pluginsDir().resolve("custom-folder").resolve("subdir")); createPlugin("other"); removePlugin("fake", home, randomBoolean()); - assertFalse(Files.exists(env.pluginsFile().resolve("custom-folder"))); - assertTrue(Files.exists(env.pluginsFile().resolve("other"))); + 
assertFalse(Files.exists(env.pluginsDir().resolve("custom-folder"))); + assertTrue(Files.exists(env.pluginsDir().resolve("other"))); assertRemoveCleaned(env); } @@ -177,62 +177,62 @@ public void testRemoveOldVersion() throws Exception { ) ); removePlugin("fake", home, randomBoolean()); - assertThat(Files.exists(env.pluginsFile().resolve("fake")), equalTo(false)); + assertThat(Files.exists(env.pluginsDir().resolve("fake")), equalTo(false)); assertRemoveCleaned(env); } public void testBin() throws Exception { createPlugin("fake"); - Path binDir = env.binFile().resolve("fake"); + Path binDir = env.binDir().resolve("fake"); Files.createDirectories(binDir); Files.createFile(binDir.resolve("somescript")); removePlugin("fake", home, randomBoolean()); - assertFalse(Files.exists(env.pluginsFile().resolve("fake"))); - assertTrue(Files.exists(env.binFile().resolve("opensearch"))); + assertFalse(Files.exists(env.pluginsDir().resolve("fake"))); + assertTrue(Files.exists(env.binDir().resolve("opensearch"))); assertFalse(Files.exists(binDir)); assertRemoveCleaned(env); } public void testBinNotDir() throws Exception { createPlugin("fake"); - Files.createFile(env.binFile().resolve("fake")); + Files.createFile(env.binDir().resolve("fake")); UserException e = expectThrows(UserException.class, () -> removePlugin("fake", home, randomBoolean())); assertTrue(e.getMessage(), e.getMessage().contains("not a directory")); - assertTrue(Files.exists(env.pluginsFile().resolve("fake"))); // did not remove - assertTrue(Files.exists(env.binFile().resolve("fake"))); + assertTrue(Files.exists(env.pluginsDir().resolve("fake"))); // did not remove + assertTrue(Files.exists(env.binDir().resolve("fake"))); assertRemoveCleaned(env); } public void testConfigDirPreserved() throws Exception { createPlugin("fake"); - final Path configDir = env.configFile().resolve("fake"); + final Path configDir = env.configDir().resolve("fake"); Files.createDirectories(configDir); 
Files.createFile(configDir.resolve("fake.yml")); final MockTerminal terminal = removePlugin("fake", home, false); - assertTrue(Files.exists(env.configFile().resolve("fake"))); + assertTrue(Files.exists(env.configDir().resolve("fake"))); assertThat(terminal.getOutput(), containsString(expectedConfigDirPreservedMessage(configDir))); assertRemoveCleaned(env); } public void testPurgePluginExists() throws Exception { createPlugin("fake"); - final Path configDir = env.configFile().resolve("fake"); + final Path configDir = env.configDir().resolve("fake"); if (randomBoolean()) { Files.createDirectories(configDir); Files.createFile(configDir.resolve("fake.yml")); } final MockTerminal terminal = removePlugin("fake", home, true); - assertFalse(Files.exists(env.configFile().resolve("fake"))); + assertFalse(Files.exists(env.configDir().resolve("fake"))); assertThat(terminal.getOutput(), not(containsString(expectedConfigDirPreservedMessage(configDir)))); assertRemoveCleaned(env); } public void testPurgePluginDoesNotExist() throws Exception { - final Path configDir = env.configFile().resolve("fake"); + final Path configDir = env.configDir().resolve("fake"); Files.createDirectories(configDir); Files.createFile(configDir.resolve("fake.yml")); final MockTerminal terminal = removePlugin("fake", home, true); - assertFalse(Files.exists(env.configFile().resolve("fake"))); + assertFalse(Files.exists(env.configDir().resolve("fake"))); assertThat(terminal.getOutput(), not(containsString(expectedConfigDirPreservedMessage(configDir)))); assertRemoveCleaned(env); } @@ -243,8 +243,8 @@ public void testPurgeNothingExists() throws Exception { } public void testPurgeOnlyMarkerFileExists() throws Exception { - final Path configDir = env.configFile().resolve("fake"); - final Path removing = env.pluginsFile().resolve(".removing-fake"); + final Path configDir = env.configDir().resolve("fake"); + final Path removing = env.pluginsDir().resolve(".removing-fake"); Files.createFile(removing); final 
MockTerminal terminal = removePlugin("fake", home, randomBoolean()); assertFalse(Files.exists(removing)); @@ -253,7 +253,7 @@ public void testPurgeOnlyMarkerFileExists() throws Exception { public void testNoConfigDirPreserved() throws Exception { createPlugin("fake"); - final Path configDir = env.configFile().resolve("fake"); + final Path configDir = env.configDir().resolve("fake"); final MockTerminal terminal = removePlugin("fake", home, randomBoolean()); assertThat(terminal.getOutput(), not(containsString(expectedConfigDirPreservedMessage(configDir)))); } @@ -293,8 +293,8 @@ public void testMissingPluginName() throws Exception { public void testRemoveWhenRemovingMarker() throws Exception { createPlugin("fake"); - Files.createFile(env.pluginsFile().resolve("fake").resolve("plugin.jar")); - Files.createFile(env.pluginsFile().resolve(".removing-fake")); + Files.createFile(env.pluginsDir().resolve("fake").resolve("plugin.jar")); + Files.createFile(env.pluginsDir().resolve(".removing-fake")); removePlugin("fake", home, randomBoolean()); } diff --git a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/TaskInput.java b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/TaskInput.java index dde84f3f0ebe8..2fbd5d9a0fa7c 100644 --- a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/TaskInput.java +++ b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/TaskInput.java @@ -83,11 +83,11 @@ public void setEsHome(Path esHome) { } public Path getOpenSearchConfig() { - return openSearchEnv.configFile(); + return openSearchEnv.configDir(); } public Path getOpenSearchBin() { - return openSearchEnv.binFile(); + return openSearchEnv.binDir(); } public boolean isRunning() { diff --git a/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/UpgradeCliTests.java b/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/UpgradeCliTests.java index 3db782925a660..39fa2cef24bb1 100644 --- 
a/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/UpgradeCliTests.java +++ b/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/UpgradeCliTests.java @@ -121,7 +121,7 @@ private void assertYmlConfigImported() throws IOException { "path.logs: \"/var/log/eslogs\"" ) ); - List actualSettings = Files.readAllLines(env.configFile().resolve("opensearch.yml")) + List actualSettings = Files.readAllLines(env.configDir().resolve("opensearch.yml")) .stream() .filter(Objects::nonNull) .filter(line -> !line.isEmpty()) @@ -132,7 +132,7 @@ private void assertYmlConfigImported() throws IOException { private void assertKeystoreImported(String passwd) throws IOException, GeneralSecurityException { // assert keystore is created - KeyStoreWrapper keystore = KeyStoreWrapper.load(env.configFile()); + KeyStoreWrapper keystore = KeyStoreWrapper.load(env.configDir()); assertNotNull(keystore); // assert all keystore settings are imported @@ -148,13 +148,13 @@ private void assertKeystoreImported(String passwd) throws IOException, GeneralSe } private void assertJvmOptionsImported() throws IOException, GeneralSecurityException { - Path path = env.configFile().resolve("jvm.options.d"); + Path path = env.configDir().resolve("jvm.options.d"); assertThat(Files.exists(path), is(true)); assertThat(Files.isDirectory(path), is(true)); assertThat(Files.exists(path.resolve("test.options")), is(true)); } private void assertLog4jPropertiesImported() throws IOException, GeneralSecurityException { - assertThat(Files.exists(env.configFile().resolve("log4j2.properties")), is(true)); + assertThat(Files.exists(env.configDir().resolve("log4j2.properties")), is(true)); } } diff --git a/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/ValidateInputTaskTests.java b/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/ValidateInputTaskTests.java index f72e49d5961bf..07cb19b132f31 100644 --- 
a/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/ValidateInputTaskTests.java +++ b/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/ValidateInputTaskTests.java @@ -64,6 +64,6 @@ public void testGetSummaryFields() { assertThat(summary.get("Elasticsearch Version"), is("7.10.2")); assertThat(summary.get("Elasticsearch Plugins"), is("[plugin-1, plugin-2]")); assertThat(summary.get("Elasticsearch Config"), is("es_home")); - assertThat(summary.get("OpenSearch Config"), is(env.configFile().toString())); + assertThat(summary.get("OpenSearch Config"), is(env.configDir().toString())); } } diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/HyphenationCompoundWordTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/HyphenationCompoundWordTokenFilterFactory.java index 875c5261f8387..b46bef3e6c563 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/HyphenationCompoundWordTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/HyphenationCompoundWordTokenFilterFactory.java @@ -61,7 +61,7 @@ public class HyphenationCompoundWordTokenFilterFactory extends AbstractCompoundW throw new IllegalArgumentException("hyphenation_patterns_path is a required setting."); } - Path hyphenationPatternsFile = env.configFile().resolve(hyphenationPatternsPath); + Path hyphenationPatternsFile = env.configDir().resolve(hyphenationPatternsPath); try { InputStream in = Files.newInputStream(hyphenationPatternsFile); diff --git a/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/IngestGeoIpPlugin.java b/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/IngestGeoIpPlugin.java index 6af408c185374..790a9bb4bf978 100644 --- a/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/IngestGeoIpPlugin.java +++ b/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/IngestGeoIpPlugin.java 
@@ -82,7 +82,7 @@ public Map getProcessors(Processor.Parameters paramet throw new IllegalStateException("getProcessors called twice for geoip plugin!!"); } final Path geoIpDirectory = getGeoIpDirectory(parameters); - final Path geoIpConfigDirectory = parameters.env.configFile().resolve("ingest-geoip"); + final Path geoIpConfigDirectory = parameters.env.configDir().resolve("ingest-geoip"); long cacheSize = CACHE_SIZE.get(parameters.env.settings()); try { databaseReaders = loadDatabaseReaders(geoIpDirectory, geoIpConfigDirectory); @@ -102,7 +102,7 @@ public Map getProcessors(Processor.Parameters paramet private Path getGeoIpDirectory(Processor.Parameters parameters) { final Path geoIpDirectory; if (parameters.env.settings().get("ingest.geoip.database_path") == null) { - geoIpDirectory = parameters.env.modulesFile().resolve("ingest-geoip"); + geoIpDirectory = parameters.env.modulesDir().resolve("ingest-geoip"); } else { geoIpDirectory = PathUtils.get(parameters.env.settings().get("ingest.geoip.database_path")); } diff --git a/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/IngestUserAgentPlugin.java b/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/IngestUserAgentPlugin.java index ee424ad1322fb..dc005ae36dff8 100644 --- a/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/IngestUserAgentPlugin.java +++ b/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/IngestUserAgentPlugin.java @@ -62,7 +62,7 @@ public class IngestUserAgentPlugin extends Plugin implements IngestPlugin { @Override public Map getProcessors(Processor.Parameters parameters) { - Path userAgentConfigDirectory = parameters.env.configFile().resolve("ingest-user-agent"); + Path userAgentConfigDirectory = parameters.env.configDir().resolve("ingest-user-agent"); if (Files.exists(userAgentConfigDirectory) == false && Files.isDirectory(userAgentConfigDirectory)) { throw new IllegalStateException( diff --git 
a/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexSslConfig.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexSslConfig.java index f48422d41ea9e..34fcd245289be 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexSslConfig.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexSslConfig.java @@ -126,7 +126,7 @@ protected List getSettingAsList(String key) throws Exception { return settings.getAsList(key); } }; - configuration = loader.load(environment.configFile()); + configuration = loader.load(environment.configDir()); reload(); final FileChangesListener listener = new FileChangesListener() { diff --git a/plugins/analysis-icu/src/main/java/org/opensearch/index/analysis/IcuCollationTokenFilterFactory.java b/plugins/analysis-icu/src/main/java/org/opensearch/index/analysis/IcuCollationTokenFilterFactory.java index 757a55487a162..cd2898c9d64b4 100644 --- a/plugins/analysis-icu/src/main/java/org/opensearch/index/analysis/IcuCollationTokenFilterFactory.java +++ b/plugins/analysis-icu/src/main/java/org/opensearch/index/analysis/IcuCollationTokenFilterFactory.java @@ -33,7 +33,7 @@ package org.opensearch.index.analysis; import java.io.IOException; -import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.InvalidPathException; @@ -72,7 +72,7 @@ public IcuCollationTokenFilterFactory(IndexSettings indexSettings, Environment e if (rules != null) { Exception failureToResolve = null; try { - rules = Streams.copyToString(Files.newBufferedReader(environment.configFile().resolve(rules), Charset.forName("UTF-8"))); + rules = Streams.copyToString(Files.newBufferedReader(environment.configDir().resolve(rules), StandardCharsets.UTF_8)); } catch (IOException | SecurityException | InvalidPathException e) { failureToResolve = e; } diff --git a/plugins/analysis-icu/src/main/java/org/opensearch/index/analysis/IcuTokenizerFactory.java 
b/plugins/analysis-icu/src/main/java/org/opensearch/index/analysis/IcuTokenizerFactory.java index 37c60e02bb3b0..0ac9e0c106a91 100644 --- a/plugins/analysis-icu/src/main/java/org/opensearch/index/analysis/IcuTokenizerFactory.java +++ b/plugins/analysis-icu/src/main/java/org/opensearch/index/analysis/IcuTokenizerFactory.java @@ -120,7 +120,7 @@ public RuleBasedBreakIterator getBreakIterator(int script) { // parse a single RBBi rule file private BreakIterator parseRules(String filename, Environment env) throws IOException { - final Path path = env.configFile().resolve(filename); + final Path path = env.configDir().resolve(filename); String rules = Files.readAllLines(path).stream().filter((v) -> v.startsWith("#") == false).collect(Collectors.joining("\n")); return new RuleBasedBreakIterator(rules.toString()); diff --git a/plugins/examples/custom-settings/src/main/java/org/opensearch/example/customsettings/ExampleCustomSettingsConfig.java b/plugins/examples/custom-settings/src/main/java/org/opensearch/example/customsettings/ExampleCustomSettingsConfig.java index 5f494147c870f..8413a750e2741 100644 --- a/plugins/examples/custom-settings/src/main/java/org/opensearch/example/customsettings/ExampleCustomSettingsConfig.java +++ b/plugins/examples/custom-settings/src/main/java/org/opensearch/example/customsettings/ExampleCustomSettingsConfig.java @@ -96,7 +96,7 @@ public class ExampleCustomSettingsConfig { public ExampleCustomSettingsConfig(final Environment environment) { // Elasticsearch config directory - final Path configDir = environment.configFile(); + final Path configDir = environment.configDir(); // Resolve the plugin's custom settings file final Path customSettingsYamlFile = configDir.resolve("custom-settings/custom.yml"); diff --git a/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsSecurityContext.java b/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsSecurityContext.java index 9078e2b76cc6d..03abb94e1263c 
100644 --- a/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsSecurityContext.java +++ b/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsSecurityContext.java @@ -102,7 +102,7 @@ class HdfsSecurityContext { * Expects keytab file to exist at {@code $CONFIG_DIR$/repository-hdfs/krb5.keytab} */ static Path locateKeytabFile(Environment environment) { - Path keytabPath = environment.configFile().resolve("repository-hdfs").resolve("krb5.keytab"); + Path keytabPath = environment.configDir().resolve("repository-hdfs").resolve("krb5.keytab"); try { if (Files.exists(keytabPath) == false) { throw new RuntimeException("Could not locate keytab at [" + keytabPath + "]."); diff --git a/qa/evil-tests/src/test/java/org/opensearch/bootstrap/EvilSecurityTests.java b/qa/evil-tests/src/test/java/org/opensearch/bootstrap/EvilSecurityTests.java index 6f9368fa767b0..3dcc547fa002b 100644 --- a/qa/evil-tests/src/test/java/org/opensearch/bootstrap/EvilSecurityTests.java +++ b/qa/evil-tests/src/test/java/org/opensearch/bootstrap/EvilSecurityTests.java @@ -125,23 +125,23 @@ public void testEnvironmentPaths() throws Exception { // check that all directories got permissions: // bin file: ro - assertExactPermissions(new FilePermission(environment.binFile().toString(), "read,readlink"), permissions); + assertExactPermissions(new FilePermission(environment.binDir().toString(), "read,readlink"), permissions); // lib file: ro - assertExactPermissions(new FilePermission(environment.libFile().toString(), "read,readlink"), permissions); + assertExactPermissions(new FilePermission(environment.libDir().toString(), "read,readlink"), permissions); // modules file: ro - assertExactPermissions(new FilePermission(environment.modulesFile().toString(), "read,readlink"), permissions); + assertExactPermissions(new FilePermission(environment.modulesDir().toString(), "read,readlink"), permissions); // config file: ro - assertExactPermissions(new 
FilePermission(environment.configFile().toString(), "read,readlink"), permissions); + assertExactPermissions(new FilePermission(environment.configDir().toString(), "read,readlink"), permissions); // plugins: ro - assertExactPermissions(new FilePermission(environment.pluginsFile().toString(), "read,readlink"), permissions); + assertExactPermissions(new FilePermission(environment.pluginsDir().toString(), "read,readlink"), permissions); // data paths: r/w for (Path dataPath : environment.dataFiles()) { assertExactPermissions(new FilePermission(dataPath.toString(), "read,readlink,write,delete"), permissions); } - assertExactPermissions(new FilePermission(environment.sharedDataFile().toString(), "read,readlink,write,delete"), permissions); + assertExactPermissions(new FilePermission(environment.sharedDataDir().toString(), "read,readlink,write,delete"), permissions); // logs: r/w - assertExactPermissions(new FilePermission(environment.logsFile().toString(), "read,readlink,write,delete"), permissions); + assertExactPermissions(new FilePermission(environment.logsDir().toString(), "read,readlink,write,delete"), permissions); // temp dir: r/w assertExactPermissions(new FilePermission(fakeTmpDir.toString(), "read,readlink,write,delete"), permissions); // PID file: delete only (for the shutdown hook) diff --git a/qa/no-bootstrap-tests/src/test/java/org/opensearch/bootstrap/SpawnerNoBootstrapTests.java b/qa/no-bootstrap-tests/src/test/java/org/opensearch/bootstrap/SpawnerNoBootstrapTests.java index 949369899dc82..c3c332aecfd4c 100644 --- a/qa/no-bootstrap-tests/src/test/java/org/opensearch/bootstrap/SpawnerNoBootstrapTests.java +++ b/qa/no-bootstrap-tests/src/test/java/org/opensearch/bootstrap/SpawnerNoBootstrapTests.java @@ -90,8 +90,8 @@ public void testNoControllerSpawn() throws IOException { Environment environment = TestEnvironment.newEnvironment(settings); // This plugin will NOT have a controller daemon - Path plugin = environment.modulesFile().resolve("a_plugin"); - 
Files.createDirectories(environment.modulesFile()); + Path plugin = environment.modulesDir().resolve("a_plugin"); + Files.createDirectories(environment.modulesDir()); Files.createDirectories(plugin); PluginTestUtil.writePluginProperties( plugin, @@ -113,8 +113,8 @@ public void testNoControllerSpawn() throws IOException { * Two plugins - one with a controller daemon and one without. */ public void testControllerSpawn() throws Exception { - assertControllerSpawns(Environment::pluginsFile, false); - assertControllerSpawns(Environment::modulesFile, true); + assertControllerSpawns(Environment::pluginsDir, false); + assertControllerSpawns(Environment::modulesDir, true); } private void assertControllerSpawns(final Function pluginsDirFinder, boolean expectSpawn) throws Exception { @@ -133,8 +133,8 @@ private void assertControllerSpawns(final Function pluginsDir // this plugin will have a controller daemon Path plugin = pluginsDirFinder.apply(environment).resolve("test_plugin"); - Files.createDirectories(environment.modulesFile()); - Files.createDirectories(environment.pluginsFile()); + Files.createDirectories(environment.modulesDir()); + Files.createDirectories(environment.pluginsDir()); Files.createDirectories(plugin); PluginTestUtil.writePluginProperties( plugin, @@ -192,7 +192,7 @@ public void testControllerSpawnWithIncorrectDescriptor() throws IOException { Environment environment = TestEnvironment.newEnvironment(settings); - Path plugin = environment.modulesFile().resolve("test_plugin"); + Path plugin = environment.modulesDir().resolve("test_plugin"); Files.createDirectories(plugin); PluginTestUtil.writePluginProperties( plugin, diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/ReloadSecureSettingsIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/ReloadSecureSettingsIT.java index 7e6dad47121a9..c220591b47856 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/ReloadSecureSettingsIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/action/admin/ReloadSecureSettingsIT.java @@ -77,7 +77,7 @@ public void testMissingKeystoreFile() throws Exception { final Environment environment = internalCluster().getInstance(Environment.class); final AtomicReference reloadSettingsError = new AtomicReference<>(); // keystore file should be missing for this test case - Files.deleteIfExists(KeyStoreWrapper.keystorePath(environment.configFile())); + Files.deleteIfExists(KeyStoreWrapper.keystorePath(environment.configDir())); final int initialReloadCount = mockReloadablePlugin.getReloadCount(); final CountDownLatch latch = new CountDownLatch(1); final SecureString emptyPassword = randomBoolean() ? new SecureString(new char[0]) : null; @@ -130,10 +130,10 @@ public void testInvalidKeystoreFile() throws Exception { final int initialReloadCount = mockReloadablePlugin.getReloadCount(); // invalid "keystore" file should be present in the config dir try (InputStream keystore = ReloadSecureSettingsIT.class.getResourceAsStream("invalid.txt.keystore")) { - if (Files.exists(environment.configFile()) == false) { - Files.createDirectory(environment.configFile()); + if (Files.exists(environment.configDir()) == false) { + Files.createDirectory(environment.configDir()); } - Files.copy(keystore, KeyStoreWrapper.keystorePath(environment.configFile()), StandardCopyOption.REPLACE_EXISTING); + Files.copy(keystore, KeyStoreWrapper.keystorePath(environment.configDir()), StandardCopyOption.REPLACE_EXISTING); } final CountDownLatch latch = new CountDownLatch(1); final SecureString emptyPassword = randomBoolean() ? 
new SecureString(new char[0]) : null; @@ -452,7 +452,7 @@ public void onFailure(Exception e) { private SecureSettings writeEmptyKeystore(Environment environment, char[] password) throws Exception { final KeyStoreWrapper keyStoreWrapper = KeyStoreWrapper.create(); try { - keyStoreWrapper.save(environment.configFile(), password); + keyStoreWrapper.save(environment.configDir(), password); } catch (final AccessControlException e) { if (e.getPermission() instanceof RuntimePermission && e.getPermission().getName().equals("accessUserInformation")) { // this is expected: the save method is extra diligent and wants to make sure diff --git a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java index efc522a1f9741..5f014e89e330e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java @@ -247,7 +247,7 @@ public void testUpdatePriority() { public void testIndexDirIsDeletedWhenShardRemoved() throws Exception { Environment env = getInstanceFromNode(Environment.class); - Path idxPath = env.sharedDataFile().resolve(randomAlphaOfLength(10)); + Path idxPath = env.sharedDataDir().resolve(randomAlphaOfLength(10)); logger.info("--> idxPath: [{}]", idxPath); Settings idxSettings = Settings.builder().put(IndexMetadata.SETTING_DATA_PATH, idxPath).build(); createIndex("test", idxSettings); @@ -282,7 +282,7 @@ public void testExpectedShardSizeIsPresent() throws InterruptedException { public void testIndexCanChangeCustomDataPath() throws Exception { final String index = "test-custom-data-path"; - final Path sharedDataPath = getInstanceFromNode(Environment.class).sharedDataFile().resolve(randomAsciiLettersOfLength(10)); + final Path sharedDataPath = getInstanceFromNode(Environment.class).sharedDataDir().resolve(randomAsciiLettersOfLength(10)); final Path 
indexDataPath = sharedDataPath.resolve("start-" + randomAsciiLettersOfLength(10)); logger.info("--> creating index [{}] with data_path [{}]", index, indexDataPath); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java index e51ba62c804d7..d4e1a936263c9 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java @@ -144,7 +144,7 @@ protected NodesReloadSecureSettingsResponse.NodeResponse nodeOperation(NodeReque final SecureString secureSettingsPassword = request.hasPassword() ? request.getSecureSettingsPassword() : new SecureString(new char[0]); - try (KeyStoreWrapper keystore = KeyStoreWrapper.load(environment.configFile())) { + try (KeyStoreWrapper keystore = KeyStoreWrapper.load(environment.configDir())) { // reread keystore from config file if (keystore == null) { return new NodesReloadSecureSettingsResponse.NodeResponse( diff --git a/server/src/main/java/org/opensearch/bootstrap/Bootstrap.java b/server/src/main/java/org/opensearch/bootstrap/Bootstrap.java index 30a883454ba8c..58ca3cdf78033 100644 --- a/server/src/main/java/org/opensearch/bootstrap/Bootstrap.java +++ b/server/src/main/java/org/opensearch/bootstrap/Bootstrap.java @@ -189,7 +189,7 @@ private void setup(boolean addShutdownHook, Environment environment) throws Boot } initializeNatives( - environment.tmpFile(), + environment.tmpDir(), BootstrapSettings.MEMORY_LOCK_SETTING.get(settings), BootstrapSettings.SYSTEM_CALL_FILTER_SETTING.get(settings), BootstrapSettings.CTRLHANDLER_SETTING.get(settings) @@ -254,7 +254,7 @@ protected void validateNodeBeforeAcceptingRequests( static SecureSettings loadSecureSettings(Environment 
initialEnv) throws BootstrapException { final KeyStoreWrapper keystore; try { - keystore = KeyStoreWrapper.load(initialEnv.configFile()); + keystore = KeyStoreWrapper.load(initialEnv.configDir()); } catch (IOException e) { throw new BootstrapException(e); } @@ -273,11 +273,11 @@ static SecureSettings loadSecureSettings(Environment initialEnv) throws Bootstra try { if (keystore == null) { final KeyStoreWrapper keyStoreWrapper = KeyStoreWrapper.create(); - keyStoreWrapper.save(initialEnv.configFile(), new char[0]); + keyStoreWrapper.save(initialEnv.configDir(), new char[0]); return keyStoreWrapper; } else { keystore.decrypt(password.getChars()); - KeyStoreWrapper.upgrade(keystore, initialEnv.configFile(), password.getChars()); + KeyStoreWrapper.upgrade(keystore, initialEnv.configDir(), password.getChars()); } } catch (Exception e) { throw new BootstrapException(e); @@ -366,7 +366,7 @@ static void init(final boolean foreground, final Path pidFile, final boolean qui INSTANCE = new Bootstrap(); final SecureSettings keystore = loadSecureSettings(initialEnv); - final Environment environment = createEnvironment(pidFile, keystore, initialEnv.settings(), initialEnv.configFile()); + final Environment environment = createEnvironment(pidFile, keystore, initialEnv.settings(), initialEnv.configDir()); LogConfigurator.setNodeName(Node.NODE_NAME_SETTING.get(environment.settings())); try { diff --git a/server/src/main/java/org/opensearch/bootstrap/OpenSearch.java b/server/src/main/java/org/opensearch/bootstrap/OpenSearch.java index 4395ae98c9ba6..7f96ea425d17e 100644 --- a/server/src/main/java/org/opensearch/bootstrap/OpenSearch.java +++ b/server/src/main/java/org/opensearch/bootstrap/OpenSearch.java @@ -160,7 +160,7 @@ protected void execute(Terminal terminal, OptionSet options, Environment env) th // a misconfigured java.io.tmpdir can cause hard-to-diagnose problems later, so reject it immediately try { - env.validateTmpFile(); + env.validateTmpDir(); } catch (IOException e) { 
throw new UserException(ExitCodes.CONFIG, e.getMessage()); } diff --git a/server/src/main/java/org/opensearch/bootstrap/Security.java b/server/src/main/java/org/opensearch/bootstrap/Security.java index b3f4339f3e386..59ca91a68e025 100644 --- a/server/src/main/java/org/opensearch/bootstrap/Security.java +++ b/server/src/main/java/org/opensearch/bootstrap/Security.java @@ -178,11 +178,11 @@ static Map getCodebaseJarMap(Set urls) { * we look for matching plugins and set URLs to fit */ @SuppressForbidden(reason = "proper use of URL") - static Map getPluginPermissions(Environment environment) throws IOException, NoSuchAlgorithmException { + static Map getPluginPermissions(Environment environment) throws IOException { Map map = new HashMap<>(); // collect up set of plugins and modules by listing directories. - Set pluginsAndModules = new LinkedHashSet<>(PluginsService.findPluginDirs(environment.pluginsFile())); - pluginsAndModules.addAll(PluginsService.findPluginDirs(environment.modulesFile())); + Set pluginsAndModules = new LinkedHashSet<>(PluginsService.findPluginDirs(environment.pluginsDir())); + pluginsAndModules.addAll(PluginsService.findPluginDirs(environment.modulesDir())); // now process each one for (Path plugin : pluginsAndModules) { @@ -310,19 +310,19 @@ static void addClasspathPermissions(Permissions policy) throws IOException { */ static void addFilePermissions(Permissions policy, Environment environment) throws IOException { // read-only dirs - addDirectoryPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.binFile(), "read,readlink", false); - addDirectoryPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.libFile(), "read,readlink", false); - addDirectoryPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.modulesFile(), "read,readlink", false); - addDirectoryPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.pluginsFile(), "read,readlink", false); - addDirectoryPath(policy, "path.conf'", 
environment.configFile(), "read,readlink", false); + addDirectoryPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.binDir(), "read,readlink", false); + addDirectoryPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.libDir(), "read,readlink", false); + addDirectoryPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.modulesDir(), "read,readlink", false); + addDirectoryPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.pluginsDir(), "read,readlink", false); + addDirectoryPath(policy, "path.conf'", environment.configDir(), "read,readlink", false); // read-write dirs - addDirectoryPath(policy, "java.io.tmpdir", environment.tmpFile(), "read,readlink,write,delete", false); - addDirectoryPath(policy, Environment.PATH_LOGS_SETTING.getKey(), environment.logsFile(), "read,readlink,write,delete", false); - if (environment.sharedDataFile() != null) { + addDirectoryPath(policy, "java.io.tmpdir", environment.tmpDir(), "read,readlink,write,delete", false); + addDirectoryPath(policy, Environment.PATH_LOGS_SETTING.getKey(), environment.logsDir(), "read,readlink,write,delete", false); + if (environment.sharedDataDir() != null) { addDirectoryPath( policy, Environment.PATH_SHARED_DATA_SETTING.getKey(), - environment.sharedDataFile(), + environment.sharedDataDir(), "read,readlink,write,delete", false ); diff --git a/server/src/main/java/org/opensearch/bootstrap/Spawner.java b/server/src/main/java/org/opensearch/bootstrap/Spawner.java index c5e7171946790..e2fae1f196412 100644 --- a/server/src/main/java/org/opensearch/bootstrap/Spawner.java +++ b/server/src/main/java/org/opensearch/bootstrap/Spawner.java @@ -77,14 +77,14 @@ void spawnNativeControllers(final Environment environment, final boolean inherit if (!spawned.compareAndSet(false, true)) { throw new IllegalStateException("native controllers already spawned"); } - if (!Files.exists(environment.modulesFile())) { - throw new IllegalStateException("modules directory [" + 
environment.modulesFile() + "] not found"); + if (!Files.exists(environment.modulesDir())) { + throw new IllegalStateException("modules directory [" + environment.modulesDir() + "] not found"); } /* * For each module, attempt to spawn the controller daemon. Silently ignore any module that doesn't include a controller for the * correct platform. */ - List paths = PluginsService.findPluginDirs(environment.modulesFile()); + List paths = PluginsService.findPluginDirs(environment.modulesDir()); for (final Path modules : paths) { final PluginInfo info = PluginInfo.readFromProperties(modules); final Path spawnPath = Platforms.nativeControllerPath(modules); @@ -99,7 +99,7 @@ void spawnNativeControllers(final Environment environment, final boolean inherit ); throw new IllegalArgumentException(message); } - final Process process = spawnNativeController(spawnPath, environment.tmpFile(), inheritIo); + final Process process = spawnNativeController(spawnPath, environment.tmpDir(), inheritIo); processes.add(process); } } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index 244ad4e6eda76..64198dce89cef 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -1167,7 +1167,7 @@ public void validateIndexSettings(String indexName, final Settings settings, fin } List getIndexSettingsValidationErrors(final Settings settings, final boolean forbidPrivateIndexSettings) { - List validationErrors = validateIndexCustomPath(settings, env.sharedDataFile()); + List validationErrors = validateIndexCustomPath(settings, env.sharedDataDir()); if (forbidPrivateIndexSettings) { validationErrors.addAll(validatePrivateSettingsNotExplicitlySet(settings, indexScopedSettings)); } diff --git 
a/server/src/main/java/org/opensearch/common/logging/LogConfigurator.java b/server/src/main/java/org/opensearch/common/logging/LogConfigurator.java index f75be70851a4b..38803c002ca78 100644 --- a/server/src/main/java/org/opensearch/common/logging/LogConfigurator.java +++ b/server/src/main/java/org/opensearch/common/logging/LogConfigurator.java @@ -139,7 +139,7 @@ public static void configure(final Environment environment) throws IOException, // whether or not the error listener check failed we can remove the listener now StatusLogger.getLogger().removeListener(ERROR_LISTENER); } - configure(environment.settings(), environment.configFile(), environment.logsFile()); + configure(environment.settings(), environment.configDir(), environment.logsDir()); } /** diff --git a/server/src/main/java/org/opensearch/common/settings/BaseKeyStoreCommand.java b/server/src/main/java/org/opensearch/common/settings/BaseKeyStoreCommand.java index b52e55e099cb9..a5fe3f8b0b34f 100644 --- a/server/src/main/java/org/opensearch/common/settings/BaseKeyStoreCommand.java +++ b/server/src/main/java/org/opensearch/common/settings/BaseKeyStoreCommand.java @@ -57,14 +57,14 @@ public BaseKeyStoreCommand(String description, boolean keyStoreMustExist) { @Override protected final void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { try { - final Path configFile = env.configFile(); + final Path configFile = env.configDir(); keyStore = KeyStoreWrapper.load(configFile); if (keyStore == null) { if (keyStoreMustExist) { throw new UserException( ExitCodes.DATA_ERROR, "OpenSearch keystore not found at [" - + KeyStoreWrapper.keystorePath(env.configFile()) + + KeyStoreWrapper.keystorePath(env.configDir()) + "]. Use 'create' command to create one." 
); } else if (options.has(forceOption) == false) { diff --git a/server/src/main/java/org/opensearch/common/settings/ChangeKeyStorePasswordCommand.java b/server/src/main/java/org/opensearch/common/settings/ChangeKeyStorePasswordCommand.java index d66caca68bfeb..e386fffc3f5dc 100644 --- a/server/src/main/java/org/opensearch/common/settings/ChangeKeyStorePasswordCommand.java +++ b/server/src/main/java/org/opensearch/common/settings/ChangeKeyStorePasswordCommand.java @@ -51,7 +51,7 @@ class ChangeKeyStorePasswordCommand extends BaseKeyStoreCommand { protected void executeCommand(Terminal terminal, OptionSet options, Environment env) throws Exception { try (SecureString newPassword = readPassword(terminal, true)) { final KeyStoreWrapper keyStore = getKeyStore(); - keyStore.save(env.configFile(), newPassword.getChars()); + keyStore.save(env.configDir(), newPassword.getChars()); terminal.println("OpenSearch keystore password changed successfully."); } catch (SecurityException e) { throw new UserException(ExitCodes.DATA_ERROR, e.getMessage()); diff --git a/server/src/main/java/org/opensearch/common/settings/HasPasswordKeyStoreCommand.java b/server/src/main/java/org/opensearch/common/settings/HasPasswordKeyStoreCommand.java index dc8993f55fae2..3733c985046c1 100644 --- a/server/src/main/java/org/opensearch/common/settings/HasPasswordKeyStoreCommand.java +++ b/server/src/main/java/org/opensearch/common/settings/HasPasswordKeyStoreCommand.java @@ -52,7 +52,7 @@ public class HasPasswordKeyStoreCommand extends KeyStoreAwareCommand { @Override protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { - final Path configFile = env.configFile(); + final Path configFile = env.configDir(); final KeyStoreWrapper keyStore = KeyStoreWrapper.load(configFile); // We handle error printing here so we can respect the "--silent" flag diff --git a/server/src/main/java/org/opensearch/common/settings/UpgradeKeyStoreCommand.java 
b/server/src/main/java/org/opensearch/common/settings/UpgradeKeyStoreCommand.java index 36707a8c32428..e897b0989176a 100644 --- a/server/src/main/java/org/opensearch/common/settings/UpgradeKeyStoreCommand.java +++ b/server/src/main/java/org/opensearch/common/settings/UpgradeKeyStoreCommand.java @@ -47,7 +47,7 @@ public class UpgradeKeyStoreCommand extends BaseKeyStoreCommand { @Override protected void executeCommand(final Terminal terminal, final OptionSet options, final Environment env) throws Exception { - KeyStoreWrapper.upgrade(getKeyStore(), env.configFile(), getKeyStorePassword().getChars()); + KeyStoreWrapper.upgrade(getKeyStore(), env.configDir(), getKeyStorePassword().getChars()); } } diff --git a/server/src/main/java/org/opensearch/env/Environment.java b/server/src/main/java/org/opensearch/env/Environment.java index 88f324f4dbea9..4a1e5e5069ccb 100644 --- a/server/src/main/java/org/opensearch/env/Environment.java +++ b/server/src/main/java/org/opensearch/env/Environment.java @@ -87,27 +87,27 @@ public class Environment { private final Path[] repoFiles; - private final Path configFile; + private final Path configDir; - private final Path pluginsFile; + private final Path pluginsDir; - private final Path modulesFile; + private final Path modulesDir; - private final Path sharedDataFile; + private final Path sharedDataDir; /** location of bin/, used by plugin manager */ - private final Path binFile; + private final Path binDir; /** location of lib/, */ - private final Path libFile; + private final Path libDir; - private final Path logsFile; + private final Path logsDir; /** Path to the PID file (can be null if no PID file is configured) **/ private final Path pidFile; /** Path to the temporary file directory used by the JDK */ - private final Path tmpFile; + private final Path tmpDir; public Environment(final Settings settings, final Path configPath) { this(settings, configPath, true); @@ -127,14 +127,14 @@ public Environment(final Settings settings, final 
Path configPath, final boolean } if (configPath != null) { - configFile = configPath.toAbsolutePath().normalize(); + configDir = configPath.toAbsolutePath().normalize(); } else { - configFile = homeFile.resolve("config"); + configDir = homeFile.resolve("config"); } - tmpFile = Objects.requireNonNull(tmpPath); + tmpDir = Objects.requireNonNull(tmpPath); - pluginsFile = homeFile.resolve("plugins"); + pluginsDir = homeFile.resolve("plugins"); List dataPaths = PATH_DATA_SETTING.get(settings); if (nodeLocalStorage) { @@ -155,9 +155,9 @@ public Environment(final Settings settings, final Path configPath, final boolean } } if (PATH_SHARED_DATA_SETTING.exists(settings)) { - sharedDataFile = PathUtils.get(PATH_SHARED_DATA_SETTING.get(settings)).toAbsolutePath().normalize(); + sharedDataDir = PathUtils.get(PATH_SHARED_DATA_SETTING.get(settings)).toAbsolutePath().normalize(); } else { - sharedDataFile = null; + sharedDataDir = null; } List repoPaths = PATH_REPO_SETTING.get(settings); if (repoPaths.isEmpty()) { @@ -171,9 +171,9 @@ public Environment(final Settings settings, final Path configPath, final boolean // this is trappy, Setting#get(Settings) will get a fallback setting yet return false for Settings#exists(Settings) if (PATH_LOGS_SETTING.exists(settings)) { - logsFile = PathUtils.get(PATH_LOGS_SETTING.get(settings)).toAbsolutePath().normalize(); + logsDir = PathUtils.get(PATH_LOGS_SETTING.get(settings)).toAbsolutePath().normalize(); } else { - logsFile = homeFile.resolve("logs"); + logsDir = homeFile.resolve("logs"); } if (NODE_PIDFILE_SETTING.exists(settings) || PIDFILE_SETTING.exists(settings)) { @@ -182,16 +182,16 @@ public Environment(final Settings settings, final Path configPath, final boolean pidFile = null; } - binFile = homeFile.resolve("bin"); - libFile = homeFile.resolve("lib"); - modulesFile = homeFile.resolve("modules"); + binDir = homeFile.resolve("bin"); + libDir = homeFile.resolve("lib"); + modulesDir = homeFile.resolve("modules"); final Settings.Builder 
finalSettings = Settings.builder().put(settings); if (PATH_DATA_SETTING.exists(settings)) { finalSettings.putList(PATH_DATA_SETTING.getKey(), Arrays.stream(dataFiles).map(Path::toString).collect(Collectors.toList())); } finalSettings.put(PATH_HOME_SETTING.getKey(), homeFile); - finalSettings.put(PATH_LOGS_SETTING.getKey(), logsFile.toString()); + finalSettings.put(PATH_LOGS_SETTING.getKey(), logsDir.toString()); if (PATH_REPO_SETTING.exists(settings)) { finalSettings.putList( Environment.PATH_REPO_SETTING.getKey(), @@ -199,8 +199,8 @@ public Environment(final Settings settings, final Path configPath, final boolean ); } if (PATH_SHARED_DATA_SETTING.exists(settings)) { - assert sharedDataFile != null; - finalSettings.put(Environment.PATH_SHARED_DATA_SETTING.getKey(), sharedDataFile.toString()); + assert sharedDataDir != null; + finalSettings.put(Environment.PATH_SHARED_DATA_SETTING.getKey(), sharedDataDir.toString()); } if (NODE_PIDFILE_SETTING.exists(settings)) { assert pidFile != null; @@ -229,8 +229,8 @@ public Path[] dataFiles() { /** * The shared data location */ - public Path sharedDataFile() { - return sharedDataFile; + public Path sharedDataDir() { + return sharedDataDir; } /** @@ -295,32 +295,31 @@ public URL resolveRepoURL(URL url) { } } - // TODO: rename all these "file" methods to "dir" /** * The config directory. 
*/ - public Path configFile() { - return configFile; + public Path configDir() { + return configDir; } - public Path pluginsFile() { - return pluginsFile; + public Path pluginsDir() { + return pluginsDir; } - public Path binFile() { - return binFile; + public Path binDir() { + return binDir; } - public Path libFile() { - return libFile; + public Path libDir() { + return libDir; } - public Path modulesFile() { - return modulesFile; + public Path modulesDir() { + return modulesDir; } - public Path logsFile() { - return logsFile; + public Path logsDir() { + return logsDir; } /** @@ -331,17 +330,17 @@ public Path pidFile() { } /** Path to the default temp directory used by the JDK */ - public Path tmpFile() { - return tmpFile; + public Path tmpDir() { + return tmpDir; } /** Ensure the configured temp directory is a valid directory */ - public void validateTmpFile() throws IOException { - if (Files.exists(tmpFile) == false) { - throw new FileNotFoundException("Temporary file directory [" + tmpFile + "] does not exist or is not accessible"); + public void validateTmpDir() throws IOException { + if (Files.exists(tmpDir) == false) { + throw new FileNotFoundException("Temporary file directory [" + tmpDir + "] does not exist or is not accessible"); } - if (Files.isDirectory(tmpFile) == false) { - throw new IOException("Configured temporary file directory [" + tmpFile + "] is not a directory"); + if (Files.isDirectory(tmpDir) == false) { + throw new IOException("Configured temporary file directory [" + tmpDir + "] is not a directory"); } } @@ -356,14 +355,14 @@ public static FileStore getFileStore(final Path path) throws IOException { public static void assertEquivalent(Environment actual, Environment expected) { assertEquals(actual.dataFiles(), expected.dataFiles(), "dataFiles"); assertEquals(actual.repoFiles(), expected.repoFiles(), "repoFiles"); - assertEquals(actual.configFile(), expected.configFile(), "configFile"); - assertEquals(actual.pluginsFile(), 
expected.pluginsFile(), "pluginsFile"); - assertEquals(actual.binFile(), expected.binFile(), "binFile"); - assertEquals(actual.libFile(), expected.libFile(), "libFile"); - assertEquals(actual.modulesFile(), expected.modulesFile(), "modulesFile"); - assertEquals(actual.logsFile(), expected.logsFile(), "logsFile"); + assertEquals(actual.configDir(), expected.configDir(), "configDir"); + assertEquals(actual.pluginsDir(), expected.pluginsDir(), "pluginsDir"); + assertEquals(actual.binDir(), expected.binDir(), "binDir"); + assertEquals(actual.libDir(), expected.libDir(), "libDir"); + assertEquals(actual.modulesDir(), expected.modulesDir(), "modulesDir"); + assertEquals(actual.logsDir(), expected.logsDir(), "logsDir"); assertEquals(actual.pidFile(), expected.pidFile(), "pidFile"); - assertEquals(actual.tmpFile(), expected.tmpFile(), "tmpFile"); + assertEquals(actual.tmpDir(), expected.tmpDir(), "tmpDir"); } private static void assertEquals(Object actual, Object expected, String name) { diff --git a/server/src/main/java/org/opensearch/env/NodeEnvironment.java b/server/src/main/java/org/opensearch/env/NodeEnvironment.java index 555ca990b736d..a09e462c474f4 100644 --- a/server/src/main/java/org/opensearch/env/NodeEnvironment.java +++ b/server/src/main/java/org/opensearch/env/NodeEnvironment.java @@ -289,7 +289,7 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOExce NodeLock nodeLock = null; try { - sharedDataPath = environment.sharedDataFile(); + sharedDataPath = environment.sharedDataDir(); IOException lastException = null; int maxLocalStorageNodes = MAX_LOCAL_STORAGE_NODES_SETTING.get(settings); diff --git a/server/src/main/java/org/opensearch/index/analysis/Analysis.java b/server/src/main/java/org/opensearch/index/analysis/Analysis.java index 90bb21cfc0a4b..805575b97c9fb 100644 --- a/server/src/main/java/org/opensearch/index/analysis/Analysis.java +++ b/server/src/main/java/org/opensearch/index/analysis/Analysis.java @@ -246,7 +246,7 @@ 
public static List getWordList( } } - final Path path = env.configFile().resolve(wordListPath); + final Path path = env.configDir().resolve(wordListPath); try { return loadWordList(path, removeComments); @@ -291,7 +291,7 @@ public static Reader getReaderFromFile(Environment env, Settings settings, Strin if (filePath == null) { return null; } - final Path path = env.configFile().resolve(filePath); + final Path path = env.configDir().resolve(filePath); try { return Files.newBufferedReader(path, StandardCharsets.UTF_8); } catch (CharacterCodingException ex) { diff --git a/server/src/main/java/org/opensearch/indices/analysis/HunspellService.java b/server/src/main/java/org/opensearch/indices/analysis/HunspellService.java index 08f8b18920eb9..189a24191f12f 100644 --- a/server/src/main/java/org/opensearch/indices/analysis/HunspellService.java +++ b/server/src/main/java/org/opensearch/indices/analysis/HunspellService.java @@ -144,7 +144,7 @@ public Dictionary getDictionary(String locale) { } private Path resolveHunspellDirectory(Environment env) { - return env.configFile().resolve("hunspell"); + return env.configDir().resolve("hunspell"); } /** @@ -218,7 +218,7 @@ private Dictionary loadDictionary(String locale, Settings nodeSettings, Environm affixStream = Files.newInputStream(affixFiles[0]); - try (Directory tmp = new NIOFSDirectory(env.tmpFile())) { + try (Directory tmp = new NIOFSDirectory(env.tmpDir())) { return new Dictionary(tmp, "hunspell", affixStream, dicStreams, ignoreCase); } diff --git a/server/src/main/java/org/opensearch/node/InternalSettingsPreparer.java b/server/src/main/java/org/opensearch/node/InternalSettingsPreparer.java index 0879b498ac41c..9d10cd139c741 100644 --- a/server/src/main/java/org/opensearch/node/InternalSettingsPreparer.java +++ b/server/src/main/java/org/opensearch/node/InternalSettingsPreparer.java @@ -85,7 +85,7 @@ public static Environment prepareEnvironment( Environment environment = new Environment(output.build(), configPath); output 
= Settings.builder(); // start with a fresh output - Path path = environment.configFile().resolve("opensearch.yml"); + Path path = environment.configDir().resolve("opensearch.yml"); if (Files.exists(path)) { try { output.loadFromPath(path); diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 73c2f221bc7cc..8ede6fdf76653 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -197,7 +197,7 @@ import java.io.IOException; import java.net.InetAddress; import java.net.InetSocketAddress; -import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardCopyOption; @@ -388,18 +388,18 @@ protected Node( if (logger.isDebugEnabled()) { logger.debug( "using config [{}], data [{}], logs [{}], plugins [{}]", - initialEnvironment.configFile(), + initialEnvironment.configDir(), Arrays.toString(initialEnvironment.dataFiles()), - initialEnvironment.logsFile(), - initialEnvironment.pluginsFile() + initialEnvironment.logsDir(), + initialEnvironment.pluginsDir() ); } this.pluginsService = new PluginsService( tmpSettings, - initialEnvironment.configFile(), - initialEnvironment.modulesFile(), - initialEnvironment.pluginsFile(), + initialEnvironment.configDir(), + initialEnvironment.modulesDir(), + initialEnvironment.pluginsDir(), classpathPlugins ); final Settings settings = pluginsService.updatedSettings(); @@ -415,7 +415,7 @@ protected Node( * Create the environment based on the finalized view of the settings. This is to ensure that components get the same setting * values, no matter they ask for them from. 
*/ - this.environment = new Environment(settings, initialEnvironment.configFile(), Node.NODE_LOCAL_STORAGE_SETTING.get(settings)); + this.environment = new Environment(settings, initialEnvironment.configDir(), Node.NODE_LOCAL_STORAGE_SETTING.get(settings)); Environment.assertEquivalent(initialEnvironment, this.environment); nodeEnvironment = new NodeEnvironment(tmpSettings, environment); logger.info( @@ -816,7 +816,7 @@ protected Node( clusterService.getClusterSettings(), pluginsService.filterPlugins(DiscoveryPlugin.class), clusterModule.getAllocationService(), - environment.configFile(), + environment.configDir(), gatewayMetaState, rerouteService, fsHealthService @@ -1339,8 +1339,8 @@ protected void validateNodeBeforeAcceptingRequests( /** Writes a file to the logs dir containing the ports for the given transport type */ private void writePortsFile(String type, BoundTransportAddress boundAddress) { - Path tmpPortsFile = environment.logsFile().resolve(type + ".ports.tmp"); - try (BufferedWriter writer = Files.newBufferedWriter(tmpPortsFile, Charset.forName("UTF-8"))) { + Path tmpPortsFile = environment.logsDir().resolve(type + ".ports.tmp"); + try (BufferedWriter writer = Files.newBufferedWriter(tmpPortsFile, StandardCharsets.UTF_8)) { for (TransportAddress address : boundAddress.boundAddresses()) { InetAddress inetAddress = InetAddress.getByName(address.getAddress()); writer.write(NetworkAddress.format(new InetSocketAddress(inetAddress, address.getPort())) + "\n"); @@ -1348,7 +1348,7 @@ private void writePortsFile(String type, BoundTransportAddress boundAddress) { } catch (IOException e) { throw new RuntimeException("Failed to write ports file", e); } - Path portsFile = environment.logsFile().resolve(type + ".ports"); + Path portsFile = environment.logsDir().resolve(type + ".ports"); try { Files.move(tmpPortsFile, portsFile, StandardCopyOption.ATOMIC_MOVE); } catch (IOException e) { diff --git 
a/server/src/test/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java b/server/src/test/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java index 6265bcab82966..fd052308ed87b 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java @@ -596,7 +596,7 @@ public void testRolloverClusterState() throws Exception { try { ClusterService clusterService = ClusterServiceUtils.createClusterService(testThreadPool); Environment env = mock(Environment.class); - when(env.sharedDataFile()).thenReturn(null); + when(env.sharedDataDir()).thenReturn(null); AllocationService allocationService = mock(AllocationService.class); when(allocationService.reroute(any(ClusterState.class), any(String.class))).then(i -> i.getArguments()[0]); IndicesService indicesService = mockIndicesServices(); @@ -722,7 +722,7 @@ public void testRolloverClusterStateForDataStream() throws Exception { ClusterService clusterService = ClusterServiceUtils.createClusterService(testThreadPool); Environment env = mock(Environment.class); - when(env.sharedDataFile()).thenReturn(null); + when(env.sharedDataDir()).thenReturn(null); AllocationService allocationService = mock(AllocationService.class); when(allocationService.reroute(any(ClusterState.class), any(String.class))).then(i -> i.getArguments()[0]); DocumentMapper documentMapper = mock(DocumentMapper.class); diff --git a/server/src/test/java/org/opensearch/env/EnvironmentTests.java b/server/src/test/java/org/opensearch/env/EnvironmentTests.java index a5d6021b29376..0e343a6e43ba7 100644 --- a/server/src/test/java/org/opensearch/env/EnvironmentTests.java +++ b/server/src/test/java/org/opensearch/env/EnvironmentTests.java @@ -117,28 +117,28 @@ public void testPathLogsWhenNotSet() { final Path pathHome = createTempDir().toAbsolutePath(); final 
Settings settings = Settings.builder().put("path.home", pathHome).build(); final Environment environment = new Environment(settings, null); - assertThat(environment.logsFile(), equalTo(pathHome.resolve("logs"))); + assertThat(environment.logsDir(), equalTo(pathHome.resolve("logs"))); } public void testDefaultConfigPath() { final Path path = createTempDir().toAbsolutePath(); final Settings settings = Settings.builder().put("path.home", path).build(); final Environment environment = new Environment(settings, null); - assertThat(environment.configFile(), equalTo(path.resolve("config"))); + assertThat(environment.configDir(), equalTo(path.resolve("config"))); } public void testConfigPath() { final Path configPath = createTempDir().toAbsolutePath(); final Settings settings = Settings.builder().put("path.home", createTempDir().toAbsolutePath()).build(); final Environment environment = new Environment(settings, configPath); - assertThat(environment.configFile(), equalTo(configPath)); + assertThat(environment.configDir(), equalTo(configPath)); } public void testConfigPathWhenNotSet() { final Path pathHome = createTempDir().toAbsolutePath(); final Settings settings = Settings.builder().put("path.home", pathHome).build(); final Environment environment = new Environment(settings, null); - assertThat(environment.configFile(), equalTo(pathHome.resolve("config"))); + assertThat(environment.configDir(), equalTo(pathHome.resolve("config"))); } public void testNodeDoesNotRequireLocalStorage() { @@ -164,7 +164,7 @@ public void testNodeDoesNotRequireLocalStorageButHasPathData() { public void testNonExistentTempPathValidation() { Settings build = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()).build(); Environment environment = new Environment(build, null, true, createTempDir().resolve("this_does_not_exist")); - FileNotFoundException e = expectThrows(FileNotFoundException.class, environment::validateTmpFile); + FileNotFoundException e = 
expectThrows(FileNotFoundException.class, environment::validateTmpDir); assertThat(e.getMessage(), startsWith("Temporary file directory [")); assertThat(e.getMessage(), endsWith("this_does_not_exist] does not exist or is not accessible")); } @@ -172,7 +172,7 @@ public void testNonExistentTempPathValidation() { public void testTempPathValidationWhenRegularFile() throws IOException { Settings build = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()).build(); Environment environment = new Environment(build, null, true, createTempFile("something", ".test")); - IOException e = expectThrows(IOException.class, environment::validateTmpFile); + IOException e = expectThrows(IOException.class, environment::validateTmpDir); assertThat(e.getMessage(), startsWith("Configured temporary file directory [")); assertThat(e.getMessage(), endsWith(".test] is not a directory")); } diff --git a/server/src/test/java/org/opensearch/indices/analysis/AnalysisModuleTests.java b/server/src/test/java/org/opensearch/indices/analysis/AnalysisModuleTests.java index 5db837c2314a6..483e9401bb075 100644 --- a/server/src/test/java/org/opensearch/indices/analysis/AnalysisModuleTests.java +++ b/server/src/test/java/org/opensearch/indices/analysis/AnalysisModuleTests.java @@ -444,7 +444,7 @@ public void testRegisterHunspellDictionary() throws Exception { InputStream aff = getClass().getResourceAsStream("/indices/analyze/conf_dir/hunspell/en_US/en_US.aff"); InputStream dic = getClass().getResourceAsStream("/indices/analyze/conf_dir/hunspell/en_US/en_US.dic"); Dictionary dictionary; - try (Directory tmp = new NIOFSDirectory(environment.tmpFile())) { + try (Directory tmp = new NIOFSDirectory(environment.tmpDir())) { dictionary = new Dictionary(tmp, "hunspell", aff, dic); } AnalysisModule module = new AnalysisModule(environment, singletonList(new AnalysisPlugin() { diff --git a/server/src/test/java/org/opensearch/node/InternalSettingsPreparerTests.java 
b/server/src/test/java/org/opensearch/node/InternalSettingsPreparerTests.java index d49bc62583af7..0f1aad5389c23 100644 --- a/server/src/test/java/org/opensearch/node/InternalSettingsPreparerTests.java +++ b/server/src/test/java/org/opensearch/node/InternalSettingsPreparerTests.java @@ -85,7 +85,7 @@ public void testEmptySettings() { assertNotNull(settings.get(ClusterName.CLUSTER_NAME_SETTING.getKey())); // a cluster name was set assertEquals(settings.toString(), size + 1 /* path.home is in the base settings */, settings.names().size()); String home = Environment.PATH_HOME_SETTING.get(baseEnvSettings); - String configDir = env.configFile().toString(); + String configDir = env.configDir().toString(); assertTrue(configDir, configDir.startsWith(home)); } diff --git a/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java b/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java index e022e78e7424b..d22776cf01f0e 100644 --- a/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java +++ b/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java @@ -102,7 +102,7 @@ static PluginsService newPluginsService(Settings settings, Class { throw new AssertionError("node.name must be set"); } ); PluginsService pluginsService; - pluginsService = new PluginsService(nodeSettings, null, env.modulesFile(), env.pluginsFile(), plugins); + pluginsService = new PluginsService(nodeSettings, null, env.modulesDir(), env.pluginsDir(), plugins); client = (Client) Proxy.newProxyInstance(Client.class.getClassLoader(), new Class[] { Client.class }, clientInvocationHandler); ScriptModule scriptModule = createScriptModule(pluginsService.filterPlugins(ScriptPlugin.class)); diff --git a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java index a7c819609c619..9e3a2c3564a00 100644 --- 
a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java @@ -1740,7 +1740,7 @@ private void rebuildUnicastHostFiles(List newNodes) { .distinct() .collect(Collectors.toList()); Set configPaths = Stream.concat(currentNodes.stream(), newNodes.stream()) - .map(nac -> nac.node.getEnvironment().configFile()) + .map(nac -> nac.node.getEnvironment().configDir()) .collect(Collectors.toSet()); logger.debug("configuring discovery with {} at {}", discoveryFileContents, configPaths); for (final Path configPath : configPaths) { From 7aa496f9dda6ba2c26db19206951a5ccf6387e1b Mon Sep 17 00:00:00 2001 From: Tianli Feng Date: Tue, 5 Apr 2022 10:47:52 -0700 Subject: [PATCH 051/653] Add request parameter 'cluster_manager_timeout' and deprecate 'master_timeout' - in Index APIs except index template APIs (#2660) - Deprecate the request parameter `master_timeout` that used in Index APIs which have got the parameter. (except index template APIs, which is addressed in PR https://github.com/opensearch-project/OpenSearch/pull/2678) - Add alternative new request parameter `cluster_manager_timeout`. - Add unit tests. 
Signed-off-by: Tianli Feng --- .../http/DanglingIndicesRestIT.java | 4 +- ...angling_indices.delete_dangling_index.json | 14 +- ...angling_indices.import_dangling_index.json | 14 +- .../rest-api-spec/api/indices.add_block.json | 10 +- .../rest-api-spec/api/indices.clone.json | 14 +- .../rest-api-spec/api/indices.close.json | 10 +- .../rest-api-spec/api/indices.create.json | 10 +- .../api/indices.delete_alias.json | 10 +- .../rest-api-spec/api/indices.get.json | 10 +- .../api/indices.get_mapping.json | 10 +- .../api/indices.get_settings.json | 10 +- .../api/indices.get_template.json | 10 +- .../rest-api-spec/api/indices.put_alias.json | 10 +- .../api/indices.put_mapping.json | 10 +- .../api/indices.put_settings.json | 10 +- .../rest-api-spec/api/indices.rollover.json | 10 +- .../rest-api-spec/api/indices.shrink.json | 14 +- .../rest-api-spec/api/indices.split.json | 14 +- .../api/indices.update_aliases.json | 10 +- .../test/indices.clone/10_basic.yml | 6 + .../test/indices.clone/20_source_mapping.yml | 3 + .../test/indices.clone/30_copy_settings.yml | 4 +- .../test/indices.shrink/10_basic.yml | 5 + .../test/indices.shrink/20_source_mapping.yml | 4 +- .../test/indices.shrink/30_copy_settings.yml | 5 + .../test/indices.split/10_basic.yml | 16 ++ .../test/indices.split/20_source_mapping.yml | 5 + .../test/indices.split/30_copy_settings.yml | 6 +- .../RestDeleteDanglingIndexAction.java | 6 +- .../RestImportDanglingIndexAction.java | 6 +- .../indices/RestAddIndexBlockAction.java | 6 +- .../admin/indices/RestCloseIndexAction.java | 6 +- .../admin/indices/RestCreateIndexAction.java | 6 +- .../admin/indices/RestDeleteIndexAction.java | 6 +- .../admin/indices/RestGetIndicesAction.java | 6 +- .../admin/indices/RestGetMappingAction.java | 19 +- .../admin/indices/RestGetSettingsAction.java | 6 +- .../indices/RestIndexDeleteAliasesAction.java | 6 +- .../indices/RestIndexPutAliasAction.java | 6 +- .../indices/RestIndicesAliasesAction.java | 6 +- 
.../admin/indices/RestOpenIndexAction.java | 6 +- .../admin/indices/RestPutMappingAction.java | 6 +- .../admin/indices/RestResizeHandler.java | 3 +- .../indices/RestRolloverIndexAction.java | 3 +- .../indices/RestUpdateSettingsAction.java | 6 +- .../RenamedTimeoutRequestParameterTests.java | 172 ++++++++++++++++++ 46 files changed, 489 insertions(+), 50 deletions(-) diff --git a/qa/smoke-test-http/src/test/java/org/opensearch/http/DanglingIndicesRestIT.java b/qa/smoke-test-http/src/test/java/org/opensearch/http/DanglingIndicesRestIT.java index d5dcde2492046..3b32ac40917e4 100644 --- a/qa/smoke-test-http/src/test/java/org/opensearch/http/DanglingIndicesRestIT.java +++ b/qa/smoke-test-http/src/test/java/org/opensearch/http/DanglingIndicesRestIT.java @@ -135,7 +135,7 @@ public void testDanglingIndicesCanBeImported() throws Exception { importRequest.addParameter("accept_data_loss", "true"); // Ensure this parameter is accepted importRequest.addParameter("timeout", "20s"); - importRequest.addParameter("master_timeout", "20s"); + importRequest.addParameter("cluster_manager_timeout", "20s"); final Response importResponse = restClient.performRequest(importRequest); assertThat(importResponse.getStatusLine().getStatusCode(), equalTo(ACCEPTED.getStatus())); @@ -171,7 +171,7 @@ public void testDanglingIndicesCanBeDeleted() throws Exception { deleteRequest.addParameter("accept_data_loss", "true"); // Ensure these parameters is accepted deleteRequest.addParameter("timeout", "20s"); - deleteRequest.addParameter("master_timeout", "20s"); + deleteRequest.addParameter("cluster_manager_timeout", "20s"); final Response deleteResponse = restClient.performRequest(deleteRequest); assertThat(deleteResponse.getStatusLine().getStatusCode(), equalTo(ACCEPTED.getStatus())); diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/dangling_indices.delete_dangling_index.json b/rest-api-spec/src/main/resources/rest-api-spec/api/dangling_indices.delete_dangling_index.json index 
1e3d74784591b..5d832fc794f4f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/dangling_indices.delete_dangling_index.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/dangling_indices.delete_dangling_index.json @@ -30,9 +30,17 @@ "type": "time", "description": "Explicit operation timeout" }, - "master_timeout": { - "type": "time", - "description": "Specify timeout for connection to master" + "master_timeout":{ + "type":"time", + "description":"Specify timeout for connection to master", + "deprecated":{ + "version":"2.0.0", + "description":"To promote inclusive language, use 'cluster_manager_timeout' instead." + } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Specify timeout for connection to cluster-manager node" } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/dangling_indices.import_dangling_index.json b/rest-api-spec/src/main/resources/rest-api-spec/api/dangling_indices.import_dangling_index.json index e9dce01a76727..5b056e1fa145f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/dangling_indices.import_dangling_index.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/dangling_indices.import_dangling_index.json @@ -30,9 +30,17 @@ "type": "time", "description": "Explicit operation timeout" }, - "master_timeout": { - "type": "time", - "description": "Specify timeout for connection to master" + "master_timeout":{ + "type":"time", + "description":"Specify timeout for connection to master", + "deprecated":{ + "version":"2.0.0", + "description":"To promote inclusive language, use 'cluster_manager_timeout' instead." 
+ } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Specify timeout for connection to cluster-manager node" } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.add_block.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.add_block.json index 7389fb1322824..af10b9f50091f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.add_block.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.add_block.json @@ -32,7 +32,15 @@ }, "master_timeout":{ "type":"time", - "description":"Specify timeout for connection to master" + "description":"Specify timeout for connection to master", + "deprecated":{ + "version":"2.0.0", + "description":"To promote inclusive language, use 'cluster_manager_timeout' instead." + } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Specify timeout for connection to cluster-manager node" }, "ignore_unavailable":{ "type":"boolean", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.clone.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.clone.json index d3a249583bd84..b55d43371005f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.clone.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.clone.json @@ -31,9 +31,17 @@ "type" : "time", "description" : "Explicit operation timeout" }, - "master_timeout": { - "type" : "time", - "description" : "Specify timeout for connection to master" + "master_timeout":{ + "type":"time", + "description":"Specify timeout for connection to master", + "deprecated":{ + "version":"2.0.0", + "description":"To promote inclusive language, use 'cluster_manager_timeout' instead." 
+ } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Specify timeout for connection to cluster-manager node" }, "wait_for_active_shards": { "type" : "string", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.close.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.close.json index f26c8e77a06a6..1182b73541f93 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.close.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.close.json @@ -28,7 +28,15 @@ }, "master_timeout":{ "type":"time", - "description":"Specify timeout for connection to master" + "description":"Specify timeout for connection to master", + "deprecated":{ + "version":"2.0.0", + "description":"To promote inclusive language, use 'cluster_manager_timeout' instead." + } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Specify timeout for connection to cluster-manager node" }, "ignore_unavailable":{ "type":"boolean", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.create.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.create.json index 922183d628ac6..53ea4cbd80803 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.create.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.create.json @@ -32,7 +32,15 @@ }, "master_timeout":{ "type":"time", - "description":"Specify timeout for connection to master" + "description":"Specify timeout for connection to master", + "deprecated":{ + "version":"2.0.0", + "description":"To promote inclusive language, use 'cluster_manager_timeout' instead." 
+ } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Specify timeout for connection to cluster-manager node" } }, "body":{ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_alias.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_alias.json index 13abf70ca739b..049a397c6b3e2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_alias.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_alias.json @@ -48,7 +48,15 @@ }, "master_timeout":{ "type":"time", - "description":"Specify timeout for connection to master" + "description":"Specify timeout for connection to master", + "deprecated":{ + "version":"2.0.0", + "description":"To promote inclusive language, use 'cluster_manager_timeout' instead." + } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Specify timeout for connection to cluster-manager node" } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get.json index 90a1274ecb059..0a43f6481d86d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get.json @@ -57,7 +57,15 @@ }, "master_timeout":{ "type":"time", - "description":"Specify timeout for connection to master" + "description":"Specify timeout for connection to master", + "deprecated":{ + "version":"2.0.0", + "description":"To promote inclusive language, use 'cluster_manager_timeout' instead." 
+ } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Specify timeout for connection to cluster-manager node" } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_mapping.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_mapping.json index 24fd668069697..321bfaba4f941 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_mapping.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_mapping.json @@ -50,7 +50,15 @@ }, "master_timeout":{ "type":"time", - "description":"Specify timeout for connection to master" + "description":"Specify timeout for connection to master", + "deprecated":{ + "version":"2.0.0", + "description":"To promote inclusive language, use 'cluster_manager_timeout' instead." + } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Specify timeout for connection to cluster-manager node" }, "local":{ "type":"boolean", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_settings.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_settings.json index 68e325446d3dc..1bdaea01f87bf 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_settings.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_settings.json @@ -58,7 +58,15 @@ "params":{ "master_timeout":{ "type":"time", - "description":"Specify timeout for connection to master" + "description":"Specify timeout for connection to master", + "deprecated":{ + "version":"2.0.0", + "description":"To promote inclusive language, use 'cluster_manager_timeout' instead." 
+ } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Specify timeout for connection to cluster-manager node" }, "ignore_unavailable":{ "type":"boolean", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_template.json index 337016763ad0a..04d2f846e6ac1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_template.json @@ -34,7 +34,15 @@ }, "master_timeout":{ "type":"time", - "description":"Explicit operation timeout for connection to master node" + "description":"Explicit operation timeout for connection to master node", + "deprecated":{ + "version":"2.0.0", + "description":"To promote inclusive language, use 'cluster_manager_timeout' instead." + } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to cluster-manager node" }, "local":{ "type":"boolean", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_alias.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_alias.json index 603f24b665eb7..00767afbaec04 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_alias.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_alias.json @@ -50,7 +50,15 @@ }, "master_timeout":{ "type":"time", - "description":"Specify timeout for connection to master" + "description":"Specify timeout for connection to master", + "deprecated":{ + "version":"2.0.0", + "description":"To promote inclusive language, use 'cluster_manager_timeout' instead." 
+ } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Specify timeout for connection to cluster-manager node" } }, "body":{ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_mapping.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_mapping.json index 451cbccd8d329..c8b63d4e1cee1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_mapping.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_mapping.json @@ -29,7 +29,15 @@ }, "master_timeout":{ "type":"time", - "description":"Specify timeout for connection to master" + "description":"Specify timeout for connection to master", + "deprecated":{ + "version":"2.0.0", + "description":"To promote inclusive language, use 'cluster_manager_timeout' instead." + } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Specify timeout for connection to cluster-manager node" }, "ignore_unavailable":{ "type":"boolean", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_settings.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_settings.json index 66fe23bab8ba2..ca245ec543da3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_settings.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_settings.json @@ -30,7 +30,15 @@ "params":{ "master_timeout":{ "type":"time", - "description":"Specify timeout for connection to master" + "description":"Specify timeout for connection to master", + "deprecated":{ + "version":"2.0.0", + "description":"To promote inclusive language, use 'cluster_manager_timeout' instead." 
+ } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Specify timeout for connection to cluster-manager node" }, "timeout":{ "type":"time", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.rollover.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.rollover.json index fef1f03d1c9a7..303b7c7b03c19 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.rollover.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.rollover.json @@ -48,7 +48,15 @@ }, "master_timeout":{ "type":"time", - "description":"Specify timeout for connection to master" + "description":"Specify timeout for connection to master", + "deprecated":{ + "version":"2.0.0", + "description":"To promote inclusive language, use 'cluster_manager_timeout' instead." + } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Specify timeout for connection to cluster-manager node" }, "wait_for_active_shards":{ "type":"string", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.shrink.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.shrink.json index fd6d705d6a5fa..6bb09ee0019e1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.shrink.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.shrink.json @@ -35,9 +35,17 @@ "type" : "time", "description" : "Explicit operation timeout" }, - "master_timeout": { - "type" : "time", - "description" : "Specify timeout for connection to master" + "master_timeout":{ + "type":"time", + "description":"Specify timeout for connection to master", + "deprecated":{ + "version":"2.0.0", + "description":"To promote inclusive language, use 'cluster_manager_timeout' instead." 
+ } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Specify timeout for connection to cluster-manager node" }, "wait_for_active_shards": { "type" : "string", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.split.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.split.json index 02df3cdedf01f..d1b5a28c9ff0f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.split.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.split.json @@ -35,9 +35,17 @@ "type" : "time", "description" : "Explicit operation timeout" }, - "master_timeout": { - "type" : "time", - "description" : "Specify timeout for connection to master" + "master_timeout":{ + "type":"time", + "description":"Specify timeout for connection to master", + "deprecated":{ + "version":"2.0.0", + "description":"To promote inclusive language, use 'cluster_manager_timeout' instead." + } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Specify timeout for connection to cluster-manager node" }, "wait_for_active_shards": { "type" : "string", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.update_aliases.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.update_aliases.json index d4a222f2061c8..c31cb8fe59c0f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.update_aliases.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.update_aliases.json @@ -22,7 +22,15 @@ }, "master_timeout":{ "type":"time", - "description":"Specify timeout for connection to master" + "description":"Specify timeout for connection to master", + "deprecated":{ + "version":"2.0.0", + "description":"To promote inclusive language, use 'cluster_manager_timeout' instead." 
+ } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Specify timeout for connection to cluster-manager node" } }, "body":{ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/10_basic.yml index a4d1841ed7108..6488e4960e08f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/10_basic.yml @@ -31,6 +31,7 @@ setup: - skip: version: " - 7.3.99" reason: index cloning was added in 7.4.0 + features: allowed_warnings # make it read-only - do: indices.put_settings: @@ -46,6 +47,8 @@ setup: # now we do the actual clone - do: + allowed_warnings: + - "Deprecated parameter [master_timeout] used. To promote inclusive language, please use [cluster_manager_timeout] instead. It will be unsupported in a future major version." indices.clone: index: "source" target: "target" @@ -94,9 +97,12 @@ setup: - skip: version: " - 7.3.99" reason: index cloning was added in 7.4.0 + features: allowed_warnings # try to do an illegal clone with illegal number_of_shards - do: catch: /illegal_argument_exception/ + allowed_warnings: + - "Deprecated parameter [master_timeout] used. To promote inclusive language, please use [cluster_manager_timeout] instead. It will be unsupported in a future major version." 
indices.clone: index: "source" target: "target" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/20_source_mapping.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/20_source_mapping.yml index 625f574fa73de..1a3074d091399 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/20_source_mapping.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/20_source_mapping.yml @@ -3,6 +3,7 @@ - skip: version: " - 7.3.99" reason: index cloning was added in 7.4.0 + features: allowed_warnings # create index - do: indices.create: @@ -50,6 +51,8 @@ # now we do the actual clone - do: + allowed_warnings: + - "Deprecated parameter [master_timeout] used. To promote inclusive language, please use [cluster_manager_timeout] instead. It will be unsupported in a future major version." indices.clone: index: "source" target: "target" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/30_copy_settings.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/30_copy_settings.yml index 503cc15609072..467f5266122eb 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/30_copy_settings.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/30_copy_settings.yml @@ -3,7 +3,7 @@ - skip: version: " - 7.3.99" reason: index cloning was added in 7.4.0 - features: [arbitrary_key] + features: [arbitrary_key, allowed_warnings] - do: nodes.info: @@ -36,6 +36,8 @@ # now we do an actual clone and copy settings - do: + allowed_warnings: + - "Deprecated parameter [master_timeout] used. To promote inclusive language, please use [cluster_manager_timeout] instead. It will be unsupported in a future major version." 
indices.clone: index: "source" target: "copy-settings-target" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml index a5b1cb8607b3a..4db7ca353334f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml @@ -7,6 +7,9 @@ # which node is the one with the highest version and that is the only one that can safely # be used to shrink the index. + - skip: + features: allowed_warnings + - do: nodes.info: node_id: data:true @@ -53,6 +56,8 @@ # now we do the actual shrink - do: + allowed_warnings: + - "Deprecated parameter [master_timeout] used. To promote inclusive language, please use [cluster_manager_timeout] instead. It will be unsupported in a future major version." indices.shrink: index: "source" target: "target" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml index dec0760fc6b19..4ddf122d82691 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml @@ -3,7 +3,7 @@ - skip: version: " - 6.9.99" reason: expects warnings that pre-7.0.0 will not send - features: [warnings, arbitrary_key] + features: [warnings, arbitrary_key, allowed_warnings] - do: nodes.info: @@ -60,6 +60,8 @@ # now we do the actual shrink - do: + allowed_warnings: + - "Deprecated parameter [master_timeout] used. To promote inclusive language, please use [cluster_manager_timeout] instead. It will be unsupported in a future major version." 
indices.shrink: index: "source" target: "target" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml index a744895c4ce38..112303a3a7298 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml @@ -47,6 +47,7 @@ index.merge.scheduler.max_thread_count: 2 allowed_warnings: - "parameter [copy_settings] is deprecated and will be removed in 8.0.0" + - "Deprecated parameter [master_timeout] used. To promote inclusive language, please use [cluster_manager_timeout] instead. It will be unsupported in a future major version." - do: cluster.health: @@ -64,6 +65,8 @@ # now we do a actual shrink and copy settings (by default) - do: + allowed_warnings: + - "Deprecated parameter [master_timeout] used. To promote inclusive language, please use [cluster_manager_timeout] instead. It will be unsupported in a future major version." indices.shrink: index: "source" target: "default-copy-settings-target" @@ -91,6 +94,8 @@ # now we do a actual shrink and try to set no copy settings - do: catch: /illegal_argument_exception/ + allowed_warnings: + - "Deprecated parameter [master_timeout] used. To promote inclusive language, please use [cluster_manager_timeout] instead. It will be unsupported in a future major version." 
indices.shrink: index: "source" target: "explicit-no-copy-settings-target" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml index 4ae1d0002a237..01781e35b9ae9 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml @@ -29,6 +29,9 @@ setup: --- "Split index via API": + - skip: + features: allowed_warnings + # make it read-only - do: indices.put_settings: @@ -44,6 +47,8 @@ setup: # now we do the actual split - do: + allowed_warnings: + - "Deprecated parameter [master_timeout] used. To promote inclusive language, please use [cluster_manager_timeout] instead. It will be unsupported in a future major version." indices.split: index: "source" target: "target" @@ -90,6 +95,8 @@ setup: --- "Split from 1 to N": + - skip: + features: allowed_warnings - do: indices.create: index: source_one_shard @@ -131,6 +138,8 @@ setup: # now we do the actual split from 1 to 5 - do: + allowed_warnings: + - "Deprecated parameter [master_timeout] used. To promote inclusive language, please use [cluster_manager_timeout] instead. It will be unsupported in a future major version." indices.split: index: "source_one_shard" target: "target" @@ -176,9 +185,14 @@ setup: --- "Create illegal split indices": + - skip: + features: allowed_warnings + # try to do an illegal split with number_of_routing_shards set - do: catch: /illegal_argument_exception/ + allowed_warnings: + - "Deprecated parameter [master_timeout] used. To promote inclusive language, please use [cluster_manager_timeout] instead. It will be unsupported in a future major version." 
indices.split: index: "source" target: "target" @@ -193,6 +207,8 @@ setup: # try to do an illegal split with illegal number_of_shards - do: catch: /illegal_state_exception/ + allowed_warnings: + - "Deprecated parameter [master_timeout] used. To promote inclusive language, please use [cluster_manager_timeout] instead. It will be unsupported in a future major version." indices.split: index: "source" target: "target" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml index c86e49aac0561..0baae30238013 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml @@ -1,5 +1,8 @@ --- "Split index ignores target template mapping": + - skip: + features: allowed_warnings + # create index - do: indices.create: @@ -48,6 +51,8 @@ # now we do the actual split - do: + allowed_warnings: + - "Deprecated parameter [master_timeout] used. To promote inclusive language, please use [cluster_manager_timeout] instead. It will be unsupported in a future major version." indices.split: index: "source" target: "target" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml index 0ceacf1f064ca..ace49ff6dd917 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml @@ -49,7 +49,7 @@ index.merge.scheduler.max_thread_count: 2 allowed_warnings: - "parameter [copy_settings] is deprecated and will be removed in 8.0.0" - + - "Deprecated parameter [master_timeout] used. To promote inclusive language, please use [cluster_manager_timeout] instead. 
It will be unsupported in a future major version." - do: cluster.health: @@ -67,6 +67,8 @@ # now we do a actual shrink and copy settings (by default) - do: + allowed_warnings: + - "Deprecated parameter [master_timeout] used. To promote inclusive language, please use [cluster_manager_timeout] instead. It will be unsupported in a future major version." indices.split: index: "source" target: "default-copy-settings-target" @@ -94,6 +96,8 @@ - do: catch: /illegal_argument_exception/ + allowed_warnings: + - "Deprecated parameter [master_timeout] used. To promote inclusive language, please use [cluster_manager_timeout] instead. It will be unsupported in a future major version." indices.split: index: "source" target: "explicit-no-copy-settings-target" diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/dangling/RestDeleteDanglingIndexAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/dangling/RestDeleteDanglingIndexAction.java index 8024d97743cc8..3e8883058d18e 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/dangling/RestDeleteDanglingIndexAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/dangling/RestDeleteDanglingIndexAction.java @@ -35,6 +35,7 @@ import org.opensearch.action.admin.indices.dangling.delete.DeleteDanglingIndexRequest; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.node.NodeClient; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; import org.opensearch.rest.RestStatus; @@ -49,6 +50,8 @@ public class RestDeleteDanglingIndexAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestDeleteDanglingIndexAction.class); + @Override public List routes() { return singletonList(new Route(DELETE, "/_dangling/{index_uuid}")); @@ -67,7 +70,8 @@ public 
RestChannelConsumer prepareRequest(final RestRequest request, NodeClient ); deleteRequest.timeout(request.paramAsTime("timeout", deleteRequest.timeout())); - deleteRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteRequest.masterNodeTimeout())); + deleteRequest.masterNodeTimeout(request.paramAsTime("cluster_manager_timeout", deleteRequest.masterNodeTimeout())); + parseDeprecatedMasterTimeoutParameter(deleteRequest, request, deprecationLogger, getName()); return channel -> client.admin() .cluster() diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/dangling/RestImportDanglingIndexAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/dangling/RestImportDanglingIndexAction.java index b7ba7361c2980..5a48159bd9651 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/dangling/RestImportDanglingIndexAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/dangling/RestImportDanglingIndexAction.java @@ -42,12 +42,15 @@ import org.opensearch.action.admin.indices.dangling.import_index.ImportDanglingIndexRequest; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.node.NodeClient; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; import org.opensearch.rest.RestStatus; import org.opensearch.rest.action.RestToXContentListener; public class RestImportDanglingIndexAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestImportDanglingIndexAction.class); + @Override public List routes() { return singletonList(new Route(POST, "/_dangling/{index_uuid}")); @@ -66,7 +69,8 @@ public RestChannelConsumer prepareRequest(final RestRequest request, NodeClient ); importRequest.timeout(request.paramAsTime("timeout", importRequest.timeout())); - 
importRequest.masterNodeTimeout(request.paramAsTime("master_timeout", importRequest.masterNodeTimeout())); + importRequest.masterNodeTimeout(request.paramAsTime("cluster_manager_timeout", importRequest.masterNodeTimeout())); + parseDeprecatedMasterTimeoutParameter(importRequest, request, deprecationLogger, getName()); return channel -> client.admin() .cluster() diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestAddIndexBlockAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestAddIndexBlockAction.java index 89faeb1b7c7d9..6854662e3bb18 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestAddIndexBlockAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestAddIndexBlockAction.java @@ -37,6 +37,7 @@ import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Strings; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; import org.opensearch.rest.action.RestToXContentListener; @@ -49,6 +50,8 @@ public class RestAddIndexBlockAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestAddIndexBlockAction.class); + @Override public List routes() { return Collections.singletonList(new Route(PUT, "/{index}/_block/{block}")); @@ -65,7 +68,8 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC IndexMetadata.APIBlock.fromName(request.param("block")), Strings.splitStringByCommaToArray(request.param("index")) ); - addIndexBlockRequest.masterNodeTimeout(request.paramAsTime("master_timeout", addIndexBlockRequest.masterNodeTimeout())); + addIndexBlockRequest.masterNodeTimeout(request.paramAsTime("cluster_manager_timeout", addIndexBlockRequest.masterNodeTimeout())); + parseDeprecatedMasterTimeoutParameter(addIndexBlockRequest, 
request, deprecationLogger, getName()); addIndexBlockRequest.timeout(request.paramAsTime("timeout", addIndexBlockRequest.timeout())); addIndexBlockRequest.indicesOptions(IndicesOptions.fromRequest(request, addIndexBlockRequest.indicesOptions())); return channel -> client.admin().indices().addBlock(addIndexBlockRequest, new RestToXContentListener<>(channel)); diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestCloseIndexAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestCloseIndexAction.java index 8da0ec3c5a349..168d553112fe1 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestCloseIndexAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestCloseIndexAction.java @@ -37,6 +37,7 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.client.node.NodeClient; import org.opensearch.common.Strings; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; import org.opensearch.rest.action.RestToXContentListener; @@ -50,6 +51,8 @@ public class RestCloseIndexAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestCloseIndexAction.class); + @Override public List routes() { return unmodifiableList(asList(new Route(POST, "/_close"), new Route(POST, "/{index}/_close"))); @@ -63,7 +66,8 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { CloseIndexRequest closeIndexRequest = new CloseIndexRequest(Strings.splitStringByCommaToArray(request.param("index"))); - closeIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", closeIndexRequest.masterNodeTimeout())); + closeIndexRequest.masterNodeTimeout(request.paramAsTime("cluster_manager_timeout", closeIndexRequest.masterNodeTimeout())); + 
parseDeprecatedMasterTimeoutParameter(closeIndexRequest, request, deprecationLogger, getName()); closeIndexRequest.timeout(request.paramAsTime("timeout", closeIndexRequest.timeout())); closeIndexRequest.indicesOptions(IndicesOptions.fromRequest(request, closeIndexRequest.indicesOptions())); String waitForActiveShards = request.param("wait_for_active_shards"); diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestCreateIndexAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestCreateIndexAction.java index 5b628bc094c41..54199b8e68516 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestCreateIndexAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestCreateIndexAction.java @@ -35,6 +35,7 @@ import org.opensearch.action.admin.indices.create.CreateIndexRequest; import org.opensearch.action.support.ActiveShardCount; import org.opensearch.client.node.NodeClient; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.index.mapper.MapperService; @@ -53,6 +54,8 @@ public class RestCreateIndexAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestIndexPutAliasAction.class); + @Override public List routes() { return singletonList(new Route(PUT, "/{index}")); @@ -74,7 +77,8 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC } createIndexRequest.timeout(request.paramAsTime("timeout", createIndexRequest.timeout())); - createIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", createIndexRequest.masterNodeTimeout())); + createIndexRequest.masterNodeTimeout(request.paramAsTime("cluster_manager_timeout", createIndexRequest.masterNodeTimeout())); + parseDeprecatedMasterTimeoutParameter(createIndexRequest, request, 
deprecationLogger, getName()); createIndexRequest.waitForActiveShards(ActiveShardCount.parseString(request.param("wait_for_active_shards"))); return channel -> client.admin().indices().create(createIndexRequest, new RestToXContentListener<>(channel)); } diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestDeleteIndexAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestDeleteIndexAction.java index e8b5caf8f234a..63fa8a4b29d85 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestDeleteIndexAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestDeleteIndexAction.java @@ -36,6 +36,7 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.client.node.NodeClient; import org.opensearch.common.Strings; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; import org.opensearch.rest.action.RestToXContentListener; @@ -49,6 +50,8 @@ public class RestDeleteIndexAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestDeleteIndexAction.class); + @Override public List routes() { return unmodifiableList(asList(new Route(DELETE, "/"), new Route(DELETE, "/{index}"))); @@ -63,7 +66,8 @@ public String getName() { public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(Strings.splitStringByCommaToArray(request.param("index"))); deleteIndexRequest.timeout(request.paramAsTime("timeout", deleteIndexRequest.timeout())); - deleteIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteIndexRequest.masterNodeTimeout())); + deleteIndexRequest.masterNodeTimeout(request.paramAsTime("cluster_manager_timeout", deleteIndexRequest.masterNodeTimeout())); + 
parseDeprecatedMasterTimeoutParameter(deleteIndexRequest, request, deprecationLogger, getName()); deleteIndexRequest.indicesOptions(IndicesOptions.fromRequest(request, deleteIndexRequest.indicesOptions())); return channel -> client.admin().indices().delete(deleteIndexRequest, new RestToXContentListener<>(channel)); } diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetIndicesAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetIndicesAction.java index 37c8162c6d31b..c94d691d4e99b 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetIndicesAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetIndicesAction.java @@ -36,6 +36,7 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.client.node.NodeClient; import org.opensearch.common.Strings; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Settings; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; @@ -55,6 +56,8 @@ */ public class RestGetIndicesAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestGetIndicesAction.class); + @Override public List routes() { return unmodifiableList(asList(new Route(GET, "/{index}"), new Route(HEAD, "/{index}"))); @@ -72,7 +75,8 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC getIndexRequest.indices(indices); getIndexRequest.indicesOptions(IndicesOptions.fromRequest(request, getIndexRequest.indicesOptions())); getIndexRequest.local(request.paramAsBoolean("local", getIndexRequest.local())); - getIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getIndexRequest.masterNodeTimeout())); + getIndexRequest.masterNodeTimeout(request.paramAsTime("cluster_manager_timeout", getIndexRequest.masterNodeTimeout())); + 
parseDeprecatedMasterTimeoutParameter(getIndexRequest, request, deprecationLogger, getName()); getIndexRequest.humanReadable(request.paramAsBoolean("human", false)); getIndexRequest.includeDefaults(request.paramAsBoolean("include_defaults", false)); return channel -> client.admin().indices().getIndex(getIndexRequest, new RestToXContentListener<>(channel)); diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetMappingAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetMappingAction.java index f4f33905408e7..62ea315fd89a0 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetMappingAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetMappingAction.java @@ -32,6 +32,7 @@ package org.opensearch.rest.action.admin.indices; +import org.opensearch.OpenSearchParseException; import org.opensearch.OpenSearchTimeoutException; import org.opensearch.action.ActionRunnable; import org.opensearch.action.admin.indices.mapping.get.GetMappingsRequest; @@ -39,6 +40,7 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.client.node.NodeClient; import org.opensearch.common.Strings; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.rest.BaseRestHandler; @@ -59,6 +61,12 @@ public class RestGetMappingAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestGetMappingAction.class); + private static final String MASTER_TIMEOUT_DEPRECATED_MESSAGE = + "Deprecated parameter [master_timeout] used. To promote inclusive language, please use [cluster_manager_timeout] instead. 
It will be unsupported in a future major version."; + private static final String DUPLICATE_PARAMETER_ERROR_MESSAGE = + "Please only use one of the request parameters [master_timeout, cluster_manager_timeout]."; + private final ThreadPool threadPool; public RestGetMappingAction(ThreadPool threadPool) { @@ -89,7 +97,16 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC final GetMappingsRequest getMappingsRequest = new GetMappingsRequest(); getMappingsRequest.indices(indices); getMappingsRequest.indicesOptions(IndicesOptions.fromRequest(request, getMappingsRequest.indicesOptions())); - final TimeValue timeout = request.paramAsTime("master_timeout", getMappingsRequest.masterNodeTimeout()); + TimeValue clusterManagerTimeout = request.paramAsTime("cluster_manager_timeout", getMappingsRequest.masterNodeTimeout()); + // TODO: Remove the if condition and statements inside after removing MASTER_ROLE. + if (request.hasParam("master_timeout")) { + deprecationLogger.deprecate("get_mapping_master_timeout_parameter", MASTER_TIMEOUT_DEPRECATED_MESSAGE); + if (request.hasParam("cluster_manager_timeout")) { + throw new OpenSearchParseException(DUPLICATE_PARAMETER_ERROR_MESSAGE); + } + clusterManagerTimeout = request.paramAsTime("master_timeout", getMappingsRequest.masterNodeTimeout()); + } + final TimeValue timeout = clusterManagerTimeout; getMappingsRequest.masterNodeTimeout(timeout); getMappingsRequest.local(request.paramAsBoolean("local", getMappingsRequest.local())); return channel -> client.admin().indices().getMappings(getMappingsRequest, new RestActionListener(channel) { diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetSettingsAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetSettingsAction.java index 589bdfdbe79dd..293078b3568e3 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetSettingsAction.java +++ 
b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetSettingsAction.java @@ -36,6 +36,7 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.client.node.NodeClient; import org.opensearch.common.Strings; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; import org.opensearch.rest.action.RestToXContentListener; @@ -49,6 +50,8 @@ public class RestGetSettingsAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestGetSettingsAction.class); + @Override public List routes() { return unmodifiableList( @@ -79,7 +82,8 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC .includeDefaults(renderDefaults) .names(names); getSettingsRequest.local(request.paramAsBoolean("local", getSettingsRequest.local())); - getSettingsRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getSettingsRequest.masterNodeTimeout())); + getSettingsRequest.masterNodeTimeout(request.paramAsTime("cluster_manager_timeout", getSettingsRequest.masterNodeTimeout())); + parseDeprecatedMasterTimeoutParameter(getSettingsRequest, request, deprecationLogger, getName()); return channel -> client.admin().indices().getSettings(getSettingsRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestIndexDeleteAliasesAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestIndexDeleteAliasesAction.java index 6a8098dfaf633..9f0b543a456f2 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestIndexDeleteAliasesAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestIndexDeleteAliasesAction.java @@ -35,6 +35,7 @@ import org.opensearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions; import 
org.opensearch.client.node.NodeClient; import org.opensearch.common.Strings; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; import org.opensearch.rest.action.RestToXContentListener; @@ -48,6 +49,8 @@ public class RestIndexDeleteAliasesAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestIndexPutAliasAction.class); + @Override public List routes() { return unmodifiableList(asList(new Route(DELETE, "/{index}/_alias/{name}"), new Route(DELETE, "/{index}/_aliases/{name}"))); @@ -65,7 +68,8 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC IndicesAliasesRequest indicesAliasesRequest = new IndicesAliasesRequest(); indicesAliasesRequest.timeout(request.paramAsTime("timeout", indicesAliasesRequest.timeout())); indicesAliasesRequest.addAliasAction(AliasActions.remove().indices(indices).aliases(aliases)); - indicesAliasesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", indicesAliasesRequest.masterNodeTimeout())); + indicesAliasesRequest.masterNodeTimeout(request.paramAsTime("cluster_manager_timeout", indicesAliasesRequest.masterNodeTimeout())); + parseDeprecatedMasterTimeoutParameter(indicesAliasesRequest, request, deprecationLogger, getName()); return channel -> client.admin().indices().aliases(indicesAliasesRequest, new RestToXContentListener<>(channel)); } diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestIndexPutAliasAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestIndexPutAliasAction.java index cc31dc1117946..8f8eeaf1c8e1c 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestIndexPutAliasAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestIndexPutAliasAction.java @@ -35,6 +35,7 @@ import 
org.opensearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions; import org.opensearch.client.node.NodeClient; import org.opensearch.common.Strings; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.xcontent.XContentParser; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; @@ -51,6 +52,8 @@ public class RestIndexPutAliasAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestIndexPutAliasAction.class); + @Override public List routes() { return unmodifiableList( @@ -124,7 +127,8 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC IndicesAliasesRequest indicesAliasesRequest = new IndicesAliasesRequest(); indicesAliasesRequest.timeout(request.paramAsTime("timeout", indicesAliasesRequest.timeout())); - indicesAliasesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", indicesAliasesRequest.masterNodeTimeout())); + indicesAliasesRequest.masterNodeTimeout(request.paramAsTime("cluster_manager_timeout", indicesAliasesRequest.masterNodeTimeout())); + parseDeprecatedMasterTimeoutParameter(indicesAliasesRequest, request, deprecationLogger, getName()); IndicesAliasesRequest.AliasActions aliasAction = AliasActions.add().indices(indices).alias(alias); if (routing != null) { diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestIndicesAliasesAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestIndicesAliasesAction.java index 138343a2e7335..20b67fa73a3f5 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestIndicesAliasesAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestIndicesAliasesAction.java @@ -34,6 +34,7 @@ import org.opensearch.action.admin.indices.alias.IndicesAliasesRequest; import org.opensearch.client.node.NodeClient; +import 
org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.xcontent.XContentParser; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; @@ -47,6 +48,8 @@ public class RestIndicesAliasesAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestIndicesAliasesAction.class); + @Override public String getName() { return "indices_aliases_action"; @@ -60,7 +63,8 @@ public List routes() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { IndicesAliasesRequest indicesAliasesRequest = new IndicesAliasesRequest(); - indicesAliasesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", indicesAliasesRequest.masterNodeTimeout())); + indicesAliasesRequest.masterNodeTimeout(request.paramAsTime("cluster_manager_timeout", indicesAliasesRequest.masterNodeTimeout())); + parseDeprecatedMasterTimeoutParameter(indicesAliasesRequest, request, deprecationLogger, getName()); indicesAliasesRequest.timeout(request.paramAsTime("timeout", indicesAliasesRequest.timeout())); try (XContentParser parser = request.contentParser()) { IndicesAliasesRequest.PARSER.parse(parser, indicesAliasesRequest, null); diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestOpenIndexAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestOpenIndexAction.java index 4b6450c35233f..a6d434b1e865e 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestOpenIndexAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestOpenIndexAction.java @@ -37,6 +37,7 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.client.node.NodeClient; import org.opensearch.common.Strings; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.rest.BaseRestHandler; import 
org.opensearch.rest.RestRequest; import org.opensearch.rest.action.RestToXContentListener; @@ -50,6 +51,8 @@ public class RestOpenIndexAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestOpenIndexAction.class); + @Override public List routes() { return unmodifiableList(asList(new Route(POST, "/_open"), new Route(POST, "/{index}/_open"))); @@ -64,7 +67,8 @@ public String getName() { public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { OpenIndexRequest openIndexRequest = new OpenIndexRequest(Strings.splitStringByCommaToArray(request.param("index"))); openIndexRequest.timeout(request.paramAsTime("timeout", openIndexRequest.timeout())); - openIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", openIndexRequest.masterNodeTimeout())); + openIndexRequest.masterNodeTimeout(request.paramAsTime("cluster_manager_timeout", openIndexRequest.masterNodeTimeout())); + parseDeprecatedMasterTimeoutParameter(openIndexRequest, request, deprecationLogger, getName()); openIndexRequest.indicesOptions(IndicesOptions.fromRequest(request, openIndexRequest.indicesOptions())); String waitForActiveShards = request.param("wait_for_active_shards"); if (waitForActiveShards != null) { diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestPutMappingAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestPutMappingAction.java index f65dea1ebe3d2..5c305b7c75038 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestPutMappingAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestPutMappingAction.java @@ -36,6 +36,7 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.client.node.NodeClient; import org.opensearch.common.Strings; +import org.opensearch.common.logging.DeprecationLogger; import 
org.opensearch.common.xcontent.XContentHelper; import org.opensearch.index.mapper.MapperService; import org.opensearch.rest.BaseRestHandler; @@ -54,6 +55,8 @@ public class RestPutMappingAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestPutMappingAction.class); + @Override public List routes() { return unmodifiableList( @@ -83,7 +86,8 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC putMappingRequest.source(sourceAsMap); putMappingRequest.timeout(request.paramAsTime("timeout", putMappingRequest.timeout())); - putMappingRequest.masterNodeTimeout(request.paramAsTime("master_timeout", putMappingRequest.masterNodeTimeout())); + putMappingRequest.masterNodeTimeout(request.paramAsTime("cluster_manager_timeout", putMappingRequest.masterNodeTimeout())); + parseDeprecatedMasterTimeoutParameter(putMappingRequest, request, deprecationLogger, getName()); putMappingRequest.indicesOptions(IndicesOptions.fromRequest(request, putMappingRequest.indicesOptions())); putMappingRequest.writeIndexOnly(request.paramAsBoolean("write_index_only", false)); return channel -> client.admin().indices().putMapping(putMappingRequest, new RestToXContentListener<>(channel)); diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestResizeHandler.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestResizeHandler.java index a0d479890f2d0..4168c7ad48e7c 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestResizeHandler.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestResizeHandler.java @@ -91,7 +91,8 @@ public final RestChannelConsumer prepareRequest(final RestRequest request, final resizeRequest.setCopySettings(copySettings); request.applyContentParser(resizeRequest::fromXContent); resizeRequest.timeout(request.paramAsTime("timeout", resizeRequest.timeout())); - 
resizeRequest.masterNodeTimeout(request.paramAsTime("master_timeout", resizeRequest.masterNodeTimeout())); + resizeRequest.masterNodeTimeout(request.paramAsTime("cluster_manager_timeout", resizeRequest.masterNodeTimeout())); + parseDeprecatedMasterTimeoutParameter(resizeRequest, request, deprecationLogger, getName()); resizeRequest.setWaitForActiveShards(ActiveShardCount.parseString(request.param("wait_for_active_shards"))); return channel -> client.admin().indices().resizeIndex(resizeRequest, new RestToXContentListener<>(channel)); } diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestRolloverIndexAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestRolloverIndexAction.java index 08b84cc6fe6cc..b9f8b936ff23e 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestRolloverIndexAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestRolloverIndexAction.java @@ -72,7 +72,8 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC request.applyContentParser(parser -> rolloverIndexRequest.fromXContent(parser)); rolloverIndexRequest.dryRun(request.paramAsBoolean("dry_run", false)); rolloverIndexRequest.timeout(request.paramAsTime("timeout", rolloverIndexRequest.timeout())); - rolloverIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", rolloverIndexRequest.masterNodeTimeout())); + rolloverIndexRequest.masterNodeTimeout(request.paramAsTime("cluster_manager_timeout", rolloverIndexRequest.masterNodeTimeout())); + parseDeprecatedMasterTimeoutParameter(rolloverIndexRequest, request, deprecationLogger, getName()); rolloverIndexRequest.getCreateIndexRequest() .waitForActiveShards(ActiveShardCount.parseString(request.param("wait_for_active_shards"))); return channel -> client.admin().indices().rolloverIndex(rolloverIndexRequest, new RestToXContentListener<>(channel)); diff --git 
a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestUpdateSettingsAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestUpdateSettingsAction.java index 8356901dbc7ab..bfb634d421f2d 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestUpdateSettingsAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestUpdateSettingsAction.java @@ -36,6 +36,7 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.client.node.NodeClient; import org.opensearch.common.Strings; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Settings; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; @@ -52,6 +53,8 @@ public class RestUpdateSettingsAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestUpdateSettingsAction.class); + @Override public List routes() { return unmodifiableList(asList(new Route(PUT, "/{index}/_settings"), new Route(PUT, "/_settings"))); @@ -67,7 +70,8 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC UpdateSettingsRequest updateSettingsRequest = updateSettingsRequest(Strings.splitStringByCommaToArray(request.param("index"))); updateSettingsRequest.timeout(request.paramAsTime("timeout", updateSettingsRequest.timeout())); updateSettingsRequest.setPreserveExisting(request.paramAsBoolean("preserve_existing", updateSettingsRequest.isPreserveExisting())); - updateSettingsRequest.masterNodeTimeout(request.paramAsTime("master_timeout", updateSettingsRequest.masterNodeTimeout())); + updateSettingsRequest.masterNodeTimeout(request.paramAsTime("cluster_manager_timeout", updateSettingsRequest.masterNodeTimeout())); + parseDeprecatedMasterTimeoutParameter(updateSettingsRequest, request, deprecationLogger, getName()); updateSettingsRequest.indicesOptions(IndicesOptions.fromRequest(request, 
updateSettingsRequest.indicesOptions())); updateSettingsRequest.fromXContent(request.contentParser()); diff --git a/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java b/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java index 8bc9afc152382..df93d5c16d8e0 100644 --- a/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java +++ b/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java @@ -24,6 +24,23 @@ import org.opensearch.rest.action.admin.cluster.RestClusterRerouteAction; import org.opensearch.rest.action.admin.cluster.RestClusterStateAction; import org.opensearch.rest.action.admin.cluster.RestClusterUpdateSettingsAction; +import org.opensearch.rest.action.admin.cluster.dangling.RestDeleteDanglingIndexAction; +import org.opensearch.rest.action.admin.cluster.dangling.RestImportDanglingIndexAction; +import org.opensearch.rest.action.admin.indices.RestAddIndexBlockAction; +import org.opensearch.rest.action.admin.indices.RestCloseIndexAction; +import org.opensearch.rest.action.admin.indices.RestCreateIndexAction; +import org.opensearch.rest.action.admin.indices.RestDeleteIndexAction; +import org.opensearch.rest.action.admin.indices.RestGetIndicesAction; +import org.opensearch.rest.action.admin.indices.RestGetMappingAction; +import org.opensearch.rest.action.admin.indices.RestGetSettingsAction; +import org.opensearch.rest.action.admin.indices.RestIndexDeleteAliasesAction; +import org.opensearch.rest.action.admin.indices.RestIndexPutAliasAction; +import org.opensearch.rest.action.admin.indices.RestIndicesAliasesAction; +import org.opensearch.rest.action.admin.indices.RestOpenIndexAction; +import org.opensearch.rest.action.admin.indices.RestPutMappingAction; +import org.opensearch.rest.action.admin.indices.RestResizeHandler; +import org.opensearch.rest.action.admin.indices.RestRolloverIndexAction; +import 
org.opensearch.rest.action.admin.indices.RestUpdateSettingsAction; import org.opensearch.rest.action.cat.RestAllocationAction; import org.opensearch.rest.action.cat.RestRepositoriesAction; import org.opensearch.rest.action.cat.RestThreadPoolAction; @@ -45,6 +62,7 @@ import java.util.Collections; import static org.hamcrest.Matchers.containsString; +import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_READ_ONLY_SETTING; /** * As of 2.0, the request parameter 'master_timeout' in all applicable REST APIs is deprecated, @@ -253,6 +271,160 @@ public void testClusterPendingTasks() { assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); } + public void testAddIndexBlock() { + FakeRestRequest request = new FakeRestRequest(); + request.params().put("cluster_manager_timeout", "1h"); + request.params().put("master_timeout", "3s"); + request.params().put("block", "metadata"); + NodeClient client = new NodeClient(Settings.builder().put(INDEX_READ_ONLY_SETTING.getKey(), Boolean.FALSE).build(), threadPool); + RestAddIndexBlockAction action = new RestAddIndexBlockAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.prepareRequest(request, client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testCloseIndex() { + RestCloseIndexAction action = new RestCloseIndexAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.prepareRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testCreateIndex() { + RestCreateIndexAction action = new RestCreateIndexAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.prepareRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), 
containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testDeleteIndex() { + RestDeleteIndexAction action = new RestDeleteIndexAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.prepareRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testGetIndices() { + RestGetIndicesAction action = new RestGetIndicesAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.prepareRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testGetMapping() { + RestGetMappingAction action = new RestGetMappingAction(threadPool); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.prepareRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testGetSettings() { + RestGetSettingsAction action = new RestGetSettingsAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.prepareRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testIndexDeleteAliases() { + FakeRestRequest request = new FakeRestRequest(); + request.params().put("cluster_manager_timeout", "1h"); + request.params().put("master_timeout", "3s"); + request.params().put("name", "*"); + request.params().put("index", "test"); + RestIndexDeleteAliasesAction action = new RestIndexDeleteAliasesAction(); + Exception e = assertThrows(OpenSearchParseException.class, () 
-> action.prepareRequest(request, client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testIndexPutAlias() { + RestIndexPutAliasAction action = new RestIndexPutAliasAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.prepareRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testIndicesAliases() { + RestIndicesAliasesAction action = new RestIndicesAliasesAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.prepareRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testOpenIndex() { + RestOpenIndexAction action = new RestOpenIndexAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.prepareRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testPutMapping() { + RestPutMappingAction action = new RestPutMappingAction(); + Exception e = assertThrows( + OpenSearchParseException.class, + () -> action.prepareRequest(getRestRequestWithBodyWithBothParams(), client) + ); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testShrinkIndex() { + RestResizeHandler.RestShrinkIndexAction action = new RestResizeHandler.RestShrinkIndexAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.prepareRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), 
containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testSplitIndex() { + RestResizeHandler.RestSplitIndexAction action = new RestResizeHandler.RestSplitIndexAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.prepareRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testCloneIndex() { + RestResizeHandler.RestCloneIndexAction action = new RestResizeHandler.RestCloneIndexAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.prepareRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testRolloverIndex() { + RestRolloverIndexAction action = new RestRolloverIndexAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.prepareRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testUpdateSettings() { + RestUpdateSettingsAction action = new RestUpdateSettingsAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.prepareRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testDeleteDanglingIndex() { + FakeRestRequest request = new FakeRestRequest(); + request.params().put("cluster_manager_timeout", "1h"); + request.params().put("master_timeout", "3s"); + request.params().put("index_uuid", "test"); + RestDeleteDanglingIndexAction action = new RestDeleteDanglingIndexAction(); + Exception e = 
assertThrows(OpenSearchParseException.class, () -> action.prepareRequest(request, client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testImportDanglingIndex() { + FakeRestRequest request = new FakeRestRequest(); + request.params().put("cluster_manager_timeout", "1h"); + request.params().put("master_timeout", "3s"); + request.params().put("index_uuid", "test"); + RestImportDanglingIndexAction action = new RestImportDanglingIndexAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.prepareRequest(request, client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + private MasterNodeRequest getMasterNodeRequest() { return new MasterNodeRequest() { @Override From 1dbeda0f325960a4562f39a75b3e50d6b468edbf Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Tue, 5 Apr 2022 18:49:42 -0400 Subject: [PATCH 052/653] Update azure-storage-blob to 12.15.0 (#2774) Signed-off-by: Andriy Redko --- plugins/repository-azure/build.gradle | 2 +- .../licenses/azure-storage-blob-12.14.4.jar.sha1 | 1 - .../licenses/azure-storage-blob-12.15.0.jar.sha1 | 1 + .../repositories/azure/AzureBlobContainerRetriesTests.java | 7 ++++++- .../src/main/java/fixture/azure/AzureHttpHandler.java | 4 +++- 5 files changed, 11 insertions(+), 4 deletions(-) delete mode 100644 plugins/repository-azure/licenses/azure-storage-blob-12.14.4.jar.sha1 create mode 100644 plugins/repository-azure/licenses/azure-storage-blob-12.15.0.jar.sha1 diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 648c045d97d01..040a29750b967 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -54,7 +54,7 @@ dependencies { api "io.netty:netty-resolver-dns:${versions.netty}" api 
"io.netty:netty-transport-native-unix-common:${versions.netty}" implementation project(':modules:transport-netty4') - api 'com.azure:azure-storage-blob:12.14.4' + api 'com.azure:azure-storage-blob:12.15.0' api 'org.reactivestreams:reactive-streams:1.0.3' api 'io.projectreactor:reactor-core:3.4.15' api 'io.projectreactor.netty:reactor-netty:1.0.17' diff --git a/plugins/repository-azure/licenses/azure-storage-blob-12.14.4.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-blob-12.14.4.jar.sha1 deleted file mode 100644 index 5333f8fa90ada..0000000000000 --- a/plugins/repository-azure/licenses/azure-storage-blob-12.14.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2b92020693d09e4980b96d278e8038a1087afea0 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-storage-blob-12.15.0.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-blob-12.15.0.jar.sha1 new file mode 100644 index 0000000000000..513cb017f798d --- /dev/null +++ b/plugins/repository-azure/licenses/azure-storage-blob-12.15.0.jar.sha1 @@ -0,0 +1 @@ +a53a6bdf7564f4e3a7b0b93cd96b7f5f95c03d36 \ No newline at end of file diff --git a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java index c9e6e299c7120..c973cb325b658 100644 --- a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java +++ b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java @@ -231,6 +231,8 @@ public void testReadBlobWithRetries() throws Exception { exchange.getResponseHeaders().add("Content-Type", "application/octet-stream"); exchange.getResponseHeaders().add("Content-Length", String.valueOf(length)); exchange.getResponseHeaders().add("x-ms-blob-type", "blockblob"); + exchange.getResponseHeaders() + .add("Content-Range", 
String.format("bytes %d-%d/%d", rangeStart, bytes.length, bytes.length)); exchange.sendResponseHeaders(RestStatus.OK.getStatus(), length); exchange.getResponseBody().write(bytes, rangeStart, length); return; @@ -247,7 +249,8 @@ public void testReadBlobWithRetries() throws Exception { final BlobContainer blobContainer = createBlobContainer(maxRetries); try (InputStream inputStream = blobContainer.readBlob("read_blob_max_retries")) { assertArrayEquals(bytes, BytesReference.toBytes(Streams.readFully(inputStream))); - assertThat(countDownHead.isCountedDown(), is(true)); + // No more getProperties() calls in BlobClientBase::openInputStream(), HEAD should not be invoked + assertThat(countDownHead.isCountedDown(), is(false)); assertThat(countDownGet.isCountedDown(), is(true)); } } @@ -278,6 +281,8 @@ public void testReadRangeBlobWithRetries() throws Exception { assertThat(length, lessThanOrEqualTo(bytes.length - rangeStart)); exchange.getResponseHeaders().add("Content-Type", "application/octet-stream"); exchange.getResponseHeaders().add("Content-Length", String.valueOf(length)); + exchange.getResponseHeaders() + .add("Content-Range", String.format("bytes %d-%d/%d", rangeStart, rangeEnd.get(), bytes.length)); exchange.getResponseHeaders().add("x-ms-blob-type", "blockblob"); exchange.sendResponseHeaders(RestStatus.OK.getStatus(), length); exchange.getResponseBody().write(bytes, rangeStart, length); diff --git a/test/fixtures/azure-fixture/src/main/java/fixture/azure/AzureHttpHandler.java b/test/fixtures/azure-fixture/src/main/java/fixture/azure/AzureHttpHandler.java index f12a4579a2d0c..8389bd839d165 100644 --- a/test/fixtures/azure-fixture/src/main/java/fixture/azure/AzureHttpHandler.java +++ b/test/fixtures/azure-fixture/src/main/java/fixture/azure/AzureHttpHandler.java @@ -150,12 +150,14 @@ public void handle(final HttpExchange exchange) throws IOException { } final int start = Integer.parseInt(matcher.group(1)); - final int length = Integer.parseInt(matcher.group(2)) - 
start + 1; + final int end = Integer.parseInt(matcher.group(2)); + final int length = Math.min(end - start + 1, blob.length()); exchange.getResponseHeaders().add("Content-Type", "application/octet-stream"); exchange.getResponseHeaders().add("Content-Length", String.valueOf(length)); exchange.getResponseHeaders().add("x-ms-blob-type", "blockblob"); exchange.getResponseHeaders().add("x-ms-request-server-encrypted", "false"); + exchange.getResponseHeaders().add("Content-Range", String.format("bytes %d-%d/%d", start, Math.min(end, length), length)); exchange.sendResponseHeaders(RestStatus.OK.getStatus(), length); exchange.getResponseBody().write(blob.toBytesRef().bytes, start, length); From ed040e9f1a36abc23b7605cc47b48bb57a569c04 Mon Sep 17 00:00:00 2001 From: Tianli Feng Date: Tue, 5 Apr 2022 17:48:39 -0700 Subject: [PATCH 053/653] Replace blacklist in Gradle build environment configuration (#2752) - Replace `blacklist` with `denylist` in all `tests.rest.blacklist` and `REST_TESTS_BLACKLIST` Signed-off-by: Tianli Feng --- TESTING.md | 2 +- plugins/repository-s3/build.gradle | 6 +++--- .../test/junit/listeners/ReproduceInfoPrinter.java | 2 +- .../rest/yaml/OpenSearchClientYamlSuiteTestCase.java | 12 ++++++------ 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/TESTING.md b/TESTING.md index 4a2a786469b67..d6f246dbd6dcc 100644 --- a/TESTING.md +++ b/TESTING.md @@ -245,7 +245,7 @@ The YAML REST tests support all the options provided by the randomized runner, p - `tests.rest.suite`: comma separated paths of the test suites to be run (by default loaded from /rest-api-spec/test). It is possible to run only a subset of the tests providing a sub-folder or even a single yaml file (the default /rest-api-spec/test prefix is optional when files are loaded from classpath) e.g. `-Dtests.rest.suite=index,get,create/10_with_id` -- `tests.rest.blacklist`: comma separated globs that identify tests that are denylisted and need to be skipped e.g. 
`-Dtests.rest.blacklist=index/**/Index document,get/10_basic/**` +- `tests.rest.denylist`: comma separated globs that identify tests that are denylisted and need to be skipped e.g. `-Dtests.rest.denylist=index/**/Index document,get/10_basic/**` Java REST tests can be run with the "javaRestTest" task. diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index 072683e3bd5e5..33448b0039ce2 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -190,7 +190,7 @@ internalClusterTest { } yamlRestTest { - systemProperty 'tests.rest.blacklist', ( + systemProperty 'tests.rest.denylist', ( useFixture ? ['repository_s3/50_repository_ecs_credentials/*'] : @@ -246,7 +246,7 @@ if (useFixture) { setClasspath(yamlRestTestSourceSet.getRuntimeClasspath()) // Minio only supports a single access key, see https://github.com/minio/minio/pull/5968 - systemProperty 'tests.rest.blacklist', [ + systemProperty 'tests.rest.denylist', [ 'repository_s3/30_repository_temporary_credentials/*', 'repository_s3/40_repository_ec2_credentials/*', 'repository_s3/50_repository_ecs_credentials/*' @@ -272,7 +272,7 @@ if (useFixture) { SourceSet yamlRestTestSourceSet = sourceSets.getByName(YamlRestTestPlugin.SOURCE_SET_NAME) setTestClassesDirs(yamlRestTestSourceSet.getOutput().getClassesDirs()) setClasspath(yamlRestTestSourceSet.getRuntimeClasspath()) - systemProperty 'tests.rest.blacklist', [ + systemProperty 'tests.rest.denylist', [ 'repository_s3/10_basic/*', 'repository_s3/20_repository_permanent_credentials/*', 'repository_s3/30_repository_temporary_credentials/*', diff --git a/test/framework/src/main/java/org/opensearch/test/junit/listeners/ReproduceInfoPrinter.java b/test/framework/src/main/java/org/opensearch/test/junit/listeners/ReproduceInfoPrinter.java index 668526d9d6d0d..3d5a906e50836 100644 --- a/test/framework/src/main/java/org/opensearch/test/junit/listeners/ReproduceInfoPrinter.java +++ 
b/test/framework/src/main/java/org/opensearch/test/junit/listeners/ReproduceInfoPrinter.java @@ -200,7 +200,7 @@ private ReproduceErrorMessageBuilder appendESProperties() { public ReproduceErrorMessageBuilder appendClientYamlSuiteProperties() { return appendProperties( OpenSearchClientYamlSuiteTestCase.REST_TESTS_SUITE, - OpenSearchClientYamlSuiteTestCase.REST_TESTS_BLACKLIST + OpenSearchClientYamlSuiteTestCase.REST_TESTS_DENYLIST ); } diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/OpenSearchClientYamlSuiteTestCase.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/OpenSearchClientYamlSuiteTestCase.java index 5a404ccd4b9fc..70e3adbefbfc3 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/OpenSearchClientYamlSuiteTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/OpenSearchClientYamlSuiteTestCase.java @@ -89,14 +89,14 @@ public abstract class OpenSearchClientYamlSuiteTestCase extends OpenSearchRestTe public static final String REST_TESTS_SUITE = "tests.rest.suite"; /** * Property that allows to denylist some of the REST tests based on a comma separated list of globs - * e.g. "-Dtests.rest.blacklist=get/10_basic/*" + * e.g. "-Dtests.rest.denylist=get/10_basic/*" */ - public static final String REST_TESTS_BLACKLIST = "tests.rest.blacklist"; + public static final String REST_TESTS_DENYLIST = "tests.rest.denylist"; /** - * We use tests.rest.blacklist in build files to denylist tests; this property enables a user to add additional denylisted tests on + * We use tests.rest.denylist in build files to denylist tests; this property enables a user to add additional denylisted tests on * top of the tests denylisted in the build. 
*/ - public static final String REST_TESTS_BLACKLIST_ADDITIONS = "tests.rest.blacklist_additions"; + public static final String REST_TESTS_DENYLIST_ADDITIONS = "tests.rest.denylist_additions"; /** * Property that allows to control whether spec validation is enabled or not (default true). */ @@ -154,12 +154,12 @@ public void initAndResetContext() throws Exception { clientYamlTestClient = initClientYamlTestClient(restSpec, client(), hosts, minVersion, masterVersion); restTestExecutionContext = new ClientYamlTestExecutionContext(clientYamlTestClient, randomizeContentType()); adminExecutionContext = new ClientYamlTestExecutionContext(clientYamlTestClient, false); - final String[] denylist = resolvePathsProperty(REST_TESTS_BLACKLIST, null); + final String[] denylist = resolvePathsProperty(REST_TESTS_DENYLIST, null); denylistPathMatchers = new ArrayList<>(); for (final String entry : denylist) { denylistPathMatchers.add(new BlacklistedPathPatternMatcher(entry)); } - final String[] denylistAdditions = resolvePathsProperty(REST_TESTS_BLACKLIST_ADDITIONS, null); + final String[] denylistAdditions = resolvePathsProperty(REST_TESTS_DENYLIST_ADDITIONS, null); for (final String entry : denylistAdditions) { denylistPathMatchers.add(new BlacklistedPathPatternMatcher(entry)); } From ce5c55dbbc58f9309662e2919834ed54358d28cb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 6 Apr 2022 10:33:37 -0400 Subject: [PATCH 054/653] Bump geoip2 from 2.16.1 to 3.0.1 in /modules/ingest-geoip (#2646) * Bump geoip2 from 2.16.1 to 3.0.1 in /modules/ingest-geoip Bumps [geoip2](https://github.com/maxmind/GeoIP2-java) from 2.16.1 to 3.0.1. 
- [Release notes](https://github.com/maxmind/GeoIP2-java/releases) - [Changelog](https://github.com/maxmind/GeoIP2-java/blob/main/CHANGELOG.md) - [Commits](https://github.com/maxmind/GeoIP2-java/compare/v2.16.1...v3.0.1) --- updated-dependencies: - dependency-name: com.maxmind.geoip2:geoip2 dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Fix breaking change with geoip2 version 3.0.1. Signed-off-by: Marc Handalian * Fix precommit failures caused by ignoreMissingClasses check. Precommit is failing because forbiddenApis was configured to ignore missing classes that are present. Signed-off-by: Marc Handalian * Change asn in GeoIpProcessorTests from int to long. Signed-off-by: Marc Handalian Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] Co-authored-by: Marc Handalian --- modules/ingest-geoip/build.gradle | 20 +------------------ .../licenses/geoip2-2.16.1.jar.sha1 | 1 - .../licenses/geoip2-3.0.1.jar.sha1 | 1 + .../ingest/geoip/GeoIpProcessor.java | 2 +- .../ingest/geoip/GeoIpProcessorTests.java | 2 +- 5 files changed, 4 insertions(+), 22 deletions(-) delete mode 100644 modules/ingest-geoip/licenses/geoip2-2.16.1.jar.sha1 create mode 100644 modules/ingest-geoip/licenses/geoip2-3.0.1.jar.sha1 diff --git a/modules/ingest-geoip/build.gradle b/modules/ingest-geoip/build.gradle index b1d5afbe68a17..f3be0fe61d4be 100644 --- a/modules/ingest-geoip/build.gradle +++ b/modules/ingest-geoip/build.gradle @@ -39,7 +39,7 @@ opensearchplugin { } dependencies { - api('com.maxmind.geoip2:geoip2:2.16.1') + api('com.maxmind.geoip2:geoip2:3.0.1') // geoip2 dependencies: api("com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}") api("com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}") @@ -67,24 +67,6 @@ tasks.named("bundlePlugin").configure { } } 
-tasks.named("thirdPartyAudit").configure { - ignoreMissingClasses( - // geoip WebServiceClient needs apache http client, but we're not using WebServiceClient: - 'org.apache.http.HttpEntity', - 'org.apache.http.HttpResponse', - 'org.apache.http.StatusLine', - 'org.apache.http.client.config.RequestConfig$Builder', - 'org.apache.http.client.config.RequestConfig', - 'org.apache.http.client.methods.CloseableHttpResponse', - 'org.apache.http.client.methods.HttpGet', - 'org.apache.http.client.utils.URIBuilder', - 'org.apache.http.impl.auth.BasicScheme', - 'org.apache.http.impl.client.CloseableHttpClient', - 'org.apache.http.impl.client.HttpClientBuilder', - 'org.apache.http.util.EntityUtils' - ) -} - if (Os.isFamily(Os.FAMILY_WINDOWS)) { tasks.named("test").configure { // Windows cannot cleanup database files properly unless it loads everything on heap. diff --git a/modules/ingest-geoip/licenses/geoip2-2.16.1.jar.sha1 b/modules/ingest-geoip/licenses/geoip2-2.16.1.jar.sha1 deleted file mode 100644 index 0221476794d3a..0000000000000 --- a/modules/ingest-geoip/licenses/geoip2-2.16.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c92040bd6ef2cb59be71c6749d08c141ca546caf \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/geoip2-3.0.1.jar.sha1 b/modules/ingest-geoip/licenses/geoip2-3.0.1.jar.sha1 new file mode 100644 index 0000000000000..f1d5ac5aea546 --- /dev/null +++ b/modules/ingest-geoip/licenses/geoip2-3.0.1.jar.sha1 @@ -0,0 +1 @@ +8a814ae92a1d8c35f82d0ff76d86927c191b7916 \ No newline at end of file diff --git a/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/GeoIpProcessor.java b/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/GeoIpProcessor.java index 384ae6f14dc4d..030f75bf48e18 100644 --- a/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/GeoIpProcessor.java +++ b/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/GeoIpProcessor.java @@ -364,7 +364,7 @@ private Map retrieveAsnGeoData(InetAddress 
ipAddress) { }) ); - Integer asn = response.getAutonomousSystemNumber(); + Long asn = response.getAutonomousSystemNumber(); String organization_name = response.getAutonomousSystemOrganization(); Network network = response.getNetwork(); diff --git a/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/GeoIpProcessorTests.java b/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/GeoIpProcessorTests.java index f06802af8b571..34c80fec520aa 100644 --- a/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/GeoIpProcessorTests.java +++ b/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/GeoIpProcessorTests.java @@ -308,7 +308,7 @@ public void testAsn() throws Exception { Map geoData = (Map) ingestDocument.getSourceAndMetadata().get("target_field"); assertThat(geoData.size(), equalTo(4)); assertThat(geoData.get("ip"), equalTo(ip)); - assertThat(geoData.get("asn"), equalTo(1136)); + assertThat(geoData.get("asn"), equalTo(1136L)); assertThat(geoData.get("organization_name"), equalTo("KPN B.V.")); assertThat(geoData.get("network"), equalTo("82.168.0.0/14")); } From dd24e17ea6ed557829e6094c0a2af9f05c1cdebd Mon Sep 17 00:00:00 2001 From: Tianli Feng Date: Wed, 6 Apr 2022 12:01:12 -0700 Subject: [PATCH 055/653] Fix issue that deprecated setting 'cluster.initial_master_nodes' is not identified in node bootstrap check (#2779) * Fix issue that deprecated setting 'cluster.initial_master_nodes' is not identified during node bootstrap Signed-off-by: Tianli Feng * Restore a variable name Signed-off-by: Tianli Feng --- .../coordination/ClusterBootstrapService.java | 13 +++++++------ .../opensearch/bootstrap/BootstrapChecksTests.java | 2 ++ .../coordination/ClusterBootstrapServiceTests.java | 11 ++++++++++- 3 files changed, 19 insertions(+), 7 deletions(-) diff --git a/server/src/main/java/org/opensearch/cluster/coordination/ClusterBootstrapService.java b/server/src/main/java/org/opensearch/cluster/coordination/ClusterBootstrapService.java 
index 8df561149eb3d..c7708a54f9031 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/ClusterBootstrapService.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/ClusterBootstrapService.java @@ -113,12 +113,12 @@ public ClusterBootstrapService( BooleanSupplier isBootstrappedSupplier, Consumer votingConfigurationConsumer ) { + // TODO: Remove variable 'initialClusterManagerSettingName' after removing MASTER_ROLE. + String initialClusterManagerSettingName = INITIAL_CLUSTER_MANAGER_NODES_SETTING.exists(settings) + ? INITIAL_CLUSTER_MANAGER_NODES_SETTING.getKey() + : INITIAL_MASTER_NODES_SETTING.getKey(); if (DiscoveryModule.isSingleNodeDiscovery(settings)) { if (INITIAL_CLUSTER_MANAGER_NODES_SETTING.existsOrFallbackExists(settings)) { - // TODO: Remove variable 'initialClusterManagerSettingName' after removing MASTER_ROLE. - String initialClusterManagerSettingName = INITIAL_CLUSTER_MANAGER_NODES_SETTING.exists(settings) - ? INITIAL_CLUSTER_MANAGER_NODES_SETTING.getKey() - : INITIAL_MASTER_NODES_SETTING.getKey(); throw new IllegalArgumentException( "setting [" + initialClusterManagerSettingName @@ -145,7 +145,7 @@ public ClusterBootstrapService( bootstrapRequirements = unmodifiableSet(new LinkedHashSet<>(initialMasterNodes)); if (bootstrapRequirements.size() != initialMasterNodes.size()) { throw new IllegalArgumentException( - "setting [" + INITIAL_CLUSTER_MANAGER_NODES_SETTING.getKey() + "] contains duplicates: " + initialMasterNodes + "setting [" + initialClusterManagerSettingName + "] contains duplicates: " + initialMasterNodes ); } unconfiguredBootstrapTimeout = discoveryIsConfigured(settings) ? 
null : UNCONFIGURED_BOOTSTRAP_TIMEOUT_SETTING.get(settings); @@ -163,7 +163,8 @@ public static boolean discoveryIsConfigured(Settings settings) { LEGACY_DISCOVERY_HOSTS_PROVIDER_SETTING, DISCOVERY_SEED_HOSTS_SETTING, LEGACY_DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING, - INITIAL_CLUSTER_MANAGER_NODES_SETTING + INITIAL_CLUSTER_MANAGER_NODES_SETTING, + INITIAL_MASTER_NODES_SETTING ).anyMatch(s -> s.exists(settings)); } diff --git a/server/src/test/java/org/opensearch/bootstrap/BootstrapChecksTests.java b/server/src/test/java/org/opensearch/bootstrap/BootstrapChecksTests.java index d941c624509da..c59ca1dd60dc7 100644 --- a/server/src/test/java/org/opensearch/bootstrap/BootstrapChecksTests.java +++ b/server/src/test/java/org/opensearch/bootstrap/BootstrapChecksTests.java @@ -818,5 +818,7 @@ public void testDiscoveryConfiguredCheck() throws NodeValidationException { ensureChecksPass.accept(Settings.builder().putList(ClusterBootstrapService.INITIAL_CLUSTER_MANAGER_NODES_SETTING.getKey())); ensureChecksPass.accept(Settings.builder().putList(DiscoveryModule.DISCOVERY_SEED_PROVIDERS_SETTING.getKey())); ensureChecksPass.accept(Settings.builder().putList(SettingsBasedSeedHostsProvider.DISCOVERY_SEED_HOSTS_SETTING.getKey())); + // Validate the deprecated setting is still valid during the node bootstrap. 
+ ensureChecksPass.accept(Settings.builder().putList(ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.getKey())); } } diff --git a/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceTests.java b/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceTests.java index dd55d078fe2c6..3e4148cef61cd 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceTests.java @@ -166,10 +166,19 @@ public void testDoesNothingByDefaultIfSeedHostsConfigured() { testDoesNothingWithSettings(builder().putList(DISCOVERY_SEED_HOSTS_SETTING.getKey())); } - public void testDoesNothingByDefaultIfMasterNodesConfigured() { + public void testDoesNothingByDefaultIfClusterManagerNodesConfigured() { testDoesNothingWithSettings(builder().putList(INITIAL_CLUSTER_MANAGER_NODES_SETTING.getKey())); } + // Validate the deprecated setting is still valid during the cluster bootstrap. + public void testDoesNothingByDefaultIfMasterNodesConfigured() { + testDoesNothingWithSettings(builder().putList(INITIAL_MASTER_NODES_SETTING.getKey())); + assertWarnings( + "[cluster.initial_master_nodes] setting was deprecated in OpenSearch and will be removed in a future release! " + + "See the breaking changes documentation for the next major version." 
+ ); + } + public void testDoesNothingByDefaultOnMasterIneligibleNodes() { localNode = new DiscoveryNode( "local", From 0244b2a2df1816cd7bdcca85bd079c7e3e580155 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Wed, 6 Apr 2022 17:28:14 -0400 Subject: [PATCH 056/653] Update azure-storage-blob to 12.15.0: fix test flakiness (#2795) Signed-off-by: Andriy Redko --- .../repositories/azure/AzureBlobContainerRetriesTests.java | 5 ++--- .../src/main/java/fixture/azure/AzureHttpHandler.java | 4 +++- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java index c973cb325b658..e8417f9ceaf2c 100644 --- a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java +++ b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java @@ -231,8 +231,7 @@ public void testReadBlobWithRetries() throws Exception { exchange.getResponseHeaders().add("Content-Type", "application/octet-stream"); exchange.getResponseHeaders().add("Content-Length", String.valueOf(length)); exchange.getResponseHeaders().add("x-ms-blob-type", "blockblob"); - exchange.getResponseHeaders() - .add("Content-Range", String.format("bytes %d-%d/%d", rangeStart, bytes.length, bytes.length)); + exchange.getResponseHeaders().add("Content-Range", "bytes " + rangeStart + "-" + bytes.length + "/" + bytes.length); exchange.sendResponseHeaders(RestStatus.OK.getStatus(), length); exchange.getResponseBody().write(bytes, rangeStart, length); return; @@ -282,7 +281,7 @@ public void testReadRangeBlobWithRetries() throws Exception { exchange.getResponseHeaders().add("Content-Type", "application/octet-stream"); exchange.getResponseHeaders().add("Content-Length", String.valueOf(length)); exchange.getResponseHeaders() 
- .add("Content-Range", String.format("bytes %d-%d/%d", rangeStart, rangeEnd.get(), bytes.length)); + .add("Content-Range", "bytes " + rangeStart + "-" + rangeEnd.get() + "/" + bytes.length); exchange.getResponseHeaders().add("x-ms-blob-type", "blockblob"); exchange.sendResponseHeaders(RestStatus.OK.getStatus(), length); exchange.getResponseBody().write(bytes, rangeStart, length); diff --git a/test/fixtures/azure-fixture/src/main/java/fixture/azure/AzureHttpHandler.java b/test/fixtures/azure-fixture/src/main/java/fixture/azure/AzureHttpHandler.java index 8389bd839d165..4879425b7bcd6 100644 --- a/test/fixtures/azure-fixture/src/main/java/fixture/azure/AzureHttpHandler.java +++ b/test/fixtures/azure-fixture/src/main/java/fixture/azure/AzureHttpHandler.java @@ -157,7 +157,9 @@ public void handle(final HttpExchange exchange) throws IOException { exchange.getResponseHeaders().add("Content-Length", String.valueOf(length)); exchange.getResponseHeaders().add("x-ms-blob-type", "blockblob"); exchange.getResponseHeaders().add("x-ms-request-server-encrypted", "false"); - exchange.getResponseHeaders().add("Content-Range", String.format("bytes %d-%d/%d", start, Math.min(end, length), length)); + exchange.getResponseHeaders() + .add("Content-Range", "bytes " + start + "-" + Math.min(end, length) + "/" + blob.length()); + exchange.sendResponseHeaders(RestStatus.OK.getStatus(), length); exchange.getResponseBody().write(blob.toBytesRef().bytes, start, length); From 566ebfa4fef4bba2f71cb6964c03ab5320be98e0 Mon Sep 17 00:00:00 2001 From: Kartik Date: Thu, 7 Apr 2022 14:35:40 -0700 Subject: [PATCH 057/653] Bugfix to guard against stack overflow errors caused by very large reg-ex input (#2810) * Bugfix to guard against stack overflow errors caused by very large reg-ex input This change fixes a code path that did not properly impose the index-level max_regex_length limit. 
Therefore, it was possible to provide an arbitrarily large string as the include/exclude reg-ex value under search aggregations. This exposed the underlying node to crashes from a StackOverflowError, due to how the Lucene RegExp class processes strings using stack frames. Signed-off-by: Kartik Ganesh * Adding integration tests for large string RegEx Signed-off-by: Kartik Ganesh * Spotless Signed-off-by: Kartik Ganesh --- .../AggregationsIntegrationIT.java | 60 ++++++++++++ .../bucket/terms/IncludeExclude.java | 94 ++++++++++--------- .../terms/RareTermsAggregatorFactory.java | 6 +- .../SignificantTermsAggregatorFactory.java | 13 ++- .../SignificantTextAggregatorFactory.java | 6 +- .../bucket/terms/TermsAggregatorFactory.java | 11 ++- .../aggregations/bucket/RareTermsTests.java | 7 +- .../bucket/SignificantTermsTests.java | 7 +- .../aggregations/bucket/TermsTests.java | 7 +- .../terms/BinaryTermsAggregatorTests.java | 3 +- .../terms/NumericTermsAggregatorTests.java | 3 +- .../support/IncludeExcludeTests.java | 12 +-- 12 files changed, 156 insertions(+), 73 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java index 6778765599fe9..b73b7722f9728 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java @@ -32,10 +32,18 @@ package org.opensearch.search.aggregations; +import org.opensearch.OpenSearchException; import org.opensearch.action.index.IndexRequestBuilder; +import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.unit.TimeValue; +import org.opensearch.search.aggregations.bucket.terms.IncludeExclude; +import
org.opensearch.search.aggregations.bucket.terms.RareTermsAggregationBuilder; +import org.opensearch.search.aggregations.bucket.terms.SignificantTermsAggregationBuilder; +import org.opensearch.search.aggregations.bucket.terms.SignificantTermsAggregatorFactory; import org.opensearch.search.aggregations.bucket.terms.Terms; +import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder; +import org.opensearch.search.aggregations.bucket.terms.TermsAggregatorFactory; import org.opensearch.test.OpenSearchIntegTestCase; import java.util.ArrayList; @@ -50,6 +58,11 @@ public class AggregationsIntegrationIT extends OpenSearchIntegTestCase { static int numDocs; + private static final String LARGE_STRING = "a".repeat(2000); + private static final String LARGE_STRING_EXCEPTION_MESSAGE = "The length of regex [" + + LARGE_STRING.length() + + "] used in the request has exceeded the allowed maximum"; + @Override public void setupSuiteScopeCluster() throws Exception { assertAcked(prepareCreate("index").setMapping("f", "type=keyword").get()); @@ -85,4 +98,51 @@ public void testScroll() { assertEquals(numDocs, total); } + public void testLargeRegExTermsAggregation() { + for (TermsAggregatorFactory.ExecutionMode executionMode : TermsAggregatorFactory.ExecutionMode.values()) { + TermsAggregationBuilder termsAggregation = terms("my_terms").field("f") + .includeExclude(getLargeStringInclude()) + .executionHint(executionMode.toString()); + runLargeStringAggregationTest(termsAggregation); + } + } + + public void testLargeRegExSignificantTermsAggregation() { + for (SignificantTermsAggregatorFactory.ExecutionMode executionMode : SignificantTermsAggregatorFactory.ExecutionMode.values()) { + SignificantTermsAggregationBuilder significantTerms = new SignificantTermsAggregationBuilder("my_terms").field("f") + .includeExclude(getLargeStringInclude()) + .executionHint(executionMode.toString()); + runLargeStringAggregationTest(significantTerms); + } + } + + public void 
testLargeRegExRareTermsAggregation() { + // currently this only supports "map" as an execution hint + RareTermsAggregationBuilder rareTerms = new RareTermsAggregationBuilder("my_terms").field("f") + .includeExclude(getLargeStringInclude()) + .maxDocCount(2); + runLargeStringAggregationTest(rareTerms); + } + + private IncludeExclude getLargeStringInclude() { + return new IncludeExclude(LARGE_STRING, null); + } + + private void runLargeStringAggregationTest(AggregationBuilder aggregation) { + boolean exceptionThrown = false; + IncludeExclude include = new IncludeExclude(LARGE_STRING, null); + try { + client().prepareSearch("index").addAggregation(aggregation).get(); + } catch (SearchPhaseExecutionException ex) { + exceptionThrown = true; + Throwable nestedException = ex.getCause(); + assertNotNull(nestedException); + assertTrue(nestedException instanceof OpenSearchException); + assertNotNull(nestedException.getCause()); + assertTrue(nestedException.getCause() instanceof IllegalArgumentException); + String actualExceptionMessage = nestedException.getCause().getMessage(); + assertTrue(actualExceptionMessage.startsWith(LARGE_STRING_EXCEPTION_MESSAGE)); + } + assertTrue("Exception should have been thrown", exceptionThrown); + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/IncludeExclude.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/IncludeExclude.java index e632a13b95fb7..acb3a6629c734 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/IncludeExclude.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/IncludeExclude.java @@ -48,6 +48,7 @@ import org.apache.lucene.util.automaton.Operations; import org.apache.lucene.util.automaton.RegExp; import org.opensearch.OpenSearchParseException; +import org.opensearch.common.Nullable; import org.opensearch.common.ParseField; import org.opensearch.common.io.stream.StreamInput; import 
org.opensearch.common.io.stream.StreamOutput; @@ -55,6 +56,7 @@ import org.opensearch.common.xcontent.ToXContentFragment; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentParser; +import org.opensearch.index.IndexSettings; import org.opensearch.search.DocValueFormat; import java.io.IOException; @@ -337,19 +339,16 @@ public LongBitSet acceptedGlobalOrdinals(SortedSetDocValues globalOrdinals) thro } - private final RegExp include, exclude; + private final String include, exclude; private final SortedSet includeValues, excludeValues; private final int incZeroBasedPartition; private final int incNumPartitions; /** - * @param include The regular expression pattern for the terms to be included - * @param exclude The regular expression pattern for the terms to be excluded + * @param include The string or regular expression pattern for the terms to be included + * @param exclude The string or regular expression pattern for the terms to be excluded */ - public IncludeExclude(RegExp include, RegExp exclude) { - if (include == null && exclude == null) { - throw new IllegalArgumentException(); - } + public IncludeExclude(String include, String exclude) { this.include = include; this.exclude = exclude; this.includeValues = null; @@ -358,10 +357,6 @@ public IncludeExclude(RegExp include, RegExp exclude) { this.incNumPartitions = 0; } - public IncludeExclude(String include, String exclude) { - this(include == null ? null : new RegExp(include), exclude == null ? null : new RegExp(exclude)); - } - /** * @param includeValues The terms to be included * @param excludeValues The terms to be excluded @@ -412,10 +407,8 @@ public IncludeExclude(StreamInput in) throws IOException { excludeValues = null; incZeroBasedPartition = 0; incNumPartitions = 0; - String includeString = in.readOptionalString(); - include = includeString == null ? 
null : new RegExp(includeString); - String excludeString = in.readOptionalString(); - exclude = excludeString == null ? null : new RegExp(excludeString); + include = in.readOptionalString(); + exclude = in.readOptionalString(); return; } include = null; @@ -447,8 +440,8 @@ public void writeTo(StreamOutput out) throws IOException { boolean regexBased = isRegexBased(); out.writeBoolean(regexBased); if (regexBased) { - out.writeOptionalString(include == null ? null : include.getOriginalString()); - out.writeOptionalString(exclude == null ? null : exclude.getOriginalString()); + out.writeOptionalString(include); + out.writeOptionalString(exclude); } else { boolean hasIncludes = includeValues != null; out.writeBoolean(hasIncludes); @@ -584,26 +577,54 @@ public boolean isPartitionBased() { return incNumPartitions > 0; } - private Automaton toAutomaton() { - Automaton a = null; + private Automaton toAutomaton(@Nullable IndexSettings indexSettings) { + int maxRegexLength = indexSettings == null ? -1 : indexSettings.getMaxRegexLength(); + Automaton a; if (include != null) { - a = include.toAutomaton(); + if (include.length() > maxRegexLength) { + throw new IllegalArgumentException( + "The length of regex [" + + include.length() + + "] used in the request has exceeded " + + "the allowed maximum of [" + + maxRegexLength + + "]. " + + "This maximum can be set by changing the [" + + IndexSettings.MAX_REGEX_LENGTH_SETTING.getKey() + + "] index level setting." + ); + } + a = new RegExp(include).toAutomaton(); } else if (includeValues != null) { a = Automata.makeStringUnion(includeValues); } else { a = Automata.makeAnyString(); } if (exclude != null) { - a = Operations.minus(a, exclude.toAutomaton(), Operations.DEFAULT_DETERMINIZE_WORK_LIMIT); + if (exclude.length() > maxRegexLength) { + throw new IllegalArgumentException( + "The length of regex [" + + exclude.length() + + "] used in the request has exceeded " + + "the allowed maximum of [" + + maxRegexLength + + "]. 
" + + "This maximum can be set by changing the [" + + IndexSettings.MAX_REGEX_LENGTH_SETTING.getKey() + + "] index level setting." + ); + } + Automaton excludeAutomaton = new RegExp(exclude).toAutomaton(); + a = Operations.minus(a, excludeAutomaton, Operations.DEFAULT_DETERMINIZE_WORK_LIMIT); } else if (excludeValues != null) { a = Operations.minus(a, Automata.makeStringUnion(excludeValues), Operations.DEFAULT_DETERMINIZE_WORK_LIMIT); } return a; } - public StringFilter convertToStringFilter(DocValueFormat format) { + public StringFilter convertToStringFilter(DocValueFormat format, IndexSettings indexSettings) { if (isRegexBased()) { - return new AutomatonBackedStringFilter(toAutomaton()); + return new AutomatonBackedStringFilter(toAutomaton(indexSettings)); } if (isPartitionBased()) { return new PartitionedStringFilter(); @@ -624,10 +645,10 @@ private static SortedSet parseForDocValues(SortedSet endUser return result; } - public OrdinalsFilter convertToOrdinalsFilter(DocValueFormat format) { + public OrdinalsFilter convertToOrdinalsFilter(DocValueFormat format, IndexSettings indexSettings) { if (isRegexBased()) { - return new AutomatonBackedOrdinalsFilter(toAutomaton()); + return new AutomatonBackedOrdinalsFilter(toAutomaton(indexSettings)); } if (isPartitionBased()) { return new PartitionedOrdinalsFilter(); @@ -684,7 +705,7 @@ public LongFilter convertToDoubleFilter() { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { if (include != null) { - builder.field(INCLUDE_FIELD.getPreferredName(), include.getOriginalString()); + builder.field(INCLUDE_FIELD.getPreferredName(), include); } else if (includeValues != null) { builder.startArray(INCLUDE_FIELD.getPreferredName()); for (BytesRef value : includeValues) { @@ -698,7 +719,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.endObject(); } if (exclude != null) { - builder.field(EXCLUDE_FIELD.getPreferredName(), 
exclude.getOriginalString()); + builder.field(EXCLUDE_FIELD.getPreferredName(), exclude); } else if (excludeValues != null) { builder.startArray(EXCLUDE_FIELD.getPreferredName()); for (BytesRef value : excludeValues) { @@ -711,14 +732,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public int hashCode() { - return Objects.hash( - include == null ? null : include.getOriginalString(), - exclude == null ? null : exclude.getOriginalString(), - includeValues, - excludeValues, - incZeroBasedPartition, - incNumPartitions - ); + return Objects.hash(include, exclude, includeValues, excludeValues, incZeroBasedPartition, incNumPartitions); } @Override @@ -730,14 +744,8 @@ public boolean equals(Object obj) { return false; } IncludeExclude other = (IncludeExclude) obj; - return Objects.equals( - include == null ? null : include.getOriginalString(), - other.include == null ? null : other.include.getOriginalString() - ) - && Objects.equals( - exclude == null ? null : exclude.getOriginalString(), - other.exclude == null ? 
null : other.exclude.getOriginalString() - ) + return Objects.equals(include, other.include) + && Objects.equals(exclude, other.exclude) && Objects.equals(includeValues, other.includeValues) && Objects.equals(excludeValues, other.excludeValues) && Objects.equals(incZeroBasedPartition, other.incZeroBasedPartition) diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregatorFactory.java index 0e03f87b070e1..c0a5c77a98170 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregatorFactory.java @@ -34,6 +34,7 @@ import org.opensearch.common.ParseField; import org.opensearch.common.logging.DeprecationLogger; +import org.opensearch.index.IndexSettings; import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.Aggregator; @@ -250,7 +251,10 @@ Aggregator create( double precision, CardinalityUpperBound cardinality ) throws IOException { - final IncludeExclude.StringFilter filter = includeExclude == null ? null : includeExclude.convertToStringFilter(format); + IndexSettings indexSettings = context.getQueryShardContext().getIndexSettings(); + final IncludeExclude.StringFilter filter = includeExclude == null + ? 
null + : includeExclude.convertToStringFilter(format, indexSettings); return new StringRareTermsAggregator( name, factories, diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTermsAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTermsAggregatorFactory.java index db6106d3ce9bc..4b93121ae06ef 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTermsAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTermsAggregatorFactory.java @@ -34,6 +34,7 @@ import org.opensearch.common.ParseField; import org.opensearch.common.logging.DeprecationLogger; +import org.opensearch.index.IndexSettings; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.DocValueFormat; @@ -325,8 +326,10 @@ Aggregator create( CardinalityUpperBound cardinality, Map metadata ) throws IOException { - - final IncludeExclude.StringFilter filter = includeExclude == null ? null : includeExclude.convertToStringFilter(format); + IndexSettings indexSettings = aggregationContext.getQueryShardContext().getIndexSettings(); + final IncludeExclude.StringFilter filter = includeExclude == null + ? null + : includeExclude.convertToStringFilter(format, indexSettings); return new MapStringTermsAggregator( name, factories, @@ -364,8 +367,10 @@ Aggregator create( CardinalityUpperBound cardinality, Map metadata ) throws IOException { - - final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? null : includeExclude.convertToOrdinalsFilter(format); + IndexSettings indexSettings = aggregationContext.getQueryShardContext().getIndexSettings(); + final IncludeExclude.OrdinalsFilter filter = includeExclude == null + ? 
null + : includeExclude.convertToOrdinalsFilter(format, indexSettings); boolean remapGlobalOrd = true; if (cardinality == CardinalityUpperBound.ONE && factories == AggregatorFactories.EMPTY && includeExclude == null) { /* diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTextAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTextAggregatorFactory.java index 85b4282e4c55b..992035f1fbe97 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTextAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTextAggregatorFactory.java @@ -44,6 +44,7 @@ import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.BytesRefHash; import org.opensearch.common.util.ObjectArray; +import org.opensearch.index.IndexSettings; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryShardContext; @@ -137,7 +138,10 @@ protected Aggregator createInternal( // TODO - need to check with mapping that this is indeed a text field.... - IncludeExclude.StringFilter incExcFilter = includeExclude == null ? null : includeExclude.convertToStringFilter(DocValueFormat.RAW); + IndexSettings indexSettings = searchContext.getQueryShardContext().getIndexSettings(); + IncludeExclude.StringFilter incExcFilter = includeExclude == null + ? 
null + : includeExclude.convertToStringFilter(DocValueFormat.RAW, indexSettings); MapStringTermsAggregator.CollectorSource collectorSource = new SignificantTextCollectorSource( queryShardContext.lookup().source(), diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java index d2272d0a63042..17b412f87107c 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java @@ -34,6 +34,7 @@ import org.apache.lucene.search.IndexSearcher; import org.opensearch.common.ParseField; +import org.opensearch.index.IndexSettings; import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.AggregationExecutionException; @@ -380,7 +381,10 @@ Aggregator create( CardinalityUpperBound cardinality, Map metadata ) throws IOException { - final IncludeExclude.StringFilter filter = includeExclude == null ? null : includeExclude.convertToStringFilter(format); + IndexSettings indexSettings = context.getQueryShardContext().getIndexSettings(); + final IncludeExclude.StringFilter filter = includeExclude == null + ? null + : includeExclude.convertToStringFilter(format, indexSettings); return new MapStringTermsAggregator( name, factories, @@ -458,7 +462,10 @@ Aggregator create( ); } - final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? null : includeExclude.convertToOrdinalsFilter(format); + IndexSettings indexSettings = context.getQueryShardContext().getIndexSettings(); + final IncludeExclude.OrdinalsFilter filter = includeExclude == null + ? 
null + : includeExclude.convertToOrdinalsFilter(format, indexSettings); boolean remapGlobalOrds; if (cardinality == CardinalityUpperBound.ONE && REMAP_GLOBAL_ORDS != null) { /* diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/RareTermsTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/RareTermsTests.java index 799faecb5ab57..6b8655eccd74d 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/RareTermsTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/RareTermsTests.java @@ -33,7 +33,6 @@ package org.opensearch.search.aggregations.bucket; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.automaton.RegExp; import org.opensearch.search.aggregations.BaseAggregationTestCase; import org.opensearch.search.aggregations.bucket.terms.IncludeExclude; import org.opensearch.search.aggregations.bucket.terms.RareTermsAggregationBuilder; @@ -59,13 +58,13 @@ protected RareTermsAggregationBuilder createTestAggregatorBuilder() { IncludeExclude incExc = null; switch (randomInt(6)) { case 0: - incExc = new IncludeExclude(new RegExp("foobar"), null); + incExc = new IncludeExclude("foobar", null); break; case 1: - incExc = new IncludeExclude(null, new RegExp("foobaz")); + incExc = new IncludeExclude(null, "foobaz"); break; case 2: - incExc = new IncludeExclude(new RegExp("foobar"), new RegExp("foobaz")); + incExc = new IncludeExclude("foobar", "foobaz"); break; case 3: SortedSet includeValues = new TreeSet<>(); diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/SignificantTermsTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/SignificantTermsTests.java index 3001f8ede7f4d..6312d6c175866 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/SignificantTermsTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/SignificantTermsTests.java @@ -33,7 +33,6 @@ package 
org.opensearch.search.aggregations.bucket; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.automaton.RegExp; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.aggregations.BaseAggregationTestCase; import org.opensearch.search.aggregations.bucket.terms.IncludeExclude; @@ -160,13 +159,13 @@ static IncludeExclude getIncludeExclude() { IncludeExclude incExc = null; switch (randomInt(5)) { case 0: - incExc = new IncludeExclude(new RegExp("foobar"), null); + incExc = new IncludeExclude("foobar", null); break; case 1: - incExc = new IncludeExclude(null, new RegExp("foobaz")); + incExc = new IncludeExclude(null, "foobaz"); break; case 2: - incExc = new IncludeExclude(new RegExp("foobar"), new RegExp("foobaz")); + incExc = new IncludeExclude("foobar", "foobaz"); break; case 3: SortedSet includeValues = new TreeSet<>(); diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/TermsTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/TermsTests.java index eb4f33c6f8e19..04e7fad2105ec 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/TermsTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/TermsTests.java @@ -33,7 +33,6 @@ package org.opensearch.search.aggregations.bucket; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.automaton.RegExp; import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.opensearch.search.aggregations.BaseAggregationTestCase; import org.opensearch.search.aggregations.BucketOrder; @@ -118,13 +117,13 @@ protected TermsAggregationBuilder createTestAggregatorBuilder() { IncludeExclude incExc = null; switch (randomInt(6)) { case 0: - incExc = new IncludeExclude(new RegExp("foobar"), null); + incExc = new IncludeExclude("foobar", null); break; case 1: - incExc = new IncludeExclude(null, new RegExp("foobaz")); + incExc = new IncludeExclude(null, "foobaz"); break; 
case 2: - incExc = new IncludeExclude(new RegExp("foobar"), new RegExp("foobaz")); + incExc = new IncludeExclude("foobar", "foobaz"); break; case 3: SortedSet includeValues = new TreeSet<>(); diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/BinaryTermsAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/BinaryTermsAggregatorTests.java index 7703afa88d93c..34cc29d40a9fd 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/BinaryTermsAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/BinaryTermsAggregatorTests.java @@ -41,7 +41,6 @@ import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.automaton.RegExp; import org.opensearch.common.Numbers; import org.opensearch.index.mapper.BinaryFieldMapper; import org.opensearch.index.mapper.MappedFieldType; @@ -97,7 +96,7 @@ public void testMatchAllDocs() throws IOException { } public void testBadIncludeExclude() throws IOException { - IncludeExclude includeExclude = new IncludeExclude(new RegExp("foo"), null); + IncludeExclude includeExclude = new IncludeExclude("foo", null); // Make sure the include/exclude fails regardless of how the user tries to type hint the agg AggregationExecutionException e = expectThrows( diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/NumericTermsAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/NumericTermsAggregatorTests.java index 13e41d5a2e543..846f71b12dab0 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/NumericTermsAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/NumericTermsAggregatorTests.java @@ -42,7 +42,6 @@ import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; 
import org.apache.lucene.store.Directory; -import org.apache.lucene.util.automaton.RegExp; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.NumberFieldMapper; import org.opensearch.search.aggregations.AggregationExecutionException; @@ -116,7 +115,7 @@ public void testMatchAllDocs() throws IOException { } public void testBadIncludeExclude() throws IOException { - IncludeExclude includeExclude = new IncludeExclude(new RegExp("foo"), null); + IncludeExclude includeExclude = new IncludeExclude("foo", null); // Numerics don't support any regex include/exclude, so should fail no matter what we do diff --git a/server/src/test/java/org/opensearch/search/aggregations/support/IncludeExcludeTests.java b/server/src/test/java/org/opensearch/search/aggregations/support/IncludeExcludeTests.java index d84812557ab18..9ebca90d84cab 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/support/IncludeExcludeTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/support/IncludeExcludeTests.java @@ -55,12 +55,12 @@ public class IncludeExcludeTests extends OpenSearchTestCase { public void testEmptyTermsWithOrds() throws IOException { IncludeExclude inexcl = new IncludeExclude(new TreeSet<>(Collections.singleton(new BytesRef("foo"))), null); - OrdinalsFilter filter = inexcl.convertToOrdinalsFilter(DocValueFormat.RAW); + OrdinalsFilter filter = inexcl.convertToOrdinalsFilter(DocValueFormat.RAW, null); LongBitSet acceptedOrds = filter.acceptedGlobalOrdinals(DocValues.emptySortedSet()); assertEquals(0, acceptedOrds.length()); inexcl = new IncludeExclude(null, new TreeSet<>(Collections.singleton(new BytesRef("foo")))); - filter = inexcl.convertToOrdinalsFilter(DocValueFormat.RAW); + filter = inexcl.convertToOrdinalsFilter(DocValueFormat.RAW, null); acceptedOrds = filter.acceptedGlobalOrdinals(DocValues.emptySortedSet()); assertEquals(0, acceptedOrds.length()); } @@ -99,13 +99,13 @@ public long getValueCount() { }; 
IncludeExclude inexcl = new IncludeExclude(new TreeSet<>(Collections.singleton(new BytesRef("foo"))), null); - OrdinalsFilter filter = inexcl.convertToOrdinalsFilter(DocValueFormat.RAW); + OrdinalsFilter filter = inexcl.convertToOrdinalsFilter(DocValueFormat.RAW, null); LongBitSet acceptedOrds = filter.acceptedGlobalOrdinals(ords); assertEquals(1, acceptedOrds.length()); assertTrue(acceptedOrds.get(0)); inexcl = new IncludeExclude(new TreeSet<>(Collections.singleton(new BytesRef("bar"))), null); - filter = inexcl.convertToOrdinalsFilter(DocValueFormat.RAW); + filter = inexcl.convertToOrdinalsFilter(DocValueFormat.RAW, null); acceptedOrds = filter.acceptedGlobalOrdinals(ords); assertEquals(1, acceptedOrds.length()); assertFalse(acceptedOrds.get(0)); @@ -114,7 +114,7 @@ public long getValueCount() { new TreeSet<>(Collections.singleton(new BytesRef("foo"))), new TreeSet<>(Collections.singleton(new BytesRef("foo"))) ); - filter = inexcl.convertToOrdinalsFilter(DocValueFormat.RAW); + filter = inexcl.convertToOrdinalsFilter(DocValueFormat.RAW, null); acceptedOrds = filter.acceptedGlobalOrdinals(ords); assertEquals(1, acceptedOrds.length()); assertFalse(acceptedOrds.get(0)); @@ -123,7 +123,7 @@ public long getValueCount() { null, // means everything included new TreeSet<>(Collections.singleton(new BytesRef("foo"))) ); - filter = inexcl.convertToOrdinalsFilter(DocValueFormat.RAW); + filter = inexcl.convertToOrdinalsFilter(DocValueFormat.RAW, null); acceptedOrds = filter.acceptedGlobalOrdinals(ords); assertEquals(1, acceptedOrds.length()); assertFalse(acceptedOrds.get(0)); From 249155772b65681ba8e91d2356e3430f2236b6be Mon Sep 17 00:00:00 2001 From: Tianli Feng Date: Thu, 7 Apr 2022 15:31:31 -0700 Subject: [PATCH 058/653] Allow deprecation warning for API call GET _cat/master in ExceptionIT of mixed cluster BWC test (#2767) Signed-off-by: Tianli Feng --- .../test/java/org/opensearch/backwards/ExceptionIT.java | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff 
--git a/qa/mixed-cluster/src/test/java/org/opensearch/backwards/ExceptionIT.java b/qa/mixed-cluster/src/test/java/org/opensearch/backwards/ExceptionIT.java index e0246870181c0..f85a94cc9f556 100644 --- a/qa/mixed-cluster/src/test/java/org/opensearch/backwards/ExceptionIT.java +++ b/qa/mixed-cluster/src/test/java/org/opensearch/backwards/ExceptionIT.java @@ -47,8 +47,13 @@ public void testOpensearchException() throws Exception { private void logClusterNodes() throws IOException { ObjectPath objectPath = ObjectPath.createFromResponse(client().performRequest(new Request("GET", "_nodes"))); Map nodes = objectPath.evaluate("nodes"); - String master = EntityUtils.toString(client().performRequest(new Request("GET", "_cat/master?h=id")).getEntity()).trim(); - logger.info("cluster discovered: master id='{}'", master); + // As of 2.0, 'GET _cat/master' API is deprecated to promote inclusive language. + // Allow the deprecation warning for the node running an older version. + // TODO: Replace the API with 'GET _cat/cluster_manager' when dropping compatibility with 1.x versions. + Request catRequest = new Request("GET", "_cat/master?h=id"); + catRequest.setOptions(expectWarningsOnce("[GET /_cat/master] is deprecated! Use [GET /_cat/cluster_manager] instead.")); + String clusterManager = EntityUtils.toString(client().performRequest(catRequest).getEntity()).trim(); + logger.info("cluster discovered: cluster-manager id='{}'", clusterManager); for (String id : nodes.keySet()) { logger.info("{}: id='{}', name='{}', version={}", objectPath.evaluate("nodes." 
+ id + ".http.publish_address"), From 47a22bb08d09a1eada4cc4349a35f3eed45e6336 Mon Sep 17 00:00:00 2001 From: Tianli Feng Date: Thu, 7 Apr 2022 15:45:20 -0700 Subject: [PATCH 059/653] Replace remaining 'blacklist' with 'denylist' in internal class and method names (#2784) * Replace blacklist with denylist in BlacklistedPathPatternMatcher Signed-off-by: Tianli Feng * Replace blacklist with denylist in assumption message Signed-off-by: Tianli Feng * Replace all Blacklisted with Denylisted Signed-off-by: Tianli Feng * Replace all blacklist(key) with denylist(key) Signed-off-by: Tianli Feng * Adjust format by spotlessApply task Signed-off-by: Tianli Feng --- .../opensearch/common/inject/BindingProcessor.java | 2 +- .../opensearch/common/inject/InheritingState.java | 8 ++++---- .../org/opensearch/common/inject/InjectorImpl.java | 8 ++++---- .../java/org/opensearch/common/inject/State.java | 12 ++++++------ ...tcher.java => DenylistedPathPatternMatcher.java} | 4 ++-- .../yaml/OpenSearchClientYamlSuiteTestCase.java | 13 +++++-------- ....java => DenylistedPathPatternMatcherTests.java} | 6 +++--- 7 files changed, 25 insertions(+), 28 deletions(-) rename test/framework/src/main/java/org/opensearch/test/rest/yaml/{BlacklistedPathPatternMatcher.java => DenylistedPathPatternMatcher.java} (97%) rename test/framework/src/test/java/org/opensearch/test/rest/yaml/{BlacklistedPathPatternMatcherTests.java => DenylistedPathPatternMatcherTests.java} (93%) diff --git a/server/src/main/java/org/opensearch/common/inject/BindingProcessor.java b/server/src/main/java/org/opensearch/common/inject/BindingProcessor.java index 671123f2df767..2635ead8d7f51 100644 --- a/server/src/main/java/org/opensearch/common/inject/BindingProcessor.java +++ b/server/src/main/java/org/opensearch/common/inject/BindingProcessor.java @@ -274,7 +274,7 @@ private void putBinding(BindingImpl binding) { } // prevent the parent from creating a JIT binding for this key - injector.state.parent().blacklist(key); + 
injector.state.parent().denylist(key); injector.state.putBinding(key, binding); } diff --git a/server/src/main/java/org/opensearch/common/inject/InheritingState.java b/server/src/main/java/org/opensearch/common/inject/InheritingState.java index 70a2fb335cca5..a25017ab9a2c0 100644 --- a/server/src/main/java/org/opensearch/common/inject/InheritingState.java +++ b/server/src/main/java/org/opensearch/common/inject/InheritingState.java @@ -143,18 +143,18 @@ public List getTypeListenerBindings() { } @Override - public void blacklist(Key key) { - parent.blacklist(key); + public void denylist(Key key) { + parent.denylist(key); denylistedKeys.add(key); } @Override - public boolean isBlacklisted(Key key) { + public boolean isDenylisted(Key key) { return denylistedKeys.contains(key); } @Override - public void clearBlacklisted() { + public void clearDenylisted() { denylistedKeys = new WeakKeySet(); } diff --git a/server/src/main/java/org/opensearch/common/inject/InjectorImpl.java b/server/src/main/java/org/opensearch/common/inject/InjectorImpl.java index 3c888cd92b226..439ce8fbae33b 100644 --- a/server/src/main/java/org/opensearch/common/inject/InjectorImpl.java +++ b/server/src/main/java/org/opensearch/common/inject/InjectorImpl.java @@ -530,12 +530,12 @@ public T get(Errors errors, InternalContext context, Dependency dependency) t * other ancestor injectors until this injector is tried. */ private BindingImpl createJustInTimeBindingRecursive(Key key, Errors errors) throws ErrorsException { - if (state.isBlacklisted(key)) { + if (state.isDenylisted(key)) { throw errors.childBindingAlreadySet(key).toException(); } BindingImpl binding = createJustInTimeBinding(key, errors); - state.parent().blacklist(key); + state.parent().denylist(key); jitBindings.put(key, binding); return binding; } @@ -555,7 +555,7 @@ private BindingImpl createJustInTimeBindingRecursive(Key key, Errors e * if the binding cannot be created. 
*/ BindingImpl createJustInTimeBinding(Key key, Errors errors) throws ErrorsException { - if (state.isBlacklisted(key)) { + if (state.isDenylisted(key)) { throw errors.childBindingAlreadySet(key).toException(); } @@ -805,7 +805,7 @@ public String toString() { // ES_GUICE: clear caches public void clearCache() { - state.clearBlacklisted(); + state.clearDenylisted(); constructors = new ConstructorInjectorStore(this); membersInjectorStore = new MembersInjectorStore(this, state.getTypeListenerBindings()); jitBindings = new HashMap<>(); diff --git a/server/src/main/java/org/opensearch/common/inject/State.java b/server/src/main/java/org/opensearch/common/inject/State.java index 6a69e9547d707..560824c065793 100644 --- a/server/src/main/java/org/opensearch/common/inject/State.java +++ b/server/src/main/java/org/opensearch/common/inject/State.java @@ -106,15 +106,15 @@ public List getTypeListenerBindings() { } @Override - public void blacklist(Key key) {} + public void denylist(Key key) {} @Override - public boolean isBlacklisted(Key key) { + public boolean isDenylisted(Key key) { return true; } @Override - public void clearBlacklisted() {} + public void clearDenylisted() {} @Override public void makeAllBindingsToEagerSingletons(Injector injector) {} @@ -167,13 +167,13 @@ public Object lock() { * denylist their bound keys on their parent injectors to prevent just-in-time bindings on the * parent injector that would conflict. */ - void blacklist(Key key); + void denylist(Key key); /** * Returns true if {@code key} is forbidden from being bound in this injector. This indicates that * one of this injector's descendent's has bound the key. */ - boolean isBlacklisted(Key key); + boolean isDenylisted(Key key); /** * Returns the shared lock for all injector data. 
This is a low-granularity, high-contention lock @@ -182,7 +182,7 @@ public Object lock() { Object lock(); // ES_GUICE: clean denylist keys - void clearBlacklisted(); + void clearDenylisted(); void makeAllBindingsToEagerSingletons(Injector injector); } diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/BlacklistedPathPatternMatcher.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/DenylistedPathPatternMatcher.java similarity index 97% rename from test/framework/src/main/java/org/opensearch/test/rest/yaml/BlacklistedPathPatternMatcher.java rename to test/framework/src/main/java/org/opensearch/test/rest/yaml/DenylistedPathPatternMatcher.java index 15510e368b1f5..eeaa76b6ca1b3 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/BlacklistedPathPatternMatcher.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/DenylistedPathPatternMatcher.java @@ -47,7 +47,7 @@ * * Each denylist pattern is a suffix match on the path. Empty patterns are not allowed. */ -final class BlacklistedPathPatternMatcher { +final class DenylistedPathPatternMatcher { private final Pattern pattern; /** @@ -55,7 +55,7 @@ final class BlacklistedPathPatternMatcher { * * @param p The suffix pattern. Must be a non-empty string. 
*/ - BlacklistedPathPatternMatcher(String p) { + DenylistedPathPatternMatcher(String p) { // guard against accidentally matching everything as an empty string lead to the pattern ".*" which matches everything if (p == null || p.trim().isEmpty()) { throw new IllegalArgumentException("Empty denylist patterns are not supported"); diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/OpenSearchClientYamlSuiteTestCase.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/OpenSearchClientYamlSuiteTestCase.java index 70e3adbefbfc3..1b19f03f46174 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/OpenSearchClientYamlSuiteTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/OpenSearchClientYamlSuiteTestCase.java @@ -116,7 +116,7 @@ public abstract class OpenSearchClientYamlSuiteTestCase extends OpenSearchRestTe */ private static final String PATHS_SEPARATOR = "(? denylistPathMatchers; + private static List denylistPathMatchers; private static ClientYamlTestExecutionContext restTestExecutionContext; private static ClientYamlTestExecutionContext adminExecutionContext; private static ClientYamlTestClient clientYamlTestClient; @@ -157,11 +157,11 @@ public void initAndResetContext() throws Exception { final String[] denylist = resolvePathsProperty(REST_TESTS_DENYLIST, null); denylistPathMatchers = new ArrayList<>(); for (final String entry : denylist) { - denylistPathMatchers.add(new BlacklistedPathPatternMatcher(entry)); + denylistPathMatchers.add(new DenylistedPathPatternMatcher(entry)); } final String[] denylistAdditions = resolvePathsProperty(REST_TESTS_DENYLIST_ADDITIONS, null); for (final String entry : denylistAdditions) { - denylistPathMatchers.add(new BlacklistedPathPatternMatcher(entry)); + denylistPathMatchers.add(new DenylistedPathPatternMatcher(entry)); } } assert restTestExecutionContext != null; @@ -368,12 +368,9 @@ protected RequestOptions getCatNodesVersionMasterRequestOptions() { 
public void test() throws IOException { // skip test if it matches one of the denylist globs - for (BlacklistedPathPatternMatcher denylistedPathMatcher : denylistPathMatchers) { + for (DenylistedPathPatternMatcher denylistedPathMatcher : denylistPathMatchers) { String testPath = testCandidate.getSuitePath() + "/" + testCandidate.getTestSection().getName(); - assumeFalse( - "[" + testCandidate.getTestPath() + "] skipped, reason: blacklisted", - denylistedPathMatcher.isSuffixMatch(testPath) - ); + assumeFalse("[" + testCandidate.getTestPath() + "] skipped, reason: denylisted", denylistedPathMatcher.isSuffixMatch(testPath)); } // skip test if the whole suite (yaml file) is disabled diff --git a/test/framework/src/test/java/org/opensearch/test/rest/yaml/BlacklistedPathPatternMatcherTests.java b/test/framework/src/test/java/org/opensearch/test/rest/yaml/DenylistedPathPatternMatcherTests.java similarity index 93% rename from test/framework/src/test/java/org/opensearch/test/rest/yaml/BlacklistedPathPatternMatcherTests.java rename to test/framework/src/test/java/org/opensearch/test/rest/yaml/DenylistedPathPatternMatcherTests.java index 05cdec242e565..3d62f399fe271 100644 --- a/test/framework/src/test/java/org/opensearch/test/rest/yaml/BlacklistedPathPatternMatcherTests.java +++ b/test/framework/src/test/java/org/opensearch/test/rest/yaml/DenylistedPathPatternMatcherTests.java @@ -33,7 +33,7 @@ import org.opensearch.test.OpenSearchTestCase; -public class BlacklistedPathPatternMatcherTests extends OpenSearchTestCase { +public class DenylistedPathPatternMatcherTests extends OpenSearchTestCase { public void testMatchesExact() { // suffix match @@ -71,12 +71,12 @@ public void testMatchesMixedPatterns() { } private void assertMatch(String pattern, String path) { - BlacklistedPathPatternMatcher matcher = new BlacklistedPathPatternMatcher(pattern); + DenylistedPathPatternMatcher matcher = new DenylistedPathPatternMatcher(pattern); assertTrue("Pattern [" + pattern + "] should have 
matched path [" + path + "]", matcher.isSuffixMatch(path)); } private void assertNoMatch(String pattern, String path) { - BlacklistedPathPatternMatcher matcher = new BlacklistedPathPatternMatcher(pattern); + DenylistedPathPatternMatcher matcher = new DenylistedPathPatternMatcher(pattern); assertFalse("Pattern [" + pattern + "] should not have matched path [" + path + "]", matcher.isSuffixMatch(path)); } } From 2d89bc7c61021b81dc884118300f64772a319276 Mon Sep 17 00:00:00 2001 From: Kartik Date: Thu, 7 Apr 2022 18:12:33 -0700 Subject: [PATCH 060/653] Updates to the large string reg-ex check (#2814) * Updates to the large string reg-ex check Removed the null-case for IndexSettings since this only occurs in tests. The tests now use a dummy Index Setting. This change also fixes a bug with the base case handling of max regex length in the check. Signed-off-by: Kartik Ganesh --- .../bucket/terms/IncludeExclude.java | 49 ++++++++----------- .../support/IncludeExcludeTests.java | 26 +++++++--- 2 files changed, 40 insertions(+), 35 deletions(-) diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/IncludeExclude.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/IncludeExclude.java index acb3a6629c734..71320909ca5d2 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/IncludeExclude.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/IncludeExclude.java @@ -48,7 +48,6 @@ import org.apache.lucene.util.automaton.Operations; import org.apache.lucene.util.automaton.RegExp; import org.opensearch.OpenSearchParseException; -import org.opensearch.common.Nullable; import org.opensearch.common.ParseField; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -577,23 +576,10 @@ public boolean isPartitionBased() { return incNumPartitions > 0; } - private Automaton toAutomaton(@Nullable IndexSettings indexSettings) { - int 
maxRegexLength = indexSettings == null ? -1 : indexSettings.getMaxRegexLength(); + private Automaton toAutomaton(IndexSettings indexSettings) { Automaton a; if (include != null) { - if (include.length() > maxRegexLength) { - throw new IllegalArgumentException( - "The length of regex [" - + include.length() - + "] used in the request has exceeded " - + "the allowed maximum of [" - + maxRegexLength - + "]. " - + "This maximum can be set by changing the [" - + IndexSettings.MAX_REGEX_LENGTH_SETTING.getKey() - + "] index level setting." - ); - } + validateRegExpStringLength(include, indexSettings); a = new RegExp(include).toAutomaton(); } else if (includeValues != null) { a = Automata.makeStringUnion(includeValues); @@ -601,19 +587,7 @@ private Automaton toAutomaton(@Nullable IndexSettings indexSettings) { a = Automata.makeAnyString(); } if (exclude != null) { - if (exclude.length() > maxRegexLength) { - throw new IllegalArgumentException( - "The length of regex [" - + exclude.length() - + "] used in the request has exceeded " - + "the allowed maximum of [" - + maxRegexLength - + "]. " - + "This maximum can be set by changing the [" - + IndexSettings.MAX_REGEX_LENGTH_SETTING.getKey() - + "] index level setting." - ); - } + validateRegExpStringLength(exclude, indexSettings); Automaton excludeAutomaton = new RegExp(exclude).toAutomaton(); a = Operations.minus(a, excludeAutomaton, Operations.DEFAULT_DETERMINIZE_WORK_LIMIT); } else if (excludeValues != null) { @@ -622,6 +596,23 @@ private Automaton toAutomaton(@Nullable IndexSettings indexSettings) { return a; } + private static void validateRegExpStringLength(String source, IndexSettings indexSettings) { + int maxRegexLength = indexSettings.getMaxRegexLength(); + if (maxRegexLength > 0 && source.length() > maxRegexLength) { + throw new IllegalArgumentException( + "The length of regex [" + + source.length() + + "] used in the request has exceeded " + + "the allowed maximum of [" + + maxRegexLength + + "]. 
" + + "This maximum can be set by changing the [" + + IndexSettings.MAX_REGEX_LENGTH_SETTING.getKey() + + "] index level setting." + ); + } + } + public StringFilter convertToStringFilter(DocValueFormat format, IndexSettings indexSettings) { if (isRegexBased()) { return new AutomatonBackedStringFilter(toAutomaton(indexSettings)); diff --git a/server/src/test/java/org/opensearch/search/aggregations/support/IncludeExcludeTests.java b/server/src/test/java/org/opensearch/search/aggregations/support/IncludeExcludeTests.java index 9ebca90d84cab..d104fc6783dc5 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/support/IncludeExcludeTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/support/IncludeExcludeTests.java @@ -36,12 +36,16 @@ import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LongBitSet; +import org.opensearch.Version; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.ParseField; +import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.ToXContent; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentParser; import org.opensearch.common.xcontent.XContentType; +import org.opensearch.index.IndexSettings; import org.opensearch.index.fielddata.AbstractSortedSetDocValues; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.bucket.terms.IncludeExclude; @@ -53,14 +57,24 @@ import java.util.TreeSet; public class IncludeExcludeTests extends OpenSearchTestCase { + + private final IndexSettings dummyIndexSettings = new IndexSettings( + IndexMetadata.builder("index") + .settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)) + .numberOfShards(1) + .numberOfReplicas(0) + .build(), + Settings.EMPTY + ); + public void 
testEmptyTermsWithOrds() throws IOException { IncludeExclude inexcl = new IncludeExclude(new TreeSet<>(Collections.singleton(new BytesRef("foo"))), null); - OrdinalsFilter filter = inexcl.convertToOrdinalsFilter(DocValueFormat.RAW, null); + OrdinalsFilter filter = inexcl.convertToOrdinalsFilter(DocValueFormat.RAW, dummyIndexSettings); LongBitSet acceptedOrds = filter.acceptedGlobalOrdinals(DocValues.emptySortedSet()); assertEquals(0, acceptedOrds.length()); inexcl = new IncludeExclude(null, new TreeSet<>(Collections.singleton(new BytesRef("foo")))); - filter = inexcl.convertToOrdinalsFilter(DocValueFormat.RAW, null); + filter = inexcl.convertToOrdinalsFilter(DocValueFormat.RAW, dummyIndexSettings); acceptedOrds = filter.acceptedGlobalOrdinals(DocValues.emptySortedSet()); assertEquals(0, acceptedOrds.length()); } @@ -99,13 +113,13 @@ public long getValueCount() { }; IncludeExclude inexcl = new IncludeExclude(new TreeSet<>(Collections.singleton(new BytesRef("foo"))), null); - OrdinalsFilter filter = inexcl.convertToOrdinalsFilter(DocValueFormat.RAW, null); + OrdinalsFilter filter = inexcl.convertToOrdinalsFilter(DocValueFormat.RAW, dummyIndexSettings); LongBitSet acceptedOrds = filter.acceptedGlobalOrdinals(ords); assertEquals(1, acceptedOrds.length()); assertTrue(acceptedOrds.get(0)); inexcl = new IncludeExclude(new TreeSet<>(Collections.singleton(new BytesRef("bar"))), null); - filter = inexcl.convertToOrdinalsFilter(DocValueFormat.RAW, null); + filter = inexcl.convertToOrdinalsFilter(DocValueFormat.RAW, dummyIndexSettings); acceptedOrds = filter.acceptedGlobalOrdinals(ords); assertEquals(1, acceptedOrds.length()); assertFalse(acceptedOrds.get(0)); @@ -114,7 +128,7 @@ public long getValueCount() { new TreeSet<>(Collections.singleton(new BytesRef("foo"))), new TreeSet<>(Collections.singleton(new BytesRef("foo"))) ); - filter = inexcl.convertToOrdinalsFilter(DocValueFormat.RAW, null); + filter = inexcl.convertToOrdinalsFilter(DocValueFormat.RAW, dummyIndexSettings); 
acceptedOrds = filter.acceptedGlobalOrdinals(ords); assertEquals(1, acceptedOrds.length()); assertFalse(acceptedOrds.get(0)); @@ -123,7 +137,7 @@ public long getValueCount() { null, // means everything included new TreeSet<>(Collections.singleton(new BytesRef("foo"))) ); - filter = inexcl.convertToOrdinalsFilter(DocValueFormat.RAW, null); + filter = inexcl.convertToOrdinalsFilter(DocValueFormat.RAW, dummyIndexSettings); acceptedOrds = filter.acceptedGlobalOrdinals(ords); assertEquals(1, acceptedOrds.length()); assertFalse(acceptedOrds.get(0)); From b5d5616d44a08c609fb96c7467732480d7333285 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yoann=20Rodi=C3=A8re?= Date: Fri, 8 Apr 2022 22:43:51 +0200 Subject: [PATCH 061/653] Update commons-logging to 1.2 (#2806) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Upgrade to Apache Commons Logging 1.2 Signed-off-by: Yoann Rodière * Clarify that Apache HTTP/commons-* dependencies are not just for tests Signed-off-by: Yoann Rodière --- buildSrc/version.properties | 12 +++++++----- client/rest/build.gradle | 1 - client/rest/licenses/commons-logging-1.1.3.jar.sha1 | 1 - client/rest/licenses/commons-logging-1.2.jar.sha1 | 1 + client/sniffer/build.gradle | 1 - .../sniffer/licenses/commons-logging-1.1.3.jar.sha1 | 1 - client/sniffer/licenses/commons-logging-1.2.jar.sha1 | 1 + .../licenses/commons-logging-1.2.jar.sha1 | 1 + .../licenses/commons-logging-1.1.3.jar.sha1 | 1 - .../licenses/commons-logging-1.2.jar.sha1 | 1 + .../licenses/commons-logging-1.1.3.jar.sha1 | 1 - .../licenses/commons-logging-1.2.jar.sha1 | 1 + .../licenses/commons-logging-1.1.3.jar.sha1 | 1 - .../licenses/commons-logging-1.2.jar.sha1 | 1 + .../licenses/commons-logging-1.1.3.jar.sha1 | 1 - .../licenses/commons-logging-1.2.jar.sha1 | 1 + .../licenses/commons-logging-1.1.3.jar.sha1 | 1 - .../licenses/commons-logging-1.2.jar.sha1 | 1 + .../licenses/commons-logging-1.1.3.jar.sha1 | 1 - .../licenses/commons-logging-1.2.jar.sha1 | 
1 + .../licenses/commons-logging-1.1.3.jar.sha1 | 1 - .../licenses/commons-logging-1.2.jar.sha1 | 1 + test/framework/build.gradle | 1 - .../core/licenses/commons-logging-1.2.jar.sha1 | 1 + 24 files changed, 18 insertions(+), 17 deletions(-) delete mode 100644 client/rest/licenses/commons-logging-1.1.3.jar.sha1 create mode 100644 client/rest/licenses/commons-logging-1.2.jar.sha1 delete mode 100644 client/sniffer/licenses/commons-logging-1.1.3.jar.sha1 create mode 100644 client/sniffer/licenses/commons-logging-1.2.jar.sha1 create mode 100644 modules/repository-s3/licenses/commons-logging-1.2.jar.sha1 delete mode 100644 plugins/discovery-azure-classic/licenses/commons-logging-1.1.3.jar.sha1 create mode 100644 plugins/discovery-azure-classic/licenses/commons-logging-1.2.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/commons-logging-1.1.3.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/commons-logging-1.2.jar.sha1 delete mode 100644 plugins/discovery-gce/licenses/commons-logging-1.1.3.jar.sha1 create mode 100644 plugins/discovery-gce/licenses/commons-logging-1.2.jar.sha1 delete mode 100644 plugins/ingest-attachment/licenses/commons-logging-1.1.3.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/commons-logging-1.2.jar.sha1 delete mode 100644 plugins/repository-gcs/licenses/commons-logging-1.1.3.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/commons-logging-1.2.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/commons-logging-1.1.3.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/commons-logging-1.2.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/commons-logging-1.1.3.jar.sha1 create mode 100644 plugins/repository-s3/licenses/commons-logging-1.2.jar.sha1 create mode 100644 x-pack/plugin/core/licenses/commons-logging-1.2.jar.sha1 diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 84ee06cafba2d..7ae3bfaa19b5a 100644 --- a/buildSrc/version.properties +++ 
b/buildSrc/version.properties @@ -23,6 +23,13 @@ jna = 5.5.0 netty = 4.1.73.Final joda = 2.10.12 +# client dependencies +httpclient = 4.5.13 +httpcore = 4.4.12 +httpasyncclient = 4.1.4 +commonslogging = 1.2 +commonscodec = 1.13 + # when updating this version, you need to ensure compatibility with: # - plugins/ingest-attachment (transitive dependency, check the upstream POM) # - distribution/tools/plugin-cli @@ -30,11 +37,6 @@ bouncycastle=1.70 # test dependencies randomizedrunner = 2.7.1 junit = 4.13.2 -httpclient = 4.5.13 -httpcore = 4.4.12 -httpasyncclient = 4.1.4 -commonslogging = 1.1.3 -commonscodec = 1.13 hamcrest = 2.1 mockito = 4.3.1 objenesis = 3.2 diff --git a/client/rest/build.gradle b/client/rest/build.gradle index 5c1252061443a..01c186ed83fc2 100644 --- a/client/rest/build.gradle +++ b/client/rest/build.gradle @@ -89,7 +89,6 @@ thirdPartyAudit.ignoreMissingClasses( 'org.apache.avalon.framework.logger.Logger', 'org.apache.log.Hierarchy', 'org.apache.log.Logger', - 'org.apache.log4j.Category', 'org.apache.log4j.Level', 'org.apache.log4j.Logger', 'org.apache.log4j.Priority', diff --git a/client/rest/licenses/commons-logging-1.1.3.jar.sha1 b/client/rest/licenses/commons-logging-1.1.3.jar.sha1 deleted file mode 100644 index 5b8f029e58293..0000000000000 --- a/client/rest/licenses/commons-logging-1.1.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f6f66e966c70a83ffbdb6f17a0919eaf7c8aca7f \ No newline at end of file diff --git a/client/rest/licenses/commons-logging-1.2.jar.sha1 b/client/rest/licenses/commons-logging-1.2.jar.sha1 new file mode 100644 index 0000000000000..f40f0242448e8 --- /dev/null +++ b/client/rest/licenses/commons-logging-1.2.jar.sha1 @@ -0,0 +1 @@ +4bfc12adfe4842bf07b657f0369c4cb522955686 \ No newline at end of file diff --git a/client/sniffer/build.gradle b/client/sniffer/build.gradle index bc4be1dd153e8..b7cb0d87c02d9 100644 --- a/client/sniffer/build.gradle +++ b/client/sniffer/build.gradle @@ -88,7 +88,6 @@ thirdPartyAudit.ignoreMissingClasses( 
'org.apache.avalon.framework.logger.Logger', 'org.apache.log.Hierarchy', 'org.apache.log.Logger', - 'org.apache.log4j.Category', 'org.apache.log4j.Level', 'org.apache.log4j.Logger', 'org.apache.log4j.Priority', diff --git a/client/sniffer/licenses/commons-logging-1.1.3.jar.sha1 b/client/sniffer/licenses/commons-logging-1.1.3.jar.sha1 deleted file mode 100644 index 5b8f029e58293..0000000000000 --- a/client/sniffer/licenses/commons-logging-1.1.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f6f66e966c70a83ffbdb6f17a0919eaf7c8aca7f \ No newline at end of file diff --git a/client/sniffer/licenses/commons-logging-1.2.jar.sha1 b/client/sniffer/licenses/commons-logging-1.2.jar.sha1 new file mode 100644 index 0000000000000..f40f0242448e8 --- /dev/null +++ b/client/sniffer/licenses/commons-logging-1.2.jar.sha1 @@ -0,0 +1 @@ +4bfc12adfe4842bf07b657f0369c4cb522955686 \ No newline at end of file diff --git a/modules/repository-s3/licenses/commons-logging-1.2.jar.sha1 b/modules/repository-s3/licenses/commons-logging-1.2.jar.sha1 new file mode 100644 index 0000000000000..f40f0242448e8 --- /dev/null +++ b/modules/repository-s3/licenses/commons-logging-1.2.jar.sha1 @@ -0,0 +1 @@ +4bfc12adfe4842bf07b657f0369c4cb522955686 \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/commons-logging-1.1.3.jar.sha1 b/plugins/discovery-azure-classic/licenses/commons-logging-1.1.3.jar.sha1 deleted file mode 100644 index c8756c438320f..0000000000000 --- a/plugins/discovery-azure-classic/licenses/commons-logging-1.1.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f6f66e966c70a83ffbdb6f17a0919eaf7c8aca7f diff --git a/plugins/discovery-azure-classic/licenses/commons-logging-1.2.jar.sha1 b/plugins/discovery-azure-classic/licenses/commons-logging-1.2.jar.sha1 new file mode 100644 index 0000000000000..f40f0242448e8 --- /dev/null +++ b/plugins/discovery-azure-classic/licenses/commons-logging-1.2.jar.sha1 @@ -0,0 +1 @@ +4bfc12adfe4842bf07b657f0369c4cb522955686 \ No newline at end of file 
diff --git a/plugins/discovery-ec2/licenses/commons-logging-1.1.3.jar.sha1 b/plugins/discovery-ec2/licenses/commons-logging-1.1.3.jar.sha1 deleted file mode 100644 index c8756c438320f..0000000000000 --- a/plugins/discovery-ec2/licenses/commons-logging-1.1.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f6f66e966c70a83ffbdb6f17a0919eaf7c8aca7f diff --git a/plugins/discovery-ec2/licenses/commons-logging-1.2.jar.sha1 b/plugins/discovery-ec2/licenses/commons-logging-1.2.jar.sha1 new file mode 100644 index 0000000000000..f40f0242448e8 --- /dev/null +++ b/plugins/discovery-ec2/licenses/commons-logging-1.2.jar.sha1 @@ -0,0 +1 @@ +4bfc12adfe4842bf07b657f0369c4cb522955686 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/commons-logging-1.1.3.jar.sha1 b/plugins/discovery-gce/licenses/commons-logging-1.1.3.jar.sha1 deleted file mode 100644 index c8756c438320f..0000000000000 --- a/plugins/discovery-gce/licenses/commons-logging-1.1.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f6f66e966c70a83ffbdb6f17a0919eaf7c8aca7f diff --git a/plugins/discovery-gce/licenses/commons-logging-1.2.jar.sha1 b/plugins/discovery-gce/licenses/commons-logging-1.2.jar.sha1 new file mode 100644 index 0000000000000..f40f0242448e8 --- /dev/null +++ b/plugins/discovery-gce/licenses/commons-logging-1.2.jar.sha1 @@ -0,0 +1 @@ +4bfc12adfe4842bf07b657f0369c4cb522955686 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/commons-logging-1.1.3.jar.sha1 b/plugins/ingest-attachment/licenses/commons-logging-1.1.3.jar.sha1 deleted file mode 100644 index 5b8f029e58293..0000000000000 --- a/plugins/ingest-attachment/licenses/commons-logging-1.1.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f6f66e966c70a83ffbdb6f17a0919eaf7c8aca7f \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/commons-logging-1.2.jar.sha1 b/plugins/ingest-attachment/licenses/commons-logging-1.2.jar.sha1 new file mode 100644 index 0000000000000..f40f0242448e8 --- /dev/null +++ 
b/plugins/ingest-attachment/licenses/commons-logging-1.2.jar.sha1 @@ -0,0 +1 @@ +4bfc12adfe4842bf07b657f0369c4cb522955686 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/commons-logging-1.1.3.jar.sha1 b/plugins/repository-gcs/licenses/commons-logging-1.1.3.jar.sha1 deleted file mode 100644 index 5b8f029e58293..0000000000000 --- a/plugins/repository-gcs/licenses/commons-logging-1.1.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f6f66e966c70a83ffbdb6f17a0919eaf7c8aca7f \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/commons-logging-1.2.jar.sha1 b/plugins/repository-gcs/licenses/commons-logging-1.2.jar.sha1 new file mode 100644 index 0000000000000..f40f0242448e8 --- /dev/null +++ b/plugins/repository-gcs/licenses/commons-logging-1.2.jar.sha1 @@ -0,0 +1 @@ +4bfc12adfe4842bf07b657f0369c4cb522955686 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-logging-1.1.3.jar.sha1 b/plugins/repository-hdfs/licenses/commons-logging-1.1.3.jar.sha1 deleted file mode 100644 index 5b8f029e58293..0000000000000 --- a/plugins/repository-hdfs/licenses/commons-logging-1.1.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f6f66e966c70a83ffbdb6f17a0919eaf7c8aca7f \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-logging-1.2.jar.sha1 b/plugins/repository-hdfs/licenses/commons-logging-1.2.jar.sha1 new file mode 100644 index 0000000000000..f40f0242448e8 --- /dev/null +++ b/plugins/repository-hdfs/licenses/commons-logging-1.2.jar.sha1 @@ -0,0 +1 @@ +4bfc12adfe4842bf07b657f0369c4cb522955686 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/commons-logging-1.1.3.jar.sha1 b/plugins/repository-s3/licenses/commons-logging-1.1.3.jar.sha1 deleted file mode 100644 index c8756c438320f..0000000000000 --- a/plugins/repository-s3/licenses/commons-logging-1.1.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f6f66e966c70a83ffbdb6f17a0919eaf7c8aca7f diff --git 
a/plugins/repository-s3/licenses/commons-logging-1.2.jar.sha1 b/plugins/repository-s3/licenses/commons-logging-1.2.jar.sha1 new file mode 100644 index 0000000000000..f40f0242448e8 --- /dev/null +++ b/plugins/repository-s3/licenses/commons-logging-1.2.jar.sha1 @@ -0,0 +1 @@ +4bfc12adfe4842bf07b657f0369c4cb522955686 \ No newline at end of file diff --git a/test/framework/build.gradle b/test/framework/build.gradle index 42197bf5e2980..096e8c1e58243 100644 --- a/test/framework/build.gradle +++ b/test/framework/build.gradle @@ -71,7 +71,6 @@ thirdPartyAudit.ignoreMissingClasses( 'org.apache.avalon.framework.logger.Logger', 'org.apache.log.Hierarchy', 'org.apache.log.Logger', - 'org.apache.log4j.Category', 'org.apache.log4j.Level', 'org.apache.log4j.Logger', 'org.apache.log4j.Priority', diff --git a/x-pack/plugin/core/licenses/commons-logging-1.2.jar.sha1 b/x-pack/plugin/core/licenses/commons-logging-1.2.jar.sha1 new file mode 100644 index 0000000000000..f40f0242448e8 --- /dev/null +++ b/x-pack/plugin/core/licenses/commons-logging-1.2.jar.sha1 @@ -0,0 +1 @@ +4bfc12adfe4842bf07b657f0369c4cb522955686 \ No newline at end of file From c876f55294b5636e558e5735cf2f0c629bcda080 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Apr 2022 11:41:20 -0400 Subject: [PATCH 062/653] Bump com.diffplug.spotless from 6.4.1 to 6.4.2 (#2827) Bumps com.diffplug.spotless from 6.4.1 to 6.4.2. --- updated-dependencies: - dependency-name: com.diffplug.spotless dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.gradle b/build.gradle index 487f20c7f6ccd..a41ad61de39a6 100644 --- a/build.gradle +++ b/build.gradle @@ -48,7 +48,7 @@ plugins { id 'lifecycle-base' id 'opensearch.docker-support' id 'opensearch.global-build-info' - id "com.diffplug.spotless" version "6.4.1" apply false + id "com.diffplug.spotless" version "6.4.2" apply false id "org.gradle.test-retry" version "1.3.1" apply false } From 0a17faca613f15b46d6a0ffc02eafec2414481c4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Apr 2022 11:41:48 -0400 Subject: [PATCH 063/653] Bump cdi-api from 1.2 to 2.0 in /qa/wildfly (#2835) Bumps [cdi-api](https://github.com/cdi-spec/cdi) from 1.2 to 2.0. - [Release notes](https://github.com/cdi-spec/cdi/releases) - [Commits](https://github.com/cdi-spec/cdi/compare/1.2...2.0) --- updated-dependencies: - dependency-name: javax.enterprise:cdi-api dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- qa/wildfly/build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/qa/wildfly/build.gradle b/qa/wildfly/build.gradle index 9abaa2a83033f..0cf2098c24b91 100644 --- a/qa/wildfly/build.gradle +++ b/qa/wildfly/build.gradle @@ -39,7 +39,7 @@ apply plugin: 'opensearch.internal-distribution-download' testFixtures.useFixture() dependencies { - providedCompile 'javax.enterprise:cdi-api:1.2' + providedCompile 'javax.enterprise:cdi-api:2.0' providedCompile 'org.jboss.spec.javax.annotation:jboss-annotations-api_1.2_spec:1.0.2.Final' providedCompile 'org.jboss.spec.javax.ws.rs:jboss-jaxrs-api_2.0_spec:1.0.1.Final' api('org.jboss.resteasy:resteasy-jackson2-provider:3.0.19.Final') { From f68c8f452704f46ece805958f8d3e67d077684d8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Apr 2022 11:42:16 -0400 Subject: [PATCH 064/653] Bump gradle-info-plugin from 7.1.3 to 11.3.3 in /buildSrc (#2831) Bumps [gradle-info-plugin](https://github.com/nebula-plugins/gradle-info-plugin) from 7.1.3 to 11.3.3. - [Release notes](https://github.com/nebula-plugins/gradle-info-plugin/releases) - [Changelog](https://github.com/nebula-plugins/gradle-info-plugin/blob/main/CHANGELOG.md) - [Commits](https://github.com/nebula-plugins/gradle-info-plugin/compare/v7.1.3...v11.3.3) --- updated-dependencies: - dependency-name: com.netflix.nebula:gradle-info-plugin dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- buildSrc/build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 18f8738bbba71..d478a1fd45e80 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -107,7 +107,7 @@ dependencies { api 'org.apache.ant:ant:1.10.12' api 'com.netflix.nebula:gradle-extra-configurations-plugin:7.0.0' api 'com.netflix.nebula:nebula-publishing-plugin:4.4.4' - api 'com.netflix.nebula:gradle-info-plugin:7.1.3' + api 'com.netflix.nebula:gradle-info-plugin:11.3.3' api 'org.apache.rat:apache-rat:0.13' api 'commons-io:commons-io:2.7' api "net.java.dev.jna:jna:5.10.0" From c69ad3deca810f0470757e9a56180cbc574808dc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Apr 2022 11:47:19 -0400 Subject: [PATCH 065/653] Bump azure-core from 1.26.0 to 1.27.0 in /plugins/repository-azure (#2837) * Bump azure-core from 1.26.0 to 1.27.0 in /plugins/repository-azure Bumps [azure-core](https://github.com/Azure/azure-sdk-for-java) from 1.26.0 to 1.27.0. - [Release notes](https://github.com/Azure/azure-sdk-for-java/releases) - [Commits](https://github.com/Azure/azure-sdk-for-java/compare/azure-core_1.26.0...azure-core_1.27.0) --- updated-dependencies: - dependency-name: com.azure:azure-core dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- plugins/repository-azure/build.gradle | 2 +- plugins/repository-azure/licenses/azure-core-1.26.0.jar.sha1 | 1 - plugins/repository-azure/licenses/azure-core-1.27.0.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-azure/licenses/azure-core-1.26.0.jar.sha1 create mode 100644 plugins/repository-azure/licenses/azure-core-1.27.0.jar.sha1 diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 040a29750b967..da644d77eb488 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -44,7 +44,7 @@ opensearchplugin { } dependencies { - api 'com.azure:azure-core:1.26.0' + api 'com.azure:azure-core:1.27.0' api 'com.azure:azure-storage-common:12.15.0' api 'com.azure:azure-core-http-netty:1.11.9' api "io.netty:netty-codec-dns:${versions.netty}" diff --git a/plugins/repository-azure/licenses/azure-core-1.26.0.jar.sha1 b/plugins/repository-azure/licenses/azure-core-1.26.0.jar.sha1 deleted file mode 100644 index 693c6a721959c..0000000000000 --- a/plugins/repository-azure/licenses/azure-core-1.26.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -461b89dcf8948a0c4a97d4f1d876f778d0cac7aa \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-core-1.27.0.jar.sha1 b/plugins/repository-azure/licenses/azure-core-1.27.0.jar.sha1 new file mode 100644 index 0000000000000..9206b697ca648 --- /dev/null +++ b/plugins/repository-azure/licenses/azure-core-1.27.0.jar.sha1 @@ -0,0 +1 @@ +75a2db538d218e2bd3c2cbdf04c955b8f6db6626 \ No newline at end of file From 81e2455df1b9e4428a3379aecc7dfe877202060f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Apr 2022 11:47:50 -0400 Subject: [PATCH 
066/653] Bump asm-analysis from 9.2 to 9.3 in /test/logger-usage (#2829) Bumps asm-analysis from 9.2 to 9.3. --- updated-dependencies: - dependency-name: org.ow2.asm:asm-analysis dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- test/logger-usage/build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/logger-usage/build.gradle b/test/logger-usage/build.gradle index ecfccc9338410..3154e556a87cf 100644 --- a/test/logger-usage/build.gradle +++ b/test/logger-usage/build.gradle @@ -33,7 +33,7 @@ apply plugin: 'opensearch.java' dependencies { api 'org.ow2.asm:asm:9.2' api 'org.ow2.asm:asm-tree:9.2' - api 'org.ow2.asm:asm-analysis:9.2' + api 'org.ow2.asm:asm-analysis:9.3' api "org.apache.logging.log4j:log4j-api:${versions.log4j}" testImplementation project(":test:framework") } From 00ae764752b6b705ed109c4cfef259dd5b6adc4e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Apr 2022 11:48:21 -0400 Subject: [PATCH 067/653] Bump protobuf-java from 3.19.3 to 3.20.0 in /plugins/repository-hdfs (#2836) * Bump protobuf-java from 3.19.3 to 3.20.0 in /plugins/repository-hdfs Bumps [protobuf-java](https://github.com/protocolbuffers/protobuf) from 3.19.3 to 3.20.0. - [Release notes](https://github.com/protocolbuffers/protobuf/releases) - [Changelog](https://github.com/protocolbuffers/protobuf/blob/main/generate_changelog.py) - [Commits](https://github.com/protocolbuffers/protobuf/compare/v3.19.3...v3.20.0) --- updated-dependencies: - dependency-name: com.google.protobuf:protobuf-java dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- plugins/repository-hdfs/build.gradle | 2 +- plugins/repository-hdfs/licenses/protobuf-java-3.19.3.jar.sha1 | 1 - plugins/repository-hdfs/licenses/protobuf-java-3.20.0.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-hdfs/licenses/protobuf-java-3.19.3.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/protobuf-java-3.20.0.jar.sha1 diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 1787a380b933b..d8811ded8d092 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -67,7 +67,7 @@ dependencies { api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" api 'com.google.code.gson:gson:2.9.0' runtimeOnly 'com.google.guava:guava:30.1.1-jre' - api 'com.google.protobuf:protobuf-java:3.19.3' + api 'com.google.protobuf:protobuf-java:3.20.0' api "commons-logging:commons-logging:${versions.commonslogging}" api 'commons-cli:commons-cli:1.2' api "commons-codec:commons-codec:${versions.commonscodec}" diff --git a/plugins/repository-hdfs/licenses/protobuf-java-3.19.3.jar.sha1 b/plugins/repository-hdfs/licenses/protobuf-java-3.19.3.jar.sha1 deleted file mode 100644 index 655ecd1f1c1c9..0000000000000 --- a/plugins/repository-hdfs/licenses/protobuf-java-3.19.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4b57f1b1b9e281231c3fcfc039ce3021e29ff570 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/protobuf-java-3.20.0.jar.sha1 b/plugins/repository-hdfs/licenses/protobuf-java-3.20.0.jar.sha1 new file mode 100644 index 0000000000000..c5b0169ce0dba --- /dev/null +++ b/plugins/repository-hdfs/licenses/protobuf-java-3.20.0.jar.sha1 @@ -0,0 +1 @@ +3c72ddaaab7ffafe789e4f732c1fd614eb798bf4 \ No newline at end of file 
From 725b5ca004bd075102f0d9a6a2c37cdc6c1fcab7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Apr 2022 11:48:56 -0400 Subject: [PATCH 068/653] Bump joni from 2.1.41 to 2.1.43 in /libs/grok (#2832) * Bump joni from 2.1.41 to 2.1.43 in /libs/grok Bumps [joni](https://github.com/jruby/joni) from 2.1.41 to 2.1.43. - [Release notes](https://github.com/jruby/joni/releases) - [Commits](https://github.com/jruby/joni/compare/joni-2.1.41...joni-2.1.43) --- updated-dependencies: - dependency-name: org.jruby.joni:joni dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- libs/grok/build.gradle | 2 +- libs/grok/licenses/joni-2.1.41.jar.sha1 | 1 - libs/grok/licenses/joni-2.1.43.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 libs/grok/licenses/joni-2.1.41.jar.sha1 create mode 100644 libs/grok/licenses/joni-2.1.43.jar.sha1 diff --git a/libs/grok/build.gradle b/libs/grok/build.gradle index e406a80ee1c91..86414d18108a1 100644 --- a/libs/grok/build.gradle +++ b/libs/grok/build.gradle @@ -29,7 +29,7 @@ */ dependencies { - api 'org.jruby.joni:joni:2.1.41' + api 'org.jruby.joni:joni:2.1.43' // joni dependencies: api 'org.jruby.jcodings:jcodings:1.0.57' diff --git a/libs/grok/licenses/joni-2.1.41.jar.sha1 b/libs/grok/licenses/joni-2.1.41.jar.sha1 deleted file mode 100644 index 4f0a0a8393dd0..0000000000000 --- a/libs/grok/licenses/joni-2.1.41.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4a35f4eaef792073bc081b756b1f4949879cd41e \ No newline at end of file diff --git a/libs/grok/licenses/joni-2.1.43.jar.sha1 b/libs/grok/licenses/joni-2.1.43.jar.sha1 new file mode 100644 index 0000000000000..ef5dfabb2b391 --- /dev/null +++ b/libs/grok/licenses/joni-2.1.43.jar.sha1 @@ -0,0 +1 @@ 
+9a3bf154469d5ff1d1107755904279081a5fb618 \ No newline at end of file From d308795bf0e62d39c45419c4c3e3a9f98dc4eaec Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Apr 2022 11:49:32 -0400 Subject: [PATCH 069/653] Bump google-oauth-client from 1.33.1 to 1.33.2 in /plugins/discovery-gce (#2828) * Bump google-oauth-client from 1.33.1 to 1.33.2 in /plugins/discovery-gce Bumps [google-oauth-client](https://github.com/googleapis/google-oauth-java-client) from 1.33.1 to 1.33.2. - [Release notes](https://github.com/googleapis/google-oauth-java-client/releases) - [Changelog](https://github.com/googleapis/google-oauth-java-client/blob/main/CHANGELOG.md) - [Commits](https://github.com/googleapis/google-oauth-java-client/compare/v1.33.1...v1.33.2) --- updated-dependencies: - dependency-name: com.google.oauth-client:google-oauth-client dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- plugins/discovery-gce/build.gradle | 2 +- .../discovery-gce/licenses/google-oauth-client-1.33.1.jar.sha1 | 1 - .../discovery-gce/licenses/google-oauth-client-1.33.2.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 plugins/discovery-gce/licenses/google-oauth-client-1.33.1.jar.sha1 create mode 100644 plugins/discovery-gce/licenses/google-oauth-client-1.33.2.jar.sha1 diff --git a/plugins/discovery-gce/build.gradle b/plugins/discovery-gce/build.gradle index 2396b228d77a0..eb695f84b2bd0 100644 --- a/plugins/discovery-gce/build.gradle +++ b/plugins/discovery-gce/build.gradle @@ -24,7 +24,7 @@ versions << [ dependencies { api "com.google.apis:google-api-services-compute:v1-rev160-${versions.google}" api "com.google.api-client:google-api-client:${versions.google}" - api 
"com.google.oauth-client:google-oauth-client:1.33.1" + api "com.google.oauth-client:google-oauth-client:1.33.2" api "com.google.http-client:google-http-client:${versions.google}" api "com.google.http-client:google-http-client-jackson2:${versions.google}" api 'com.google.code.findbugs:jsr305:3.0.2' diff --git a/plugins/discovery-gce/licenses/google-oauth-client-1.33.1.jar.sha1 b/plugins/discovery-gce/licenses/google-oauth-client-1.33.1.jar.sha1 deleted file mode 100644 index 3897a85310ec6..0000000000000 --- a/plugins/discovery-gce/licenses/google-oauth-client-1.33.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0a431f1a677c5f89507591ab47a7ccdb0b18b6f7 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/google-oauth-client-1.33.2.jar.sha1 b/plugins/discovery-gce/licenses/google-oauth-client-1.33.2.jar.sha1 new file mode 100644 index 0000000000000..289e8e8261fd3 --- /dev/null +++ b/plugins/discovery-gce/licenses/google-oauth-client-1.33.2.jar.sha1 @@ -0,0 +1 @@ +2810fb515fe110295dc6867fc9f70c401b66daf3 \ No newline at end of file From 7dd171d48ae0d416a745f92ce8fe732ef0cbcc02 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Apr 2022 11:50:03 -0400 Subject: [PATCH 070/653] Bump protobuf-java-util from 3.19.3 to 3.20.0 in /plugins/repository-gcs (#2834) * Bump protobuf-java-util from 3.19.3 to 3.20.0 in /plugins/repository-gcs Bumps [protobuf-java-util](https://github.com/protocolbuffers/protobuf) from 3.19.3 to 3.20.0. - [Release notes](https://github.com/protocolbuffers/protobuf/releases) - [Changelog](https://github.com/protocolbuffers/protobuf/blob/main/generate_changelog.py) - [Commits](https://github.com/protocolbuffers/protobuf/compare/v3.19.3...v3.20.0) --- updated-dependencies: - dependency-name: com.google.protobuf:protobuf-java-util dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- plugins/repository-gcs/build.gradle | 2 +- .../repository-gcs/licenses/protobuf-java-util-3.19.3.jar.sha1 | 1 - .../repository-gcs/licenses/protobuf-java-util-3.20.0.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-gcs/licenses/protobuf-java-util-3.19.3.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/protobuf-java-util-3.20.0.jar.sha1 diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index 6e577d058ff67..0e1ed06879f91 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -61,7 +61,7 @@ dependencies { api 'com.google.api:api-common:1.8.1' api 'com.google.api:gax:1.54.0' api 'org.threeten:threetenbp:1.4.4' - api 'com.google.protobuf:protobuf-java-util:3.19.3' + api 'com.google.protobuf:protobuf-java-util:3.20.0' api 'com.google.protobuf:protobuf-java:3.19.3' api 'com.google.code.gson:gson:2.9.0' api 'com.google.api.grpc:proto-google-common-protos:2.8.0' diff --git a/plugins/repository-gcs/licenses/protobuf-java-util-3.19.3.jar.sha1 b/plugins/repository-gcs/licenses/protobuf-java-util-3.19.3.jar.sha1 deleted file mode 100644 index 9ba36d444c541..0000000000000 --- a/plugins/repository-gcs/licenses/protobuf-java-util-3.19.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3e6812cbbb7e6faffa7b56438740dec510e1fc1a \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/protobuf-java-util-3.20.0.jar.sha1 b/plugins/repository-gcs/licenses/protobuf-java-util-3.20.0.jar.sha1 new file mode 100644 index 0000000000000..1e9d00d8d5c03 --- /dev/null +++ b/plugins/repository-gcs/licenses/protobuf-java-util-3.20.0.jar.sha1 @@ -0,0 +1 @@ +ee4496b296418283cbe7ae784984347fc4717a9a \ No newline at end of file From 
ba8657aca5d8918c5393308d248a711a75bd4905 Mon Sep 17 00:00:00 2001 From: Tianli Feng Date: Mon, 11 Apr 2022 15:59:04 -0700 Subject: [PATCH 071/653] Change deprecation message for REST API parameter 'master_timeout' to specify the version of removal (#2863) Signed-off-by: Tianli Feng --- .../rest-api-spec/test/indices.clone/10_basic.yml | 4 ++-- .../test/indices.clone/20_source_mapping.yml | 2 +- .../rest-api-spec/test/indices.clone/30_copy_settings.yml | 2 +- .../rest-api-spec/test/indices.shrink/10_basic.yml | 2 +- .../test/indices.shrink/20_source_mapping.yml | 2 +- .../test/indices.shrink/30_copy_settings.yml | 6 +++--- .../rest-api-spec/test/indices.split/10_basic.yml | 8 ++++---- .../test/indices.split/20_source_mapping.yml | 2 +- .../rest-api-spec/test/indices.split/30_copy_settings.yml | 6 +++--- .../main/java/org/opensearch/rest/BaseRestHandler.java | 2 +- .../rest/action/admin/indices/RestGetMappingAction.java | 2 +- .../org/opensearch/rest/action/cat/RestIndicesAction.java | 2 +- .../action/RenamedTimeoutRequestParameterTests.java | 2 +- 13 files changed, 21 insertions(+), 21 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/10_basic.yml index 6488e4960e08f..ca8342b2e91c2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/10_basic.yml @@ -48,7 +48,7 @@ setup: # now we do the actual clone - do: allowed_warnings: - - "Deprecated parameter [master_timeout] used. To promote inclusive language, please use [cluster_manager_timeout] instead. It will be unsupported in a future major version." + - "Parameter [master_timeout] is deprecated and will be removed in 3.0. To support inclusive language, please use [cluster_manager_timeout] instead." 
indices.clone: index: "source" target: "target" @@ -102,7 +102,7 @@ setup: - do: catch: /illegal_argument_exception/ allowed_warnings: - - "Deprecated parameter [master_timeout] used. To promote inclusive language, please use [cluster_manager_timeout] instead. It will be unsupported in a future major version." + - "Parameter [master_timeout] is deprecated and will be removed in 3.0. To support inclusive language, please use [cluster_manager_timeout] instead." indices.clone: index: "source" target: "target" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/20_source_mapping.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/20_source_mapping.yml index 1a3074d091399..21c476c76965c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/20_source_mapping.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/20_source_mapping.yml @@ -52,7 +52,7 @@ # now we do the actual clone - do: allowed_warnings: - - "Deprecated parameter [master_timeout] used. To promote inclusive language, please use [cluster_manager_timeout] instead. It will be unsupported in a future major version." + - "Parameter [master_timeout] is deprecated and will be removed in 3.0. To support inclusive language, please use [cluster_manager_timeout] instead." indices.clone: index: "source" target: "target" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/30_copy_settings.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/30_copy_settings.yml index 467f5266122eb..b0bd8056cb004 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/30_copy_settings.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/30_copy_settings.yml @@ -37,7 +37,7 @@ # now we do an actual clone and copy settings - do: allowed_warnings: - - "Deprecated parameter [master_timeout] used. 
To promote inclusive language, please use [cluster_manager_timeout] instead. It will be unsupported in a future major version." + - "Parameter [master_timeout] is deprecated and will be removed in 3.0. To support inclusive language, please use [cluster_manager_timeout] instead." indices.clone: index: "source" target: "copy-settings-target" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml index 4db7ca353334f..032f061d8a160 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml @@ -57,7 +57,7 @@ # now we do the actual shrink - do: allowed_warnings: - - "Deprecated parameter [master_timeout] used. To promote inclusive language, please use [cluster_manager_timeout] instead. It will be unsupported in a future major version." + - "Parameter [master_timeout] is deprecated and will be removed in 3.0. To support inclusive language, please use [cluster_manager_timeout] instead." indices.shrink: index: "source" target: "target" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml index 4ddf122d82691..8d08373208216 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml @@ -61,7 +61,7 @@ # now we do the actual shrink - do: allowed_warnings: - - "Deprecated parameter [master_timeout] used. To promote inclusive language, please use [cluster_manager_timeout] instead. It will be unsupported in a future major version." + - "Parameter [master_timeout] is deprecated and will be removed in 3.0. 
To support inclusive language, please use [cluster_manager_timeout] instead." indices.shrink: index: "source" target: "target" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml index 112303a3a7298..33bcb18f8afb6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml @@ -47,7 +47,7 @@ index.merge.scheduler.max_thread_count: 2 allowed_warnings: - "parameter [copy_settings] is deprecated and will be removed in 8.0.0" - - "Deprecated parameter [master_timeout] used. To promote inclusive language, please use [cluster_manager_timeout] instead. It will be unsupported in a future major version." + - "Parameter [master_timeout] is deprecated and will be removed in 3.0. To support inclusive language, please use [cluster_manager_timeout] instead." - do: cluster.health: @@ -66,7 +66,7 @@ # now we do a actual shrink and copy settings (by default) - do: allowed_warnings: - - "Deprecated parameter [master_timeout] used. To promote inclusive language, please use [cluster_manager_timeout] instead. It will be unsupported in a future major version." + - "Parameter [master_timeout] is deprecated and will be removed in 3.0. To support inclusive language, please use [cluster_manager_timeout] instead." indices.shrink: index: "source" target: "default-copy-settings-target" @@ -95,7 +95,7 @@ - do: catch: /illegal_argument_exception/ allowed_warnings: - - "Deprecated parameter [master_timeout] used. To promote inclusive language, please use [cluster_manager_timeout] instead. It will be unsupported in a future major version." + - "Parameter [master_timeout] is deprecated and will be removed in 3.0. To support inclusive language, please use [cluster_manager_timeout] instead." 
indices.shrink: index: "source" target: "explicit-no-copy-settings-target" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml index 01781e35b9ae9..2432f47d4dca7 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml @@ -48,7 +48,7 @@ setup: # now we do the actual split - do: allowed_warnings: - - "Deprecated parameter [master_timeout] used. To promote inclusive language, please use [cluster_manager_timeout] instead. It will be unsupported in a future major version." + - "Parameter [master_timeout] is deprecated and will be removed in 3.0. To support inclusive language, please use [cluster_manager_timeout] instead." indices.split: index: "source" target: "target" @@ -139,7 +139,7 @@ setup: # now we do the actual split from 1 to 5 - do: allowed_warnings: - - "Deprecated parameter [master_timeout] used. To promote inclusive language, please use [cluster_manager_timeout] instead. It will be unsupported in a future major version." + - "Parameter [master_timeout] is deprecated and will be removed in 3.0. To support inclusive language, please use [cluster_manager_timeout] instead." indices.split: index: "source_one_shard" target: "target" @@ -192,7 +192,7 @@ setup: - do: catch: /illegal_argument_exception/ allowed_warnings: - - "Deprecated parameter [master_timeout] used. To promote inclusive language, please use [cluster_manager_timeout] instead. It will be unsupported in a future major version." + - "Parameter [master_timeout] is deprecated and will be removed in 3.0. To support inclusive language, please use [cluster_manager_timeout] instead." indices.split: index: "source" target: "target" @@ -208,7 +208,7 @@ setup: - do: catch: /illegal_state_exception/ allowed_warnings: - - "Deprecated parameter [master_timeout] used. 
To promote inclusive language, please use [cluster_manager_timeout] instead. It will be unsupported in a future major version." + - "Parameter [master_timeout] is deprecated and will be removed in 3.0. To support inclusive language, please use [cluster_manager_timeout] instead." indices.split: index: "source" target: "target" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml index 0baae30238013..69061c6d0fbd4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml @@ -52,7 +52,7 @@ # now we do the actual split - do: allowed_warnings: - - "Deprecated parameter [master_timeout] used. To promote inclusive language, please use [cluster_manager_timeout] instead. It will be unsupported in a future major version." + - "Parameter [master_timeout] is deprecated and will be removed in 3.0. To support inclusive language, please use [cluster_manager_timeout] instead." indices.split: index: "source" target: "target" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml index ace49ff6dd917..5b7f8f9960774 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml @@ -49,7 +49,7 @@ index.merge.scheduler.max_thread_count: 2 allowed_warnings: - "parameter [copy_settings] is deprecated and will be removed in 8.0.0" - - "Deprecated parameter [master_timeout] used. To promote inclusive language, please use [cluster_manager_timeout] instead. It will be unsupported in a future major version." 
+ - "Parameter [master_timeout] is deprecated and will be removed in 3.0. To support inclusive language, please use [cluster_manager_timeout] instead." - do: cluster.health: @@ -68,7 +68,7 @@ # now we do a actual shrink and copy settings (by default) - do: allowed_warnings: - - "Deprecated parameter [master_timeout] used. To promote inclusive language, please use [cluster_manager_timeout] instead. It will be unsupported in a future major version." + - "Parameter [master_timeout] is deprecated and will be removed in 3.0. To support inclusive language, please use [cluster_manager_timeout] instead." indices.split: index: "source" target: "default-copy-settings-target" @@ -97,7 +97,7 @@ - do: catch: /illegal_argument_exception/ allowed_warnings: - - "Deprecated parameter [master_timeout] used. To promote inclusive language, please use [cluster_manager_timeout] instead. It will be unsupported in a future major version." + - "Parameter [master_timeout] is deprecated and will be removed in 3.0. To support inclusive language, please use [cluster_manager_timeout] instead." indices.split: index: "source" target: "explicit-no-copy-settings-target" diff --git a/server/src/main/java/org/opensearch/rest/BaseRestHandler.java b/server/src/main/java/org/opensearch/rest/BaseRestHandler.java index e0a62581447ac..e16e385910d98 100644 --- a/server/src/main/java/org/opensearch/rest/BaseRestHandler.java +++ b/server/src/main/java/org/opensearch/rest/BaseRestHandler.java @@ -219,7 +219,7 @@ public static void parseDeprecatedMasterTimeoutParameter( String logMsgKeyPrefix ) { final String MASTER_TIMEOUT_DEPRECATED_MESSAGE = - "Deprecated parameter [master_timeout] used. To promote inclusive language, please use [cluster_manager_timeout] instead. It will be unsupported in a future major version."; + "Parameter [master_timeout] is deprecated and will be removed in 3.0. 
To support inclusive language, please use [cluster_manager_timeout] instead."; final String DUPLICATE_PARAMETER_ERROR_MESSAGE = "Please only use one of the request parameters [master_timeout, cluster_manager_timeout]."; if (request.hasParam("master_timeout")) { diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetMappingAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetMappingAction.java index 62ea315fd89a0..86bf6b626c24a 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetMappingAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetMappingAction.java @@ -63,7 +63,7 @@ public class RestGetMappingAction extends BaseRestHandler { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestGetMappingAction.class); private static final String MASTER_TIMEOUT_DEPRECATED_MESSAGE = - "Deprecated parameter [master_timeout] used. To promote inclusive language, please use [cluster_manager_timeout] instead. It will be unsupported in a future major version."; + "Parameter [master_timeout] is deprecated and will be removed in 3.0. 
To support inclusive language, please use [cluster_manager_timeout] instead."; private static final String DUPLICATE_PARAMETER_ERROR_MESSAGE = "Please only use one of the request parameters [master_timeout, cluster_manager_timeout]."; diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java index a26b57aab0636..1b70603edf6e1 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java @@ -86,7 +86,7 @@ public class RestIndicesAction extends AbstractCatAction { private static final DateFormatter STRICT_DATE_TIME_FORMATTER = DateFormatter.forPattern("strict_date_time"); private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestIndicesAction.class); private static final String MASTER_TIMEOUT_DEPRECATED_MESSAGE = - "Deprecated parameter [master_timeout] used. To promote inclusive language, please use [cluster_manager_timeout] instead. It will be unsupported in a future major version."; + "Parameter [master_timeout] is deprecated and will be removed in 3.0. 
To support inclusive language, please use [cluster_manager_timeout] instead."; private static final String DUPLICATE_PARAMETER_ERROR_MESSAGE = "Please only use one of the request parameters [master_timeout, cluster_manager_timeout]."; diff --git a/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java b/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java index df93d5c16d8e0..878730868a3b0 100644 --- a/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java +++ b/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java @@ -78,7 +78,7 @@ public class RenamedTimeoutRequestParameterTests extends OpenSearchTestCase { private static final String DUPLICATE_PARAMETER_ERROR_MESSAGE = "Please only use one of the request parameters [master_timeout, cluster_manager_timeout]."; private static final String MASTER_TIMEOUT_DEPRECATED_MESSAGE = - "Deprecated parameter [master_timeout] used. To promote inclusive language, please use [cluster_manager_timeout] instead. It will be unsupported in a future major version."; + "Parameter [master_timeout] is deprecated and will be removed in 3.0. To support inclusive language, please use [cluster_manager_timeout] instead."; @After public void terminateThreadPool() { From ba3ed8a9cb9dc9eeee4a4b09db326cdb99cda50b Mon Sep 17 00:00:00 2001 From: Tianli Feng Date: Mon, 11 Apr 2022 17:29:06 -0700 Subject: [PATCH 072/653] Add request parameter 'cluster_manager_timeout' and deprecate 'master_timeout' - in Index Template APIs (#2678) - Deprecate the request parameter `master_timeout` that used in Index Template APIs which have got the parameter. (The other Index APIs are addressed in PR https://github.com/opensearch-project/OpenSearch/pull/2660) - Add alternative new request parameter `cluster_manager_timeout`. - Add unit tests. 
Signed-off-by: Tianli Feng --- .../cluster.delete_component_template.json | 10 +- .../api/cluster.get_component_template.json | 10 +- .../api/cluster.put_component_template.json | 10 +- .../api/indices.delete_index_template.json | 10 +- .../api/indices.delete_template.json | 10 +- .../api/indices.get_index_template.json | 10 +- .../api/indices.get_template.json | 2 +- .../api/indices.put_index_template.json | 10 +- .../api/indices.put_template.json | 10 +- .../api/indices.simulate_index_template.json | 10 +- .../api/indices.simulate_template.json | 10 +- .../test/cluster.state/20_filtering.yml | 2 +- .../test/indices.exists_template/10_basic.yml | 4 + .../test/indices.get_template/10_basic.yml | 4 + .../RestDeleteComponentTemplateAction.java | 6 +- ...stDeleteComposableIndexTemplateAction.java | 6 +- .../RestDeleteIndexTemplateAction.java | 8 +- .../RestGetComponentTemplateAction.java | 6 +- .../RestGetComposableIndexTemplateAction.java | 6 +- .../indices/RestGetIndexTemplateAction.java | 8 +- .../RestPutComponentTemplateAction.java | 6 +- .../RestPutComposableIndexTemplateAction.java | 6 +- .../indices/RestPutIndexTemplateAction.java | 3 +- .../RestSimulateIndexTemplateAction.java | 6 +- .../indices/RestSimulateTemplateAction.java | 8 +- .../RenamedTimeoutRequestParameterTests.java | 92 +++++++++++++++++++ 26 files changed, 249 insertions(+), 24 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_component_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_component_template.json index 9beea52c86b37..43e14ad0e2dd8 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_component_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_component_template.json @@ -28,7 +28,15 @@ }, "master_timeout":{ "type":"time", - "description":"Specify timeout for connection to master" + "description":"Specify timeout for connection to master node", + 
"deprecated":{ + "version":"2.0.0", + "description":"To support inclusive language, use 'cluster_manager_timeout' instead." + } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Specify timeout for connection to cluster-manager node" } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_component_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_component_template.json index ecf32f50c0a6c..aa4e395672ef3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_component_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_component_template.json @@ -30,7 +30,15 @@ "params":{ "master_timeout":{ "type":"time", - "description":"Explicit operation timeout for connection to master node" + "description":"Explicit operation timeout for connection to master node", + "deprecated":{ + "version":"2.0.0", + "description":"To support inclusive language, use 'cluster_manager_timeout' instead." + } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to cluster-manager node" }, "local":{ "type":"boolean", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_component_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_component_template.json index abc83fb15f48a..05558bc7bfc50 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_component_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_component_template.json @@ -34,7 +34,15 @@ }, "master_timeout":{ "type":"time", - "description":"Specify timeout for connection to master" + "description":"Specify timeout for connection to master node", + "deprecated":{ + "version":"2.0.0", + "description":"To support inclusive language, use 'cluster_manager_timeout' instead." 
+ } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Specify timeout for connection to cluster-manager node" } }, "body":{ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_index_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_index_template.json index d037b03dc5277..c74771ffe4b81 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_index_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_index_template.json @@ -28,7 +28,15 @@ }, "master_timeout":{ "type":"time", - "description":"Specify timeout for connection to master" + "description":"Specify timeout for connection to master node", + "deprecated":{ + "version":"2.0.0", + "description":"To support inclusive language, use 'cluster_manager_timeout' instead." + } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Specify timeout for connection to cluster-manager node" } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_template.json index ca484a73e99f9..74dbb1822b64a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_template.json @@ -28,7 +28,15 @@ }, "master_timeout":{ "type":"time", - "description":"Specify timeout for connection to master" + "description":"Specify timeout for connection to master node", + "deprecated":{ + "version":"2.0.0", + "description":"To support inclusive language, use 'cluster_manager_timeout' instead." 
+ } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Specify timeout for connection to cluster-manager node" } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_index_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_index_template.json index 7ea6dd2944c79..fbd03f99d2547 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_index_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_index_template.json @@ -34,7 +34,15 @@ }, "master_timeout":{ "type":"time", - "description":"Explicit operation timeout for connection to master node" + "description":"Explicit operation timeout for connection to master node", + "deprecated":{ + "version":"2.0.0", + "description":"To support inclusive language, use 'cluster_manager_timeout' instead." + } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to cluster-manager node" }, "local":{ "type":"boolean", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_template.json index 04d2f846e6ac1..52aeb17913db4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_template.json @@ -37,7 +37,7 @@ "description":"Explicit operation timeout for connection to master node", "deprecated":{ "version":"2.0.0", - "description":"To promote inclusive language, use 'cluster_manager_timeout' instead." + "description":"To support inclusive language, use 'cluster_manager_timeout' instead." 
} }, "cluster_manager_timeout":{ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_index_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_index_template.json index 3f758e18737e2..a2ceb259a4376 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_index_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_index_template.json @@ -35,7 +35,15 @@ }, "master_timeout":{ "type":"time", - "description":"Specify timeout for connection to master" + "description":"Specify timeout for connection to master node", + "deprecated":{ + "version":"2.0.0", + "description":"To support inclusive language, use 'cluster_manager_timeout' instead." + } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Specify timeout for connection to cluster-manager node" } }, "body":{ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_template.json index 75a328af929ef..3b1c230178bb8 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_template.json @@ -34,7 +34,15 @@ }, "master_timeout":{ "type":"time", - "description":"Specify timeout for connection to master" + "description":"Specify timeout for connection to master node", + "deprecated":{ + "version":"2.0.0", + "description":"To support inclusive language, use 'cluster_manager_timeout' instead." 
+ } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Specify timeout for connection to cluster-manager node" } }, "body":{ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.simulate_index_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.simulate_index_template.json index 2b81572f0aaaf..0e42ba6028a9f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.simulate_index_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.simulate_index_template.json @@ -34,7 +34,15 @@ }, "master_timeout":{ "type":"time", - "description":"Specify timeout for connection to master" + "description":"Specify timeout for connection to master node", + "deprecated":{ + "version":"2.0.0", + "description":"To support inclusive language, use 'cluster_manager_timeout' instead." + } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Specify timeout for connection to cluster-manager node" } }, "body":{ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.simulate_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.simulate_template.json index 364547dd318a2..65b555082c3b1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.simulate_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.simulate_template.json @@ -40,7 +40,15 @@ }, "master_timeout":{ "type":"time", - "description":"Specify timeout for connection to master" + "description":"Specify timeout for connection to master node", + "deprecated":{ + "version":"2.0.0", + "description":"To support inclusive language, use 'cluster_manager_timeout' instead." 
+ } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Specify timeout for connection to cluster-manager node" } }, "body":{ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/20_filtering.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/20_filtering.yml index 3d20f1d0f7e52..b2c1e1e561933 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/20_filtering.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/20_filtering.yml @@ -169,7 +169,7 @@ setup: cluster.state: metric: [ master_node, version ] allowed_warnings: - - 'Deprecated value [master_node] used for parameter [metric]. To promote inclusive language, please use [cluster_manager_node] instead. It will be unsupported in a future major version.' + - 'Parameter [master_timeout] is deprecated and will be removed in 3.0. To support inclusive language, please use [cluster_manager_timeout] instead.' - match: { cluster_uuid: $cluster_uuid } - is_true: master_node diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.exists_template/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.exists_template/10_basic.yml index 67592a013e8f1..c7892f58a6f59 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.exists_template/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.exists_template/10_basic.yml @@ -6,6 +6,8 @@ setup: ignore: [404] --- "Test indices.exists_template": + - skip: + features: allowed_warnings - do: indices.exists_template: @@ -23,6 +25,8 @@ setup: number_of_replicas: 0 - do: + allowed_warnings: + - "Parameter [master_timeout] is deprecated and will be removed in 3.0. To support inclusive language, please use [cluster_manager_timeout] instead." 
indices.exists_template: name: test master_timeout: 1m diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_template/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_template/10_basic.yml index 9becbd54a3773..32536f8f72650 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_template/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_template/10_basic.yml @@ -72,8 +72,12 @@ setup: --- "Get template with flat settings and master timeout": + - skip: + features: allowed_warnings - do: + allowed_warnings: + - "Parameter [master_timeout] is deprecated and will be removed in 3.0. To support inclusive language, please use [cluster_manager_timeout] instead." indices.get_template: name: test flat_settings: true diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestDeleteComponentTemplateAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestDeleteComponentTemplateAction.java index e4f0347192dbe..05656cb250596 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestDeleteComponentTemplateAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestDeleteComponentTemplateAction.java @@ -34,6 +34,7 @@ import org.opensearch.action.admin.indices.template.delete.DeleteComponentTemplateAction; import org.opensearch.client.node.NodeClient; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; import org.opensearch.rest.action.RestToXContentListener; @@ -46,6 +47,8 @@ public class RestDeleteComponentTemplateAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestDeleteComponentTemplateAction.class); + @Override public List routes() { return Collections.singletonList(new Route(DELETE, "/_component_template/{name}")); @@ -60,7 
+63,8 @@ public String getName() { public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { DeleteComponentTemplateAction.Request deleteReq = new DeleteComponentTemplateAction.Request(request.param("name")); - deleteReq.masterNodeTimeout(request.paramAsTime("master_timeout", deleteReq.masterNodeTimeout())); + deleteReq.masterNodeTimeout(request.paramAsTime("cluster_manager_timeout", deleteReq.masterNodeTimeout())); + parseDeprecatedMasterTimeoutParameter(deleteReq, request, deprecationLogger, getName()); return channel -> client.execute(DeleteComponentTemplateAction.INSTANCE, deleteReq, new RestToXContentListener<>(channel)); } diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestDeleteComposableIndexTemplateAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestDeleteComposableIndexTemplateAction.java index f5832b4167852..b08288593515b 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestDeleteComposableIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestDeleteComposableIndexTemplateAction.java @@ -34,6 +34,7 @@ import org.opensearch.action.admin.indices.template.delete.DeleteComposableIndexTemplateAction; import org.opensearch.client.node.NodeClient; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; import org.opensearch.rest.action.RestToXContentListener; @@ -46,6 +47,8 @@ public class RestDeleteComposableIndexTemplateAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestDeleteComposableIndexTemplateAction.class); + @Override public List routes() { return Collections.singletonList(new Route(DELETE, "/_index_template/{name}")); @@ -60,7 +63,8 @@ public String getName() { public RestChannelConsumer prepareRequest(final 
RestRequest request, final NodeClient client) throws IOException { DeleteComposableIndexTemplateAction.Request deleteReq = new DeleteComposableIndexTemplateAction.Request(request.param("name")); - deleteReq.masterNodeTimeout(request.paramAsTime("master_timeout", deleteReq.masterNodeTimeout())); + deleteReq.masterNodeTimeout(request.paramAsTime("cluster_manager_timeout", deleteReq.masterNodeTimeout())); + parseDeprecatedMasterTimeoutParameter(deleteReq, request, deprecationLogger, getName()); return channel -> client.execute(DeleteComposableIndexTemplateAction.INSTANCE, deleteReq, new RestToXContentListener<>(channel)); } diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestDeleteIndexTemplateAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestDeleteIndexTemplateAction.java index 51040082c2c47..ce49332a4abbd 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestDeleteIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestDeleteIndexTemplateAction.java @@ -33,6 +33,7 @@ import org.opensearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest; import org.opensearch.client.node.NodeClient; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; import org.opensearch.rest.action.RestToXContentListener; @@ -45,6 +46,8 @@ public class RestDeleteIndexTemplateAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestDeleteIndexTemplateAction.class); + @Override public List routes() { return singletonList(new Route(DELETE, "/_template/{name}")); @@ -58,7 +61,10 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { DeleteIndexTemplateRequest deleteIndexTemplateRequest = new 
DeleteIndexTemplateRequest(request.param("name")); - deleteIndexTemplateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteIndexTemplateRequest.masterNodeTimeout())); + deleteIndexTemplateRequest.masterNodeTimeout( + request.paramAsTime("cluster_manager_timeout", deleteIndexTemplateRequest.masterNodeTimeout()) + ); + parseDeprecatedMasterTimeoutParameter(deleteIndexTemplateRequest, request, deprecationLogger, getName()); return channel -> client.admin().indices().deleteTemplate(deleteIndexTemplateRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetComponentTemplateAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetComponentTemplateAction.java index 8a85a66eef635..e245c5a20848f 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetComponentTemplateAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetComponentTemplateAction.java @@ -34,6 +34,7 @@ import org.opensearch.action.admin.indices.template.get.GetComponentTemplateAction; import org.opensearch.client.node.NodeClient; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Settings; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; @@ -52,6 +53,8 @@ public class RestGetComponentTemplateAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestGetComponentTemplateAction.class); + @Override public List routes() { return Arrays.asList( @@ -72,7 +75,8 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC final GetComponentTemplateAction.Request getRequest = new GetComponentTemplateAction.Request(request.param("name")); getRequest.local(request.paramAsBoolean("local", getRequest.local())); - 
getRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getRequest.masterNodeTimeout())); + getRequest.masterNodeTimeout(request.paramAsTime("cluster_manager_timeout", getRequest.masterNodeTimeout())); + parseDeprecatedMasterTimeoutParameter(getRequest, request, deprecationLogger, getName()); final boolean implicitAll = getRequest.name() == null; diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetComposableIndexTemplateAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetComposableIndexTemplateAction.java index 684198c6799f0..d860e8856571e 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetComposableIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetComposableIndexTemplateAction.java @@ -34,6 +34,7 @@ import org.opensearch.action.admin.indices.template.get.GetComposableIndexTemplateAction; import org.opensearch.client.node.NodeClient; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Settings; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; @@ -52,6 +53,8 @@ public class RestGetComposableIndexTemplateAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestGetComposableIndexTemplateAction.class); + @Override public List routes() { return Arrays.asList( @@ -71,7 +74,8 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC final GetComposableIndexTemplateAction.Request getRequest = new GetComposableIndexTemplateAction.Request(request.param("name")); getRequest.local(request.paramAsBoolean("local", getRequest.local())); - getRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getRequest.masterNodeTimeout())); + getRequest.masterNodeTimeout(request.paramAsTime("cluster_manager_timeout", getRequest.masterNodeTimeout())); + 
parseDeprecatedMasterTimeoutParameter(getRequest, request, deprecationLogger, getName()); final boolean implicitAll = getRequest.name() == null; diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetIndexTemplateAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetIndexTemplateAction.java index 71e7ed098cf8d..cda29d2b0ad47 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetIndexTemplateAction.java @@ -36,6 +36,7 @@ import org.opensearch.action.admin.indices.template.get.GetIndexTemplatesResponse; import org.opensearch.client.node.NodeClient; import org.opensearch.common.Strings; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Settings; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; @@ -58,6 +59,8 @@ */ public class RestGetIndexTemplateAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestGetIndexTemplateAction.class); + @Override public List routes() { return unmodifiableList( @@ -76,7 +79,10 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC final GetIndexTemplatesRequest getIndexTemplatesRequest = new GetIndexTemplatesRequest(names); getIndexTemplatesRequest.local(request.paramAsBoolean("local", getIndexTemplatesRequest.local())); - getIndexTemplatesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getIndexTemplatesRequest.masterNodeTimeout())); + getIndexTemplatesRequest.masterNodeTimeout( + request.paramAsTime("cluster_manager_timeout", getIndexTemplatesRequest.masterNodeTimeout()) + ); + parseDeprecatedMasterTimeoutParameter(getIndexTemplatesRequest, request, deprecationLogger, getName()); final boolean implicitAll = getIndexTemplatesRequest.names().length == 0; diff --git 
a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestPutComponentTemplateAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestPutComponentTemplateAction.java index ab02eef51a072..0d956b4dd147f 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestPutComponentTemplateAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestPutComponentTemplateAction.java @@ -35,6 +35,7 @@ import org.opensearch.action.admin.indices.template.put.PutComponentTemplateAction; import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.metadata.ComponentTemplate; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; import org.opensearch.rest.action.RestToXContentListener; @@ -48,6 +49,8 @@ public class RestPutComponentTemplateAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestPutComponentTemplateAction.class); + @Override public List routes() { return Arrays.asList(new Route(POST, "/_component_template/{name}"), new Route(PUT, "/_component_template/{name}")); @@ -62,7 +65,8 @@ public String getName() { public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { PutComponentTemplateAction.Request putRequest = new PutComponentTemplateAction.Request(request.param("name")); - putRequest.masterNodeTimeout(request.paramAsTime("master_timeout", putRequest.masterNodeTimeout())); + putRequest.masterNodeTimeout(request.paramAsTime("cluster_manager_timeout", putRequest.masterNodeTimeout())); + parseDeprecatedMasterTimeoutParameter(putRequest, request, deprecationLogger, getName()); putRequest.create(request.paramAsBoolean("create", false)); putRequest.cause(request.param("cause", "api")); putRequest.componentTemplate(ComponentTemplate.parse(request.contentParser())); diff --git 
a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestPutComposableIndexTemplateAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestPutComposableIndexTemplateAction.java index 790aad33008b9..63352a73b281d 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestPutComposableIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestPutComposableIndexTemplateAction.java @@ -35,6 +35,7 @@ import org.opensearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.metadata.ComposableIndexTemplate; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; import org.opensearch.rest.action.RestToXContentListener; @@ -48,6 +49,8 @@ public class RestPutComposableIndexTemplateAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestPutComposableIndexTemplateAction.class); + @Override public List routes() { return Arrays.asList(new Route(POST, "/_index_template/{name}"), new Route(PUT, "/_index_template/{name}")); @@ -62,7 +65,8 @@ public String getName() { public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { PutComposableIndexTemplateAction.Request putRequest = new PutComposableIndexTemplateAction.Request(request.param("name")); - putRequest.masterNodeTimeout(request.paramAsTime("master_timeout", putRequest.masterNodeTimeout())); + putRequest.masterNodeTimeout(request.paramAsTime("cluster_manager_timeout", putRequest.masterNodeTimeout())); + parseDeprecatedMasterTimeoutParameter(putRequest, request, deprecationLogger, getName()); putRequest.create(request.paramAsBoolean("create", false)); putRequest.cause(request.param("cause", "api")); 
putRequest.indexTemplate(ComposableIndexTemplate.parse(request.contentParser())); diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestPutIndexTemplateAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestPutIndexTemplateAction.java index f17ac495b494b..42cd8e8103a18 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestPutIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestPutIndexTemplateAction.java @@ -78,7 +78,8 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC putRequest.patterns(Arrays.asList(request.paramAsStringArray("index_patterns", Strings.EMPTY_ARRAY))); } putRequest.order(request.paramAsInt("order", putRequest.order())); - putRequest.masterNodeTimeout(request.paramAsTime("master_timeout", putRequest.masterNodeTimeout())); + putRequest.masterNodeTimeout(request.paramAsTime("cluster_manager_timeout", putRequest.masterNodeTimeout())); + parseDeprecatedMasterTimeoutParameter(putRequest, request, deprecationLogger, getName()); putRequest.create(request.paramAsBoolean("create", false)); putRequest.cause(request.param("cause", "")); diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestSimulateIndexTemplateAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestSimulateIndexTemplateAction.java index 596b96c28be41..fb78b7b0a826d 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestSimulateIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestSimulateIndexTemplateAction.java @@ -37,6 +37,7 @@ import org.opensearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.metadata.ComposableIndexTemplate; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.rest.BaseRestHandler; 
import org.opensearch.rest.RestRequest; import org.opensearch.rest.action.RestToXContentListener; @@ -48,6 +49,8 @@ public class RestSimulateIndexTemplateAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestSimulateIndexTemplateAction.class); + @Override public List routes() { return org.opensearch.common.collect.List.of(new Route(POST, "/_index_template/_simulate_index/{name}")); @@ -62,8 +65,9 @@ public String getName() { public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { SimulateIndexTemplateRequest simulateIndexTemplateRequest = new SimulateIndexTemplateRequest(request.param("name")); simulateIndexTemplateRequest.masterNodeTimeout( - request.paramAsTime("master_timeout", simulateIndexTemplateRequest.masterNodeTimeout()) + request.paramAsTime("cluster_manager_timeout", simulateIndexTemplateRequest.masterNodeTimeout()) ); + parseDeprecatedMasterTimeoutParameter(simulateIndexTemplateRequest, request, deprecationLogger, getName()); if (request.hasContent()) { PutComposableIndexTemplateAction.Request indexTemplateRequest = new PutComposableIndexTemplateAction.Request( "simulating_template" diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestSimulateTemplateAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestSimulateTemplateAction.java index c3f0958f62718..58cc0c1e369d8 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestSimulateTemplateAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestSimulateTemplateAction.java @@ -36,6 +36,7 @@ import org.opensearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.metadata.ComposableIndexTemplate; +import org.opensearch.common.logging.DeprecationLogger; import 
org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; import org.opensearch.rest.action.RestToXContentListener; @@ -47,6 +48,8 @@ import static org.opensearch.rest.RestRequest.Method.POST; public class RestSimulateTemplateAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestSimulateTemplateAction.class); + @Override public List routes() { return Arrays.asList(new Route(POST, "/_index_template/_simulate"), new Route(POST, "/_index_template/_simulate/{name}")); @@ -58,7 +61,7 @@ public String getName() { } @Override - protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { SimulateTemplateAction.Request simulateRequest = new SimulateTemplateAction.Request(); simulateRequest.templateName(request.param("name")); if (request.hasContent()) { @@ -71,7 +74,8 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli simulateRequest.indexTemplateRequest(indexTemplateRequest); } - simulateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", simulateRequest.masterNodeTimeout())); + simulateRequest.masterNodeTimeout(request.paramAsTime("cluster_manager_timeout", simulateRequest.masterNodeTimeout())); + parseDeprecatedMasterTimeoutParameter(simulateRequest, request, deprecationLogger, getName()); return channel -> client.execute(SimulateTemplateAction.INSTANCE, simulateRequest, new RestToXContentListener<>(channel)); } diff --git a/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java b/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java index 878730868a3b0..09c84f325739a 100644 --- a/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java +++ 
b/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java @@ -41,6 +41,17 @@ import org.opensearch.rest.action.admin.indices.RestResizeHandler; import org.opensearch.rest.action.admin.indices.RestRolloverIndexAction; import org.opensearch.rest.action.admin.indices.RestUpdateSettingsAction; +import org.opensearch.rest.action.admin.indices.RestDeleteComponentTemplateAction; +import org.opensearch.rest.action.admin.indices.RestDeleteComposableIndexTemplateAction; +import org.opensearch.rest.action.admin.indices.RestDeleteIndexTemplateAction; +import org.opensearch.rest.action.admin.indices.RestGetComponentTemplateAction; +import org.opensearch.rest.action.admin.indices.RestGetComposableIndexTemplateAction; +import org.opensearch.rest.action.admin.indices.RestGetIndexTemplateAction; +import org.opensearch.rest.action.admin.indices.RestPutComponentTemplateAction; +import org.opensearch.rest.action.admin.indices.RestPutComposableIndexTemplateAction; +import org.opensearch.rest.action.admin.indices.RestPutIndexTemplateAction; +import org.opensearch.rest.action.admin.indices.RestSimulateIndexTemplateAction; +import org.opensearch.rest.action.admin.indices.RestSimulateTemplateAction; import org.opensearch.rest.action.cat.RestAllocationAction; import org.opensearch.rest.action.cat.RestRepositoriesAction; import org.opensearch.rest.action.cat.RestThreadPoolAction; @@ -425,6 +436,87 @@ public void testImportDanglingIndex() { assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); } + public void testDeleteComponentTemplate() { + RestDeleteComponentTemplateAction action = new RestDeleteComponentTemplateAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.prepareRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testDeleteComposableIndexTemplate() { + 
RestDeleteComposableIndexTemplateAction action = new RestDeleteComposableIndexTemplateAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.prepareRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testDeleteIndexTemplate() { + RestDeleteIndexTemplateAction action = new RestDeleteIndexTemplateAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.prepareRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testGetComponentTemplate() { + RestGetComponentTemplateAction action = new RestGetComponentTemplateAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.prepareRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testGetComposableIndexTemplate() { + RestGetComposableIndexTemplateAction action = new RestGetComposableIndexTemplateAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.prepareRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testGetIndexTemplate() { + RestGetIndexTemplateAction action = new RestGetIndexTemplateAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.prepareRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testPutComponentTemplate() { + 
RestPutComponentTemplateAction action = new RestPutComponentTemplateAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.prepareRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testPutComposableIndexTemplate() { + RestPutComposableIndexTemplateAction action = new RestPutComposableIndexTemplateAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.prepareRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testPutIndexTemplate() { + RestPutIndexTemplateAction action = new RestPutIndexTemplateAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.prepareRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testSimulateIndexTemplate() { + FakeRestRequest request = new FakeRestRequest(); + request.params().put("cluster_manager_timeout", randomFrom("1h", "2m")); + request.params().put("master_timeout", "3s"); + request.params().put("name", "test"); + RestSimulateIndexTemplateAction action = new RestSimulateIndexTemplateAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.prepareRequest(request, client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testSimulateTemplate() { + RestSimulateTemplateAction action = new RestSimulateTemplateAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.prepareRequest(getRestRequestWithBothParams(), client)); + 
assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + private MasterNodeRequest getMasterNodeRequest() { return new MasterNodeRequest() { @Override From a89b7e6216354a5c5215918c7513174e9a57aec5 Mon Sep 17 00:00:00 2001 From: Tianli Feng Date: Mon, 11 Apr 2022 20:42:21 -0700 Subject: [PATCH 073/653] Add request parameter 'cluster_manager_timeout' and deprecate 'master_timeout' - in Snapshot APIs (#2680) * Deprecate the request parameter `master_timeout` that used in Snapshot APIs which have got the parameter. * Add alternative new request parameter `cluster_manager_timeout`. * Add unit tests. Signed-off-by: Tianli Feng --- .../test/repository_azure/20_repository.yml | 7 ++ .../api/snapshot.cleanup_repository.json | 10 +- .../rest-api-spec/api/snapshot.clone.json | 10 +- .../rest-api-spec/api/snapshot.create.json | 10 +- .../api/snapshot.create_repository.json | 10 +- .../rest-api-spec/api/snapshot.delete.json | 10 +- .../api/snapshot.delete_repository.json | 10 +- .../rest-api-spec/api/snapshot.get.json | 10 +- .../api/snapshot.get_repository.json | 10 +- .../rest-api-spec/api/snapshot.restore.json | 10 +- .../rest-api-spec/api/snapshot.status.json | 10 +- .../api/snapshot.verify_repository.json | 10 +- .../cluster/RestCleanupRepositoryAction.java | 8 +- .../cluster/RestCloneSnapshotAction.java | 6 +- .../cluster/RestCreateSnapshotAction.java | 6 +- .../cluster/RestDeleteRepositoryAction.java | 8 +- .../cluster/RestDeleteSnapshotAction.java | 6 +- .../cluster/RestGetRepositoriesAction.java | 8 +- .../admin/cluster/RestGetSnapshotsAction.java | 6 +- .../cluster/RestPutRepositoryAction.java | 6 +- .../cluster/RestRestoreSnapshotAction.java | 8 +- .../cluster/RestSnapshotsStatusAction.java | 8 +- .../cluster/RestVerifyRepositoryAction.java | 8 +- .../RenamedTimeoutRequestParameterTests.java | 95 +++++++++++++++++++ 24 files changed, 268 insertions(+), 22 deletions(-) diff --git 
a/plugins/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml b/plugins/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml index beaa95b732d52..04ff4e8c34033 100644 --- a/plugins/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml +++ b/plugins/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml @@ -29,6 +29,9 @@ setup: --- "Snapshot/Restore with repository-azure": + - skip: + features: allowed_warnings + # Get repository - do: snapshot.get_repository: @@ -169,12 +172,16 @@ setup: # Remove the snapshots - do: + allowed_warnings: + - "Parameter [master_timeout] is deprecated and will be removed in 3.0. To support inclusive language, please use [cluster_manager_timeout] instead." snapshot.delete: repository: repository snapshot: snapshot-two master_timeout: 5m - do: + allowed_warnings: + - "Parameter [master_timeout] is deprecated and will be removed in 3.0. To support inclusive language, please use [cluster_manager_timeout] instead." snapshot.delete: repository: repository snapshot: snapshot-one diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.cleanup_repository.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.cleanup_repository.json index 727fe79176797..05eb3309b11e6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.cleanup_repository.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.cleanup_repository.json @@ -24,7 +24,15 @@ "params": { "master_timeout": { "type" : "time", - "description" : "Explicit operation timeout for connection to master node" + "description" : "Explicit operation timeout for connection to master node", + "deprecated":{ + "version":"2.0.0", + "description":"To support inclusive language, use 'cluster_manager_timeout' instead." 
+ } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to cluster-manager node" }, "timeout": { "type" : "time", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.clone.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.clone.json index 18122bc209b0e..c79460fc30a48 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.clone.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.clone.json @@ -32,7 +32,15 @@ "params":{ "master_timeout":{ "type":"time", - "description":"Explicit operation timeout for connection to master node" + "description":"Explicit operation timeout for connection to master node", + "deprecated":{ + "version":"2.0.0", + "description":"To support inclusive language, use 'cluster_manager_timeout' instead." + } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to cluster-manager node" } }, "body":{ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.create.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.create.json index da8cb9916f584..64aaeaef9d897 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.create.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.create.json @@ -29,7 +29,15 @@ "params":{ "master_timeout":{ "type":"time", - "description":"Explicit operation timeout for connection to master node" + "description":"Explicit operation timeout for connection to master node", + "deprecated":{ + "version":"2.0.0", + "description":"To support inclusive language, use 'cluster_manager_timeout' instead." 
+ } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to cluster-manager node" }, "wait_for_completion":{ "type":"boolean", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.create_repository.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.create_repository.json index 431ac3c68c0bd..4965162bcd86c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.create_repository.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.create_repository.json @@ -25,7 +25,15 @@ "params":{ "master_timeout":{ "type":"time", - "description":"Explicit operation timeout for connection to master node" + "description":"Explicit operation timeout for connection to master node", + "deprecated":{ + "version":"2.0.0", + "description":"To support inclusive language, use 'cluster_manager_timeout' instead." + } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to cluster-manager node" }, "timeout":{ "type":"time", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.delete.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.delete.json index 30053cd5b94d3..2e21a08219942 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.delete.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.delete.json @@ -28,7 +28,15 @@ "params":{ "master_timeout":{ "type":"time", - "description":"Explicit operation timeout for connection to master node" + "description":"Explicit operation timeout for connection to master node", + "deprecated":{ + "version":"2.0.0", + "description":"To support inclusive language, use 'cluster_manager_timeout' instead." 
+ } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to cluster-manager node" } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.delete_repository.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.delete_repository.json index b60aeba83a329..3fc22f969784c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.delete_repository.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.delete_repository.json @@ -24,7 +24,15 @@ "params":{ "master_timeout":{ "type":"time", - "description":"Explicit operation timeout for connection to master node" + "description":"Explicit operation timeout for connection to master node", + "deprecated":{ + "version":"2.0.0", + "description":"To support inclusive language, use 'cluster_manager_timeout' instead." + } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to cluster-manager node" }, "timeout":{ "type":"time", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.get.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.get.json index 20006f6f499b6..e084a997a61b1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.get.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.get.json @@ -28,7 +28,15 @@ "params":{ "master_timeout":{ "type":"time", - "description":"Explicit operation timeout for connection to master node" + "description":"Explicit operation timeout for connection to master node", + "deprecated":{ + "version":"2.0.0", + "description":"To support inclusive language, use 'cluster_manager_timeout' instead." 
+ } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to cluster-manager node" }, "ignore_unavailable":{ "type":"boolean", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.get_repository.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.get_repository.json index 8c91caa4fe81f..cf03bab18c03f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.get_repository.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.get_repository.json @@ -30,7 +30,15 @@ "params":{ "master_timeout":{ "type":"time", - "description":"Explicit operation timeout for connection to master node" + "description":"Explicit operation timeout for connection to master node", + "deprecated":{ + "version":"2.0.0", + "description":"To support inclusive language, use 'cluster_manager_timeout' instead." + } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to cluster-manager node" }, "local":{ "type":"boolean", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.restore.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.restore.json index 697ea395dcc2b..87ab8117ec489 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.restore.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.restore.json @@ -28,7 +28,15 @@ "params":{ "master_timeout":{ "type":"time", - "description":"Explicit operation timeout for connection to master node" + "description":"Explicit operation timeout for connection to master node", + "deprecated":{ + "version":"2.0.0", + "description":"To support inclusive language, use 'cluster_manager_timeout' instead." 
+ } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to cluster-manager node" }, "wait_for_completion":{ "type":"boolean", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.status.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.status.json index 70a7ba23ef506..4f22c24fd9a56 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.status.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.status.json @@ -46,7 +46,15 @@ "params":{ "master_timeout":{ "type":"time", - "description":"Explicit operation timeout for connection to master node" + "description":"Explicit operation timeout for connection to master node", + "deprecated":{ + "version":"2.0.0", + "description":"To support inclusive language, use 'cluster_manager_timeout' instead." + } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to cluster-manager node" }, "ignore_unavailable":{ "type":"boolean", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.verify_repository.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.verify_repository.json index de638c19d4a0b..865eb15d11310 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.verify_repository.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.verify_repository.json @@ -24,7 +24,15 @@ "params":{ "master_timeout":{ "type":"time", - "description":"Explicit operation timeout for connection to master node" + "description":"Explicit operation timeout for connection to master node", + "deprecated":{ + "version":"2.0.0", + "description":"To support inclusive language, use 'cluster_manager_timeout' instead." 
+ } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to cluster-manager node" }, "timeout":{ "type":"time", diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestCleanupRepositoryAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestCleanupRepositoryAction.java index 2371581cefccb..c568eae91a528 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestCleanupRepositoryAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestCleanupRepositoryAction.java @@ -34,6 +34,7 @@ import org.opensearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequest; import org.opensearch.client.node.NodeClient; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; import org.opensearch.rest.action.RestToXContentListener; @@ -50,6 +51,8 @@ */ public class RestCleanupRepositoryAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestCleanupRepositoryAction.class); + @Override public List routes() { return singletonList(new Route(POST, "/_snapshot/{repository}/_cleanup")); @@ -64,7 +67,10 @@ public String getName() { public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { CleanupRepositoryRequest cleanupRepositoryRequest = cleanupRepositoryRequest(request.param("repository")); cleanupRepositoryRequest.timeout(request.paramAsTime("timeout", cleanupRepositoryRequest.timeout())); - cleanupRepositoryRequest.masterNodeTimeout(request.paramAsTime("master_timeout", cleanupRepositoryRequest.masterNodeTimeout())); + cleanupRepositoryRequest.masterNodeTimeout( + request.paramAsTime("cluster_manager_timeout", cleanupRepositoryRequest.masterNodeTimeout()) + ); + 
parseDeprecatedMasterTimeoutParameter(cleanupRepositoryRequest, request, deprecationLogger, getName()); return channel -> client.admin().cluster().cleanupRepository(cleanupRepositoryRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestCloneSnapshotAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestCloneSnapshotAction.java index 5c10c96c17227..abf1efa19f79b 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestCloneSnapshotAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestCloneSnapshotAction.java @@ -35,6 +35,7 @@ import org.opensearch.action.admin.cluster.snapshots.clone.CloneSnapshotRequest; import org.opensearch.action.support.IndicesOptions; import org.opensearch.client.node.NodeClient; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.xcontent.support.XContentMapValues; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; @@ -52,6 +53,8 @@ */ public class RestCloneSnapshotAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestCloneSnapshotAction.class); + @Override public List routes() { return Collections.singletonList(new Route(PUT, "/_snapshot/{repository}/{snapshot}/_clone/{target_snapshot}")); @@ -71,7 +74,8 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC request.param("target_snapshot"), XContentMapValues.nodeStringArrayValue(source.getOrDefault("indices", Collections.emptyList())) ); - cloneSnapshotRequest.masterNodeTimeout(request.paramAsTime("master_timeout", cloneSnapshotRequest.masterNodeTimeout())); + cloneSnapshotRequest.masterNodeTimeout(request.paramAsTime("cluster_manager_timeout", cloneSnapshotRequest.masterNodeTimeout())); + parseDeprecatedMasterTimeoutParameter(cloneSnapshotRequest, request, 
deprecationLogger, getName()); cloneSnapshotRequest.indicesOptions(IndicesOptions.fromMap(source, cloneSnapshotRequest.indicesOptions())); return channel -> client.admin().cluster().cloneSnapshot(cloneSnapshotRequest, new RestToXContentListener<>(channel)); } diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestCreateSnapshotAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestCreateSnapshotAction.java index c53f1d0cd5637..b3503f0dfbb56 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestCreateSnapshotAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestCreateSnapshotAction.java @@ -34,6 +34,7 @@ import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest; import org.opensearch.client.node.NodeClient; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; import org.opensearch.rest.action.RestToXContentListener; @@ -52,6 +53,8 @@ */ public class RestCreateSnapshotAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestCreateSnapshotAction.class); + @Override public List routes() { return unmodifiableList( @@ -68,7 +71,8 @@ public String getName() { public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { CreateSnapshotRequest createSnapshotRequest = createSnapshotRequest(request.param("repository"), request.param("snapshot")); request.applyContentParser(p -> createSnapshotRequest.source(p.mapOrdered())); - createSnapshotRequest.masterNodeTimeout(request.paramAsTime("master_timeout", createSnapshotRequest.masterNodeTimeout())); + createSnapshotRequest.masterNodeTimeout(request.paramAsTime("cluster_manager_timeout", createSnapshotRequest.masterNodeTimeout())); + parseDeprecatedMasterTimeoutParameter(createSnapshotRequest, 
request, deprecationLogger, getName()); createSnapshotRequest.waitForCompletion(request.paramAsBoolean("wait_for_completion", false)); return channel -> client.admin().cluster().createSnapshot(createSnapshotRequest, new RestToXContentListener<>(channel)); } diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestDeleteRepositoryAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestDeleteRepositoryAction.java index 4f8771b5db171..5e53595e3a49b 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestDeleteRepositoryAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestDeleteRepositoryAction.java @@ -34,6 +34,7 @@ import org.opensearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; import org.opensearch.client.node.NodeClient; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; import org.opensearch.rest.action.RestToXContentListener; @@ -50,6 +51,8 @@ */ public class RestDeleteRepositoryAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestDeleteRepositoryAction.class); + @Override public List routes() { return singletonList(new Route(DELETE, "/_snapshot/{repository}")); @@ -64,7 +67,10 @@ public String getName() { public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { DeleteRepositoryRequest deleteRepositoryRequest = deleteRepositoryRequest(request.param("repository")); deleteRepositoryRequest.timeout(request.paramAsTime("timeout", deleteRepositoryRequest.timeout())); - deleteRepositoryRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteRepositoryRequest.masterNodeTimeout())); + deleteRepositoryRequest.masterNodeTimeout( + request.paramAsTime("cluster_manager_timeout", deleteRepositoryRequest.masterNodeTimeout()) 
+ ); + parseDeprecatedMasterTimeoutParameter(deleteRepositoryRequest, request, deprecationLogger, getName()); return channel -> client.admin().cluster().deleteRepository(deleteRepositoryRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestDeleteSnapshotAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestDeleteSnapshotAction.java index 57b651215bc4f..891b84f8d0869 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestDeleteSnapshotAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestDeleteSnapshotAction.java @@ -35,6 +35,7 @@ import org.opensearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest; import org.opensearch.client.node.NodeClient; import org.opensearch.common.Strings; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; import org.opensearch.rest.action.RestToXContentListener; @@ -51,6 +52,8 @@ */ public class RestDeleteSnapshotAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestDeleteSnapshotAction.class); + @Override public List routes() { return singletonList(new Route(DELETE, "/_snapshot/{repository}/{snapshot}")); @@ -67,7 +70,8 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC request.param("repository"), Strings.splitStringByCommaToArray(request.param("snapshot")) ); - deleteSnapshotRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteSnapshotRequest.masterNodeTimeout())); + deleteSnapshotRequest.masterNodeTimeout(request.paramAsTime("cluster_manager_timeout", deleteSnapshotRequest.masterNodeTimeout())); + parseDeprecatedMasterTimeoutParameter(deleteSnapshotRequest, request, deprecationLogger, getName()); return channel -> 
client.admin().cluster().deleteSnapshot(deleteSnapshotRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestGetRepositoriesAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestGetRepositoriesAction.java index 2a359fa08eb59..780d9266549ae 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestGetRepositoriesAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestGetRepositoriesAction.java @@ -35,6 +35,7 @@ import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; import org.opensearch.client.node.NodeClient; import org.opensearch.common.Strings; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsFilter; import org.opensearch.rest.BaseRestHandler; @@ -55,6 +56,8 @@ */ public class RestGetRepositoriesAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestGetRepositoriesAction.class); + private final SettingsFilter settingsFilter; public RestGetRepositoriesAction(SettingsFilter settingsFilter) { @@ -75,7 +78,10 @@ public List routes() { public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { final String[] repositories = request.paramAsStringArray("repository", Strings.EMPTY_ARRAY); GetRepositoriesRequest getRepositoriesRequest = getRepositoryRequest(repositories); - getRepositoriesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getRepositoriesRequest.masterNodeTimeout())); + getRepositoriesRequest.masterNodeTimeout( + request.paramAsTime("cluster_manager_timeout", getRepositoriesRequest.masterNodeTimeout()) + ); + parseDeprecatedMasterTimeoutParameter(getRepositoriesRequest, request, deprecationLogger, getName()); 
getRepositoriesRequest.local(request.paramAsBoolean("local", getRepositoriesRequest.local())); settingsFilter.addFilterSettingParams(request); return channel -> client.admin().cluster().getRepositories(getRepositoriesRequest, new RestToXContentListener<>(channel)); diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestGetSnapshotsAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestGetSnapshotsAction.java index 383369ce595c5..189795516adbd 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestGetSnapshotsAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestGetSnapshotsAction.java @@ -35,6 +35,7 @@ import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; import org.opensearch.client.node.NodeClient; import org.opensearch.common.Strings; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; import org.opensearch.rest.action.RestToXContentListener; @@ -51,6 +52,8 @@ */ public class RestGetSnapshotsAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestGetSnapshotsAction.class); + @Override public List routes() { return singletonList(new Route(GET, "/_snapshot/{repository}/{snapshot}")); @@ -69,7 +72,8 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC GetSnapshotsRequest getSnapshotsRequest = getSnapshotsRequest(repository).snapshots(snapshots); getSnapshotsRequest.ignoreUnavailable(request.paramAsBoolean("ignore_unavailable", getSnapshotsRequest.ignoreUnavailable())); getSnapshotsRequest.verbose(request.paramAsBoolean("verbose", getSnapshotsRequest.verbose())); - getSnapshotsRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getSnapshotsRequest.masterNodeTimeout())); + 
getSnapshotsRequest.masterNodeTimeout(request.paramAsTime("cluster_manager_timeout", getSnapshotsRequest.masterNodeTimeout())); + parseDeprecatedMasterTimeoutParameter(getSnapshotsRequest, request, deprecationLogger, getName()); return channel -> client.admin().cluster().getSnapshots(getSnapshotsRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestPutRepositoryAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestPutRepositoryAction.java index afc4a142b689a..a1f34294d630d 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestPutRepositoryAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestPutRepositoryAction.java @@ -34,6 +34,7 @@ import org.opensearch.action.admin.cluster.repositories.put.PutRepositoryRequest; import org.opensearch.client.node.NodeClient; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.xcontent.XContentParser; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; @@ -53,6 +54,8 @@ */ public class RestPutRepositoryAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestPutRepositoryAction.class); + @Override public List routes() { return unmodifiableList(asList(new Route(POST, "/_snapshot/{repository}"), new Route(PUT, "/_snapshot/{repository}"))); @@ -70,7 +73,8 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC putRepositoryRequest.source(parser.mapOrdered()); } putRepositoryRequest.verify(request.paramAsBoolean("verify", true)); - putRepositoryRequest.masterNodeTimeout(request.paramAsTime("master_timeout", putRepositoryRequest.masterNodeTimeout())); + putRepositoryRequest.masterNodeTimeout(request.paramAsTime("cluster_manager_timeout", putRepositoryRequest.masterNodeTimeout())); + 
parseDeprecatedMasterTimeoutParameter(putRepositoryRequest, request, deprecationLogger, getName()); putRepositoryRequest.timeout(request.paramAsTime("timeout", putRepositoryRequest.timeout())); return channel -> client.admin().cluster().putRepository(putRepositoryRequest, new RestToXContentListener<>(channel)); } diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestRestoreSnapshotAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestRestoreSnapshotAction.java index 4f6032d58b633..6c607b48a89fb 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestRestoreSnapshotAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestRestoreSnapshotAction.java @@ -34,6 +34,7 @@ import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest; import org.opensearch.client.node.NodeClient; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; import org.opensearch.rest.action.RestToXContentListener; @@ -50,6 +51,8 @@ */ public class RestRestoreSnapshotAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestRestoreSnapshotAction.class); + @Override public List routes() { return singletonList(new Route(POST, "/_snapshot/{repository}/{snapshot}/_restore")); @@ -63,7 +66,10 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { RestoreSnapshotRequest restoreSnapshotRequest = restoreSnapshotRequest(request.param("repository"), request.param("snapshot")); - restoreSnapshotRequest.masterNodeTimeout(request.paramAsTime("master_timeout", restoreSnapshotRequest.masterNodeTimeout())); + restoreSnapshotRequest.masterNodeTimeout( + request.paramAsTime("cluster_manager_timeout", restoreSnapshotRequest.masterNodeTimeout()) + 
); + parseDeprecatedMasterTimeoutParameter(restoreSnapshotRequest, request, deprecationLogger, getName()); restoreSnapshotRequest.waitForCompletion(request.paramAsBoolean("wait_for_completion", false)); request.applyContentParser(p -> restoreSnapshotRequest.source(p.mapOrdered())); return channel -> client.admin().cluster().restoreSnapshot(restoreSnapshotRequest, new RestToXContentListener<>(channel)); diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestSnapshotsStatusAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestSnapshotsStatusAction.java index a0db762a68c19..469c1eac348d1 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestSnapshotsStatusAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestSnapshotsStatusAction.java @@ -35,6 +35,7 @@ import org.opensearch.action.admin.cluster.snapshots.status.SnapshotsStatusRequest; import org.opensearch.client.node.NodeClient; import org.opensearch.common.Strings; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; import org.opensearch.rest.action.RestToXContentListener; @@ -52,6 +53,8 @@ */ public class RestSnapshotsStatusAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestSnapshotsStatusAction.class); + @Override public List routes() { return unmodifiableList( @@ -78,7 +81,10 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC SnapshotsStatusRequest snapshotsStatusRequest = snapshotsStatusRequest(repository).snapshots(snapshots); snapshotsStatusRequest.ignoreUnavailable(request.paramAsBoolean("ignore_unavailable", snapshotsStatusRequest.ignoreUnavailable())); - snapshotsStatusRequest.masterNodeTimeout(request.paramAsTime("master_timeout", snapshotsStatusRequest.masterNodeTimeout())); + 
snapshotsStatusRequest.masterNodeTimeout( + request.paramAsTime("cluster_manager_timeout", snapshotsStatusRequest.masterNodeTimeout()) + ); + parseDeprecatedMasterTimeoutParameter(snapshotsStatusRequest, request, deprecationLogger, getName()); return channel -> client.admin().cluster().snapshotsStatus(snapshotsStatusRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestVerifyRepositoryAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestVerifyRepositoryAction.java index 735325a69c2ed..4792d0e6db148 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestVerifyRepositoryAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestVerifyRepositoryAction.java @@ -34,6 +34,7 @@ import org.opensearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequest; import org.opensearch.client.node.NodeClient; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; import org.opensearch.rest.action.RestToXContentListener; @@ -47,6 +48,8 @@ public class RestVerifyRepositoryAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestVerifyRepositoryAction.class); + @Override public List routes() { return singletonList(new Route(POST, "/_snapshot/{repository}/_verify")); @@ -60,7 +63,10 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { VerifyRepositoryRequest verifyRepositoryRequest = verifyRepositoryRequest(request.param("repository")); - verifyRepositoryRequest.masterNodeTimeout(request.paramAsTime("master_timeout", verifyRepositoryRequest.masterNodeTimeout())); + verifyRepositoryRequest.masterNodeTimeout( + request.paramAsTime("cluster_manager_timeout", 
verifyRepositoryRequest.masterNodeTimeout()) + ); + parseDeprecatedMasterTimeoutParameter(verifyRepositoryRequest, request, deprecationLogger, getName()); verifyRepositoryRequest.timeout(request.paramAsTime("timeout", verifyRepositoryRequest.timeout())); return channel -> client.admin().cluster().verifyRepository(verifyRepositoryRequest, new RestToXContentListener<>(channel)); } diff --git a/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java b/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java index 09c84f325739a..86529d96573f8 100644 --- a/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java +++ b/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java @@ -52,6 +52,17 @@ import org.opensearch.rest.action.admin.indices.RestPutIndexTemplateAction; import org.opensearch.rest.action.admin.indices.RestSimulateIndexTemplateAction; import org.opensearch.rest.action.admin.indices.RestSimulateTemplateAction; +import org.opensearch.rest.action.admin.cluster.RestCleanupRepositoryAction; +import org.opensearch.rest.action.admin.cluster.RestCloneSnapshotAction; +import org.opensearch.rest.action.admin.cluster.RestCreateSnapshotAction; +import org.opensearch.rest.action.admin.cluster.RestDeleteRepositoryAction; +import org.opensearch.rest.action.admin.cluster.RestDeleteSnapshotAction; +import org.opensearch.rest.action.admin.cluster.RestGetRepositoriesAction; +import org.opensearch.rest.action.admin.cluster.RestGetSnapshotsAction; +import org.opensearch.rest.action.admin.cluster.RestPutRepositoryAction; +import org.opensearch.rest.action.admin.cluster.RestRestoreSnapshotAction; +import org.opensearch.rest.action.admin.cluster.RestSnapshotsStatusAction; +import org.opensearch.rest.action.admin.cluster.RestVerifyRepositoryAction; import org.opensearch.rest.action.cat.RestAllocationAction; import org.opensearch.rest.action.cat.RestRepositoriesAction; import 
org.opensearch.rest.action.cat.RestThreadPoolAction; @@ -517,6 +528,90 @@ public void testSimulateTemplate() { assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); } + public void testCleanupRepository() { + RestCleanupRepositoryAction action = new RestCleanupRepositoryAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.prepareRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testCloneSnapshot() { + RestCloneSnapshotAction action = new RestCloneSnapshotAction(); + Exception e = assertThrows( + OpenSearchParseException.class, + () -> action.prepareRequest(getRestRequestWithBodyWithBothParams(), client) + ); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testCreateSnapshot() { + RestCreateSnapshotAction action = new RestCreateSnapshotAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.prepareRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testDeleteRepository() { + RestDeleteRepositoryAction action = new RestDeleteRepositoryAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.prepareRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testDeleteSnapshot() { + RestDeleteSnapshotAction action = new RestDeleteSnapshotAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.prepareRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), 
containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testGetRepositories() { + final SettingsFilter filter = new SettingsFilter(Collections.singleton("foo.filtered")); + RestGetRepositoriesAction action = new RestGetRepositoriesAction(filter); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.prepareRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testGetSnapshots() { + RestGetSnapshotsAction action = new RestGetSnapshotsAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.prepareRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testPutRepository() { + RestPutRepositoryAction action = new RestPutRepositoryAction(); + Exception e = assertThrows( + OpenSearchParseException.class, + () -> action.prepareRequest(getRestRequestWithBodyWithBothParams(), client) + ); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testRestoreSnapshot() { + RestRestoreSnapshotAction action = new RestRestoreSnapshotAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.prepareRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testSnapshotsStatus() { + RestSnapshotsStatusAction action = new RestSnapshotsStatusAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.prepareRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), 
containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testVerifyRepository() { + RestVerifyRepositoryAction action = new RestVerifyRepositoryAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.prepareRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + private MasterNodeRequest getMasterNodeRequest() { return new MasterNodeRequest() { @Override From 455fad34eea2cb6d606f54a41ef170066b87b04e Mon Sep 17 00:00:00 2001 From: Tianli Feng Date: Tue, 12 Apr 2022 14:58:19 -0700 Subject: [PATCH 074/653] Change deprecation message for API parameter value 'master_node' of parameter 'metric' (#2880) Signed-off-by: Tianli Feng --- .../resources/rest-api-spec/test/cluster.state/20_filtering.yml | 2 +- .../rest/action/admin/cluster/RestClusterRerouteAction.java | 2 +- .../rest/action/admin/cluster/RestClusterStateAction.java | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/20_filtering.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/20_filtering.yml index b2c1e1e561933..b17201a911290 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/20_filtering.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/20_filtering.yml @@ -169,7 +169,7 @@ setup: cluster.state: metric: [ master_node, version ] allowed_warnings: - - 'Parameter [master_timeout] is deprecated and will be removed in 3.0. To support inclusive language, please use [cluster_manager_timeout] instead.' + - 'Assigning [master_node] to parameter [metric] is deprecated and will be removed in 3.0. To support inclusive language, please use [cluster_manager_node] instead.' 
- match: { cluster_uuid: $cluster_uuid } - is_true: master_node diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterRerouteAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterRerouteAction.java index 9c0e09b7629e0..dc5e0ba57e4db 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterRerouteAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterRerouteAction.java @@ -83,7 +83,7 @@ public RestClusterRerouteAction(SettingsFilter settingsFilter) { // It's used to log deprecation when request parameter 'metric' contains 'master_node', or request parameter 'master_timeout' is used. private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestClusterRerouteAction.class); private static final String DEPRECATED_MESSAGE_MASTER_NODE = - "Deprecated value [master_node] used for parameter [metric]. To promote inclusive language, please use [cluster_manager_node] instead. It will be unsupported in a future major version."; + "Assigning [master_node] to parameter [metric] is deprecated and will be removed in 3.0. To support inclusive language, please use [cluster_manager_node] instead."; @Override public List routes() { diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterStateAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterStateAction.java index 7f18a19b5cd54..02b1eaa741c9e 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterStateAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterStateAction.java @@ -76,7 +76,7 @@ public RestClusterStateAction(SettingsFilter settingsFilter) { // It's used to log deprecation when request parameter 'metric' contains 'master_node', or request parameter 'master_timeout' is used. 
private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestClusterStateAction.class); private static final String DEPRECATED_MESSAGE_MASTER_NODE = - "Deprecated value [master_node] used for parameter [metric]. To promote inclusive language, please use [cluster_manager_node] instead. It will be unsupported in a future major version."; + "Assigning [master_node] to parameter [metric] is deprecated and will be removed in 3.0. To support inclusive language, please use [cluster_manager_node] instead."; @Override public String getName() { From 135177e28ed2b65f0b8c5c30bfb0b7857ac241b1 Mon Sep 17 00:00:00 2001 From: Tianli Feng Date: Wed, 13 Apr 2022 11:09:30 -0700 Subject: [PATCH 075/653] Remove unused file x-pack/plugin/core/licenses/commons-logging-1.2.jar.sha1 (#2869) Signed-off-by: Tianli Feng --- x-pack/plugin/core/licenses/commons-logging-1.2.jar.sha1 | 1 - 1 file changed, 1 deletion(-) delete mode 100644 x-pack/plugin/core/licenses/commons-logging-1.2.jar.sha1 diff --git a/x-pack/plugin/core/licenses/commons-logging-1.2.jar.sha1 b/x-pack/plugin/core/licenses/commons-logging-1.2.jar.sha1 deleted file mode 100644 index f40f0242448e8..0000000000000 --- a/x-pack/plugin/core/licenses/commons-logging-1.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4bfc12adfe4842bf07b657f0369c4cb522955686 \ No newline at end of file From 3af4300c3f62b23eb1368cd3354677ed7d8b0737 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Wed, 13 Apr 2022 14:13:18 -0400 Subject: [PATCH 076/653] Decouple IndexSettings from IncludeExclude (#2860) (#2861) This change refactors an earlier change to impose a reg-ex size limit on the include/exclude string. Instead of accepting an IndexSettings instance, the class now accepts a integer limit value. This is necessary because the IncludeExclude class is used outside the core codebase, whose use-cases may be unaware of indices and their settings. 
To ensure that a limit is always imposed, a default limit is defined in the class. (cherry picked from commit ba1966853e728b153e42be59ba449420e79b09ee) Signed-off-by: Kartik Ganesh Co-authored-by: Kartik Ganesh --- .../bucket/terms/IncludeExclude.java | 41 +++++++++++++++---- .../terms/RareTermsAggregatorFactory.java | 5 +-- .../SignificantTermsAggregatorFactory.java | 9 ++-- .../SignificantTextAggregatorFactory.java | 5 +-- .../bucket/terms/TermsAggregatorFactory.java | 9 ++-- .../support/IncludeExcludeTests.java | 25 +++-------- 6 files changed, 50 insertions(+), 44 deletions(-) diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/IncludeExclude.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/IncludeExclude.java index 71320909ca5d2..f32abd1892592 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/IncludeExclude.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/IncludeExclude.java @@ -79,6 +79,14 @@ public class IncludeExclude implements Writeable, ToXContentFragment { // can disagree on which terms hash to the required partition. private static final int HASH_PARTITIONING_SEED = 31; + /** + * The default length limit for a reg-ex string. The value is derived from {@link IndexSettings#MAX_REGEX_LENGTH_SETTING}. 
+ * For context, see: + * https://github.com/opensearch-project/OpenSearch/issues/1992 + * https://github.com/opensearch-project/OpenSearch/issues/2858 + */ + private static final int DEFAULT_MAX_REGEX_LENGTH = 1000; + // for parsing purposes only // TODO: move all aggs to the same package so that this stuff could be pkg-private public static IncludeExclude merge(IncludeExclude include, IncludeExclude exclude) { @@ -576,10 +584,10 @@ public boolean isPartitionBased() { return incNumPartitions > 0; } - private Automaton toAutomaton(IndexSettings indexSettings) { + private Automaton toAutomaton(int maxRegExLength) { Automaton a; if (include != null) { - validateRegExpStringLength(include, indexSettings); + validateRegExpStringLength(include, maxRegExLength); a = new RegExp(include).toAutomaton(); } else if (includeValues != null) { a = Automata.makeStringUnion(includeValues); @@ -587,7 +595,7 @@ private Automaton toAutomaton(IndexSettings indexSettings) { a = Automata.makeAnyString(); } if (exclude != null) { - validateRegExpStringLength(exclude, indexSettings); + validateRegExpStringLength(exclude, maxRegExLength); Automaton excludeAutomaton = new RegExp(exclude).toAutomaton(); a = Operations.minus(a, excludeAutomaton, Operations.DEFAULT_DETERMINIZE_WORK_LIMIT); } else if (excludeValues != null) { @@ -596,8 +604,7 @@ private Automaton toAutomaton(IndexSettings indexSettings) { return a; } - private static void validateRegExpStringLength(String source, IndexSettings indexSettings) { - int maxRegexLength = indexSettings.getMaxRegexLength(); + private static void validateRegExpStringLength(String source, int maxRegexLength) { if (maxRegexLength > 0 && source.length() > maxRegexLength) { throw new IllegalArgumentException( "The length of regex [" @@ -613,9 +620,17 @@ private static void validateRegExpStringLength(String source, IndexSettings inde } } - public StringFilter convertToStringFilter(DocValueFormat format, IndexSettings indexSettings) { + /** + * Wrapper 
method that imposes a default regex limit. + * See https://github.com/opensearch-project/OpenSearch/issues/2858 + */ + public StringFilter convertToStringFilter(DocValueFormat format) { + return convertToStringFilter(format, DEFAULT_MAX_REGEX_LENGTH); + } + + public StringFilter convertToStringFilter(DocValueFormat format, int maxRegexLength) { if (isRegexBased()) { - return new AutomatonBackedStringFilter(toAutomaton(indexSettings)); + return new AutomatonBackedStringFilter(toAutomaton(maxRegexLength)); } if (isPartitionBased()) { return new PartitionedStringFilter(); @@ -636,10 +651,18 @@ private static SortedSet parseForDocValues(SortedSet endUser return result; } - public OrdinalsFilter convertToOrdinalsFilter(DocValueFormat format, IndexSettings indexSettings) { + /** + * Wrapper method that imposes a default regex limit. + * See https://github.com/opensearch-project/OpenSearch/issues/2858 + */ + public OrdinalsFilter convertToOrdinalsFilter(DocValueFormat format) { + return convertToOrdinalsFilter(format, DEFAULT_MAX_REGEX_LENGTH); + } + + public OrdinalsFilter convertToOrdinalsFilter(DocValueFormat format, int maxRegexLength) { if (isRegexBased()) { - return new AutomatonBackedOrdinalsFilter(toAutomaton(indexSettings)); + return new AutomatonBackedOrdinalsFilter(toAutomaton(maxRegexLength)); } if (isPartitionBased()) { return new PartitionedOrdinalsFilter(); diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregatorFactory.java index c0a5c77a98170..ed70a9b310ea1 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregatorFactory.java @@ -34,7 +34,6 @@ import org.opensearch.common.ParseField; import org.opensearch.common.logging.DeprecationLogger; -import 
org.opensearch.index.IndexSettings; import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.Aggregator; @@ -251,10 +250,10 @@ Aggregator create( double precision, CardinalityUpperBound cardinality ) throws IOException { - IndexSettings indexSettings = context.getQueryShardContext().getIndexSettings(); + int maxRegexLength = context.getQueryShardContext().getIndexSettings().getMaxRegexLength(); final IncludeExclude.StringFilter filter = includeExclude == null ? null - : includeExclude.convertToStringFilter(format, indexSettings); + : includeExclude.convertToStringFilter(format, maxRegexLength); return new StringRareTermsAggregator( name, factories, diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTermsAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTermsAggregatorFactory.java index 4b93121ae06ef..8935c9715a1e0 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTermsAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTermsAggregatorFactory.java @@ -34,7 +34,6 @@ import org.opensearch.common.ParseField; import org.opensearch.common.logging.DeprecationLogger; -import org.opensearch.index.IndexSettings; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.DocValueFormat; @@ -326,10 +325,10 @@ Aggregator create( CardinalityUpperBound cardinality, Map metadata ) throws IOException { - IndexSettings indexSettings = aggregationContext.getQueryShardContext().getIndexSettings(); + int maxRegexLength = aggregationContext.getQueryShardContext().getIndexSettings().getMaxRegexLength(); final IncludeExclude.StringFilter filter = includeExclude == null ? 
null - : includeExclude.convertToStringFilter(format, indexSettings); + : includeExclude.convertToStringFilter(format, maxRegexLength); return new MapStringTermsAggregator( name, factories, @@ -367,10 +366,10 @@ Aggregator create( CardinalityUpperBound cardinality, Map metadata ) throws IOException { - IndexSettings indexSettings = aggregationContext.getQueryShardContext().getIndexSettings(); + int maxRegexLength = aggregationContext.getQueryShardContext().getIndexSettings().getMaxRegexLength(); final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? null - : includeExclude.convertToOrdinalsFilter(format, indexSettings); + : includeExclude.convertToOrdinalsFilter(format, maxRegexLength); boolean remapGlobalOrd = true; if (cardinality == CardinalityUpperBound.ONE && factories == AggregatorFactories.EMPTY && includeExclude == null) { /* diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTextAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTextAggregatorFactory.java index 992035f1fbe97..9085df1ccd749 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTextAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTextAggregatorFactory.java @@ -44,7 +44,6 @@ import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.BytesRefHash; import org.opensearch.common.util.ObjectArray; -import org.opensearch.index.IndexSettings; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryShardContext; @@ -138,10 +137,10 @@ protected Aggregator createInternal( // TODO - need to check with mapping that this is indeed a text field.... 
- IndexSettings indexSettings = searchContext.getQueryShardContext().getIndexSettings(); + int maxRegexLength = searchContext.getQueryShardContext().getIndexSettings().getMaxRegexLength(); IncludeExclude.StringFilter incExcFilter = includeExclude == null ? null - : includeExclude.convertToStringFilter(DocValueFormat.RAW, indexSettings); + : includeExclude.convertToStringFilter(DocValueFormat.RAW, maxRegexLength); MapStringTermsAggregator.CollectorSource collectorSource = new SignificantTextCollectorSource( queryShardContext.lookup().source(), diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java index 17b412f87107c..b320126dff24b 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java @@ -34,7 +34,6 @@ import org.apache.lucene.search.IndexSearcher; import org.opensearch.common.ParseField; -import org.opensearch.index.IndexSettings; import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.AggregationExecutionException; @@ -381,10 +380,10 @@ Aggregator create( CardinalityUpperBound cardinality, Map metadata ) throws IOException { - IndexSettings indexSettings = context.getQueryShardContext().getIndexSettings(); + int maxRegexLength = context.getQueryShardContext().getIndexSettings().getMaxRegexLength(); final IncludeExclude.StringFilter filter = includeExclude == null ? 
null - : includeExclude.convertToStringFilter(format, indexSettings); + : includeExclude.convertToStringFilter(format, maxRegexLength); return new MapStringTermsAggregator( name, factories, @@ -462,10 +461,10 @@ Aggregator create( ); } - IndexSettings indexSettings = context.getQueryShardContext().getIndexSettings(); + int maxRegexLength = context.getQueryShardContext().getIndexSettings().getMaxRegexLength(); final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? null - : includeExclude.convertToOrdinalsFilter(format, indexSettings); + : includeExclude.convertToOrdinalsFilter(format, maxRegexLength); boolean remapGlobalOrds; if (cardinality == CardinalityUpperBound.ONE && REMAP_GLOBAL_ORDS != null) { /* diff --git a/server/src/test/java/org/opensearch/search/aggregations/support/IncludeExcludeTests.java b/server/src/test/java/org/opensearch/search/aggregations/support/IncludeExcludeTests.java index d104fc6783dc5..d0995abd07f32 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/support/IncludeExcludeTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/support/IncludeExcludeTests.java @@ -36,16 +36,12 @@ import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LongBitSet; -import org.opensearch.Version; -import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.ParseField; -import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.ToXContent; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentParser; import org.opensearch.common.xcontent.XContentType; -import org.opensearch.index.IndexSettings; import org.opensearch.index.fielddata.AbstractSortedSetDocValues; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.bucket.terms.IncludeExclude; @@ -58,23 +54,14 @@ 
public class IncludeExcludeTests extends OpenSearchTestCase { - private final IndexSettings dummyIndexSettings = new IndexSettings( - IndexMetadata.builder("index") - .settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)) - .numberOfShards(1) - .numberOfReplicas(0) - .build(), - Settings.EMPTY - ); - public void testEmptyTermsWithOrds() throws IOException { IncludeExclude inexcl = new IncludeExclude(new TreeSet<>(Collections.singleton(new BytesRef("foo"))), null); - OrdinalsFilter filter = inexcl.convertToOrdinalsFilter(DocValueFormat.RAW, dummyIndexSettings); + OrdinalsFilter filter = inexcl.convertToOrdinalsFilter(DocValueFormat.RAW); LongBitSet acceptedOrds = filter.acceptedGlobalOrdinals(DocValues.emptySortedSet()); assertEquals(0, acceptedOrds.length()); inexcl = new IncludeExclude(null, new TreeSet<>(Collections.singleton(new BytesRef("foo")))); - filter = inexcl.convertToOrdinalsFilter(DocValueFormat.RAW, dummyIndexSettings); + filter = inexcl.convertToOrdinalsFilter(DocValueFormat.RAW); acceptedOrds = filter.acceptedGlobalOrdinals(DocValues.emptySortedSet()); assertEquals(0, acceptedOrds.length()); } @@ -113,13 +100,13 @@ public long getValueCount() { }; IncludeExclude inexcl = new IncludeExclude(new TreeSet<>(Collections.singleton(new BytesRef("foo"))), null); - OrdinalsFilter filter = inexcl.convertToOrdinalsFilter(DocValueFormat.RAW, dummyIndexSettings); + OrdinalsFilter filter = inexcl.convertToOrdinalsFilter(DocValueFormat.RAW); LongBitSet acceptedOrds = filter.acceptedGlobalOrdinals(ords); assertEquals(1, acceptedOrds.length()); assertTrue(acceptedOrds.get(0)); inexcl = new IncludeExclude(new TreeSet<>(Collections.singleton(new BytesRef("bar"))), null); - filter = inexcl.convertToOrdinalsFilter(DocValueFormat.RAW, dummyIndexSettings); + filter = inexcl.convertToOrdinalsFilter(DocValueFormat.RAW); acceptedOrds = filter.acceptedGlobalOrdinals(ords); assertEquals(1, acceptedOrds.length()); 
assertFalse(acceptedOrds.get(0)); @@ -128,7 +115,7 @@ public long getValueCount() { new TreeSet<>(Collections.singleton(new BytesRef("foo"))), new TreeSet<>(Collections.singleton(new BytesRef("foo"))) ); - filter = inexcl.convertToOrdinalsFilter(DocValueFormat.RAW, dummyIndexSettings); + filter = inexcl.convertToOrdinalsFilter(DocValueFormat.RAW); acceptedOrds = filter.acceptedGlobalOrdinals(ords); assertEquals(1, acceptedOrds.length()); assertFalse(acceptedOrds.get(0)); @@ -137,7 +124,7 @@ public long getValueCount() { null, // means everything included new TreeSet<>(Collections.singleton(new BytesRef("foo"))) ); - filter = inexcl.convertToOrdinalsFilter(DocValueFormat.RAW, dummyIndexSettings); + filter = inexcl.convertToOrdinalsFilter(DocValueFormat.RAW); acceptedOrds = filter.acceptedGlobalOrdinals(ords); assertEquals(1, acceptedOrds.length()); assertFalse(acceptedOrds.get(0)); From 08e4a358399d5d931666fed8471ac00bcdec8a61 Mon Sep 17 00:00:00 2001 From: Tianli Feng Date: Wed, 13 Apr 2022 11:13:37 -0700 Subject: [PATCH 077/653] Add request parameter 'cluster_manager_timeout' and deprecate 'master_timeout' - in Ingest APIs and Script APIs (#2682) - Deprecate the request parameter `master_timeout` that used in Ingest APIs and Script APIs which have got the parameter. - Add alternative new request parameter `cluster_manager_timeout`. - Add unit tests. 
Signed-off-by: Tianli Feng --- .../rest-api-spec/api/delete_script.json | 10 +++- .../rest-api-spec/api/get_script.json | 10 +++- .../api/ingest.delete_pipeline.json | 10 +++- .../api/ingest.get_pipeline.json | 10 +++- .../api/ingest.put_pipeline.json | 10 +++- .../rest-api-spec/api/put_script.json | 10 +++- .../cluster/RestDeleteStoredScriptAction.java | 8 ++- .../cluster/RestGetStoredScriptAction.java | 6 +- .../cluster/RestPutStoredScriptAction.java | 6 +- .../ingest/RestDeletePipelineAction.java | 6 +- .../action/ingest/RestGetPipelineAction.java | 6 +- .../action/ingest/RestPutPipelineAction.java | 6 +- .../RenamedTimeoutRequestParameterTests.java | 59 +++++++++++++++++++ 13 files changed, 145 insertions(+), 12 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_script.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_script.json index b38b97ae57c2e..acaa389738606 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_script.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_script.json @@ -28,7 +28,15 @@ }, "master_timeout":{ "type":"time", - "description":"Specify timeout for connection to master" + "description":"Specify timeout for connection to master", + "deprecated":{ + "version":"2.0.0", + "description":"To support inclusive language, use 'cluster_manager_timeout' instead." 
+ } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Specify timeout for connection to cluster-manager node" } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/get_script.json b/rest-api-spec/src/main/resources/rest-api-spec/api/get_script.json index 14307bea2ef0b..9cdac886b1b27 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/get_script.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/get_script.json @@ -24,7 +24,15 @@ "params":{ "master_timeout":{ "type":"time", - "description":"Specify timeout for connection to master" + "description":"Specify timeout for connection to master", + "deprecated":{ + "version":"2.0.0", + "description":"To support inclusive language, use 'cluster_manager_timeout' instead." + } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Specify timeout for connection to cluster-manager node" } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_pipeline.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_pipeline.json index 29b4219038cd2..3e40136f556fa 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_pipeline.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_pipeline.json @@ -24,7 +24,15 @@ "params":{ "master_timeout":{ "type":"time", - "description":"Explicit operation timeout for connection to master node" + "description":"Explicit operation timeout for connection to master node", + "deprecated":{ + "version":"2.0.0", + "description":"To support inclusive language, use 'cluster_manager_timeout' instead." 
+ } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to cluster-manager node" }, "timeout":{ "type":"time", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.get_pipeline.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.get_pipeline.json index 65fc4f91b2b42..cde980e67c8c9 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.get_pipeline.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.get_pipeline.json @@ -30,7 +30,15 @@ "params":{ "master_timeout":{ "type":"time", - "description":"Explicit operation timeout for connection to master node" + "description":"Explicit operation timeout for connection to master node", + "deprecated":{ + "version":"2.0.0", + "description":"To support inclusive language, use 'cluster_manager_timeout' instead." + } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to cluster-manager node" } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_pipeline.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_pipeline.json index 4d2105866791c..5475905e7b99f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_pipeline.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_pipeline.json @@ -24,7 +24,15 @@ "params":{ "master_timeout":{ "type":"time", - "description":"Explicit operation timeout for connection to master node" + "description":"Explicit operation timeout for connection to master node", + "deprecated":{ + "version":"2.0.0", + "description":"To support inclusive language, use 'cluster_manager_timeout' instead." 
+ } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to cluster-manager node" }, "timeout":{ "type":"time", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/put_script.json b/rest-api-spec/src/main/resources/rest-api-spec/api/put_script.json index 750f7fdf4eb62..c8413d1476402 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/put_script.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/put_script.json @@ -46,7 +46,15 @@ }, "master_timeout":{ "type":"time", - "description":"Specify timeout for connection to master" + "description":"Specify timeout for connection to master", + "deprecated":{ + "version":"2.0.0", + "description":"To support inclusive language, use 'cluster_manager_timeout' instead." + } + }, + "cluster_manager_timeout":{ + "type":"time", + "description":"Specify timeout for connection to cluster-manager node" }, "context":{ "type":"string", diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestDeleteStoredScriptAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestDeleteStoredScriptAction.java index 8703899d5ed14..b303f769d216b 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestDeleteStoredScriptAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestDeleteStoredScriptAction.java @@ -33,6 +33,7 @@ import org.opensearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest; import org.opensearch.client.node.NodeClient; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; import org.opensearch.rest.action.RestToXContentListener; @@ -45,6 +46,8 @@ public class RestDeleteStoredScriptAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestDeleteStoredScriptAction.class); + @Override public 
List routes() { return singletonList(new Route(DELETE, "/_scripts/{id}")); @@ -60,7 +63,10 @@ public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client String id = request.param("id"); DeleteStoredScriptRequest deleteStoredScriptRequest = new DeleteStoredScriptRequest(id); deleteStoredScriptRequest.timeout(request.paramAsTime("timeout", deleteStoredScriptRequest.timeout())); - deleteStoredScriptRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteStoredScriptRequest.masterNodeTimeout())); + deleteStoredScriptRequest.masterNodeTimeout( + request.paramAsTime("cluster_manager_timeout", deleteStoredScriptRequest.masterNodeTimeout()) + ); + parseDeprecatedMasterTimeoutParameter(deleteStoredScriptRequest, request, deprecationLogger, getName()); return channel -> client.admin().cluster().deleteStoredScript(deleteStoredScriptRequest, new RestToXContentListener<>(channel)); } diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestGetStoredScriptAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestGetStoredScriptAction.java index b75fb7693f865..5a904b99be469 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestGetStoredScriptAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestGetStoredScriptAction.java @@ -33,6 +33,7 @@ import org.opensearch.action.admin.cluster.storedscripts.GetStoredScriptRequest; import org.opensearch.client.node.NodeClient; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; import org.opensearch.rest.action.RestStatusToXContentListener; @@ -45,6 +46,8 @@ public class RestGetStoredScriptAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestGetStoredScriptAction.class); + @Override public List routes() { return singletonList(new Route(GET, 
"/_scripts/{id}")); @@ -59,7 +62,8 @@ public String getName() { public RestChannelConsumer prepareRequest(final RestRequest request, NodeClient client) throws IOException { String id = request.param("id"); GetStoredScriptRequest getRequest = new GetStoredScriptRequest(id); - getRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getRequest.masterNodeTimeout())); + getRequest.masterNodeTimeout(request.paramAsTime("cluster_manager_timeout", getRequest.masterNodeTimeout())); + parseDeprecatedMasterTimeoutParameter(getRequest, request, deprecationLogger, getName()); return channel -> client.admin().cluster().getStoredScript(getRequest, new RestStatusToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestPutStoredScriptAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestPutStoredScriptAction.java index f4fe21b8adbe0..1568a80278bb9 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestPutStoredScriptAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestPutStoredScriptAction.java @@ -34,6 +34,7 @@ import org.opensearch.action.admin.cluster.storedscripts.PutStoredScriptRequest; import org.opensearch.client.node.NodeClient; import org.opensearch.common.bytes.BytesReference; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.xcontent.XContentType; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; @@ -50,6 +51,8 @@ public class RestPutStoredScriptAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestPutStoredScriptAction.class); + @Override public List routes() { return unmodifiableList( @@ -76,7 +79,8 @@ public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client StoredScriptSource source = StoredScriptSource.parse(content, xContentType); PutStoredScriptRequest 
putRequest = new PutStoredScriptRequest(id, context, content, request.getXContentType(), source); - putRequest.masterNodeTimeout(request.paramAsTime("master_timeout", putRequest.masterNodeTimeout())); + putRequest.masterNodeTimeout(request.paramAsTime("cluster_manager_timeout", putRequest.masterNodeTimeout())); + parseDeprecatedMasterTimeoutParameter(putRequest, request, deprecationLogger, getName()); putRequest.timeout(request.paramAsTime("timeout", putRequest.timeout())); return channel -> client.admin().cluster().putStoredScript(putRequest, new RestToXContentListener<>(channel)); } diff --git a/server/src/main/java/org/opensearch/rest/action/ingest/RestDeletePipelineAction.java b/server/src/main/java/org/opensearch/rest/action/ingest/RestDeletePipelineAction.java index 179736b4b1816..69f9316bc3d9c 100644 --- a/server/src/main/java/org/opensearch/rest/action/ingest/RestDeletePipelineAction.java +++ b/server/src/main/java/org/opensearch/rest/action/ingest/RestDeletePipelineAction.java @@ -34,6 +34,7 @@ import org.opensearch.action.ingest.DeletePipelineRequest; import org.opensearch.client.node.NodeClient; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; import org.opensearch.rest.action.RestToXContentListener; @@ -45,6 +46,8 @@ import static org.opensearch.rest.RestRequest.Method.DELETE; public class RestDeletePipelineAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestDeletePipelineAction.class); + @Override public List routes() { return singletonList(new Route(DELETE, "/_ingest/pipeline/{id}")); @@ -58,7 +61,8 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { DeletePipelineRequest request = new DeletePipelineRequest(restRequest.param("id")); - 
request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(restRequest.paramAsTime("cluster_manager_timeout", request.masterNodeTimeout())); + parseDeprecatedMasterTimeoutParameter(request, restRequest, deprecationLogger, getName()); request.timeout(restRequest.paramAsTime("timeout", request.timeout())); return channel -> client.admin().cluster().deletePipeline(request, new RestToXContentListener<>(channel)); } diff --git a/server/src/main/java/org/opensearch/rest/action/ingest/RestGetPipelineAction.java b/server/src/main/java/org/opensearch/rest/action/ingest/RestGetPipelineAction.java index cf86541ca8cd9..5555bf53a5ee9 100644 --- a/server/src/main/java/org/opensearch/rest/action/ingest/RestGetPipelineAction.java +++ b/server/src/main/java/org/opensearch/rest/action/ingest/RestGetPipelineAction.java @@ -35,6 +35,7 @@ import org.opensearch.action.ingest.GetPipelineRequest; import org.opensearch.client.node.NodeClient; import org.opensearch.common.Strings; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; import org.opensearch.rest.action.RestStatusToXContentListener; @@ -48,6 +49,8 @@ public class RestGetPipelineAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestGetPipelineAction.class); + @Override public List routes() { return unmodifiableList(asList(new Route(GET, "/_ingest/pipeline"), new Route(GET, "/_ingest/pipeline/{id}"))); @@ -61,7 +64,8 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { GetPipelineRequest request = new GetPipelineRequest(Strings.splitStringByCommaToArray(restRequest.param("id"))); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + 
request.masterNodeTimeout(restRequest.paramAsTime("cluster_manager_timeout", request.masterNodeTimeout())); + parseDeprecatedMasterTimeoutParameter(request, restRequest, deprecationLogger, getName()); return channel -> client.admin().cluster().getPipeline(request, new RestStatusToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/opensearch/rest/action/ingest/RestPutPipelineAction.java b/server/src/main/java/org/opensearch/rest/action/ingest/RestPutPipelineAction.java index 09f40c962dda7..8a9abc860fbc9 100644 --- a/server/src/main/java/org/opensearch/rest/action/ingest/RestPutPipelineAction.java +++ b/server/src/main/java/org/opensearch/rest/action/ingest/RestPutPipelineAction.java @@ -36,6 +36,7 @@ import org.opensearch.client.node.NodeClient; import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.collect.Tuple; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.xcontent.XContentType; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; @@ -49,6 +50,8 @@ public class RestPutPipelineAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestPutPipelineAction.class); + @Override public List routes() { return singletonList(new Route(PUT, "/_ingest/pipeline/{id}")); @@ -63,7 +66,8 @@ public String getName() { public RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { Tuple sourceTuple = restRequest.contentOrSourceParam(); PutPipelineRequest request = new PutPipelineRequest(restRequest.param("id"), sourceTuple.v2(), sourceTuple.v1()); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(restRequest.paramAsTime("cluster_manager_timeout", request.masterNodeTimeout())); + parseDeprecatedMasterTimeoutParameter(request, restRequest, deprecationLogger, getName()); 
request.timeout(restRequest.paramAsTime("timeout", request.timeout())); return channel -> client.admin().cluster().putPipeline(request, new RestToXContentListener<>(channel)); } diff --git a/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java b/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java index 86529d96573f8..648766681a377 100644 --- a/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java +++ b/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java @@ -63,6 +63,9 @@ import org.opensearch.rest.action.admin.cluster.RestRestoreSnapshotAction; import org.opensearch.rest.action.admin.cluster.RestSnapshotsStatusAction; import org.opensearch.rest.action.admin.cluster.RestVerifyRepositoryAction; +import org.opensearch.rest.action.admin.cluster.RestDeleteStoredScriptAction; +import org.opensearch.rest.action.admin.cluster.RestGetStoredScriptAction; +import org.opensearch.rest.action.admin.cluster.RestPutStoredScriptAction; import org.opensearch.rest.action.cat.RestAllocationAction; import org.opensearch.rest.action.cat.RestRepositoriesAction; import org.opensearch.rest.action.cat.RestThreadPoolAction; @@ -76,6 +79,9 @@ import org.opensearch.rest.action.cat.RestPendingClusterTasksAction; import org.opensearch.rest.action.cat.RestSegmentsAction; import org.opensearch.rest.action.cat.RestSnapshotAction; +import org.opensearch.rest.action.ingest.RestDeletePipelineAction; +import org.opensearch.rest.action.ingest.RestGetPipelineAction; +import org.opensearch.rest.action.ingest.RestPutPipelineAction; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.rest.FakeRestRequest; import org.opensearch.threadpool.TestThreadPool; @@ -612,6 +618,59 @@ public void testVerifyRepository() { assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); } + public void testDeletePipeline() { + FakeRestRequest request = new FakeRestRequest(); + 
request.params().put("cluster_manager_timeout", "1h"); + request.params().put("master_timeout", "3s"); + request.params().put("id", "test"); + RestDeletePipelineAction action = new RestDeletePipelineAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.prepareRequest(request, client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testGetPipeline() { + RestGetPipelineAction action = new RestGetPipelineAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.prepareRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testPutPipeline() { + FakeRestRequest request = getFakeRestRequestWithBody(); + request.params().put("cluster_manager_timeout", "2m"); + request.params().put("master_timeout", "3s"); + request.params().put("id", "test"); + RestPutPipelineAction action = new RestPutPipelineAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.prepareRequest(request, client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testDeleteStoredScript() { + RestDeleteStoredScriptAction action = new RestDeleteStoredScriptAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.prepareRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testGetStoredScript() { + RestGetStoredScriptAction action = new RestGetStoredScriptAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.prepareRequest(getRestRequestWithBothParams(), client)); + 
assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + + public void testPutStoredScript() { + RestPutStoredScriptAction action = new RestPutStoredScriptAction(); + Exception e = assertThrows( + OpenSearchParseException.class, + () -> action.prepareRequest(getRestRequestWithBodyWithBothParams(), client) + ); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE, "empty templates should no longer be used"); + } + private MasterNodeRequest getMasterNodeRequest() { return new MasterNodeRequest() { @Override From 3c5d997a765e24ffa32d35219fd5026cfb143a9d Mon Sep 17 00:00:00 2001 From: Owais Kazi Date: Wed, 13 Apr 2022 11:14:18 -0700 Subject: [PATCH 078/653] Added a new line linter (#2875) * Added linter to add new line Signed-off-by: Owais Kazi * Fixed new lines Signed-off-by: Owais Kazi * Ignore empty files Signed-off-by: Owais Kazi * Updated DEVELOPER GUIDE Signed-off-by: Owais Kazi * Renamed workflow file Signed-off-by: Owais Kazi * Fixed failing tests Signed-off-by: Owais Kazi --- .ci/dockerOnLinuxExclusions | 2 +- .github/workflows/code-hygiene.yml | 14 +++++ .github/workflows/delete_backport_branch.yml | 2 +- .github/workflows/links.yml | 2 +- .github/workflows/wrapper.yml | 2 +- .linelint.yml | 49 ++++++++++++++++++ DEVELOPER_GUIDE.md | 16 ++++++ README.md | 2 +- RELEASING.md | 2 +- SECURITY.md | 2 +- .../org.eclipse.core.resources.prefs | 2 +- .../src/main/resources/minimumGradleVersion | 2 +- .../testKit/testingConventions/build.gradle | 3 -- .../testingConventions/settings.gradle | 2 +- .../testKit/thirdPartyAudit/settings.gradle | 2 +- .../prepare_release_update_documentation.py | 1 - dev-tools/signoff-check.sh | 1 - .../resources/rest-api-spec/test/10_info.yml | 1 - .../resources/rest-api-spec/test/11_nodes.yml | 1 - distribution/src/bin/opensearch-env-from-file | 1 - 
distribution/src/bin/opensearch-env.bat | 1 - doc-tools/build.gradle | 1 - .../missingdoclet/MissingDoclet.class | Bin 0 -> 14156 bytes libs/dissect/build.gradle | 1 - .../test/resources/specification/tests.json | 2 +- libs/geo/build.gradle | 1 - libs/grok/src/main/resources/patterns/exim | 1 - libs/grok/src/main/resources/patterns/junos | 1 - .../src/main/resources/patterns/postgresql | 1 - libs/ssl-config/build.gradle | 1 - .../analysis/common/cjk_analysis.json | 2 +- .../analysis/common/pattern_capture.json | 2 +- .../analysis-common/60_analysis_scripting.yml | 1 - .../test/cluster.stats/10_analysis_stats.yml | 1 - .../test/indices.analyze/10_synonyms.yml | 1 - .../test/search.query/60_synonym_graph.yml | 1 - .../test/search.query/70_intervals.yml | 1 - .../test/ingest/220_drop_processor.yml | 1 - modules/ingest-user-agent/build.gradle | 1 - .../src/test/test-regexes.yml | 2 +- modules/lang-expression/build.gradle | 1 - .../src/main/antlr/PainlessLexer.g4 | 2 +- .../test/painless/100_terms_agg.yml | 1 - modules/opensearch-dashboards/build.gradle | 1 - modules/repository-url/build.gradle | 1 - modules/systemd/build.gradle | 1 - .../index/analysis/KeywordTokenizer.rbbi | 2 +- plugins/analysis-kuromoji/build.gradle | 1 - .../test/analysis_phonetic/10_metaphone.yml | 1 - .../analysis_phonetic/20_double_metaphone.yml | 1 - .../analysis_phonetic/30_beider_morse.yml | 1 - .../analysis_phonetic/50_daitch_mokotoff.yml | 1 - plugins/analysis-smartcn/build.gradle | 1 - .../attributes/google-compute-default-zone | 2 +- .../gce/computeMetadata/v1/project/project-id | 2 +- plugins/examples/build.gradle | 1 - plugins/examples/custom-settings/build.gradle | 1 - .../src/main/config/custom.yml | 2 +- plugins/examples/rest-handler/build.gradle | 1 - .../script-expert-scoring/build.gradle | 1 - .../test/sample-files/asciidoc.asciidoc | 1 - .../test/ingest_attachment/10_basic.yml | 1 - .../test/mapper_annotatedtext/10_basic.yml | 1 - .../test/hdfs_repository/30_snapshot.yml | 1 - 
.../secure_hdfs_repository/30_snapshot.yml | 1 - plugins/transport-nio/build.gradle | 1 - qa/die-with-dignity/build.gradle | 1 - .../org/opensearch/common/cli/tool-cmd1.help | 2 +- .../org/opensearch/common/cli/tool.help | 2 +- .../test/multi_cluster/70_skip_shards.yml | 1 - .../test/old_cluster/10_basic.yml | 1 - .../test/old_cluster/20_date_range.yml | 2 - .../test/upgraded_cluster/10_basic.yml | 2 - .../test/upgraded_cluster/20_date_range.yml | 1 - .../ingest_mustache/10_ingest_disabled.yml | 1 - .../50_script_processor_using_painless.yml | 1 - .../60_pipeline_timestamp_date_mapping.yml | 1 - .../test/resources/scripts/master.painless | 2 +- ...SmokeTestPluginsClientYamlTestSuiteIT.java | 1 - .../opensearch.release-notes-1.0.0-rc1.md | 2 - .../opensearch.release-notes-1.1.0.md | 2 - .../opensearch.release-notes-1.2.0.md | 1 - .../opensearch.release-notes-1.2.4.md | 2 - .../opensearch.release-notes-1.3.0.md | 2 - .../rest-api-spec/test/bulk/10_basic.yml | 1 - .../test/bulk/20_list_of_strings.yml | 1 - .../rest-api-spec/test/bulk/30_big_string.yml | 1 - .../rest-api-spec/test/bulk/40_source.yml | 1 - .../rest-api-spec/test/cat.nodes/10_basic.yml | 1 - .../test/cluster.pending_tasks/10_basic.yml | 1 - .../test/cluster.remote_info/10_info.yml | 1 - .../10_basic.yml | 1 - .../rest-api-spec/test/create/40_routing.yml | 1 - .../rest-api-spec/test/delete/30_routing.yml | 1 - .../test/get/15_default_values.yml | 1 - .../test/get/20_stored_fields.yml | 2 - .../rest-api-spec/test/get/40_routing.yml | 1 - .../rest-api-spec/test/get/90_versions.yml | 1 - .../rest-api-spec/test/index/40_routing.yml | 1 - .../indices.delete_alias/all_path_options.yml | 1 - .../test/indices.exists_template/10_basic.yml | 1 - .../test/indices.forcemerge/10_basic.yml | 2 - .../40_missing_index.yml | 2 - .../50_field_wildcards.yml | 1 - .../20_get_missing.yml | 1 - .../indices.get_mapping/30_missing_index.yml | 1 - .../test/indices.get_settings/20_aliases.yml | 1 - 
.../indices.get_template/20_get_missing.yml | 1 - .../test/indices.open/20_multiple_indices.yml | 1 - .../indices.put_alias/all_path_options.yml | 1 - .../test/indices.put_mapping/10_basic.yml | 1 - .../indices.put_mapping/all_path_options.yml | 1 - .../indices.put_settings/all_path_options.yml | 1 - .../test/indices.refresh/10_basic.yml | 1 - .../test/indices.rollover/10_basic.yml | 1 - .../indices.rollover/20_max_doc_condition.yml | 1 - .../indices.update_aliases/20_routing.yml | 1 - .../test/indices.upgrade/10_basic.yml | 1 - .../test/info/20_lucene_version.yml | 3 -- .../test/nodes.stats/11_indices_metrics.yml | 1 - .../rest-api-spec/test/ping/10_ping.yml | 1 - .../search.aggregation/240_max_buckets.yml | 1 - .../test/search.aggregation/250_moving_fn.yml | 1 - .../350_variable_width_histogram.yml | 1 - .../test/search/10_source_filtering.yml | 1 - .../search/140_pre_filter_search_shards.yml | 1 - .../test/search/230_interval_query.yml | 2 - .../test/search/240_date_nanos.yml | 1 - .../test/search/90_search_after.yml | 1 - .../test/update/20_doc_upsert.yml | 2 - .../test/update/22_doc_as_upsert.yml | 2 - .../test/update/35_if_seq_no.yml | 1 - .../rest-api-spec/test/update/40_routing.yml | 1 - .../test/update/85_fields_meta.yml | 2 - .../plugins/concurrent-search/build.gradle | 2 +- .../search/aggregations/bucket/package-info | 1 - .../src/test/resources/config/opensearch.yml | 1 - .../hunspell/en_US_custom/settings.yml | 2 +- .../action/admin/invalid.txt.keystore | 1 - .../fieldstats-index-constraints-request.json | 2 +- .../index/analysis/shingle_analysis.json | 2 +- .../index/analysis/shingle_analysis2.json | 2 +- .../org/opensearch/index/analysis/stop.json | 2 +- .../genericstore/test-data.json | 2 +- .../dynamictemplate/pathmatch/test-data.json | 2 +- .../dynamictemplate/simple/test-data.json | 2 +- .../mapper/multifield/merge/test-data.json | 2 +- .../index/mapper/multifield/test-data.json | 2 +- settings.gradle | 1 - 
.../src/main/resources/provision/addprinc.sh | 2 +- .../resources/provision/krb5.conf.template | 1 - test/fixtures/minio-fixture/build.gradle | 1 - .../org.mockito.plugins.MockMaker | 2 +- .../test/suite1/20_another_test.yml | 1 - 154 files changed, 115 insertions(+), 167 deletions(-) create mode 100644 .github/workflows/code-hygiene.yml create mode 100644 .linelint.yml create mode 100644 doc-tools/missing-doclet/bin/main/org/opensearch/missingdoclet/MissingDoclet.class diff --git a/.ci/dockerOnLinuxExclusions b/.ci/dockerOnLinuxExclusions index 8061248a87df4..dd518c7043e2f 100644 --- a/.ci/dockerOnLinuxExclusions +++ b/.ci/dockerOnLinuxExclusions @@ -12,4 +12,4 @@ ol-7.7 sles-12.3 # older version used in Vagrant image sles-12.5 sles-15.1 -sles-15.2 \ No newline at end of file +sles-15.2 diff --git a/.github/workflows/code-hygiene.yml b/.github/workflows/code-hygiene.yml new file mode 100644 index 0000000000000..1952630e5bdfa --- /dev/null +++ b/.github/workflows/code-hygiene.yml @@ -0,0 +1,14 @@ +name: Code Hygiene + +on: [push, pull_request] + +jobs: + linelint: + runs-on: ubuntu-latest + name: Check if all files end in newline + steps: + - name: Checkout + uses: actions/checkout@v2 + + - name: Linelint + uses: fernandrone/linelint@0.0.4 diff --git a/.github/workflows/delete_backport_branch.yml b/.github/workflows/delete_backport_branch.yml index d654df6b40257..387a124b8cb6a 100644 --- a/.github/workflows/delete_backport_branch.yml +++ b/.github/workflows/delete_backport_branch.yml @@ -12,4 +12,4 @@ jobs: - name: Delete merged branch uses: SvanBoxel/delete-merged-branch@main env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml index 21fb7ab9086ee..ca05aee8be378 100644 --- a/.github/workflows/links.yml +++ b/.github/workflows/links.yml @@ -16,4 +16,4 @@ jobs: args: --accept=200,403,429 --exclude-mail **/*.html **/*.md **/*.txt 
**/*.json --exclude-file .lychee.excludes fail: true env: - GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} \ No newline at end of file + GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} diff --git a/.github/workflows/wrapper.yml b/.github/workflows/wrapper.yml index d577699b66dc0..be5e7afb56ba0 100644 --- a/.github/workflows/wrapper.yml +++ b/.github/workflows/wrapper.yml @@ -7,4 +7,4 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - - uses: gradle/wrapper-validation-action@v1 \ No newline at end of file + - uses: gradle/wrapper-validation-action@v1 diff --git a/.linelint.yml b/.linelint.yml new file mode 100644 index 0000000000000..7b7bc162eef28 --- /dev/null +++ b/.linelint.yml @@ -0,0 +1,49 @@ +# 'true' will fix files +autofix: true + +ignore: + - .git/ + - .gradle/ + - .idea/ + - '*.sha1' + - '*.txt' + - '.github/CODEOWNERS' + - 'buildSrc/src/testKit/opensearch.build/LICENSE' + - 'buildSrc/src/testKit/opensearch.build/NOTICE' + - 'server/licenses/apache-log4j-extras-DEPENDENCIES' + # Empty files + - 'doc-tools/missing-doclet/bin/main/org/opensearch/missingdoclet/MissingDoclet.class' + - 'buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/build.gradle' + - 'buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/distribution/archives/oss-darwin-tar/build.gradle' + - 'buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/distribution/bwc/bugfix/build.gradle' + - 'buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/distribution/bwc/minor/build.gradle' + - 'buildSrc/src/main/resources/buildSrc.marker' + - 'buildSrc/src/testKit/opensearch-build-resources/settings.gradle' + - 'buildSrc/src/testKit/opensearch.build/settings.gradle' + - 'buildSrc/src/testKit/reaper/settings.gradle' + - 'buildSrc/src/testKit/symbolic-link-preserving-tar/settings.gradle' + - 'buildSrc/src/testKit/testingConventions/empty_test_task/.gitignore' + - 
'client/rest-high-level/src/main/resources/META-INF/services/org.opensearch.plugins.spi.NamedXContentProvider' + - 'distribution/bwc/bugfix/build.gradle' + - 'distribution/bwc/maintenance/build.gradle' + - 'distribution/bwc/minor/build.gradle' + - 'distribution/bwc/staged/build.gradle' + - 'libs/ssl-config/src/test/resources/certs/pem-utils/empty.pem' + - 'qa/evil-tests/src/test/resources/org/opensearch/common/logging/does_not_exist/nothing_to_see_here' + - 'qa/os/centos-6/build.gradle' + - 'qa/os/debian-8/build.gradle' + - 'qa/os/oel-6/build.gradle' + - 'qa/os/oel-7/build.gradle' + - 'qa/os/sles-12/build.gradle' + # Test requires no new line for these files + - 'server/src/test/resources/org/opensearch/action/bulk/simple-bulk11.json' + - 'server/src/test/resources/org/opensearch/action/search/simple-msearch5.json' + +rules: + # checks if file ends in a newline character + end-of-file: + # set to true to enable this rule + enable: true + + # if true also checks if file ends in a single newline character + single-new-line: true diff --git a/DEVELOPER_GUIDE.md b/DEVELOPER_GUIDE.md index 9b1bc933eb1e3..11fcb324c8cae 100644 --- a/DEVELOPER_GUIDE.md +++ b/DEVELOPER_GUIDE.md @@ -48,6 +48,7 @@ - [Distributed Framework](#distributed-framework) - [Submitting Changes](#submitting-changes) - [Backports](#backports) + - [LineLint](#linelint) # Developer Guide @@ -472,3 +473,18 @@ See [CONTRIBUTING](CONTRIBUTING.md). ## Backports The Github workflow in [`backport.yml`](.github/workflows/backport.yml) creates backport PRs automatically when the original PR with an appropriate label `backport ` is merged to main with the backport workflow run successfully on the PR. For example, if a PR on main needs to be backported to `1.x` branch, add a label `backport 1.x` to the PR and make sure the backport workflow runs on the PR along with other checks. Once this PR is merged to main, the workflow will create a backport PR to the `1.x` branch. 
+ +## LineLint +A linter in [`code-hygiene.yml`](.github/workflows/code-hygiene.yml) that validates simple newline and whitespace rules in all sorts of files. It can: +- Recursively check a directory tree for files that do not end in a newline +- Automatically fix these files by adding a newline or trimming extra newlines. + +Rules are defined in `.linelint.yml`. + +Executing the binary will automatically search the local directory tree for linting errors. + + linelint . + +Pass a list of files or directories to limit your search. + + linelint README.md LICENSE diff --git a/README.md b/README.md index 6a9a2a69d7367..bb1def63340fe 100644 --- a/README.md +++ b/README.md @@ -45,4 +45,4 @@ Copyright OpenSearch Contributors. See [NOTICE](NOTICE.txt) for details. OpenSearch is a registered trademark of Amazon Web Services. -OpenSearch includes certain Apache-licensed Elasticsearch code from Elasticsearch B.V. and other source code. Elasticsearch B.V. is not the source of that other source code. ELASTICSEARCH is a registered trademark of Elasticsearch B.V. \ No newline at end of file +OpenSearch includes certain Apache-licensed Elasticsearch code from Elasticsearch B.V. and other source code. Elasticsearch B.V. is not the source of that other source code. ELASTICSEARCH is a registered trademark of Elasticsearch B.V. diff --git a/RELEASING.md b/RELEASING.md index 50bb965b8d551..1ef59446f6e31 100644 --- a/RELEASING.md +++ b/RELEASING.md @@ -1,3 +1,3 @@ ## Releasing -This project follows [OpenSearch project branching, labelling, and releasing](https://github.com/opensearch-project/.github/blob/main/RELEASING.md). \ No newline at end of file +This project follows [OpenSearch project branching, labelling, and releasing](https://github.com/opensearch-project/.github/blob/main/RELEASING.md). 
diff --git a/SECURITY.md b/SECURITY.md index 0b85ca04ed260..b86292104335f 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -1,3 +1,3 @@ ## Reporting a Vulnerability -If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/) or directly via email to aws-security@amazon.com. Please do **not** create a public GitHub issue. \ No newline at end of file +If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/) or directly via email to aws-security@amazon.com. Please do **not** create a public GitHub issue. diff --git a/buildSrc/src/main/resources/eclipse.settings/org.eclipse.core.resources.prefs b/buildSrc/src/main/resources/eclipse.settings/org.eclipse.core.resources.prefs index 6fd0a9aab1327..29abf99956411 100644 --- a/buildSrc/src/main/resources/eclipse.settings/org.eclipse.core.resources.prefs +++ b/buildSrc/src/main/resources/eclipse.settings/org.eclipse.core.resources.prefs @@ -3,4 +3,4 @@ encoding//src/main/java=UTF-8 encoding//src/main/resources=UTF-8 encoding//src/test/java=UTF-8 encoding//src/test/resources=UTF-8 -encoding/=UTF-8 \ No newline at end of file +encoding/=UTF-8 diff --git a/buildSrc/src/main/resources/minimumGradleVersion b/buildSrc/src/main/resources/minimumGradleVersion index 6b0e58e78f5ee..815da58b7a9ed 100644 --- a/buildSrc/src/main/resources/minimumGradleVersion +++ b/buildSrc/src/main/resources/minimumGradleVersion @@ -1 +1 @@ -7.4.1 \ No newline at end of file +7.4.1 diff --git a/buildSrc/src/testKit/testingConventions/build.gradle b/buildSrc/src/testKit/testingConventions/build.gradle index 418e833e8cb14..676960bcc8b70 100644 --- a/buildSrc/src/testKit/testingConventions/build.gradle +++ b/buildSrc/src/testKit/testingConventions/build.gradle @@ -88,6 +88,3 @@ 
project(':valid_setup_with_base') { } } } - - - diff --git a/buildSrc/src/testKit/testingConventions/settings.gradle b/buildSrc/src/testKit/testingConventions/settings.gradle index c4206edd63ff7..bb64f39e020c5 100644 --- a/buildSrc/src/testKit/testingConventions/settings.gradle +++ b/buildSrc/src/testKit/testingConventions/settings.gradle @@ -16,4 +16,4 @@ include 'all_classes_in_tasks' include 'not_implementing_base' include 'valid_setup_no_base' include 'valid_setup_with_base' -include 'tests_in_main' \ No newline at end of file +include 'tests_in_main' diff --git a/buildSrc/src/testKit/thirdPartyAudit/settings.gradle b/buildSrc/src/testKit/thirdPartyAudit/settings.gradle index 582faadddaef1..603d8b7da6e5d 100644 --- a/buildSrc/src/testKit/thirdPartyAudit/settings.gradle +++ b/buildSrc/src/testKit/thirdPartyAudit/settings.gradle @@ -9,4 +9,4 @@ * GitHub history for details. */ -include 'sample_jars' \ No newline at end of file +include 'sample_jars' diff --git a/dev-tools/prepare_release_update_documentation.py b/dev-tools/prepare_release_update_documentation.py index c7eae4eeb3245..d4edbb110f278 100644 --- a/dev-tools/prepare_release_update_documentation.py +++ b/dev-tools/prepare_release_update_documentation.py @@ -136,4 +136,3 @@ def callback(line): print('WARNING: no documentation references updates for release %s' % (release_version)) print('*** Done.') - diff --git a/dev-tools/signoff-check.sh b/dev-tools/signoff-check.sh index 56cb49455165e..5fe00c430ca79 100755 --- a/dev-tools/signoff-check.sh +++ b/dev-tools/signoff-check.sh @@ -28,4 +28,3 @@ done # Return non-zero error code if any commits were missing signoff exit $missingSignoff - diff --git a/distribution/docker/src/test/resources/rest-api-spec/test/10_info.yml b/distribution/docker/src/test/resources/rest-api-spec/test/10_info.yml index 2b0f6683a24cf..97b3b7b5d0f4d 100644 --- a/distribution/docker/src/test/resources/rest-api-spec/test/10_info.yml +++ 
b/distribution/docker/src/test/resources/rest-api-spec/test/10_info.yml @@ -7,4 +7,3 @@ - is_true: version - is_true: version.number - match: { version.build_type: "docker" } - diff --git a/distribution/docker/src/test/resources/rest-api-spec/test/11_nodes.yml b/distribution/docker/src/test/resources/rest-api-spec/test/11_nodes.yml index 95ea022696942..a6b78645087f4 100644 --- a/distribution/docker/src/test/resources/rest-api-spec/test/11_nodes.yml +++ b/distribution/docker/src/test/resources/rest-api-spec/test/11_nodes.yml @@ -123,4 +123,3 @@ - match: $body: | /^(\S{5,}\n)+$/ - diff --git a/distribution/src/bin/opensearch-env-from-file b/distribution/src/bin/opensearch-env-from-file index 73cd11123bfff..be5b428c268c8 100644 --- a/distribution/src/bin/opensearch-env-from-file +++ b/distribution/src/bin/opensearch-env-from-file @@ -47,4 +47,3 @@ for VAR_NAME_FILE in OPENSEARCH_PASSWORD_FILE KEYSTORE_PASSWORD_FILE ; do unset "$VAR_NAME_FILE" fi done - diff --git a/distribution/src/bin/opensearch-env.bat b/distribution/src/bin/opensearch-env.bat index bc8a6ce53a5f5..96770f72f35c8 100644 --- a/distribution/src/bin/opensearch-env.bat +++ b/distribution/src/bin/opensearch-env.bat @@ -74,4 +74,3 @@ if defined JAVA_OPTS ( rem check the Java version %JAVA% -cp "%OPENSEARCH_CLASSPATH%" "org.opensearch.tools.java_version_checker.JavaVersionChecker" || exit /b 1 - diff --git a/doc-tools/build.gradle b/doc-tools/build.gradle index 98b2149cb59a9..c47097c3d6035 100644 --- a/doc-tools/build.gradle +++ b/doc-tools/build.gradle @@ -8,4 +8,3 @@ version '1.0.0-SNAPSHOT' repositories { mavenCentral() } - diff --git a/doc-tools/missing-doclet/bin/main/org/opensearch/missingdoclet/MissingDoclet.class b/doc-tools/missing-doclet/bin/main/org/opensearch/missingdoclet/MissingDoclet.class new file mode 100644 index 0000000000000000000000000000000000000000..8729def18e4b3eda7184b1e3e49f1bf055d152db GIT binary patch literal 14156 
zcmbta34B!5)j#KEd6UTlhOlT*aeyetJXqnwXLEAqD9eG+uHWi?weigVi&vE(nYJ4?|<%lZKq+q?oGXiS2dq0V|fYLW!QhZ13X7?aV_*tEX!q827@Wt!3yk0mi8wJ{VOvnnHFkALg_(-mTe{ZHS zRvL5x_&zff!%Vu6Rsr9kaJVrFW-hPH!cGQ}4ON#Iv>NA}{4pz*c&U+^gqGhVYBfVEL#m#T_Ss2VNw~*D5WcDt3e^Aipk71sfR$kQZT%$si2v_ z=o9KP*>0OjGijD=*K1OQu7XHn5l2-xDsH|bF%u{`St@GMY&uDnikTD_g&+dh8taJ; zhOMxx4~6A+n3N#MvnK+59!kZb(`V;MWvkQPP)dUYQw3dKmcg&vYtk;-4VuQRU7+bq zcZF=$HRzo{giOr=-cgBl4p3e|e~n51MgI+Z)6;A9^g;94EHcrw1`(h^y3VBQ>D^%3 z-cT~2L4i8G1a)T@rumvzCtL~|1BSGxuRS#2=)6e*&{|3#pqmW35r%8bDYfNt@ov%w z>1J3jki;<^!EH?KlR4%1&^s$Rdw^OWrjHnO3)8|ez&OaL%bjA{d z7O}#CP(K7}UOuLC7m*>@?RL7uppU^J7&`l|E(CX>=bLv$E<^5NHqWH|YQkfg^BC z+x%8ebUFu}GP1!-pP>f~`m88||AUKVyh#tz=R|)d!jV`AmIkcYYS0(p3ynLyJsuv6 zTF$1W^m%&Nq=UlHC6VNYSPU2p%c&kU>5vGBsRg7d-Y-s&bQ~5R;eB$>5tAOH$8EW9 zf!xQF8My}zT>;g6*%nErK*FQ6!=NYN)o4VfdrENZV3?7e zBgwAau~e^>ioly3>~Gg@*CZ7K?Dz{zCynNg)q{~}*h+Zl+f1jn`orDR0E-;EmaPIEM@%=_ zvmGcJJoFQw#J}50Etb`ODy#i$)FSSFWBA`)L}uP0czemDU(&Czpp{67Mn88Fs%6Zo zP^nDbvIdV$vT)bc?MWP$WqY z{TX7QyMx<=H(xdBuk^QU-h@N#>;mDMggj~v`@2d15WXoCz7bH<0Eyvy&7{|bcf66L zYxlhLhG?lbb99C6Sb6C!z(^;+3lrIClbI12O~>(J+#SJ_XoP9~Byc>&2?)n~QkX3< zP1a%!rCi9x1{ZdMwB^mzeC;CNYw1k(dDd!KTSm z1UEBbCR<~@RszO9ybh9$<&z$sh8__Ysrf?*%kRXZvfOl&%XkKq8TJx+XwBwn$4NDK z7Hr`7kPE`VCw!90Co>`s1My@u)B_~Wu54<`UeIBA8GMS#r;6<;MEKQT!gJYg@aZlk zZDpnXKa=MXEPi2UFbExTH!m=`g3o{+O2ymaS6hjuP!b!R=}2eR#D;M;F7dELTU#Nu zecF8iTKqViqjS01|44mz%L2+XiDgB(3W}ydCKYrQjy;`MlC##ALQQ*4WQk*5nI$ z6^wo;w!1x)>gg5dW8yX8s`F}YHn<7!%I>@BQt@~+S=Ss1A=XdAWK?K#%q_eIpaz27 zEjn*8{?G;wuSEdoh=5;*(*CW3DgUbAPH5Cf*z$+1o+uIGE{o)a8G(?7QX33Ap<&s%4A)(@N8~s|RT~ZNKt5^w zsk^M6!IXsbj?OKEO6Wu!z+FtSw-vTd0N?0Ma>PrA4FCWfu>oEO!_wG6g+8g<>*b62 z5`#A*9W&-Insprb*F>TaIFm2s%it;5IT`4+jFjiB>H=bmzIJ^$NUs{aWg^F}8{w|A z9sGO_nY@*Iuv^^W2#EW2E-mwsYYaNx4(i4qn*|nf10IHcSU#3-ZIt-au)?>A!<$NE zaW9-n7z3BTaTbW+Xy3}oe5y2SX5Wnsw zMG&h!dnR-=vyaTQBG1Zpwr;{0PqKHj(-5wSCmb`_90yS^n23(@9+U#t4-OunAq4KnwKK;{*B_fJn4e|s;#hu>%L zdma65AHadoN!Y=Fnf!kK0FDbY51EGlkptl9;Tpc1O#UDv5I1`4{3+8}*)WcolkFfl zu1*NlbX&N 
zcG2%KgFLm_`ZCJm7I|#@8<*X9-3QltI^ql>}8%S zft(o)2Hy`D^26Hf82N6Tuat-QvnD?vaZ5=k>6!wN@+1fcx><$mVgacI7`@Q@t{*A%Eb}w!8{LVqp z3kgAfnSTo!!;4BH7=dH%s_pDi6&vmD1~31fUorR(V1J>8?NB&Nxr0ppqt3f+v-Dja zGEDqulZxmmi4$Hm`L7ZN6t#4A20O)NKQ12m-_!RH<3ta6{cOP9ZzYqs`a^iRZ7>>b zcWaJt=D`%Q^ZdFbCFcT3{Dz6t3Gxx(>`TWdnZsAMF2t<2Og;Wdp-dgW=R@=5E|wqR3ZTh69srkWvL z#qGi74K^IJOf_4Yih|v1TO=!}PBsav%GVrIouY?#wloDhB|)f8Gu2#a^4U#D5w>>( z11$j!kKa`Dq;q;_OIL71Cm_u}@d8s-NN*`l(cRjVKE;`)S}5&P1#er@M>kXz)0_-z z1>#+UJ-urpRy5oqVX{GUVP2x4{;lCq#aT>?ZCS}HXrn`H_~)f+u{z68bs+e-%4GC3 zQ!P#lk^;;yiGu~&_wP~(HpcI0m54`L)Rhw!DYa_aIn~*Zk*51xcJGfel zw;@q0S7cd4l^RI4!V5}!gM$<6cWQ#n3+_Kr@I zPhBAkhD_C@nlbrGpK3Ij5QP|LaRRJyv zAcfa%$?K0K%_GP?-u##}2Zpm4HA9{Rkq7mXthyKV(yY3SpORIdidlHGf?tx`dR^j1 zU6;7()+O$#b&1y~*k=yD6`g`#yh+K_ee1+xqIEjjPA5Np=b_e#v5fNM>ccd@y7&-P zR3D);4^gdMEPfb$iq69CY@CB=ITg}MoS+HNG-Kc8v;?iDy#_6%WvH<{9>FXdlI41z zyD(3oZ}sDJ&gR3&In-do^G2va(WaWmsc|!^O(V3%uC9}6Te{jV)u2;-2&)umh~{X# ztOZV6fsu7IA2;Yra6P(;j%cW6+dDc?d8nQ`G)CUxKy0^k0$!H)5BRk)QeacAy7mxl zsII9!Oq()Tn5z4Z0vAt!Zcplc{0^n<(+f5>(q~&@%ixx0aLQ+K>aWnbC|A+fvY=e;U<3Qh%M8G@6tixi_W-K*V!fH9PEaKk8iWgx zE3xb#(Ny%|PSk<_I@E--tEwN>puS%L)DH=~w;`5B*YDET2g!-8H z0@(IjAlA33g1(ytT2&gfDh&{B3;@n2=#vg;9^yBt!ocmAlE)1;IN+%wS(WD!e<1@B?bL_=IYyr!p@)EedZfB$m>wCT z2b}UgDTfR1rG2^qpd1mLd{NVIb%usO4q!Y;PwuCM?)TIC=@j?-nf)}){r>6!^5oF` z@gZu^`xM0y)Hw5X096i6e>|2NFJ1!!=U-si+dU2S3K1{z59=7FXqt+Wg zqcFWh1GwfKq+dgFUZ!hsGkGoj32!j|Odq1Z&=BqzAECd2%a7C7fuJAYvhYQ;{|48B zzoQfMI&k|IF7p_Xs-KH9q~v)nDe)z|#aL@Oc5%6EJ(avc;TgsW=qQ`j_Smd;g3far z4BDf)?*zy@YD_7?^#*_Lf!>-6ZomyUwh-aH#bM56z`$=@o~$kV-7x)OADOjff5OLJ znvLGSNUx^E@dH#;_+UYXO|_=usbumrIIBLY|u!1Ngy_t?v+V3Gk7Y$F zsmxl4St6Ik_fiSe+b~yUSfdjXM|>Jh;kh)G7vT66bUL4j$C3-Fo~!VjubP^;me%uP zyoRfzEBP$yZey+&KLeeN{cZgJ37iP?*=mckRN zX_)^>U`W)^R5GUe-lRDO7ka!)rywJX+g1mQuVO7P#vH8~VFVB(43G0M?1wK{lM7DW zOS5pi4F|aqhrMWoTfsjM);z_*y>wH}F%AZ*7d^(X0riFDg(KX}R9`$q+slhxHCta) zUZkb>2yfB?T<;m83(7rqKL_iLMI(GUG%{aNUxJ*Kx{o^Oo{f>)cstO&0vFo7909r;aj$t5 
z_W`M!>0*vTMvG`SE#`jo8ko0$V;qOY2gbCWx5p1)yb!8~ir)m7C&*8REt70;&;U%1 z@SaX3Iqi;E>)!KDVBP%FN~pk@hE0^@_W-nZC|H~;!eI-r1<}nQ=@sn|0oX`~6|NRm z*rmV}?~<=;6;$yJBYY#IgN_D3JTQz9-~Ot8j6Wp9J|e?zt1rY-cgj+CK}++!2dLI= zIL1RG{25`E^1|}sBmAHqTwYjRUOdbXITi6uY^v6Sg^5E{#9Kkx9`bP*RJEvrw?Ut5 zrL(vfRE>FFeLaNBBv>;+JGF9E1AeMZ*2! z-^mm$1<#5!j__Bq)@wTs!rqN%g*VWh{2n@l`jh;DEO|PXrp7Tx zo`54qeNBT`@GbaV1yn~1`P=**?A%Tz{9UNDVi@ zQSk{{K*cumL;V?e$ft#6sR)Z>2%UD=;Ie!}-=Ibr>_`v>V__xlnH&P&20*0kxHt@l z`3Hd<-{}GL5cT7~82_;G)Zlzygi;to%2KB^ol<N9>CkO2k9mL9BwndkY(bIr+MUfnn#ZFKeT7y zp;!2y{4a;ni0a;?4uhUAJ;VD3b@_ZA1Zel!6jIk<5HQc@9^}_HKTCxV;mrSj;)~w( z)d%@MaBz*_5EX8!KB&ao_04i?1*jD}6?$5_Ot*F%wIa9jRq1jEsv@a*-M(j}mR{c@ zwW&^@Dn-rXR^WZy?<;YqJt?(Pw{N=C%G?@UlKXu=r^fKX?k{w^PLi(llI2pH;r5*> zwV7`1bg4~uYxAWx#jTwowP|*3^CSG)L-25Ykj7J0C6Kz1s!`S;Dm@qF3Pe6FDA!Ps zI#G7f<#Z*=5JH3)$~dgwyHH+3@1q-0-bA<29VqXlPtksq2k0Ss1m&ZM0G~qnG<}1f zNBIK%h+ahbQ+UyTK=}&1<=0TY4)?8CRl+Pz=b0#H;my^2lnbCa&O*P}Xoz zWYGn?sAjF9s!vFmhEjeisZD;6;2QI#J2F3+A(^&Ob4PV#sy?q|ifmSdC9yLEv#Lkb zB4}d;>p4T6J*>`wFP(9k^#S0*ECJwlI$)NnSE4ywMMvbc_>?4_oaO7Wyd2nv_*c9t z7}F88l7VD5@nEmKK5J#4Eo1-_i7IEE%?_Al)fv-m6hca}TrGR0aMns?s!^@ZfzfRd z0u+(*tzvA0l$Nv28kC|Ez16NmkjY2b%UfB?d&eyAwiK!stfJH!CCc<{DpCO|P(jtH zy7X7K+N3VlUz^os>T>Qnvj09LD*8dz1X ecBrHpEG~hEXaB8p%67X{J5hSTe~rkUwEr(LO30D` literal 0 HcmV?d00001 diff --git a/libs/dissect/build.gradle b/libs/dissect/build.gradle index 47f7970ea5ac0..dc98d2820ef52 100644 --- a/libs/dissect/build.gradle +++ b/libs/dissect/build.gradle @@ -40,4 +40,3 @@ dependencies { tasks.named('forbiddenApisMain').configure { replaceSignatureFiles 'jdk-signatures' } - diff --git a/libs/dissect/src/test/resources/specification/tests.json b/libs/dissect/src/test/resources/specification/tests.json index 1cb85ce651940..490383ba3fedb 100644 --- a/libs/dissect/src/test/resources/specification/tests.json +++ b/libs/dissect/src/test/resources/specification/tests.json @@ -360,4 +360,4 @@ "append": "" } -] \ No newline at end of file +] diff --git a/libs/geo/build.gradle 
b/libs/geo/build.gradle index fac5c1b84d2b0..8b81129f43b67 100644 --- a/libs/geo/build.gradle +++ b/libs/geo/build.gradle @@ -42,4 +42,3 @@ tasks.named('forbiddenApisMain').configure { // TODO: Need to decide how we want to handle for forbidden signatures with the changes to core replaceSignatureFiles 'jdk-signatures' } - diff --git a/libs/grok/src/main/resources/patterns/exim b/libs/grok/src/main/resources/patterns/exim index 68c4e5cd7d0d7..e81eace04d32d 100644 --- a/libs/grok/src/main/resources/patterns/exim +++ b/libs/grok/src/main/resources/patterns/exim @@ -10,4 +10,3 @@ EXIM_PROTOCOL (P=%{NOTSPACE:protocol}) EXIM_MSG_SIZE (S=%{NUMBER:exim_msg_size}) EXIM_HEADER_ID (id=%{NOTSPACE:exim_header_id}) EXIM_SUBJECT (T=%{QS:exim_subject}) - diff --git a/libs/grok/src/main/resources/patterns/junos b/libs/grok/src/main/resources/patterns/junos index 4eea59d08ccf9..2da91cc6ce3df 100644 --- a/libs/grok/src/main/resources/patterns/junos +++ b/libs/grok/src/main/resources/patterns/junos @@ -6,4 +6,3 @@ RT_FLOW1 %{RT_FLOW_EVENT:event}: %{GREEDYDATA:close-reason}: %{IP:src-ip}/%{INT: RT_FLOW2 %{RT_FLOW_EVENT:event}: session created %{IP:src-ip}/%{INT:src-port}->%{IP:dst-ip}/%{INT:dst-port} %{DATA:service} %{IP:nat-src-ip}/%{INT:nat-src-port}->%{IP:nat-dst-ip}/%{INT:nat-dst-port} %{DATA:src-nat-rule-name} %{DATA:dst-nat-rule-name} %{INT:protocol-id} %{DATA:policy-name} %{DATA:from-zone} %{DATA:to-zone} %{INT:session-id} .* RT_FLOW3 %{RT_FLOW_EVENT:event}: session denied %{IP:src-ip}/%{INT:src-port}->%{IP:dst-ip}/%{INT:dst-port} %{DATA:service} %{INT:protocol-id}\(\d\) %{DATA:policy-name} %{DATA:from-zone} %{DATA:to-zone} .* - diff --git a/libs/grok/src/main/resources/patterns/postgresql b/libs/grok/src/main/resources/patterns/postgresql index c5b3e90b7250f..6901c6253e926 100644 --- a/libs/grok/src/main/resources/patterns/postgresql +++ b/libs/grok/src/main/resources/patterns/postgresql @@ -1,3 +1,2 @@ # Default postgresql pg_log format pattern POSTGRESQL 
%{DATESTAMP:timestamp} %{TZ} %{DATA:user_id} %{GREEDYDATA:connection_id} %{POSINT:pid} - diff --git a/libs/ssl-config/build.gradle b/libs/ssl-config/build.gradle index 740d5e309350c..456641f0d7645 100644 --- a/libs/ssl-config/build.gradle +++ b/libs/ssl-config/build.gradle @@ -61,4 +61,3 @@ tasks.test { jvmArgs += ["--add-opens", "java.base/java.security.cert=ALL-UNNAMED"] } } - diff --git a/modules/analysis-common/src/test/resources/org/opensearch/analysis/common/cjk_analysis.json b/modules/analysis-common/src/test/resources/org/opensearch/analysis/common/cjk_analysis.json index 89a1281473cd7..c69b889c914a4 100644 --- a/modules/analysis-common/src/test/resources/org/opensearch/analysis/common/cjk_analysis.json +++ b/modules/analysis-common/src/test/resources/org/opensearch/analysis/common/cjk_analysis.json @@ -34,4 +34,4 @@ } } } -} \ No newline at end of file +} diff --git a/modules/analysis-common/src/test/resources/org/opensearch/analysis/common/pattern_capture.json b/modules/analysis-common/src/test/resources/org/opensearch/analysis/common/pattern_capture.json index d82fb987e6ed2..5057a1e6d7f9e 100644 --- a/modules/analysis-common/src/test/resources/org/opensearch/analysis/common/pattern_capture.json +++ b/modules/analysis-common/src/test/resources/org/opensearch/analysis/common/pattern_capture.json @@ -43,4 +43,4 @@ } } } -} \ No newline at end of file +} diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/60_analysis_scripting.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/60_analysis_scripting.yml index 2015fe31fccb5..1637c8736134f 100644 --- a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/60_analysis_scripting.yml +++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/60_analysis_scripting.yml @@ -68,4 +68,3 @@ - match: { tokens.1.token: "f" } - match: { tokens.2.token: "g" } - match: { 
tokens.3.token: "h" } - diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/cluster.stats/10_analysis_stats.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/cluster.stats/10_analysis_stats.yml index a19a1f2721910..5468da5216bb4 100644 --- a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/cluster.stats/10_analysis_stats.yml +++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/cluster.stats/10_analysis_stats.yml @@ -119,4 +119,3 @@ - match: { indices.analysis.built_in_analyzers.2.name: spanish } - match: { indices.analysis.built_in_analyzers.2.count: 2 } - match: { indices.analysis.built_in_analyzers.2.index_count: 2 } - diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/indices.analyze/10_synonyms.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/indices.analyze/10_synonyms.yml index f0f8765ab5130..42d1c23001300 100644 --- a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/indices.analyze/10_synonyms.yml +++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/indices.analyze/10_synonyms.yml @@ -76,4 +76,3 @@ - match: { tokens.5.token: dude } - match: { tokens.5.position: 4 } - match: { tokens.5.positionLength: null } - diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/60_synonym_graph.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/60_synonym_graph.yml index ae039e453be6c..4388de3eef30a 100644 --- a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/60_synonym_graph.yml +++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/60_synonym_graph.yml @@ -229,4 +229,3 @@ setup: query: bar baz analyzer: lower_graph_syns - match: { hits.total: 1 } - diff --git 
a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/70_intervals.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/70_intervals.yml index 35a611d13f359..9ad68e960421c 100644 --- a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/70_intervals.yml +++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/70_intervals.yml @@ -56,4 +56,3 @@ setup: use_field: text_en max_gaps: 1 - match: { hits.total.value: 1 } - diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/220_drop_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/220_drop_processor.yml index 77a1df81a296a..ef8332c2670d0 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/220_drop_processor.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/220_drop_processor.yml @@ -91,4 +91,3 @@ teardown: get: index: test id: 3 - diff --git a/modules/ingest-user-agent/build.gradle b/modules/ingest-user-agent/build.gradle index cd04925287b8f..a3752ad1c7f7e 100644 --- a/modules/ingest-user-agent/build.gradle +++ b/modules/ingest-user-agent/build.gradle @@ -43,4 +43,3 @@ restResources { testClusters.all { extraConfigFile 'ingest-user-agent/test-regexes.yml', file('src/test/test-regexes.yml') } - diff --git a/modules/ingest-user-agent/src/test/test-regexes.yml b/modules/ingest-user-agent/src/test/test-regexes.yml index e41dec700c047..8815c85c7c6e9 100644 --- a/modules/ingest-user-agent/src/test/test-regexes.yml +++ b/modules/ingest-user-agent/src/test/test-regexes.yml @@ -1,3 +1,3 @@ user_agent_parsers: - regex: '.*' - family_replacement: 'Test' \ No newline at end of file + family_replacement: 'Test' diff --git a/modules/lang-expression/build.gradle b/modules/lang-expression/build.gradle index e3feacd71f060..9d7b0e2f0979c 100644 --- 
a/modules/lang-expression/build.gradle +++ b/modules/lang-expression/build.gradle @@ -52,4 +52,3 @@ tasks.named("dependencyLicenses").configure { mapping from: /lucene-.*/, to: 'lucene' mapping from: /asm-.*/, to: 'asm' } - diff --git a/modules/lang-painless/src/main/antlr/PainlessLexer.g4 b/modules/lang-painless/src/main/antlr/PainlessLexer.g4 index 7dd168833e347..21b03b85d8edd 100644 --- a/modules/lang-painless/src/main/antlr/PainlessLexer.g4 +++ b/modules/lang-painless/src/main/antlr/PainlessLexer.g4 @@ -124,4 +124,4 @@ ID: [_a-zA-Z] [_a-zA-Z0-9]*; mode AFTER_DOT; DOTINTEGER: ( '0' | [1-9] [0-9]* ) -> mode(DEFAULT_MODE); -DOTID: [_a-zA-Z] [_a-zA-Z0-9]* -> mode(DEFAULT_MODE); \ No newline at end of file +DOTID: [_a-zA-Z] [_a-zA-Z0-9]* -> mode(DEFAULT_MODE); diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/100_terms_agg.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/100_terms_agg.yml index 000e1af694d7d..aa01647811c83 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/100_terms_agg.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/100_terms_agg.yml @@ -139,4 +139,3 @@ setup: - is_false: aggregations.placeholder.buckets.0.str_terms.buckets.1.key_as_string - match: { aggregations.placeholder.buckets.0.str_terms.buckets.1.doc_count: 1 } - match: { aggregations.placeholder.buckets.0.the_bucket_script.value: 2.0 } - diff --git a/modules/opensearch-dashboards/build.gradle b/modules/opensearch-dashboards/build.gradle index 9bda17243bdb4..f76ca739faf81 100644 --- a/modules/opensearch-dashboards/build.gradle +++ b/modules/opensearch-dashboards/build.gradle @@ -41,4 +41,3 @@ dependencies { testClusters.all { module ':modules:reindex' } - diff --git a/modules/repository-url/build.gradle b/modules/repository-url/build.gradle index 24742416de6f2..702f0e9bb0f8b 100644 --- a/modules/repository-url/build.gradle +++ 
b/modules/repository-url/build.gradle @@ -75,4 +75,3 @@ testClusters.all { "http://snapshot.test*,http://${urlFixture.addressAndPort}" }, PropertyNormalization.IGNORE_VALUE } - diff --git a/modules/systemd/build.gradle b/modules/systemd/build.gradle index b157fd9321fc9..26e094a9eeae1 100644 --- a/modules/systemd/build.gradle +++ b/modules/systemd/build.gradle @@ -32,4 +32,3 @@ opensearchplugin { description 'Integrates OpenSearch with systemd' classname 'org.opensearch.systemd.SystemdPlugin' } - diff --git a/plugins/analysis-icu/src/test/resources/org/opensearch/index/analysis/KeywordTokenizer.rbbi b/plugins/analysis-icu/src/test/resources/org/opensearch/index/analysis/KeywordTokenizer.rbbi index 8e6de8aa94abb..86eb398ee9157 100644 --- a/plugins/analysis-icu/src/test/resources/org/opensearch/index/analysis/KeywordTokenizer.rbbi +++ b/plugins/analysis-icu/src/test/resources/org/opensearch/index/analysis/KeywordTokenizer.rbbi @@ -18,4 +18,4 @@ # Apply rule status {200}=RBBI.WORD_LETTER, which is mapped # to token type by DefaultICUTokenizerConfig. 
-.+ {200}; \ No newline at end of file +.+ {200}; diff --git a/plugins/analysis-kuromoji/build.gradle b/plugins/analysis-kuromoji/build.gradle index 60738fb28b6d5..426b85f44bf55 100644 --- a/plugins/analysis-kuromoji/build.gradle +++ b/plugins/analysis-kuromoji/build.gradle @@ -46,4 +46,3 @@ restResources { tasks.named("dependencyLicenses").configure { mapping from: /lucene-.*/, to: 'lucene' } - diff --git a/plugins/analysis-phonetic/src/yamlRestTest/resources/rest-api-spec/test/analysis_phonetic/10_metaphone.yml b/plugins/analysis-phonetic/src/yamlRestTest/resources/rest-api-spec/test/analysis_phonetic/10_metaphone.yml index 1be0d8525a1c6..2268d30c986df 100644 --- a/plugins/analysis-phonetic/src/yamlRestTest/resources/rest-api-spec/test/analysis_phonetic/10_metaphone.yml +++ b/plugins/analysis-phonetic/src/yamlRestTest/resources/rest-api-spec/test/analysis_phonetic/10_metaphone.yml @@ -31,4 +31,3 @@ - match: { tokens.1.token: joe } - match: { tokens.2.token: BLKS } - match: { tokens.3.token: bloggs } - diff --git a/plugins/analysis-phonetic/src/yamlRestTest/resources/rest-api-spec/test/analysis_phonetic/20_double_metaphone.yml b/plugins/analysis-phonetic/src/yamlRestTest/resources/rest-api-spec/test/analysis_phonetic/20_double_metaphone.yml index 84b0129414c8e..40215cc469fc9 100644 --- a/plugins/analysis-phonetic/src/yamlRestTest/resources/rest-api-spec/test/analysis_phonetic/20_double_metaphone.yml +++ b/plugins/analysis-phonetic/src/yamlRestTest/resources/rest-api-spec/test/analysis_phonetic/20_double_metaphone.yml @@ -28,4 +28,3 @@ - length: { tokens: 1 } - match: { tokens.0.token: SPRKLF } - diff --git a/plugins/analysis-phonetic/src/yamlRestTest/resources/rest-api-spec/test/analysis_phonetic/30_beider_morse.yml b/plugins/analysis-phonetic/src/yamlRestTest/resources/rest-api-spec/test/analysis_phonetic/30_beider_morse.yml index bdd1ddef388df..dcc46484780dc 100644 --- 
a/plugins/analysis-phonetic/src/yamlRestTest/resources/rest-api-spec/test/analysis_phonetic/30_beider_morse.yml +++ b/plugins/analysis-phonetic/src/yamlRestTest/resources/rest-api-spec/test/analysis_phonetic/30_beider_morse.yml @@ -30,4 +30,3 @@ - length: { tokens: 1 } - match: { tokens.0.token: Svarts } - diff --git a/plugins/analysis-phonetic/src/yamlRestTest/resources/rest-api-spec/test/analysis_phonetic/50_daitch_mokotoff.yml b/plugins/analysis-phonetic/src/yamlRestTest/resources/rest-api-spec/test/analysis_phonetic/50_daitch_mokotoff.yml index bee4c8bf5f432..9b173a710ea43 100644 --- a/plugins/analysis-phonetic/src/yamlRestTest/resources/rest-api-spec/test/analysis_phonetic/50_daitch_mokotoff.yml +++ b/plugins/analysis-phonetic/src/yamlRestTest/resources/rest-api-spec/test/analysis_phonetic/50_daitch_mokotoff.yml @@ -27,4 +27,3 @@ - length: { tokens: 1 } - match: { tokens.0.token: "645740" } - diff --git a/plugins/analysis-smartcn/build.gradle b/plugins/analysis-smartcn/build.gradle index 92f2774854715..d74d314ab0673 100644 --- a/plugins/analysis-smartcn/build.gradle +++ b/plugins/analysis-smartcn/build.gradle @@ -47,4 +47,3 @@ restResources { tasks.named("dependencyLicenses").configure { mapping from: /lucene-.*/, to: 'lucene' } - diff --git a/plugins/discovery-gce/src/test/resources/org/opensearch/discovery/gce/computeMetadata/v1/project/attributes/google-compute-default-zone b/plugins/discovery-gce/src/test/resources/org/opensearch/discovery/gce/computeMetadata/v1/project/attributes/google-compute-default-zone index 6cf886270bef1..218127ccfb695 100644 --- a/plugins/discovery-gce/src/test/resources/org/opensearch/discovery/gce/computeMetadata/v1/project/attributes/google-compute-default-zone +++ b/plugins/discovery-gce/src/test/resources/org/opensearch/discovery/gce/computeMetadata/v1/project/attributes/google-compute-default-zone @@ -1 +1 @@ -europe-west1-b \ No newline at end of file +europe-west1-b diff --git 
a/plugins/discovery-gce/src/test/resources/org/opensearch/discovery/gce/computeMetadata/v1/project/project-id b/plugins/discovery-gce/src/test/resources/org/opensearch/discovery/gce/computeMetadata/v1/project/project-id index 25b8069381897..44be476f3ae83 100644 --- a/plugins/discovery-gce/src/test/resources/org/opensearch/discovery/gce/computeMetadata/v1/project/project-id +++ b/plugins/discovery-gce/src/test/resources/org/opensearch/discovery/gce/computeMetadata/v1/project/project-id @@ -1 +1 @@ -metadataserver \ No newline at end of file +metadataserver diff --git a/plugins/examples/build.gradle b/plugins/examples/build.gradle index e4e0ca6f7be99..460c6e81eac5c 100644 --- a/plugins/examples/build.gradle +++ b/plugins/examples/build.gradle @@ -36,4 +36,3 @@ configure(project('painless-whitelist')) { } } } - diff --git a/plugins/examples/custom-settings/build.gradle b/plugins/examples/custom-settings/build.gradle index 104660c458991..5b35d887b3db1 100644 --- a/plugins/examples/custom-settings/build.gradle +++ b/plugins/examples/custom-settings/build.gradle @@ -42,4 +42,3 @@ testClusters.all { // Adds a setting in the OpenSearch keystore before running the integration tests keystore 'custom.secured', 'password' } - diff --git a/plugins/examples/custom-settings/src/main/config/custom.yml b/plugins/examples/custom-settings/src/main/config/custom.yml index 1759e0ff96d40..258e050a0664b 100644 --- a/plugins/examples/custom-settings/src/main/config/custom.yml +++ b/plugins/examples/custom-settings/src/main/config/custom.yml @@ -2,4 +2,4 @@ custom: simple: foo list: [0, 1, 1, 2, 3, 5, 8, 13, 21] - filtered: secret \ No newline at end of file + filtered: secret diff --git a/plugins/examples/rest-handler/build.gradle b/plugins/examples/rest-handler/build.gradle index cc939b15854d5..b97d091af9d08 100644 --- a/plugins/examples/rest-handler/build.gradle +++ b/plugins/examples/rest-handler/build.gradle @@ -56,4 +56,3 @@ javaRestTest { dependsOn exampleFixture 
nonInputProperties.systemProperty 'external.address', "${-> exampleFixture.addressAndPort}" } - diff --git a/plugins/examples/script-expert-scoring/build.gradle b/plugins/examples/script-expert-scoring/build.gradle index 9f2bab20a7db0..e4ddd97abbe4c 100644 --- a/plugins/examples/script-expert-scoring/build.gradle +++ b/plugins/examples/script-expert-scoring/build.gradle @@ -39,4 +39,3 @@ opensearchplugin { } test.enabled = false - diff --git a/plugins/ingest-attachment/src/test/resources/org/opensearch/ingest/attachment/test/sample-files/asciidoc.asciidoc b/plugins/ingest-attachment/src/test/resources/org/opensearch/ingest/attachment/test/sample-files/asciidoc.asciidoc index dc06d4e83dd30..4a2b2c388cfc1 100644 --- a/plugins/ingest-attachment/src/test/resources/org/opensearch/ingest/attachment/test/sample-files/asciidoc.asciidoc +++ b/plugins/ingest-attachment/src/test/resources/org/opensearch/ingest/attachment/test/sample-files/asciidoc.asciidoc @@ -2,4 +2,3 @@ = AsciiDoc test Here is a test of the asciidoc format. 
- diff --git a/plugins/ingest-attachment/src/yamlRestTest/resources/rest-api-spec/test/ingest_attachment/10_basic.yml b/plugins/ingest-attachment/src/yamlRestTest/resources/rest-api-spec/test/ingest_attachment/10_basic.yml index 607fa5bf8b781..88f6f33ad0a66 100644 --- a/plugins/ingest-attachment/src/yamlRestTest/resources/rest-api-spec/test/ingest_attachment/10_basic.yml +++ b/plugins/ingest-attachment/src/yamlRestTest/resources/rest-api-spec/test/ingest_attachment/10_basic.yml @@ -12,4 +12,3 @@ - contains: { 'nodes.$master.plugins': { name: ingest-attachment } } - contains: { 'nodes.$master.ingest.processors': { type: attachment } } - diff --git a/plugins/mapper-annotated-text/src/yamlRestTest/resources/rest-api-spec/test/mapper_annotatedtext/10_basic.yml b/plugins/mapper-annotated-text/src/yamlRestTest/resources/rest-api-spec/test/mapper_annotatedtext/10_basic.yml index b4acccf36879d..0e43e9e40b619 100644 --- a/plugins/mapper-annotated-text/src/yamlRestTest/resources/rest-api-spec/test/mapper_annotatedtext/10_basic.yml +++ b/plugins/mapper-annotated-text/src/yamlRestTest/resources/rest-api-spec/test/mapper_annotatedtext/10_basic.yml @@ -142,4 +142,3 @@ request_cache: false body: { "query" : {"match_phrase" : { "my_field" : {"query": "~MARK0", "analyzer": "whitespace"} } }, "highlight" : { "type" : "annotated", "fields" : { "my_field" : {} } } } - match: {_shards.failed: 0} - diff --git a/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/30_snapshot.yml b/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/30_snapshot.yml index 20019686d3db1..fbe0e0a8b0066 100644 --- a/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/30_snapshot.yml +++ b/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/30_snapshot.yml @@ -45,4 +45,3 @@ - do: snapshot.delete_repository: repository: test_snapshot_repository - diff --git 
a/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/30_snapshot.yml b/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/30_snapshot.yml index 44f29fe0341a6..821110dc52bed 100644 --- a/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/30_snapshot.yml +++ b/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/30_snapshot.yml @@ -47,4 +47,3 @@ - do: snapshot.delete_repository: repository: test_snapshot_repository - diff --git a/plugins/transport-nio/build.gradle b/plugins/transport-nio/build.gradle index 88355cdf22728..2b9e11cb2521f 100644 --- a/plugins/transport-nio/build.gradle +++ b/plugins/transport-nio/build.gradle @@ -183,4 +183,3 @@ thirdPartyAudit { 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator' ) } - diff --git a/qa/die-with-dignity/build.gradle b/qa/die-with-dignity/build.gradle index 008e2e19bf72f..db8762fe921bf 100644 --- a/qa/die-with-dignity/build.gradle +++ b/qa/die-with-dignity/build.gradle @@ -35,4 +35,3 @@ testClusters.javaRestTest { } test.enabled = false - diff --git a/qa/evil-tests/src/test/resources/org/opensearch/common/cli/tool-cmd1.help b/qa/evil-tests/src/test/resources/org/opensearch/common/cli/tool-cmd1.help index d083e3a65348f..60acabffb6544 100644 --- a/qa/evil-tests/src/test/resources/org/opensearch/common/cli/tool-cmd1.help +++ b/qa/evil-tests/src/test/resources/org/opensearch/common/cli/tool-cmd1.help @@ -1 +1 @@ -cmd1 help \ No newline at end of file +cmd1 help diff --git a/qa/evil-tests/src/test/resources/org/opensearch/common/cli/tool.help b/qa/evil-tests/src/test/resources/org/opensearch/common/cli/tool.help index 023b1accdffb1..2a5850ba79db2 100644 --- a/qa/evil-tests/src/test/resources/org/opensearch/common/cli/tool.help +++ b/qa/evil-tests/src/test/resources/org/opensearch/common/cli/tool.help @@ -1 +1 @@ -tool help \ No newline at end of file +tool help diff --git 
a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/70_skip_shards.yml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/70_skip_shards.yml index 92ae11c712b25..23cd1567b49ba 100644 --- a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/70_skip_shards.yml +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/70_skip_shards.yml @@ -220,4 +220,3 @@ # When all shards are skipped current logic returns 1 to produce a valid search result - match: { _shards.skipped : 1} - match: { _shards.failed: 0 } - diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml index e1ffcea930a42..840c7f5f6297e 100644 --- a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml @@ -204,4 +204,3 @@ tasks.get: wait_for_completion: true task_id: $task - diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/20_date_range.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/20_date_range.yml index 89992eeba616f..6427a45e19f58 100644 --- a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/20_date_range.yml +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/20_date_range.yml @@ -111,5 +111,3 @@ gte: "2019-02-01T00:00+01:00" lte: "2019-02-01T00:00+01:00" - match: { hits.total: 1 } - - diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml index cb74c33cbd31a..f1c00ee896f92 100644 --- a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml +++ 
b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml @@ -133,5 +133,3 @@ wait_for_completion: true task_id: $task_id - match: { task.headers.X-Opaque-Id: "Reindexing Again" } - - diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/20_date_range.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/20_date_range.yml index 7dae2a4d6241a..026dcff32e175 100644 --- a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/20_date_range.yml +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/20_date_range.yml @@ -38,4 +38,3 @@ time_frame: gte: "2019-02-01T00:00+01:00" lte: "2019-02-01T00:00+01:00" - diff --git a/qa/smoke-test-ingest-disabled/src/test/resources/rest-api-spec/test/ingest_mustache/10_ingest_disabled.yml b/qa/smoke-test-ingest-disabled/src/test/resources/rest-api-spec/test/ingest_mustache/10_ingest_disabled.yml index 7a0cdcbef0786..c8fcebfba67ab 100644 --- a/qa/smoke-test-ingest-disabled/src/test/resources/rest-api-spec/test/ingest_mustache/10_ingest_disabled.yml +++ b/qa/smoke-test-ingest-disabled/src/test/resources/rest-api-spec/test/ingest_mustache/10_ingest_disabled.yml @@ -112,4 +112,3 @@ _id: test_id2 pipeline: my_pipeline_1 - f1: v2 - diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/50_script_processor_using_painless.yml b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/50_script_processor_using_painless.yml index eaf6b24030a06..c58735f7862e6 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/50_script_processor_using_painless.yml +++ b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/50_script_processor_using_painless.yml @@ -102,4 +102,3 @@ - match: { error.processor_type: "script" } - match: { error.type: "script_exception" } - 
match: { error.reason: "compile error" } - diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/60_pipeline_timestamp_date_mapping.yml b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/60_pipeline_timestamp_date_mapping.yml index 0f514f2213492..d7f565f30c93d 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/60_pipeline_timestamp_date_mapping.yml +++ b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/60_pipeline_timestamp_date_mapping.yml @@ -34,4 +34,3 @@ id: 1 pipeline: "my_timely_pipeline" body: {} - diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/scripts/master.painless b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/scripts/master.painless index 29880e8fd5f57..82f007e8e4dac 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/scripts/master.painless +++ b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/scripts/master.painless @@ -1 +1 @@ -ctx.bytes_total = ctx.bytes_in + ctx.bytes_out \ No newline at end of file +ctx.bytes_total = ctx.bytes_in + ctx.bytes_out diff --git a/qa/smoke-test-plugins/src/test/java/org/opensearch/smoketest/SmokeTestPluginsClientYamlTestSuiteIT.java b/qa/smoke-test-plugins/src/test/java/org/opensearch/smoketest/SmokeTestPluginsClientYamlTestSuiteIT.java index a7e50601ad9df..a1c61ca97f877 100644 --- a/qa/smoke-test-plugins/src/test/java/org/opensearch/smoketest/SmokeTestPluginsClientYamlTestSuiteIT.java +++ b/qa/smoke-test-plugins/src/test/java/org/opensearch/smoketest/SmokeTestPluginsClientYamlTestSuiteIT.java @@ -49,4 +49,3 @@ public static Iterable parameters() throws Exception { return OpenSearchClientYamlSuiteTestCase.createParameters(); } } - diff --git a/release-notes/opensearch.release-notes-1.0.0-rc1.md b/release-notes/opensearch.release-notes-1.0.0-rc1.md index 
2223d732abb98..205873fd282b2 100644 --- a/release-notes/opensearch.release-notes-1.0.0-rc1.md +++ b/release-notes/opensearch.release-notes-1.0.0-rc1.md @@ -411,5 +411,3 @@ Signed-off-by: Abbas Hussain <abbas_10690@yahoo.com> - - diff --git a/release-notes/opensearch.release-notes-1.1.0.md b/release-notes/opensearch.release-notes-1.1.0.md index 0545e106a15a5..ba5a5d4c95c60 100644 --- a/release-notes/opensearch.release-notes-1.1.0.md +++ b/release-notes/opensearch.release-notes-1.1.0.md @@ -386,5 +386,3 @@ Signed-off-by: Sooraj Sinha <soosinha@amazon.com> - - diff --git a/release-notes/opensearch.release-notes-1.2.0.md b/release-notes/opensearch.release-notes-1.2.0.md index 86860e5f872da..d7c75f3c0eaf2 100644 --- a/release-notes/opensearch.release-notes-1.2.0.md +++ b/release-notes/opensearch.release-notes-1.2.0.md @@ -458,4 +458,3 @@ Signed-off-by: Nicholas Walter Knize <nknize@apache.org> - diff --git a/release-notes/opensearch.release-notes-1.2.4.md b/release-notes/opensearch.release-notes-1.2.4.md index dc2852a102c44..dc0bce20a2a00 100644 --- a/release-notes/opensearch.release-notes-1.2.4.md +++ b/release-notes/opensearch.release-notes-1.2.4.md @@ -72,5 +72,3 @@ Signed-off-by: dblock <dblock@amazon.com> - - diff --git a/release-notes/opensearch.release-notes-1.3.0.md b/release-notes/opensearch.release-notes-1.3.0.md index 62c5be8413943..7dd71fa47b72f 100644 --- a/release-notes/opensearch.release-notes-1.3.0.md +++ b/release-notes/opensearch.release-notes-1.3.0.md @@ -1295,5 +1295,3 @@ [Nick Knize](mailto:nknize@apache.org) - Thu, 4 Nov 2021 14:46:57 -0500 Signed-off-by: Nicholas Walter Knize <nknize@apache.org> - - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/10_basic.yml index 8c8c6d50abf41..c91ec511a0fdb 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/10_basic.yml @@ 
-223,4 +223,3 @@ - match: { items.0.index.status: 400 } - match: { items.0.index.error.type: illegal_argument_exception } - match: { items.0.index.error.reason: "no write index is defined for alias [test_index]. The write index may be explicitly disabled using is_write_index=false or the alias points to multiple indices without one being designated as a write index" } - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/20_list_of_strings.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/20_list_of_strings.yml index 3d956dce54289..cb3553abbd435 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/20_list_of_strings.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/20_list_of_strings.yml @@ -14,4 +14,3 @@ index: test_index - match: {count: 2} - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/30_big_string.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/30_big_string.yml index 8b6467eeed975..fabe674697cde 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/30_big_string.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/30_big_string.yml @@ -14,4 +14,3 @@ index: test_index - match: {count: 2} - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/40_source.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/40_source.yml index e29e84740ee5c..fb9554619a818 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/40_source.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/40_source.yml @@ -68,4 +68,3 @@ - match: { items.0.update.get._source.foo: garply } - is_false: items.0.update.get._source.bar - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.nodes/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.nodes/10_basic.yml index 789ea5fc19c3f..f04c674d420ee 100644 --- 
a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.nodes/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.nodes/10_basic.yml @@ -131,4 +131,3 @@ - match: $body: | /^(\S{5,}\n)+$/ - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.pending_tasks/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.pending_tasks/10_basic.yml index f8fd8ebef170d..885f6c4a97912 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.pending_tasks/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.pending_tasks/10_basic.yml @@ -11,4 +11,3 @@ local: true - is_true: tasks - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.remote_info/10_info.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.remote_info/10_info.yml index e11eff2b78a3c..75058cefd5c53 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.remote_info/10_info.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.remote_info/10_info.yml @@ -3,4 +3,3 @@ - do: cluster.remote_info: {} - is_true: '' - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.voting_config_exclusions/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.voting_config_exclusions/10_basic.yml index 5474c9bdf4da0..23eebacabf3f3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.voting_config_exclusions/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.voting_config_exclusions/10_basic.yml @@ -104,4 +104,3 @@ teardown: cluster.post_voting_config_exclusions: node_ids: nodeId node_names: nodeName - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/40_routing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/40_routing.yml index 9c048c361bd5c..e1341ac2b5380 100644 --- 
a/rest-api-spec/src/main/resources/rest-api-spec/test/create/40_routing.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/create/40_routing.yml @@ -39,4 +39,3 @@ get: index: test_1 id: 1 - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/30_routing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/30_routing.yml index 27e9350caed70..c3c407cd9173a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/30_routing.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/30_routing.yml @@ -30,4 +30,3 @@ index: test_1 id: 1 routing: 5 - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/15_default_values.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/15_default_values.yml index 921397b238f51..fabf8fb87a7b6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/15_default_values.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get/15_default_values.yml @@ -14,4 +14,3 @@ - match: { _index: test_1 } - match: { _id: '1' } - match: { _source: { foo: "bar" } } - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/20_stored_fields.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/20_stored_fields.yml index 23c7e5cbc90a6..1bafdc3dab21f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/20_stored_fields.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get/20_stored_fields.yml @@ -48,5 +48,3 @@ - match: { fields.foo: [bar] } - match: { fields.count: [1] } - match: { _source.foo: bar } - - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/40_routing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/40_routing.yml index 9ba546d6ef942..7f45b39add8a7 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/40_routing.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get/40_routing.yml @@ -41,4 +41,3 @@ get: index: test_1 id: 1 - diff 
--git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/90_versions.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/90_versions.yml index 9037a9113e937..3f45a1da09dce 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/90_versions.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get/90_versions.yml @@ -80,4 +80,3 @@ id: 1 version: 1 version_type: external_gte - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/40_routing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/40_routing.yml index 630cf39dbe65c..f6f497269b043 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/40_routing.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/40_routing.yml @@ -40,4 +40,3 @@ get: index: test_1 id: 1 - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.delete_alias/all_path_options.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.delete_alias/all_path_options.yml index d1d01cbaaa7e6..67369b67b3249 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.delete_alias/all_path_options.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.delete_alias/all_path_options.yml @@ -221,4 +221,3 @@ setup: catch: param indices.delete_alias: index: "test_index1" - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.exists_template/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.exists_template/10_basic.yml index c7892f58a6f59..32b692b16c5d6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.exists_template/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.exists_template/10_basic.yml @@ -41,4 +41,3 @@ setup: local: true - is_false: '' - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/10_basic.yml 
b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/10_basic.yml index 137736e6823a9..d62c4c8882b13 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/10_basic.yml @@ -27,5 +27,3 @@ index: test max_num_segments: 10 only_expunge_deletes: true - - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/40_missing_index.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/40_missing_index.yml index 7c7b07b587849..690f83d5f3c2b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/40_missing_index.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/40_missing_index.yml @@ -6,5 +6,3 @@ indices.get_field_mapping: index: test_index fields: field - - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/50_field_wildcards.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/50_field_wildcards.yml index 2c9ff58b445df..814bd1e3a4063 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/50_field_wildcards.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/50_field_wildcards.yml @@ -127,4 +127,3 @@ setup: - match: {test_index_2.mappings.t1.full_name: t1 } - match: {test_index_2.mappings.t2.full_name: t2 } - length: {test_index_2.mappings: 2} - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_index_template/20_get_missing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_index_template/20_get_missing.yml index 2dfa466ba0eca..4c855d928d1c0 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_index_template/20_get_missing.yml +++ 
b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_index_template/20_get_missing.yml @@ -17,4 +17,3 @@ setup: catch: missing indices.get_index_template: name: test - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/30_missing_index.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/30_missing_index.yml index 1bbfbc4f4c967..e9502cdc08436 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/30_missing_index.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/30_missing_index.yml @@ -29,4 +29,3 @@ index: test_index ignore_unavailable: true allow_no_indices: false - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_settings/20_aliases.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_settings/20_aliases.yml index da7678202ed34..4f8d1371d90b1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_settings/20_aliases.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_settings/20_aliases.yml @@ -23,4 +23,3 @@ - match: { test-index.settings.index.number_of_replicas: "3" } - match: { test-index.settings.index.number_of_shards: "2" } - match: { test-index.settings.index.refresh_interval: "-1" } - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_template/20_get_missing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_template/20_get_missing.yml index 2751f57dacb6c..ee7ba62c9beb4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_template/20_get_missing.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_template/20_get_missing.yml @@ -10,4 +10,3 @@ setup: catch: missing indices.get_template: name: test - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.open/20_multiple_indices.yml 
b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.open/20_multiple_indices.yml index bef5ea8a54651..2720b08514ba3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.open/20_multiple_indices.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.open/20_multiple_indices.yml @@ -101,4 +101,3 @@ setup: search: rest_total_hits_as_int: true index: test_index3 - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_alias/all_path_options.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_alias/all_path_options.yml index bef57bbddf165..47828c43350b7 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_alias/all_path_options.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_alias/all_path_options.yml @@ -113,4 +113,3 @@ setup: - do: catch: param indices.put_alias: {} - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/10_basic.yml index 36317c7ae173c..23f87ea1ec2b3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/10_basic.yml @@ -143,4 +143,3 @@ - is_false: test_index.mappings.properties.foo.meta.bar - match: { test_index.mappings.properties.foo.meta.baz: "quux" } - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/all_path_options.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/all_path_options.yml index c1daa76fe3d6e..ca7a21df20ea4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/all_path_options.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/all_path_options.yml @@ -159,4 +159,3 @@ setup: indices.get_mapping: {} - match: 
{test_index1.mappings.properties.text.type: text} - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_settings/all_path_options.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_settings/all_path_options.yml index 07f1956f0fcca..ac45c4e098e6e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_settings/all_path_options.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_settings/all_path_options.yml @@ -110,4 +110,3 @@ setup: - match: {test_index1.settings.index.refresh_interval: 1s} - match: {test_index2.settings.index.refresh_interval: 1s} - match: {foo.settings.index.refresh_interval: 1s} - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.refresh/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.refresh/10_basic.yml index 6e493a0cce936..bf20d51bc97cd 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.refresh/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.refresh/10_basic.yml @@ -55,4 +55,3 @@ setup: - match: { _shards.total: 0 } - match: { _shards.successful: 0 } - match: { _shards.failed: 0 } - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/10_basic.yml index dc68ffc9a3b86..a36db0cda8526 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/10_basic.yml @@ -148,4 +148,3 @@ body: conditions: max_docs: 1 - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/20_max_doc_condition.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/20_max_doc_condition.yml index f5d223259dc06..7d1b447b4e293 100644 --- 
a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/20_max_doc_condition.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/20_max_doc_condition.yml @@ -52,4 +52,3 @@ - match: { conditions: { "[max_docs: 2]": true } } - match: { rolled_over: true } - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.update_aliases/20_routing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.update_aliases/20_routing.yml index ecedcef0c1a48..c812d84dfe7e3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.update_aliases/20_routing.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.update_aliases/20_routing.yml @@ -132,4 +132,3 @@ setup: index: test_index - match: {test_index.aliases.test_alias: {'index_routing': '5', 'search_routing': '5'}} - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.upgrade/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.upgrade/10_basic.yml index 55070cb8c1f97..62c1a51dace52 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.upgrade/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.upgrade/10_basic.yml @@ -69,4 +69,3 @@ indices.upgrade: index: ["test_index", "does_not_exist"] ignore_unavailable: false - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/info/20_lucene_version.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/info/20_lucene_version.yml index 83414fbabc565..427a585815db0 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/info/20_lucene_version.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/info/20_lucene_version.yml @@ -2,6 +2,3 @@ "Lucene Version": - do: {info: {}} - is_true: version.lucene_version - - - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml 
b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml index a09619b7255c3..1f1f42890355e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml @@ -224,4 +224,3 @@ - is_false: nodes.$node_id.indices.translog - is_false: nodes.$node_id.indices.recovery - is_true: nodes.$node_id.indices.segments.file_sizes - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/ping/10_ping.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/ping/10_ping.yml index ec07c218dabd9..da160503caab4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/ping/10_ping.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/ping/10_ping.yml @@ -2,4 +2,3 @@ "Ping": - do: { ping: {}} - is_true: '' - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/240_max_buckets.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/240_max_buckets.yml index 82965bda51576..c540814a1690d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/240_max_buckets.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/240_max_buckets.yml @@ -118,4 +118,3 @@ setup: 2: terms: field: date - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/250_moving_fn.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/250_moving_fn.yml index 339fe72b77730..c0a8d2fb4500c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/250_moving_fn.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/250_moving_fn.yml @@ -74,4 +74,3 @@ buckets_path: "the_avg" window: -1 script: "MovingFunctions.windowMax(values)" - diff --git 
a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/350_variable_width_histogram.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/350_variable_width_histogram.yml index 071e543e8a25e..cc41ef1fa6cd3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/350_variable_width_histogram.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/350_variable_width_histogram.yml @@ -47,4 +47,3 @@ setup: - match: { aggregations.histo.buckets.1.doc_count: 1 } - match: { aggregations.histo.buckets.2.key: 4.5 } - match: { aggregations.histo.buckets.2.doc_count: 2 } - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yml index 091638d6a07fb..4de2e8142f6ec 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yml @@ -196,4 +196,3 @@ setup: # When this test is run during runtime-field's tests we *don't* get floating point errors. Thus the funny assertion here that matches both. 
- lt: { hits.hits.0.fields.d.0: 3.141 } - gte: { hits.hits.0.fields.d.0: 3.14 } - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/140_pre_filter_search_shards.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/140_pre_filter_search_shards.yml index 31f6f35003e2d..40e1fbcf7a2ab 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/140_pre_filter_search_shards.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/140_pre_filter_search_shards.yml @@ -154,4 +154,3 @@ setup: - match: { _shards.failed: 0 } - match: { hits.total: 2 } - length: { aggregations.idx_terms.buckets: 2 } - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/230_interval_query.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/230_interval_query.yml index 0286d3caf66b8..0b9172e0740ea 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/230_interval_query.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/230_interval_query.yml @@ -750,5 +750,3 @@ setup: - prefix: prefix: out - match: { hits.total.value: 3 } - - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/240_date_nanos.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/240_date_nanos.yml index feb875e81a785..1ddba45c97c72 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/240_date_nanos.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/240_date_nanos.yml @@ -164,4 +164,3 @@ setup: - match: { aggregations.date.buckets.1.key: 1540857600000 } - match: { aggregations.date.buckets.1.key_as_string: "2018-10-30T00:00:00.000Z" } - match: { aggregations.date.buckets.1.doc_count: 2 } - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml index 5f5d88dba7687..d7690ac6097ef 100644 --- 
a/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml @@ -224,4 +224,3 @@ - match: {hits.hits.0._index: test } - match: {hits.hits.0._source.timestamp: "2019-10-21 00:30:04.828740" } - match: {hits.hits.0.sort: [1571617804828740000] } - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/20_doc_upsert.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/20_doc_upsert.yml index 4d03971aba252..cfdd38b9ffd1c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/20_doc_upsert.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/20_doc_upsert.yml @@ -32,5 +32,3 @@ - match: { _source.foo: bar } - match: { _source.count: 1 } - - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/22_doc_as_upsert.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/22_doc_as_upsert.yml index c65fc5af27fcc..7ee5c01089ff1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/22_doc_as_upsert.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/22_doc_as_upsert.yml @@ -32,5 +32,3 @@ - match: { _source.foo: bar } - match: { _source.count: 2 } - - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/35_if_seq_no.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/35_if_seq_no.yml index f982adf693ad0..c93be37be49f5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/35_if_seq_no.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/35_if_seq_no.yml @@ -61,4 +61,3 @@ - match: { errors: true } - match: { items.0.update.status: 409 } - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/40_routing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/40_routing.yml index 6f43d381e0537..28e42f9dafea9 100644 --- 
a/rest-api-spec/src/main/resources/rest-api-spec/test/update/40_routing.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/40_routing.yml @@ -55,4 +55,3 @@ doc: { foo: baz } - match: { get._source.foo: baz } - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/85_fields_meta.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/85_fields_meta.yml index fe76ab5299cda..2d4fda22f4442 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/85_fields_meta.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/85_fields_meta.yml @@ -27,5 +27,3 @@ id: 1 parent: 5 stored_fields: [ _routing ] - - diff --git a/sandbox/plugins/concurrent-search/build.gradle b/sandbox/plugins/concurrent-search/build.gradle index acc3cb5092cd8..0e766dc4fc1ba 100644 --- a/sandbox/plugins/concurrent-search/build.gradle +++ b/sandbox/plugins/concurrent-search/build.gradle @@ -39,4 +39,4 @@ opensearchplugin { } yamlRestTest.enabled = false; -testingConventions.enabled = false; \ No newline at end of file +testingConventions.enabled = false; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/package-info b/server/src/main/java/org/opensearch/search/aggregations/bucket/package-info index a2cb4a9493c56..52b88548dacdd 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/package-info +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/package-info @@ -21,4 +21,3 @@ * Aggregations module */ package org.opensearch.search.aggregations.bucket; - diff --git a/server/src/test/resources/config/opensearch.yml b/server/src/test/resources/config/opensearch.yml index b6ebc6bd10576..21f4f7b1b933a 100644 --- a/server/src/test/resources/config/opensearch.yml +++ b/server/src/test/resources/config/opensearch.yml @@ -1,3 +1,2 @@ yaml.config.exists: true - diff --git a/server/src/test/resources/indices/analyze/conf_dir/hunspell/en_US_custom/settings.yml 
b/server/src/test/resources/indices/analyze/conf_dir/hunspell/en_US_custom/settings.yml index 1a91653e56a1d..7e9eed5920f2e 100644 --- a/server/src/test/resources/indices/analyze/conf_dir/hunspell/en_US_custom/settings.yml +++ b/server/src/test/resources/indices/analyze/conf_dir/hunspell/en_US_custom/settings.yml @@ -1,2 +1,2 @@ ignore_case: true -strict_affix_parsing: true \ No newline at end of file +strict_affix_parsing: true diff --git a/server/src/test/resources/org/opensearch/action/admin/invalid.txt.keystore b/server/src/test/resources/org/opensearch/action/admin/invalid.txt.keystore index 04613ffab7f36..a18f3707ac0d9 100644 --- a/server/src/test/resources/org/opensearch/action/admin/invalid.txt.keystore +++ b/server/src/test/resources/org/opensearch/action/admin/invalid.txt.keystore @@ -1,3 +1,2 @@ admin admin dragon 12345 - diff --git a/server/src/test/resources/org/opensearch/action/fieldstats/fieldstats-index-constraints-request.json b/server/src/test/resources/org/opensearch/action/fieldstats/fieldstats-index-constraints-request.json index eb8ca972dcd52..8fbf7684b6819 100644 --- a/server/src/test/resources/org/opensearch/action/fieldstats/fieldstats-index-constraints-request.json +++ b/server/src/test/resources/org/opensearch/action/fieldstats/fieldstats-index-constraints-request.json @@ -40,4 +40,4 @@ } } } -} \ No newline at end of file +} diff --git a/server/src/test/resources/org/opensearch/index/analysis/shingle_analysis.json b/server/src/test/resources/org/opensearch/index/analysis/shingle_analysis.json index 33c09fe8dbd9f..53e5bbd9fa946 100644 --- a/server/src/test/resources/org/opensearch/index/analysis/shingle_analysis.json +++ b/server/src/test/resources/org/opensearch/index/analysis/shingle_analysis.json @@ -20,4 +20,4 @@ } } } -} \ No newline at end of file +} diff --git a/server/src/test/resources/org/opensearch/index/analysis/shingle_analysis2.json b/server/src/test/resources/org/opensearch/index/analysis/shingle_analysis2.json index 
a81ea538f19fe..19b4d24063b8e 100644 --- a/server/src/test/resources/org/opensearch/index/analysis/shingle_analysis2.json +++ b/server/src/test/resources/org/opensearch/index/analysis/shingle_analysis2.json @@ -12,4 +12,4 @@ } } } -} \ No newline at end of file +} diff --git a/server/src/test/resources/org/opensearch/index/analysis/stop.json b/server/src/test/resources/org/opensearch/index/analysis/stop.json index 717c9fdee5b08..c59b0fb5056d0 100644 --- a/server/src/test/resources/org/opensearch/index/analysis/stop.json +++ b/server/src/test/resources/org/opensearch/index/analysis/stop.json @@ -15,4 +15,4 @@ } } } -} \ No newline at end of file +} diff --git a/server/src/test/resources/org/opensearch/index/mapper/dynamictemplate/genericstore/test-data.json b/server/src/test/resources/org/opensearch/index/mapper/dynamictemplate/genericstore/test-data.json index b7439dcb9fe6f..14ebf16d92f36 100644 --- a/server/src/test/resources/org/opensearch/index/mapper/dynamictemplate/genericstore/test-data.json +++ b/server/src/test/resources/org/opensearch/index/mapper/dynamictemplate/genericstore/test-data.json @@ -1,4 +1,4 @@ { "name":"some name", "age":1 -} \ No newline at end of file +} diff --git a/server/src/test/resources/org/opensearch/index/mapper/dynamictemplate/pathmatch/test-data.json b/server/src/test/resources/org/opensearch/index/mapper/dynamictemplate/pathmatch/test-data.json index 2e6ec997c4612..765a850b0b663 100644 --- a/server/src/test/resources/org/opensearch/index/mapper/dynamictemplate/pathmatch/test-data.json +++ b/server/src/test/resources/org/opensearch/index/mapper/dynamictemplate/pathmatch/test-data.json @@ -11,4 +11,4 @@ "prop1":"prop1_value" } } -} \ No newline at end of file +} diff --git a/server/src/test/resources/org/opensearch/index/mapper/dynamictemplate/simple/test-data.json b/server/src/test/resources/org/opensearch/index/mapper/dynamictemplate/simple/test-data.json index 1ed3c50b98d59..8509d846c13bb 100644 --- 
a/server/src/test/resources/org/opensearch/index/mapper/dynamictemplate/simple/test-data.json +++ b/server/src/test/resources/org/opensearch/index/mapper/dynamictemplate/simple/test-data.json @@ -3,4 +3,4 @@ "age":1, "multi1":"multi 1", "multi2":"multi 2" -} \ No newline at end of file +} diff --git a/server/src/test/resources/org/opensearch/index/mapper/multifield/merge/test-data.json b/server/src/test/resources/org/opensearch/index/mapper/multifield/merge/test-data.json index c539fcc885d3c..ec6b1b95ed888 100644 --- a/server/src/test/resources/org/opensearch/index/mapper/multifield/merge/test-data.json +++ b/server/src/test/resources/org/opensearch/index/mapper/multifield/merge/test-data.json @@ -1,4 +1,4 @@ { _id:1, name:"some name" -} \ No newline at end of file +} diff --git a/server/src/test/resources/org/opensearch/index/mapper/multifield/test-data.json b/server/src/test/resources/org/opensearch/index/mapper/multifield/test-data.json index 2e8ab256df92a..fd7ecad71b894 100644 --- a/server/src/test/resources/org/opensearch/index/mapper/multifield/test-data.json +++ b/server/src/test/resources/org/opensearch/index/mapper/multifield/test-data.json @@ -4,4 +4,4 @@ "object1":{ "multi1":"2010-01-01" } -} \ No newline at end of file +} diff --git a/settings.gradle b/settings.gradle index cb8167ee02efe..183a5ec8d1ae1 100644 --- a/settings.gradle +++ b/settings.gradle @@ -136,4 +136,3 @@ if (extraProjects.exists()) { addSubProjects('', extraProjectDir) } } - diff --git a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/addprinc.sh b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/addprinc.sh index a58df6d47f36f..201c437f00b73 100755 --- a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/addprinc.sh +++ b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/addprinc.sh @@ -72,4 +72,4 @@ echo "Copying conf to local" # make the configuration available externally cp -v $LOCALSTATEDIR/krb5.conf $BUILD_DIR/krb5.conf.template # We are 
running as root in the container, allow non root users running the container to be able to clean these up -chmod -R 777 $BUILD_DIR \ No newline at end of file +chmod -R 777 $BUILD_DIR diff --git a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/krb5.conf.template b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/krb5.conf.template index ba0832b2b7d99..207fe939fb7a5 100644 --- a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/krb5.conf.template +++ b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/krb5.conf.template @@ -59,4 +59,3 @@ .${BUILD_ZONE} = ${REALM_NAME} ${OPENSEARCH_ZONE} = ${REALM_NAME} .${OPENSEARCH_ZONE} = ${REALM_NAME} - diff --git a/test/fixtures/minio-fixture/build.gradle b/test/fixtures/minio-fixture/build.gradle index ba5e0a7d2d814..61f417690b210 100644 --- a/test/fixtures/minio-fixture/build.gradle +++ b/test/fixtures/minio-fixture/build.gradle @@ -30,4 +30,3 @@ apply plugin: 'opensearch.test.fixtures' description = 'Fixture for MinIO Storage service' - diff --git a/test/framework/src/main/resources/mockito-extensions/org.mockito.plugins.MockMaker b/test/framework/src/main/resources/mockito-extensions/org.mockito.plugins.MockMaker index e1795b7b9b3d6..99b0d419fc445 100644 --- a/test/framework/src/main/resources/mockito-extensions/org.mockito.plugins.MockMaker +++ b/test/framework/src/main/resources/mockito-extensions/org.mockito.plugins.MockMaker @@ -1 +1 @@ -org.opensearch.mockito.plugin.PriviledgedMockMaker \ No newline at end of file +org.opensearch.mockito.plugin.PriviledgedMockMaker diff --git a/test/framework/src/test/resources/rest-api-spec/test/suite1/20_another_test.yml b/test/framework/src/test/resources/rest-api-spec/test/suite1/20_another_test.yml index 5e08112253ef0..0d27f91a83dd3 100644 --- a/test/framework/src/test/resources/rest-api-spec/test/suite1/20_another_test.yml +++ b/test/framework/src/test/resources/rest-api-spec/test/suite1/20_another_test.yml @@ -18,4 +18,3 @@ - match: { 
_type: test } - match: { _id: '1' } - match: { _source: { foo: "bar" } } - From c4b684d3ae0f56786f0b2c40b48d4329bd595a24 Mon Sep 17 00:00:00 2001 From: Poojita Raj Date: Wed, 13 Apr 2022 14:00:43 -0700 Subject: [PATCH 079/653] Add functionality to fast forward local processed checkpoints [segment replication] (#2576) (#2883) * fix local processed checkpoint update (#2576) Signed-off-by: Poojita Raj * separated tests + wrapper function Signed-off-by: Poojita Raj * moved tests + compareAndSet change Signed-off-by: Poojita Raj --- .../index/seqno/LocalCheckpointTracker.java | 28 +++++++++- .../seqno/LocalCheckpointTrackerTests.java | 56 +++++++++++++++++++ 2 files changed, 83 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/opensearch/index/seqno/LocalCheckpointTracker.java b/server/src/main/java/org/opensearch/index/seqno/LocalCheckpointTracker.java index 7aab597f8816c..8e2d81d0fe711 100644 --- a/server/src/main/java/org/opensearch/index/seqno/LocalCheckpointTracker.java +++ b/server/src/main/java/org/opensearch/index/seqno/LocalCheckpointTracker.java @@ -33,6 +33,7 @@ package org.opensearch.index.seqno; import com.carrotsearch.hppc.LongObjectHashMap; +import org.opensearch.common.Nullable; import org.opensearch.common.SuppressForbidden; import java.util.concurrent.atomic.AtomicLong; @@ -116,6 +117,13 @@ public void advanceMaxSeqNo(final long seqNo) { nextSeqNo.accumulateAndGet(seqNo + 1, Math::max); } + /** + * Checks that the sequence number is in an acceptable range for an update to take place. + */ + private boolean shouldUpdateSeqNo(final long seqNo, final long lowerBound, @Nullable final AtomicLong upperBound) { + return !((seqNo <= lowerBound) || (upperBound != null && seqNo > upperBound.get())); + } + /** * Marks the provided sequence number as processed and updates the processed checkpoint if possible. 
* @@ -134,11 +142,29 @@ public synchronized void markSeqNoAsPersisted(final long seqNo) { markSeqNo(seqNo, persistedCheckpoint, persistedSeqNo); } + /** + * Updates the processed sequence checkpoint to the given value. + * + * This method is only used for segment replication since indexing doesn't + * take place on the replica allowing us to avoid the check that all sequence numbers + * are consecutively processed. + * + * @param seqNo the sequence number to mark as processed + */ + public synchronized void fastForwardProcessedSeqNo(final long seqNo) { + advanceMaxSeqNo(seqNo); + final long currentProcessedCheckpoint = processedCheckpoint.get(); + if (shouldUpdateSeqNo(seqNo, currentProcessedCheckpoint, persistedCheckpoint) == false) { + return; + } + processedCheckpoint.compareAndSet(currentProcessedCheckpoint, seqNo); + } + private void markSeqNo(final long seqNo, final AtomicLong checkPoint, final LongObjectHashMap bitSetMap) { assert Thread.holdsLock(this); // make sure we track highest seen sequence number advanceMaxSeqNo(seqNo); - if (seqNo <= checkPoint.get()) { + if (shouldUpdateSeqNo(seqNo, checkPoint.get(), null) == false) { // this is possible during recovery where we might replay an operation that was also replicated return; } diff --git a/server/src/test/java/org/opensearch/index/seqno/LocalCheckpointTrackerTests.java b/server/src/test/java/org/opensearch/index/seqno/LocalCheckpointTrackerTests.java index bcb178e05065c..237066e549b09 100644 --- a/server/src/test/java/org/opensearch/index/seqno/LocalCheckpointTrackerTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/LocalCheckpointTrackerTests.java @@ -331,4 +331,60 @@ public void testContains() { final long seqNo = randomNonNegativeLong(); assertThat(tracker.hasProcessed(seqNo), equalTo(seqNo <= localCheckpoint || seqNos.contains(seqNo))); } + + public void testFastForwardProcessedNoPersistentUpdate() { + // base case with no persistent checkpoint update + long seqNo1; + 
assertThat(tracker.getProcessedCheckpoint(), equalTo(SequenceNumbers.NO_OPS_PERFORMED)); + seqNo1 = tracker.generateSeqNo(); + assertThat(seqNo1, equalTo(0L)); + tracker.fastForwardProcessedSeqNo(seqNo1); + assertThat(tracker.getProcessedCheckpoint(), equalTo(-1L)); + } + + public void testFastForwardProcessedPersistentUpdate() { + // base case with persistent checkpoint update + long seqNo1; + assertThat(tracker.getProcessedCheckpoint(), equalTo(SequenceNumbers.NO_OPS_PERFORMED)); + seqNo1 = tracker.generateSeqNo(); + assertThat(seqNo1, equalTo(0L)); + + tracker.markSeqNoAsPersisted(seqNo1); + assertThat(tracker.getPersistedCheckpoint(), equalTo(0L)); + tracker.fastForwardProcessedSeqNo(seqNo1); + assertThat(tracker.getProcessedCheckpoint(), equalTo(0L)); + assertThat(tracker.hasProcessed(0L), equalTo(true)); + assertThat(tracker.hasProcessed(atLeast(1)), equalTo(false)); + + // idempotent case + tracker.fastForwardProcessedSeqNo(seqNo1); + assertThat(tracker.getProcessedCheckpoint(), equalTo(0L)); + assertThat(tracker.hasProcessed(0L), equalTo(true)); + assertThat(tracker.hasProcessed(atLeast(1)), equalTo(false)); + + } + + public void testFastForwardProcessedPersistentUpdate2() { + long seqNo1, seqNo2; + assertThat(tracker.getProcessedCheckpoint(), equalTo(SequenceNumbers.NO_OPS_PERFORMED)); + seqNo1 = tracker.generateSeqNo(); + seqNo2 = tracker.generateSeqNo(); + assertThat(seqNo1, equalTo(0L)); + assertThat(seqNo2, equalTo(1L)); + tracker.markSeqNoAsPersisted(seqNo1); + tracker.markSeqNoAsPersisted(seqNo2); + assertThat(tracker.getProcessedCheckpoint(), equalTo(-1L)); + assertThat(tracker.getPersistedCheckpoint(), equalTo(1L)); + + tracker.fastForwardProcessedSeqNo(seqNo2); + assertThat(tracker.getProcessedCheckpoint(), equalTo(1L)); + assertThat(tracker.hasProcessed(seqNo1), equalTo(true)); + assertThat(tracker.hasProcessed(seqNo2), equalTo(true)); + + tracker.fastForwardProcessedSeqNo(seqNo1); + assertThat(tracker.getProcessedCheckpoint(), equalTo(1L)); + 
assertThat(tracker.hasProcessed(between(0, 1)), equalTo(true)); + assertThat(tracker.hasProcessed(atLeast(2)), equalTo(false)); + assertThat(tracker.getMaxSeqNo(), equalTo(1L)); + } } From 452e368bdeb623f4d6b72631069a4fe5264177c6 Mon Sep 17 00:00:00 2001 From: Vacha Shah Date: Thu, 14 Apr 2022 11:07:22 -0700 Subject: [PATCH 080/653] Adding a null pointer check to fix index_prefix query (#2879) * Adding a null pointer check to fix index_prefix query Signed-off-by: Vacha Shah * Adding test Signed-off-by: Vacha Shah --- .../org/opensearch/index/mapper/TextFieldMapper.java | 4 +++- .../opensearch/index/mapper/TextFieldTypeTests.java | 11 +++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/opensearch/index/mapper/TextFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/TextFieldMapper.java index 049c85dc910ed..4b2c20586834d 100644 --- a/server/src/main/java/org/opensearch/index/mapper/TextFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/TextFieldMapper.java @@ -583,7 +583,9 @@ public Query prefixQuery(String value, MultiTermQuery.RewriteMethod method, bool } Automaton automaton = Operations.concatenate(automata); AutomatonQuery query = new AutomatonQuery(new Term(name(), value + "*"), automaton); - query.setRewriteMethod(method); + if (method != null) { + query.setRewriteMethod(method); + } return new BooleanQuery.Builder().add(query, BooleanClause.Occur.SHOULD) .add(new TermQuery(new Term(parentField.name(), value)), BooleanClause.Occur.SHOULD) .build(); diff --git a/server/src/test/java/org/opensearch/index/mapper/TextFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/TextFieldTypeTests.java index 18b04c1be5668..b9ec5a07b207d 100644 --- a/server/src/test/java/org/opensearch/index/mapper/TextFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/TextFieldTypeTests.java @@ -190,6 +190,17 @@ public void testIndexPrefixes() { ); assertThat(q, 
equalTo(expected)); + + q = ft.prefixQuery("g", null, false, randomMockShardContext()); + automaton = Operations.concatenate(Arrays.asList(Automata.makeChar('g'), Automata.makeAnyChar())); + + expected = new ConstantScoreQuery( + new BooleanQuery.Builder().add(new AutomatonQuery(new Term("field._index_prefix", "g*"), automaton), BooleanClause.Occur.SHOULD) + .add(new TermQuery(new Term("field", "g")), BooleanClause.Occur.SHOULD) + .build() + ); + + assertThat(q, equalTo(expected)); } public void testFetchSourceValue() throws IOException { From d39c18f7fe77d886a00672613ce50119821030d1 Mon Sep 17 00:00:00 2001 From: Ankit Jain Date: Fri, 15 Apr 2022 05:06:01 +0530 Subject: [PATCH 081/653] Excluding system indices from max shard limit validator (#2894) * Excluding system indices from max shard limit validator Signed-off-by: Ankit Jain * Fixing spotless check violations Signed-off-by: Ankit Jain * Fixing NPE due to null isHidden Signed-off-by: Ankit Jain * Adding unit tests for shard opening scenario Signed-off-by: Ankit Jain * Addressing review comments Signed-off-by: Ankit Jain --- .../metadata/MetadataCreateIndexService.java | 17 +-- .../indices/ShardLimitValidator.java | 14 ++- .../org/opensearch/indices/SystemIndices.java | 25 +++++ .../main/java/org/opensearch/node/Node.java | 2 +- .../opensearch/snapshots/RestoreService.java | 6 +- .../MetadataRolloverServiceTests.java | 10 +- .../indices/ShardLimitValidatorTests.java | 106 +++++++++++++++++- .../indices/cluster/ClusterStateChanges.java | 5 +- .../snapshots/SnapshotResiliencyTests.java | 5 +- 9 files changed, 160 insertions(+), 30 deletions(-) diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index 64198dce89cef..7f2be879f3637 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ 
b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -88,7 +88,6 @@ import org.opensearch.indices.IndicesService; import org.opensearch.indices.InvalidIndexNameException; import org.opensearch.indices.ShardLimitValidator; -import org.opensearch.indices.SystemIndexDescriptor; import org.opensearch.indices.SystemIndices; import org.opensearch.threadpool.ThreadPool; @@ -214,17 +213,9 @@ public void validateIndexName(String index, ClusterState state) { * @param isHidden Whether or not this is a hidden index */ public boolean validateDotIndex(String index, @Nullable Boolean isHidden) { - boolean isSystem = false; if (index.charAt(0) == '.') { - SystemIndexDescriptor matchingDescriptor = systemIndices.findMatchingDescriptor(index); - if (matchingDescriptor != null) { - logger.trace( - "index [{}] is a system index because it matches index pattern [{}] with description [{}]", - index, - matchingDescriptor.getIndexPattern(), - matchingDescriptor.getDescription() - ); - isSystem = true; + if (systemIndices.validateSystemIndex(index)) { + return true; } else if (isHidden) { logger.trace("index [{}] is a hidden index", index); } else { @@ -237,7 +228,7 @@ public boolean validateDotIndex(String index, @Nullable Boolean isHidden) { } } - return isSystem; + return false; } /** @@ -884,7 +875,7 @@ static Settings aggregateIndexSettings( * We can not validate settings until we have applied templates, otherwise we do not know the actual settings * that will be used to create this index. 
*/ - shardLimitValidator.validateShardLimit(indexSettings, currentState); + shardLimitValidator.validateShardLimit(request.index(), indexSettings, currentState); if (IndexSettings.INDEX_SOFT_DELETES_SETTING.get(indexSettings) == false && IndexMetadata.SETTING_INDEX_VERSION_CREATED.get(indexSettings).onOrAfter(Version.V_2_0_0)) { throw new IllegalArgumentException( diff --git a/server/src/main/java/org/opensearch/indices/ShardLimitValidator.java b/server/src/main/java/org/opensearch/indices/ShardLimitValidator.java index 3ed0dbee59e71..7e4376e8ea8de 100644 --- a/server/src/main/java/org/opensearch/indices/ShardLimitValidator.java +++ b/server/src/main/java/org/opensearch/indices/ShardLimitValidator.java @@ -63,10 +63,12 @@ public class ShardLimitValidator { Setting.Property.NodeScope ); protected final AtomicInteger shardLimitPerNode = new AtomicInteger(); + private final SystemIndices systemIndices; - public ShardLimitValidator(final Settings settings, ClusterService clusterService) { + public ShardLimitValidator(final Settings settings, ClusterService clusterService, SystemIndices systemIndices) { this.shardLimitPerNode.set(SETTING_CLUSTER_MAX_SHARDS_PER_NODE.get(settings)); clusterService.getClusterSettings().addSettingsUpdateConsumer(SETTING_CLUSTER_MAX_SHARDS_PER_NODE, this::setShardLimitPerNode); + this.systemIndices = systemIndices; } private void setShardLimitPerNode(int newValue) { @@ -84,11 +86,17 @@ public int getShardLimitPerNode() { /** * Checks whether an index can be created without going over the cluster shard limit. 
* + * @param indexName the name of the index being created * @param settings the settings of the index to be created * @param state the current cluster state * @throws ValidationException if creating this index would put the cluster over the cluster shard limit */ - public void validateShardLimit(final Settings settings, final ClusterState state) { + public void validateShardLimit(final String indexName, final Settings settings, final ClusterState state) { + // Validate shard limit only for non system indices as it is not hard limit anyways + if (systemIndices.validateSystemIndex(indexName)) { + return; + } + final int numberOfShards = INDEX_NUMBER_OF_SHARDS_SETTING.get(settings); final int numberOfReplicas = IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.get(settings); final int shardsToCreate = numberOfShards * (1 + numberOfReplicas); @@ -111,6 +119,8 @@ public void validateShardLimit(final Settings settings, final ClusterState state */ public void validateShardLimit(ClusterState currentState, Index[] indicesToOpen) { int shardsToOpen = Arrays.stream(indicesToOpen) + // Validate shard limit only for non system indices as it is not hard limit anyways + .filter(index -> !systemIndices.validateSystemIndex(index.getName())) .filter(index -> currentState.metadata().index(index).getState().equals(IndexMetadata.State.CLOSE)) .mapToInt(index -> getTotalShardCount(currentState, index)) .sum(); diff --git a/server/src/main/java/org/opensearch/indices/SystemIndices.java b/server/src/main/java/org/opensearch/indices/SystemIndices.java index fc34645b4326f..042291554670c 100644 --- a/server/src/main/java/org/opensearch/indices/SystemIndices.java +++ b/server/src/main/java/org/opensearch/indices/SystemIndices.java @@ -32,6 +32,8 @@ package org.opensearch.indices; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.util.automaton.Automata; import org.apache.lucene.util.automaton.Automaton; import 
org.apache.lucene.util.automaton.CharacterRunAutomaton; @@ -63,6 +65,8 @@ * to reduce the locations within the code that need to deal with {@link SystemIndexDescriptor}s. */ public class SystemIndices { + private static final Logger logger = LogManager.getLogger(SystemIndices.class); + private static final Map> SERVER_SYSTEM_INDEX_DESCRIPTORS = singletonMap( TaskResultsService.class.getName(), singletonList(new SystemIndexDescriptor(TASK_INDEX + "*", "Task Result Index")) @@ -135,6 +139,27 @@ public boolean isSystemIndex(String indexName) { } } + /** + * Validates (if this index has a dot-prefixed name) and it is system index. + * @param index The name of the index in question + */ + public boolean validateSystemIndex(String index) { + if (index.charAt(0) == '.') { + SystemIndexDescriptor matchingDescriptor = findMatchingDescriptor(index); + if (matchingDescriptor != null) { + logger.trace( + "index [{}] is a system index because it matches index pattern [{}] with description [{}]", + index, + matchingDescriptor.getIndexPattern(), + matchingDescriptor.getDescription() + ); + return true; + } + } + + return false; + } + private static CharacterRunAutomaton buildCharacterRunAutomaton(Collection descriptors) { Optional automaton = descriptors.stream() .map(descriptor -> Regex.simpleMatchToAutomaton(descriptor.getIndexPattern())) diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 8ede6fdf76653..46400e5c8d269 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -635,7 +635,7 @@ protected Node( final AliasValidator aliasValidator = new AliasValidator(); - final ShardLimitValidator shardLimitValidator = new ShardLimitValidator(settings, clusterService); + final ShardLimitValidator shardLimitValidator = new ShardLimitValidator(settings, clusterService, systemIndices); final MetadataCreateIndexService metadataCreateIndexService = new 
MetadataCreateIndexService( settings, clusterService, diff --git a/server/src/main/java/org/opensearch/snapshots/RestoreService.java b/server/src/main/java/org/opensearch/snapshots/RestoreService.java index ad5cfe6e443ff..e1b143b5f5274 100644 --- a/server/src/main/java/org/opensearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/opensearch/snapshots/RestoreService.java @@ -384,7 +384,11 @@ public ClusterState execute(ClusterState currentState) { .put(snapshotIndexMetadata.getSettings()) .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) ); - shardLimitValidator.validateShardLimit(snapshotIndexMetadata.getSettings(), currentState); + shardLimitValidator.validateShardLimit( + renamedIndexName, + snapshotIndexMetadata.getSettings(), + currentState + ); if (!request.includeAliases() && !snapshotIndexMetadata.getAliases().isEmpty()) { // Remove all aliases - they shouldn't be restored indexMdBuilder.removeAllAliases(); diff --git a/server/src/test/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java b/server/src/test/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java index fd052308ed87b..afe35538adaf5 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java @@ -603,7 +603,8 @@ public void testRolloverClusterState() throws Exception { IndexNameExpressionResolver mockIndexNameExpressionResolver = mock(IndexNameExpressionResolver.class); when(mockIndexNameExpressionResolver.resolveDateMathExpression(any())).then(returnsFirstArg()); - ShardLimitValidator shardLimitValidator = new ShardLimitValidator(Settings.EMPTY, clusterService); + final SystemIndices systemIndices = new SystemIndices(emptyMap()); + ShardLimitValidator shardLimitValidator = new ShardLimitValidator(Settings.EMPTY, clusterService, systemIndices); 
MetadataCreateIndexService createIndexService = new MetadataCreateIndexService( Settings.EMPTY, clusterService, @@ -615,7 +616,7 @@ public void testRolloverClusterState() throws Exception { IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, testThreadPool, null, - new SystemIndices(emptyMap()), + systemIndices, false ); MetadataIndexAliasesService indexAliasesService = new MetadataIndexAliasesService( @@ -739,7 +740,8 @@ public void testRolloverClusterStateForDataStream() throws Exception { IndexNameExpressionResolver mockIndexNameExpressionResolver = mock(IndexNameExpressionResolver.class); when(mockIndexNameExpressionResolver.resolveDateMathExpression(any())).then(returnsFirstArg()); - ShardLimitValidator shardLimitValidator = new ShardLimitValidator(Settings.EMPTY, clusterService); + final SystemIndices systemIndices = new SystemIndices(emptyMap()); + ShardLimitValidator shardLimitValidator = new ShardLimitValidator(Settings.EMPTY, clusterService, systemIndices); MetadataCreateIndexService createIndexService = new MetadataCreateIndexService( Settings.EMPTY, clusterService, @@ -751,7 +753,7 @@ public void testRolloverClusterStateForDataStream() throws Exception { IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, testThreadPool, null, - new SystemIndices(emptyMap()), + systemIndices, false ); MetadataIndexAliasesService indexAliasesService = new MetadataIndexAliasesService( diff --git a/server/src/test/java/org/opensearch/indices/ShardLimitValidatorTests.java b/server/src/test/java/org/opensearch/indices/ShardLimitValidatorTests.java index 7e9c971cae1e8..a61ca13df0215 100644 --- a/server/src/test/java/org/opensearch/indices/ShardLimitValidatorTests.java +++ b/server/src/test/java/org/opensearch/indices/ShardLimitValidatorTests.java @@ -52,6 +52,8 @@ import java.util.Optional; import java.util.stream.Collectors; +import static java.util.Collections.emptyMap; +import static org.opensearch.cluster.metadata.IndexMetadata.*; import static 
org.opensearch.cluster.metadata.MetadataIndexStateServiceTests.addClosedIndex; import static org.opensearch.cluster.metadata.MetadataIndexStateServiceTests.addOpenedIndex; import static org.opensearch.cluster.shards.ShardCounts.forDataNodeCount; @@ -104,7 +106,54 @@ public void testUnderShardLimit() { assertFalse(errorMessage.isPresent()); } - public void testValidateShardLimit() { + /** + * This test validates that system index creation succeeds + * even though it exceeds the cluster max shard limit + */ + public void testSystemIndexCreationSucceeds() { + final ShardLimitValidator shardLimitValidator = createTestShardLimitService(1); + final Settings settings = Settings.builder() + .put(SETTING_VERSION_CREATED, Version.CURRENT) + .put(SETTING_NUMBER_OF_SHARDS, 1) + .put(SETTING_NUMBER_OF_REPLICAS, 1) + .build(); + final ClusterState state = createClusterForShardLimitTest(1, 1, 0); + shardLimitValidator.validateShardLimit(".tasks", settings, state); + } + + /** + * This test validates that non-system index creation + * fails when it exceeds the cluster max shard limit + */ + public void testNonSystemIndexCreationFails() { + final ShardLimitValidator shardLimitValidator = createTestShardLimitService(1); + final Settings settings = Settings.builder() + .put(SETTING_VERSION_CREATED, Version.CURRENT) + .put(SETTING_NUMBER_OF_SHARDS, 1) + .put(SETTING_NUMBER_OF_REPLICAS, 1) + .build(); + final ClusterState state = createClusterForShardLimitTest(1, 1, 0); + final ValidationException exception = expectThrows( + ValidationException.class, + () -> shardLimitValidator.validateShardLimit("abc", settings, state) + ); + assertEquals( + "Validation Failed: 1: this action would add [" + + 2 + + "] total shards, but this cluster currently has [" + + 1 + + "]/[" + + 1 + + "] maximum shards open;", + exception.getMessage() + ); + } + + /** + * This test validates that non-system index opening + * fails when it exceeds the cluster max shard limit + */ + public void 
testNonSystemIndexOpeningFails() { int nodesInCluster = randomIntBetween(2, 90); ShardCounts counts = forDataNodeCount(nodesInCluster); ClusterState state = createClusterForShardLimitTest( @@ -140,6 +189,33 @@ public void testValidateShardLimit() { ); } + /** + * This test validates that system index opening succeeds + * even when it exceeds the cluster max shard limit + */ + public void testSystemIndexOpeningSucceeds() { + int nodesInCluster = randomIntBetween(2, 90); + ShardCounts counts = forDataNodeCount(nodesInCluster); + ClusterState state = createClusterForShardLimitTest( + nodesInCluster, + randomAlphaOfLengthBetween(5, 15), + counts.getFirstIndexShards(), + counts.getFirstIndexReplicas(), + ".tasks", // Adding closed system index to cluster state + counts.getFailingIndexShards(), + counts.getFailingIndexReplicas() + ); + + Index[] indices = Arrays.stream(state.metadata().indices().values().toArray(IndexMetadata.class)) + .map(IndexMetadata::getIndex) + .collect(Collectors.toList()) + .toArray(new Index[2]); + + // Shard limit validation succeeds without any issues as system index is being opened + ShardLimitValidator shardLimitValidator = createTestShardLimitService(counts.getShardsPerNode()); + shardLimitValidator.validateShardLimit(state, indices); + } + public static ClusterState createClusterForShardLimitTest(int nodesInCluster, int shardsInIndex, int replicas) { ImmutableOpenMap.Builder dataNodes = ImmutableOpenMap.builder(); for (int i = 0; i < nodesInCluster; i++) { @@ -165,8 +241,10 @@ public static ClusterState createClusterForShardLimitTest(int nodesInCluster, in public static ClusterState createClusterForShardLimitTest( int nodesInCluster, + String openIndexName, int openIndexShards, int openIndexReplicas, + String closeIndexName, int closedIndexShards, int closedIndexReplicas ) { @@ -178,8 +256,8 @@ public static ClusterState createClusterForShardLimitTest( when(nodes.getDataNodes()).thenReturn(dataNodes.build()); ClusterState state = 
ClusterState.builder(ClusterName.DEFAULT).build(); - state = addOpenedIndex(randomAlphaOfLengthBetween(5, 15), openIndexShards, openIndexReplicas, state); - state = addClosedIndex(randomAlphaOfLengthBetween(5, 15), closedIndexShards, closedIndexReplicas, state); + state = addOpenedIndex(openIndexName, openIndexShards, openIndexReplicas, state); + state = addClosedIndex(closeIndexName, closedIndexShards, closedIndexReplicas, state); final Metadata.Builder metadata = Metadata.builder(state.metadata()); if (randomBoolean()) { @@ -190,6 +268,24 @@ public static ClusterState createClusterForShardLimitTest( return ClusterState.builder(state).metadata(metadata).nodes(nodes).build(); } + public static ClusterState createClusterForShardLimitTest( + int nodesInCluster, + int openIndexShards, + int openIndexReplicas, + int closedIndexShards, + int closedIndexReplicas + ) { + return createClusterForShardLimitTest( + nodesInCluster, + randomAlphaOfLengthBetween(5, 15), + openIndexShards, + openIndexReplicas, + randomAlphaOfLengthBetween(5, 15), + closedIndexShards, + closedIndexReplicas + ); + } + /** * Creates a {@link ShardLimitValidator} for testing with the given setting and a mocked cluster service. 
* @@ -204,7 +300,7 @@ public static ShardLimitValidator createTestShardLimitService(int maxShardsPerNo new ClusterSettings(limitOnlySettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) ); - return new ShardLimitValidator(limitOnlySettings, clusterService); + return new ShardLimitValidator(limitOnlySettings, clusterService, new SystemIndices(emptyMap())); } /** @@ -217,6 +313,6 @@ public static ShardLimitValidator createTestShardLimitService(int maxShardsPerNo public static ShardLimitValidator createTestShardLimitService(int maxShardsPerNode, ClusterService clusterService) { Settings limitOnlySettings = Settings.builder().put(SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(), maxShardsPerNode).build(); - return new ShardLimitValidator(limitOnlySettings, clusterService); + return new ShardLimitValidator(limitOnlySettings, clusterService, new SystemIndices(emptyMap())); } } diff --git a/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java b/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java index 99ec043cc7801..a7d9ba0bf3d4b 100644 --- a/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java +++ b/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java @@ -259,7 +259,8 @@ public IndexMetadata upgradeIndexMetadata(IndexMetadata indexMetadata, Version m null, actionFilters ); - ShardLimitValidator shardLimitValidator = new ShardLimitValidator(SETTINGS, clusterService); + final SystemIndices systemIndices = new SystemIndices(emptyMap()); + ShardLimitValidator shardLimitValidator = new ShardLimitValidator(SETTINGS, clusterService, systemIndices); MetadataIndexStateService indexStateService = new MetadataIndexStateService( clusterService, allocationService, @@ -290,7 +291,7 @@ public IndexMetadata upgradeIndexMetadata(IndexMetadata indexMetadata, Version m IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, threadPool, xContentRegistry, - new SystemIndices(emptyMap()), + systemIndices, true ); 
diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 26e19e532b6bc..a896aab0f70c9 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -1863,7 +1863,8 @@ public void onFailure(final Exception e) { RetentionLeaseSyncer.EMPTY ); Map actions = new HashMap<>(); - final ShardLimitValidator shardLimitValidator = new ShardLimitValidator(settings, clusterService); + final SystemIndices systemIndices = new SystemIndices(emptyMap()); + final ShardLimitValidator shardLimitValidator = new ShardLimitValidator(settings, clusterService, systemIndices); final MetadataCreateIndexService metadataCreateIndexService = new MetadataCreateIndexService( settings, clusterService, @@ -1875,7 +1876,7 @@ public void onFailure(final Exception e) { indexScopedSettings, threadPool, namedXContentRegistry, - new SystemIndices(emptyMap()), + systemIndices, false ); actions.put( From a6b4967dcf39b7ff3d47aff0a75314becbce72f4 Mon Sep 17 00:00:00 2001 From: "Daniel Doubrovkine (dB.)" Date: Fri, 15 Apr 2022 11:10:42 -0400 Subject: [PATCH 082/653] Adding @reta to OpenSearch maintainers. (#2905) * Adding @reta to OpenSearch maintainers. 
Signed-off-by: dblock * Update Andrew Ross github id Signed-off-by: Nicholas Walter Knize Co-authored-by: Nick Knize --- MAINTAINERS.md | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/MAINTAINERS.md b/MAINTAINERS.md index db6cd6c0f3309..7aa77cad0e713 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -3,27 +3,27 @@ | Maintainer | GitHub ID | Affiliation | | --------------- | --------- | ----------- | | Abbas Hussain | [abbashus](https://github.com/abbashus) | Amazon | +| Anas Alkouz | [anasalkouz](https://github.com/anasalkouz) | Amazon | +| Andrew Ross | [andrross](https://github.com/andrross)| Amazon | +| Andriy Redko | [reta](https://github.com/reta) | Aiven | | Charlotte Henkle | [CEHENKLE](https://github.com/CEHENKLE) | Amazon | +| Daniel "dB." Doubrovkine | [dblock](https://github.com/dblock) | Amazon | +| Gopala Krishna Ambareesh | [krishna-ggk](https://github.com/krishna-ggk) |Amazon | | Himanshu Setia | [setiah](https://github.com/setiah) | Amazon | +| Itiyama Sadana | [itiyamas](https://github.com/itiyamas) | Amazon | +| Kartik Ganesh | [kartg](https://github.com/kartg) | Amazon | +| Marc Handalian | [mch2](https://github.com/mch2) | Amazon | +| Megha Sai Kavikondala | [meghasaik](https://github.com/meghasaik) | Amazon | | Nick Knize | [nknize](https://github.com/nknize) | Amazon | +| Owais Kazi | [owaiskazi19](https://github.com/owaiskazi19) | Amazon | | Rabi Panda | [adnapibar](https://github.com/adnapibar) | Amazon | +| Rishikesh Pasham | [Rishikesh1159](https://github.com/Rishikesh1159) | Amazon| +| Ryan Bogan | [ryanbogan](https://github.com/ryanbogan) | Amazon | | Sarat Vemulapalli | [saratvemulapalli](https://github.com/saratvemulapalli) | Amazon | -| Tianli Feng | [tlfeng](https://github.com/tlfeng) | Amazon | -| Gopala Krishna Ambareesh | [krishna-ggk](https://github.com/krishna-ggk) |Amazon | -| Vengadanathan Srinivasan | [vengadanathan-s](https://github.com/vengadanathan-s) | Amazon | | Shweta 
Thareja |[shwetathareja](https://github.com/shwetathareja) | Amazon | -| Itiyama Sadana | [itiyamas](https://github.com/itiyamas) | Amazon | -| Daniel "dB." Doubrovkine | [dblock](https://github.com/dblock) | Amazon | -| Andrew Ross | [andross](https://github.com/andrross)| Amazon | +| Tianli Feng | [tlfeng](https://github.com/tlfeng) | Amazon | | Vacha Shah | [VachaShah](https://github.com/VachaShah) | Amazon | -| Anas Alkouz | [anasalkouz](https://github.com/anasalkouz) | Amazon | -| Megha Sai Kavikondala | [meghasaik](https://github.com/meghasaik) | Amazon | -| Rishikesh Pasham | [Rishikesh1159](https://github.com/Rishikesh1159) | Amazon| +| Vengadanathan Srinivasan | [vengadanathan-s](https://github.com/vengadanathan-s) | Amazon | | Xue Zhou | [xuezhou25](https://github.com/xuezhou25) | Amazon | -| Kartik Ganesh | [kartg](https://github.com/kartg) | Amazon | -| Marc Handalian | [mch2](https://github.com/mch2) | Amazon | -| Ryan Bogan | [ryanbogan](https://github.com/ryanbogan) | Amazon | -| Owais Kazi | [owaiskazi19](https://github.com/owaiskazi19) | Amazon | - [This document](https://github.com/opensearch-project/.github/blob/main/MAINTAINERS.md) explains what maintainers do in this repo, and how they should be doing it. If you're interested in contributing, see [CONTRIBUTING](CONTRIBUTING.md). From ca9151fa24222ff33ca0cc412b5c1ba8b9c65158 Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Fri, 15 Apr 2022 13:09:33 -0700 Subject: [PATCH 083/653] Add Github Workflow to build and publish lucene snapshots. (#2906) This change introduces a github workflow so that we can build and push snapshots of lucene. The RepositoriesSetupPlugin is also updated with a url from where these snapshots can be retrieved. 
Signed-off-by: Marc Handalian --- .github/workflows/lucene-snapshots.yml | 55 +++++++++++++++++++ DEVELOPER_GUIDE.md | 6 ++ .../gradle/RepositoriesSetupPlugin.java | 3 +- 3 files changed, 62 insertions(+), 2 deletions(-) create mode 100644 .github/workflows/lucene-snapshots.yml diff --git a/.github/workflows/lucene-snapshots.yml b/.github/workflows/lucene-snapshots.yml new file mode 100644 index 0000000000000..0fe025ad1aa16 --- /dev/null +++ b/.github/workflows/lucene-snapshots.yml @@ -0,0 +1,55 @@ +# This workflow will check out, build, and publish snapshots of lucene. + +name: OpenSearch Lucene snapshots + +on: + workflow_dispatch: + # Inputs the workflow accepts. + inputs: + ref: + description: + required: false + default: 'main' + +jobs: + publish-snapshots: + runs-on: ubuntu-latest + # These permissions are needed to interact with GitHub's OIDC Token endpoint. + permissions: + id-token: write + contents: read + + steps: + - uses: actions/checkout@v2 + - name: Set up JDK 17 + uses: actions/setup-java@v2 + with: + java-version: '17' + distribution: 'adopt' + + - name: Checkout Lucene + uses: actions/checkout@v2 + with: + repository: 'apache/lucene' + path: lucene + ref: ${{ github.event.inputs.ref }} + + - name: Set hash + working-directory: ./lucene + run: | + echo "::set-output name=REVISION::$(git rev-parse --short HEAD)" + id: version + + - name: Publish Lucene to local maven repo. + working-directory: ./lucene + run: ./gradlew publishJarsPublicationToMavenLocal -Pversion.suffix=snapshot-${{ steps.version.outputs.REVISION }} + + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v1 + with: + role-to-assume: ${{ secrets.LUCENE_SNAPSHOTS_ROLE }} + aws-region: us-west-2 + + - name: Copy files to S3 with the aws CLI. 
+ run: | + aws s3 cp ~/.m2/repository/org/apache/lucene/ s3://${{ secrets.LUCENE_SNAPSHOTS_BUCKET }}/snapshots/lucene/org/apache/lucene/ --recursive --no-progress diff --git a/DEVELOPER_GUIDE.md b/DEVELOPER_GUIDE.md index 11fcb324c8cae..70abfda767353 100644 --- a/DEVELOPER_GUIDE.md +++ b/DEVELOPER_GUIDE.md @@ -49,6 +49,7 @@ - [Submitting Changes](#submitting-changes) - [Backports](#backports) - [LineLint](#linelint) + - [Lucene Snapshots](#lucene-snapshots) # Developer Guide @@ -488,3 +489,8 @@ Executing the binary will automatically search the local directory tree for lint Pass a list of files or directories to limit your search. linelint README.md LICENSE + +# Lucene Snapshots +The Github workflow in [lucene-snapshots.yml](.github/workflows/lucene-snapshots.yml) is a Github worfklow executable by maintainers to build a top-down snapshot build of lucene. +These snapshots are available to test compatibility with upcoming changes to Lucene by updating the version at [version.properties](buildsrc/version.properties) with the `version-snapshot-sha` version. +Example: `lucene = 10.0.0-snapshot-2e941fc`. 
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/RepositoriesSetupPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/RepositoriesSetupPlugin.java index 30847f0648c5c..63b88f671c84c 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/RepositoriesSetupPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/RepositoriesSetupPlugin.java @@ -92,10 +92,9 @@ public static void configureRepositories(Project project) { throw new GradleException("Malformed lucene snapshot version: " + luceneVersion); } String revision = matcher.group(1); - // TODO(cleanup) - Setup own lucene snapshot repo MavenArtifactRepository luceneRepo = repos.maven(repo -> { repo.setName("lucene-snapshots"); - repo.setUrl("https://artifacts.opensearch.org/snapshots/lucene/"); + repo.setUrl("https://d1nvenhzbhpy0q.cloudfront.net/snapshots/lucene/"); }); repos.exclusiveContent(exclusiveRepo -> { exclusiveRepo.filter( From 1eda2bbe3ad5d60d10c60d5e4fd1e8711198b4c4 Mon Sep 17 00:00:00 2001 From: Sarat Vemulapalli Date: Fri, 15 Apr 2022 16:07:10 -0700 Subject: [PATCH 084/653] Fixing Scaled float field mapper to respect ignoreMalformed setting (#2918) * Fixing Scaled float field mapper to respect ignoreMalformed setting Signed-off-by: Sarat Vemulapalli * Adding unit tests Signed-off-by: Sarat Vemulapalli --- .../org/opensearch/index/mapper/ScaledFloatFieldMapper.java | 3 ++- .../opensearch/index/mapper/ScaledFloatFieldMapperTests.java | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/ScaledFloatFieldMapper.java b/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/ScaledFloatFieldMapper.java index 78a9e389eb63f..73ce1cf96d7d0 100644 --- a/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/ScaledFloatFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/ScaledFloatFieldMapper.java @@ -32,6 +32,7 @@ package org.opensearch.index.mapper; +import 
com.fasterxml.jackson.core.JsonParseException; import org.apache.lucene.document.Field; import org.apache.lucene.index.DocValues; import org.apache.lucene.index.LeafReaderContext; @@ -392,7 +393,7 @@ protected void parseCreateField(ParseContext context) throws IOException { } else { try { numericValue = parse(parser, coerce.value()); - } catch (IllegalArgumentException e) { + } catch (IllegalArgumentException | JsonParseException e) { if (ignoreMalformed.value()) { return; } else { diff --git a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/ScaledFloatFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/ScaledFloatFieldMapperTests.java index 3de322b286183..e19f9dd7988e1 100644 --- a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/ScaledFloatFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/ScaledFloatFieldMapperTests.java @@ -229,6 +229,7 @@ public void testCoerce() throws Exception { public void testIgnoreMalformed() throws Exception { doTestIgnoreMalformed("a", "For input string: \"a\""); + doTestIgnoreMalformed(true, "Current token (VALUE_TRUE) not numeric"); List values = Arrays.asList("NaN", "Infinity", "-Infinity"); for (String value : values) { @@ -236,7 +237,7 @@ public void testIgnoreMalformed() throws Exception { } } - private void doTestIgnoreMalformed(String value, String exceptionMessageContains) throws Exception { + private void doTestIgnoreMalformed(Object value, String exceptionMessageContains) throws Exception { DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); ThrowingRunnable runnable = () -> mapper.parse( new SourceToParse( From eba19351786944f6d9b3285c257c1637d18e5867 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Apr 2022 10:06:02 -0700 Subject: [PATCH 085/653] Bump org.gradle.test-retry from 1.3.1 to 1.3.2 (#2940) Bumps 
org.gradle.test-retry from 1.3.1 to 1.3.2. --- updated-dependencies: - dependency-name: org.gradle.test-retry dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.gradle b/build.gradle index a41ad61de39a6..7949872a85b86 100644 --- a/build.gradle +++ b/build.gradle @@ -49,7 +49,7 @@ plugins { id 'opensearch.docker-support' id 'opensearch.global-build-info' id "com.diffplug.spotless" version "6.4.2" apply false - id "org.gradle.test-retry" version "1.3.1" apply false + id "org.gradle.test-retry" version "1.3.2" apply false } apply from: 'gradle/build-complete.gradle' From fc378c7256bc1e46f4c1cbee1ac6360624e1e574 Mon Sep 17 00:00:00 2001 From: Vacha Shah Date: Mon, 18 Apr 2022 10:07:14 -0700 Subject: [PATCH 086/653] Adding asm to version file and upgrading (#2933) Signed-off-by: Vacha Shah --- buildSrc/version.properties | 1 + modules/lang-expression/build.gradle | 6 +++--- .../lang-expression/licenses/asm-9.2.jar.sha1 | 1 - .../lang-expression/licenses/asm-9.3.jar.sha1 | 1 + .../licenses/asm-commons-9.2.jar.sha1 | 1 - .../licenses/asm-commons-9.3.jar.sha1 | 1 + .../licenses/asm-tree-9.2.jar.sha1 | 1 - .../licenses/asm-tree-9.3.jar.sha1 | 1 + modules/lang-painless/build.gradle | 20 +++++++++---------- .../lang-painless/licenses/asm-9.2.jar.sha1 | 1 - .../lang-painless/licenses/asm-9.3.jar.sha1 | 1 + .../licenses/asm-analysis-9.2.jar.sha1 | 1 - .../licenses/asm-analysis-9.3.jar.sha1 | 1 + .../licenses/asm-commons-9.2.jar.sha1 | 1 - .../licenses/asm-commons-9.3.jar.sha1 | 1 + .../licenses/asm-tree-9.2.jar.sha1 | 1 - .../licenses/asm-tree-9.3.jar.sha1 | 1 + .../licenses/asm-util-9.2.jar.sha1 | 1 - .../licenses/asm-util-9.3.jar.sha1 | 1 + test/logger-usage/build.gradle | 6 +++--- 20 files changed, 25 insertions(+), 24 deletions(-) delete mode 
100644 modules/lang-expression/licenses/asm-9.2.jar.sha1 create mode 100644 modules/lang-expression/licenses/asm-9.3.jar.sha1 delete mode 100644 modules/lang-expression/licenses/asm-commons-9.2.jar.sha1 create mode 100644 modules/lang-expression/licenses/asm-commons-9.3.jar.sha1 delete mode 100644 modules/lang-expression/licenses/asm-tree-9.2.jar.sha1 create mode 100644 modules/lang-expression/licenses/asm-tree-9.3.jar.sha1 delete mode 100644 modules/lang-painless/licenses/asm-9.2.jar.sha1 create mode 100644 modules/lang-painless/licenses/asm-9.3.jar.sha1 delete mode 100644 modules/lang-painless/licenses/asm-analysis-9.2.jar.sha1 create mode 100644 modules/lang-painless/licenses/asm-analysis-9.3.jar.sha1 delete mode 100644 modules/lang-painless/licenses/asm-commons-9.2.jar.sha1 create mode 100644 modules/lang-painless/licenses/asm-commons-9.3.jar.sha1 delete mode 100644 modules/lang-painless/licenses/asm-tree-9.2.jar.sha1 create mode 100644 modules/lang-painless/licenses/asm-tree-9.3.jar.sha1 delete mode 100644 modules/lang-painless/licenses/asm-util-9.2.jar.sha1 create mode 100644 modules/lang-painless/licenses/asm-util-9.3.jar.sha1 diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 7ae3bfaa19b5a..d3499b0df599b 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -16,6 +16,7 @@ icu4j = 70.1 supercsv = 2.4.0 log4j = 2.17.1 slf4j = 1.6.2 +asm = 9.3 # when updating the JNA version, also update the version in buildSrc/build.gradle jna = 5.5.0 diff --git a/modules/lang-expression/build.gradle b/modules/lang-expression/build.gradle index 9d7b0e2f0979c..203c332069c5f 100644 --- a/modules/lang-expression/build.gradle +++ b/modules/lang-expression/build.gradle @@ -38,9 +38,9 @@ opensearchplugin { dependencies { api "org.apache.lucene:lucene-expressions:${versions.lucene}" api 'org.antlr:antlr4-runtime:4.9.3' - api 'org.ow2.asm:asm:9.2' - api 'org.ow2.asm:asm-commons:9.2' - api 'org.ow2.asm:asm-tree:9.2' + api 
"org.ow2.asm:asm:${versions.asm}" + api "org.ow2.asm:asm-commons:${versions.asm}" + api "org.ow2.asm:asm-tree:${versions.asm}" } restResources { restApi { diff --git a/modules/lang-expression/licenses/asm-9.2.jar.sha1 b/modules/lang-expression/licenses/asm-9.2.jar.sha1 deleted file mode 100644 index 28f456d3cbcb2..0000000000000 --- a/modules/lang-expression/licenses/asm-9.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -81a03f76019c67362299c40e0ba13405f5467bff \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-9.3.jar.sha1 b/modules/lang-expression/licenses/asm-9.3.jar.sha1 new file mode 100644 index 0000000000000..71d3966a6f6f9 --- /dev/null +++ b/modules/lang-expression/licenses/asm-9.3.jar.sha1 @@ -0,0 +1 @@ +8e6300ef51c1d801a7ed62d07cd221aca3a90640 \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-commons-9.2.jar.sha1 b/modules/lang-expression/licenses/asm-commons-9.2.jar.sha1 deleted file mode 100644 index 7beb3d29afe86..0000000000000 --- a/modules/lang-expression/licenses/asm-commons-9.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f4d7f0fc9054386f2893b602454d48e07d4fbead \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-commons-9.3.jar.sha1 b/modules/lang-expression/licenses/asm-commons-9.3.jar.sha1 new file mode 100644 index 0000000000000..fd7cd4943a57c --- /dev/null +++ b/modules/lang-expression/licenses/asm-commons-9.3.jar.sha1 @@ -0,0 +1 @@ +1f2a432d1212f5c352ae607d7b61dcae20c20af5 \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-tree-9.2.jar.sha1 b/modules/lang-expression/licenses/asm-tree-9.2.jar.sha1 deleted file mode 100644 index 7b486521ecef3..0000000000000 --- a/modules/lang-expression/licenses/asm-tree-9.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d96c99a30f5e1a19b0e609dbb19a44d8518ac01e \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-tree-9.3.jar.sha1 b/modules/lang-expression/licenses/asm-tree-9.3.jar.sha1 new file mode 
100644 index 0000000000000..238f0006424d3 --- /dev/null +++ b/modules/lang-expression/licenses/asm-tree-9.3.jar.sha1 @@ -0,0 +1 @@ +78d2ecd61318b5a58cd04fb237636c0e86b77d97 \ No newline at end of file diff --git a/modules/lang-painless/build.gradle b/modules/lang-painless/build.gradle index 069158fb678ef..f8e25c20cbf15 100644 --- a/modules/lang-painless/build.gradle +++ b/modules/lang-painless/build.gradle @@ -49,11 +49,11 @@ testClusters.all { dependencies { api 'org.antlr:antlr4-runtime:4.9.3' - api 'org.ow2.asm:asm-util:9.2' - api 'org.ow2.asm:asm-tree:9.2' - api 'org.ow2.asm:asm-commons:9.2' - api 'org.ow2.asm:asm-analysis:9.2' - api 'org.ow2.asm:asm:9.2' + api "org.ow2.asm:asm-util:${versions.asm}" + api "org.ow2.asm:asm-tree:${versions.asm}" + api "org.ow2.asm:asm-commons:${versions.asm}" + api "org.ow2.asm:asm-analysis:${versions.asm}" + api "org.ow2.asm:asm:${versions.asm}" api project('spi') } @@ -69,11 +69,11 @@ shadowJar { classifier = null relocate 'org.objectweb', 'org.opensearch.repackage.org.objectweb' dependencies { - include(dependency('org.ow2.asm:asm:9.2')) - include(dependency('org.ow2.asm:asm-util:9.2')) - include(dependency('org.ow2.asm:asm-tree:9.2')) - include(dependency('org.ow2.asm:asm-commons:9.2')) - include(dependency('org.ow2.asm:asm-analysis:9.2')) + include(dependency("org.ow2.asm:asm:${versions.asm}")) + include(dependency("org.ow2.asm:asm-util:${versions.asm}")) + include(dependency("org.ow2.asm:asm-tree:${versions.asm}")) + include(dependency("org.ow2.asm:asm-commons:${versions.asm}")) + include(dependency("org.ow2.asm:asm-analysis:${versions.asm}")) } } diff --git a/modules/lang-painless/licenses/asm-9.2.jar.sha1 b/modules/lang-painless/licenses/asm-9.2.jar.sha1 deleted file mode 100644 index 28f456d3cbcb2..0000000000000 --- a/modules/lang-painless/licenses/asm-9.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -81a03f76019c67362299c40e0ba13405f5467bff \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-9.3.jar.sha1 
b/modules/lang-painless/licenses/asm-9.3.jar.sha1 new file mode 100644 index 0000000000000..71d3966a6f6f9 --- /dev/null +++ b/modules/lang-painless/licenses/asm-9.3.jar.sha1 @@ -0,0 +1 @@ +8e6300ef51c1d801a7ed62d07cd221aca3a90640 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-analysis-9.2.jar.sha1 b/modules/lang-painless/licenses/asm-analysis-9.2.jar.sha1 deleted file mode 100644 index b93483a24da5d..0000000000000 --- a/modules/lang-painless/licenses/asm-analysis-9.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7487dd756daf96cab9986e44b9d7bcb796a61c10 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-analysis-9.3.jar.sha1 b/modules/lang-painless/licenses/asm-analysis-9.3.jar.sha1 new file mode 100644 index 0000000000000..f5a04d0196823 --- /dev/null +++ b/modules/lang-painless/licenses/asm-analysis-9.3.jar.sha1 @@ -0,0 +1 @@ +4b071f211b37c38e0e9f5998550197c8593f6ad8 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-commons-9.2.jar.sha1 b/modules/lang-painless/licenses/asm-commons-9.2.jar.sha1 deleted file mode 100644 index 7beb3d29afe86..0000000000000 --- a/modules/lang-painless/licenses/asm-commons-9.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f4d7f0fc9054386f2893b602454d48e07d4fbead \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-commons-9.3.jar.sha1 b/modules/lang-painless/licenses/asm-commons-9.3.jar.sha1 new file mode 100644 index 0000000000000..fd7cd4943a57c --- /dev/null +++ b/modules/lang-painless/licenses/asm-commons-9.3.jar.sha1 @@ -0,0 +1 @@ +1f2a432d1212f5c352ae607d7b61dcae20c20af5 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-tree-9.2.jar.sha1 b/modules/lang-painless/licenses/asm-tree-9.2.jar.sha1 deleted file mode 100644 index 7b486521ecef3..0000000000000 --- a/modules/lang-painless/licenses/asm-tree-9.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d96c99a30f5e1a19b0e609dbb19a44d8518ac01e \ No newline at end of file diff --git 
a/modules/lang-painless/licenses/asm-tree-9.3.jar.sha1 b/modules/lang-painless/licenses/asm-tree-9.3.jar.sha1 new file mode 100644 index 0000000000000..238f0006424d3 --- /dev/null +++ b/modules/lang-painless/licenses/asm-tree-9.3.jar.sha1 @@ -0,0 +1 @@ +78d2ecd61318b5a58cd04fb237636c0e86b77d97 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-util-9.2.jar.sha1 b/modules/lang-painless/licenses/asm-util-9.2.jar.sha1 deleted file mode 100644 index 5cb89aa115f30..0000000000000 --- a/modules/lang-painless/licenses/asm-util-9.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fbc178fc5ba3dab50fd7e8a5317b8b647c8e8946 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-util-9.3.jar.sha1 b/modules/lang-painless/licenses/asm-util-9.3.jar.sha1 new file mode 100644 index 0000000000000..8859c317794ba --- /dev/null +++ b/modules/lang-painless/licenses/asm-util-9.3.jar.sha1 @@ -0,0 +1 @@ +9595bc05510d0bd4b610188b77333fe4851a1975 \ No newline at end of file diff --git a/test/logger-usage/build.gradle b/test/logger-usage/build.gradle index 3154e556a87cf..e81cdef04df1f 100644 --- a/test/logger-usage/build.gradle +++ b/test/logger-usage/build.gradle @@ -31,9 +31,9 @@ apply plugin: 'opensearch.java' dependencies { - api 'org.ow2.asm:asm:9.2' - api 'org.ow2.asm:asm-tree:9.2' - api 'org.ow2.asm:asm-analysis:9.3' + api "org.ow2.asm:asm:${versions.asm}" + api "org.ow2.asm:asm-tree:${versions.asm}" + api "org.ow2.asm:asm-analysis:${versions.asm}" api "org.apache.logging.log4j:log4j-api:${versions.log4j}" testImplementation project(":test:framework") } From d61d170332be07b809cc982f0a5dc5a3d6148d77 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Mon, 18 Apr 2022 13:16:22 -0400 Subject: [PATCH 087/653] Removed java11 source folders since JDK-11 is the baseline now (#2898) Signed-off-by: Andriy Redko --- .../org/opensearch/common/collect/List.java | 34 +-- .../org/opensearch/common/collect/Map.java | 102 +++------ 
.../org/opensearch/common/collect/Set.java | 36 +--- .../opensearch/core/internal/io/Streams.java | 9 +- .../org/opensearch/common/collect/List.java | 94 --------- .../org/opensearch/common/collect/Map.java | 194 ------------------ .../org/opensearch/common/collect/Set.java | 95 --------- .../opensearch/core/internal/io/Streams.java | 101 --------- .../org/opensearch/monitor/jvm/JvmPid.java | 23 +-- .../org/opensearch/monitor/jvm/JvmPid.java | 41 ---- 10 files changed, 55 insertions(+), 674 deletions(-) delete mode 100644 libs/core/src/main/java11/org/opensearch/common/collect/List.java delete mode 100644 libs/core/src/main/java11/org/opensearch/common/collect/Map.java delete mode 100644 libs/core/src/main/java11/org/opensearch/common/collect/Set.java delete mode 100644 libs/core/src/main/java11/org/opensearch/core/internal/io/Streams.java delete mode 100644 server/src/main/java11/org/opensearch/monitor/jvm/JvmPid.java diff --git a/libs/core/src/main/java/org/opensearch/common/collect/List.java b/libs/core/src/main/java/org/opensearch/common/collect/List.java index 96bdacc276323..56216d6bbafe2 100644 --- a/libs/core/src/main/java/org/opensearch/common/collect/List.java +++ b/libs/core/src/main/java/org/opensearch/common/collect/List.java @@ -32,48 +32,44 @@ package org.opensearch.common.collect; -import java.util.Arrays; import java.util.Collection; -import java.util.Collections; public class List { /** - * Returns an unmodifiable list containing zero elements. + * Delegates to the Java9 {@code List.of()} method. * * @param the {@code List}'s element type * @return an empty {@code List} */ public static java.util.List of() { - return Collections.emptyList(); + return java.util.List.of(); } /** - * Returns an unmodifiable list containing one element. + * Delegates to the Java9 {@code List.of()} method. 
* * @param the {@code List}'s element type * @param e1 the single element * @return a {@code List} containing the specified element */ public static java.util.List of(T e1) { - return Collections.singletonList(e1); + return java.util.List.of(e1); } /** - * Returns an unmodifiable list containing two elements. + * Delegates to the Java9 {@code List.of()} method. * * @param the {@code List}'s element type - * @param e1 the first element - * @param e2 the second element + * @param e1 the single element * @return a {@code List} containing the specified element */ - @SuppressWarnings("unchecked") public static java.util.List of(T e1, T e2) { - return List.of((T[]) new Object[] { e1, e2 }); + return java.util.List.of(e1, e2); } /** - * Returns an unmodifiable list containing an arbitrary number of elements. + * Delegates to the Java9 {@code List.of()} method. * * @param entries the elements to be contained in the list * @param the {@code List}'s element type @@ -82,25 +78,17 @@ public static java.util.List of(T e1, T e2) { @SafeVarargs @SuppressWarnings("varargs") public static java.util.List of(T... entries) { - switch (entries.length) { - case 0: - return List.of(); - case 1: - return List.of(entries[0]); - default: - return Collections.unmodifiableList(Arrays.asList(entries)); - } + return java.util.List.of(entries); } /** - * Returns an unmodifiable {@code List} containing the elements of the given {@code Collection} in iteration order. + * Delegates to the Java9 {@code List.copyOf()} method. 
* * @param the {@code List}'s element type * @param coll a {@code Collection} from which elements are drawn, must be non-null * @return a {@code List} containing the elements of the given {@code Collection} */ - @SuppressWarnings("unchecked") public static java.util.List copyOf(Collection coll) { - return (java.util.List) List.of(coll.toArray()); + return java.util.List.copyOf(coll); } } diff --git a/libs/core/src/main/java/org/opensearch/common/collect/Map.java b/libs/core/src/main/java/org/opensearch/common/collect/Map.java index 3b401ee0e1c1b..21de546869390 100644 --- a/libs/core/src/main/java/org/opensearch/common/collect/Map.java +++ b/libs/core/src/main/java/org/opensearch/common/collect/Map.java @@ -32,70 +32,66 @@ package org.opensearch.common.collect; -import java.util.AbstractMap; -import java.util.Collections; -import java.util.HashMap; - public class Map { /** - * Returns an unmodifiable map containing one mapping. + * Delegates to the Java9 {@code Map.of()} method. */ public static java.util.Map of() { - return Collections.emptyMap(); + return java.util.Map.of(); } /** - * Returns an unmodifiable map containing one mapping. + * Delegates to the Java9 {@code Map.of()} method. */ public static java.util.Map of(K k1, V v1) { - return Collections.singletonMap(k1, v1); + return java.util.Map.of(k1, v1); } /** - * Returns an unmodifiable map containing two mappings. + * Delegates to the Java9 {@code Map.of()} method. */ public static java.util.Map of(K k1, V v1, K k2, V v2) { - return mapN(k1, v1, k2, v2); + return java.util.Map.of(k1, v1, k2, v2); } /** - * Returns an unmodifiable map containing three mappings. + * Delegates to the Java9 {@code Map.of()} method. */ public static java.util.Map of(K k1, V v1, K k2, V v2, K k3, V v3) { - return mapN(k1, v1, k2, v2, k3, v3); + return java.util.Map.of(k1, v1, k2, v2, k3, v3); } /** - * Returns an unmodifiable map containing four mappings. + * Delegates to the Java9 {@code Map.of()} method. 
*/ public static java.util.Map of(K k1, V v1, K k2, V v2, K k3, V v3, K k4, V v4) { - return mapN(k1, v1, k2, v2, k3, v3, k4, v4); + return java.util.Map.of(k1, v1, k2, v2, k3, v3, k4, v4); } /** - * Returns an unmodifiable map containing five mappings. + * Delegates to the Java9 {@code Map.of()} method. */ public static java.util.Map of(K k1, V v1, K k2, V v2, K k3, V v3, K k4, V v4, K k5, V v5) { - return mapN(k1, v1, k2, v2, k3, v3, k4, v4, k5, v5); + return java.util.Map.of(k1, v1, k2, v2, k3, v3, k4, v4, k5, v5); } /** - * Returns an unmodifiable map containing six mappings. + * Delegates to the Java9 {@code Map.of()} method. */ public static java.util.Map of(K k1, V v1, K k2, V v2, K k3, V v3, K k4, V v4, K k5, V v5, K k6, V v6) { - return mapN(k1, v1, k2, v2, k3, v3, k4, v4, k5, v5, k6, v6); + return java.util.Map.of(k1, v1, k2, v2, k3, v3, k4, v4, k5, v5, k6, v6); } /** - * Returns an unmodifiable map containing seven mappings. + * Delegates to the Java9 {@code Map.of()} method. */ public static java.util.Map of(K k1, V v1, K k2, V v2, K k3, V v3, K k4, V v4, K k5, V v5, K k6, V v6, K k7, V v7) { - return mapN(k1, v1, k2, v2, k3, v3, k4, v4, k5, v5, k6, v6, k7, v7); + return java.util.Map.of(k1, v1, k2, v2, k3, v3, k4, v4, k5, v5, k6, v6, k7, v7); } /** - * Returns an unmodifiable map containing eight mappings. + * Delegates to the Java9 {@code Map.of()} method. */ public static java.util.Map of( K k1, @@ -115,11 +111,11 @@ public static java.util.Map of( K k8, V v8 ) { - return mapN(k1, v1, k2, v2, k3, v3, k4, v4, k5, v5, k6, v6, k7, v7, k8, v8); + return java.util.Map.of(k1, v1, k2, v2, k3, v3, k4, v4, k5, v5, k6, v6, k7, v7, k8, v8); } /** - * Returns an unmodifiable map containing nine mappings. + * Delegates to the Java9 {@code Map.of()} method. 
*/ public static java.util.Map of( K k1, @@ -141,11 +137,11 @@ public static java.util.Map of( K k9, V v9 ) { - return mapN(k1, v1, k2, v2, k3, v3, k4, v4, k5, v5, k6, v6, k7, v7, k8, v8, k9, v9); + return java.util.Map.of(k1, v1, k2, v2, k3, v3, k4, v4, k5, v5, k6, v6, k7, v7, k8, v8, k9, v9); } /** - * Returns an unmodifiable map containing ten mappings. + * Delegates to the Java9 {@code Map.of()} method. */ public static java.util.Map of( K k1, @@ -169,68 +165,30 @@ public static java.util.Map of( K k10, V v10 ) { - return mapN(k1, v1, k2, v2, k3, v3, k4, v4, k5, v5, k6, v6, k7, v7, k8, v8, k9, v9, k10, v10); - } - - @SuppressWarnings("unchecked") - private static java.util.Map mapN(Object... objects) { - if (objects.length % 2 != 0) { - throw new IllegalStateException("Must provide an even number of arguments to Map::of method"); - } - switch (objects.length) { - case 0: - return Map.of(); - case 2: - return Map.of((K) objects[0], (V) objects[1]); - default: - HashMap map = new HashMap<>(); - for (int k = 0; k < objects.length / 2; k++) { - map.put((K) objects[k * 2], (V) objects[k * 2 + 1]); - } - return Collections.unmodifiableMap(map); - } + return java.util.Map.of(k1, v1, k2, v2, k3, v3, k4, v4, k5, v5, k6, v6, k7, v7, k8, v8, k9, v9, k10, v10); } /** - * Returns an unmodifiable map containing keys and values extracted from the given entries. - * - * @param the {@code Map}'s key type - * @param the {@code Map}'s value type - * @param entries {@code Map.Entry}s containing the keys and values from which the map is populated - * @return a {@code Map} containing the specified mappings + * Delegates to the Java9 {@code Map.ofEntries()} method. */ @SafeVarargs + @SuppressWarnings("varargs") public static java.util.Map ofEntries(java.util.Map.Entry... 
entries) { - if (entries.length == 0) { - return Collections.emptyMap(); - } else if (entries.length == 1) { - return Collections.singletonMap(entries[0].getKey(), entries[0].getValue()); - } else { - HashMap map = new HashMap<>(); - for (java.util.Map.Entry entry : entries) { - map.put(entry.getKey(), entry.getValue()); - } - return Collections.unmodifiableMap(map); - } + return java.util.Map.ofEntries(entries); } /** - * Returns an unmodifiable Map.Entry for the provided key and value. + * Delegates to the Java9 {@code Map.entry()} method. */ public static java.util.Map.Entry entry(K k, V v) { - return new AbstractMap.SimpleImmutableEntry<>(k, v); + return java.util.Map.entry(k, v); } /** - * Returns an unmodifiable {@code Map} containing the entries of the given {@code Map}. - * - * @param the {@code Map}'s key type - * @param the {@code Map}'s value type - * @param map a {@code Map} from which entries are drawn, must be non-null - * @return a {@code Map} containing the entries of the given {@code Map} + * Delegates to the Java10 {@code Map.copyOf()} method. */ - @SuppressWarnings({ "unchecked", "rawtypes" }) public static java.util.Map copyOf(java.util.Map map) { - return (java.util.Map) Map.ofEntries(map.entrySet().toArray(new java.util.Map.Entry[0])); + return java.util.Map.copyOf(map); } + } diff --git a/libs/core/src/main/java/org/opensearch/common/collect/Set.java b/libs/core/src/main/java/org/opensearch/common/collect/Set.java index 921408b88241f..0350023e4e894 100644 --- a/libs/core/src/main/java/org/opensearch/common/collect/Set.java +++ b/libs/core/src/main/java/org/opensearch/common/collect/Set.java @@ -32,49 +32,45 @@ package org.opensearch.common.collect; -import java.util.Arrays; import java.util.Collection; -import java.util.Collections; -import java.util.HashSet; public class Set { /** - * Returns an unmodifiable set containing zero elements. + * Delegates to the Java9 {@code Set.of()} method. 
* * @param the {@code Set}'s element type * @return an empty {@code Set} */ public static java.util.Set of() { - return Collections.emptySet(); + return java.util.Set.of(); } /** - * Returns an unmodifiable set containing one element. + * Delegates to the Java9 {@code Set.of()} method. * * @param the {@code Set}'s element type * @param e1 the single element * @return a {@code Set} containing the specified element */ public static java.util.Set of(T e1) { - return Collections.singleton(e1); + return java.util.Set.of(e1); } /** - * Returns an unmodifiable set containing two elements. + * Delegates to the Java9 {@code Set.of()} method. * * @param the {@code Set}'s element type * @param e1 the first element * @param e2 the second element * @return a {@code Set} containing the specified element */ - @SuppressWarnings("unchecked") public static java.util.Set of(T e1, T e2) { - return Set.of((T[]) new Object[] { e1, e2 }); + return java.util.Set.of(e1, e2); } /** - * Returns an unmodifiable set containing an arbitrary number of elements. + * Delegates to the Java9 {@code Set.of()} method. * * @param entries the elements to be contained in the set * @param the {@code Set}'s element type @@ -83,27 +79,17 @@ public static java.util.Set of(T e1, T e2) { @SafeVarargs @SuppressWarnings("varargs") public static java.util.Set of(T... entries) { - switch (entries.length) { - case 0: - return Set.of(); - case 1: - return Set.of(entries[0]); - default: - return Collections.unmodifiableSet(new HashSet<>(Arrays.asList(entries))); - } + return java.util.Set.of(entries); } /** - * Returns an unmodifiable {@code Set} containing the elements of the given Collection. + * Delegates to the Java10 {@code Set.copyOf} method. 
* - * @param the {@code Set}'s element type + * @param the {@code Set}'s element type * @param coll a {@code Collection} from which elements are drawn, must be non-null * @return a {@code Set} containing the elements of the given {@code Collection} - * @throws NullPointerException if coll is null, or if it contains any nulls - * @since 10 */ - @SuppressWarnings("unchecked") public static java.util.Set copyOf(Collection coll) { - return (java.util.Set) Set.of(new HashSet<>(coll).toArray()); + return java.util.Set.copyOf(coll); } } diff --git a/libs/core/src/main/java/org/opensearch/core/internal/io/Streams.java b/libs/core/src/main/java/org/opensearch/core/internal/io/Streams.java index 1938e1bbf4dff..67765392b1d46 100644 --- a/libs/core/src/main/java/org/opensearch/core/internal/io/Streams.java +++ b/libs/core/src/main/java/org/opensearch/core/internal/io/Streams.java @@ -38,20 +38,15 @@ /** * Simple utility methods for file and stream copying. - * All copy methods use a block size of 4096 bytes, - * and close all affected streams when done. + * All copy methods close all affected streams when done. *

* Mainly for use within the framework, * but also useful for application code. */ -public class Streams { +public abstract class Streams { private static final ThreadLocal buffer = ThreadLocal.withInitial(() -> new byte[8 * 1024]); - private Streams() { - - } - /** * Copy the contents of the given InputStream to the given OutputStream. Optionally, closes both streams when done. * diff --git a/libs/core/src/main/java11/org/opensearch/common/collect/List.java b/libs/core/src/main/java11/org/opensearch/common/collect/List.java deleted file mode 100644 index 56216d6bbafe2..0000000000000 --- a/libs/core/src/main/java11/org/opensearch/common/collect/List.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.common.collect; - -import java.util.Collection; - -public class List { - - /** - * Delegates to the Java9 {@code List.of()} method. 
- * - * @param the {@code List}'s element type - * @return an empty {@code List} - */ - public static java.util.List of() { - return java.util.List.of(); - } - - /** - * Delegates to the Java9 {@code List.of()} method. - * - * @param the {@code List}'s element type - * @param e1 the single element - * @return a {@code List} containing the specified element - */ - public static java.util.List of(T e1) { - return java.util.List.of(e1); - } - - /** - * Delegates to the Java9 {@code List.of()} method. - * - * @param the {@code List}'s element type - * @param e1 the single element - * @return a {@code List} containing the specified element - */ - public static java.util.List of(T e1, T e2) { - return java.util.List.of(e1, e2); - } - - /** - * Delegates to the Java9 {@code List.of()} method. - * - * @param entries the elements to be contained in the list - * @param the {@code List}'s element type - * @return an unmodifiable list containing the specified elements. - */ - @SafeVarargs - @SuppressWarnings("varargs") - public static java.util.List of(T... entries) { - return java.util.List.of(entries); - } - - /** - * Delegates to the Java9 {@code List.copyOf()} method. 
- * - * @param the {@code List}'s element type - * @param coll a {@code Collection} from which elements are drawn, must be non-null - * @return a {@code List} containing the elements of the given {@code Collection} - */ - public static java.util.List copyOf(Collection coll) { - return java.util.List.copyOf(coll); - } -} diff --git a/libs/core/src/main/java11/org/opensearch/common/collect/Map.java b/libs/core/src/main/java11/org/opensearch/common/collect/Map.java deleted file mode 100644 index 21de546869390..0000000000000 --- a/libs/core/src/main/java11/org/opensearch/common/collect/Map.java +++ /dev/null @@ -1,194 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.common.collect; - -public class Map { - - /** - * Delegates to the Java9 {@code Map.of()} method. 
- */ - public static java.util.Map of() { - return java.util.Map.of(); - } - - /** - * Delegates to the Java9 {@code Map.of()} method. - */ - public static java.util.Map of(K k1, V v1) { - return java.util.Map.of(k1, v1); - } - - /** - * Delegates to the Java9 {@code Map.of()} method. - */ - public static java.util.Map of(K k1, V v1, K k2, V v2) { - return java.util.Map.of(k1, v1, k2, v2); - } - - /** - * Delegates to the Java9 {@code Map.of()} method. - */ - public static java.util.Map of(K k1, V v1, K k2, V v2, K k3, V v3) { - return java.util.Map.of(k1, v1, k2, v2, k3, v3); - } - - /** - * Delegates to the Java9 {@code Map.of()} method. - */ - public static java.util.Map of(K k1, V v1, K k2, V v2, K k3, V v3, K k4, V v4) { - return java.util.Map.of(k1, v1, k2, v2, k3, v3, k4, v4); - } - - /** - * Delegates to the Java9 {@code Map.of()} method. - */ - public static java.util.Map of(K k1, V v1, K k2, V v2, K k3, V v3, K k4, V v4, K k5, V v5) { - return java.util.Map.of(k1, v1, k2, v2, k3, v3, k4, v4, k5, v5); - } - - /** - * Delegates to the Java9 {@code Map.of()} method. - */ - public static java.util.Map of(K k1, V v1, K k2, V v2, K k3, V v3, K k4, V v4, K k5, V v5, K k6, V v6) { - return java.util.Map.of(k1, v1, k2, v2, k3, v3, k4, v4, k5, v5, k6, v6); - } - - /** - * Delegates to the Java9 {@code Map.of()} method. - */ - public static java.util.Map of(K k1, V v1, K k2, V v2, K k3, V v3, K k4, V v4, K k5, V v5, K k6, V v6, K k7, V v7) { - return java.util.Map.of(k1, v1, k2, v2, k3, v3, k4, v4, k5, v5, k6, v6, k7, v7); - } - - /** - * Delegates to the Java9 {@code Map.of()} method. - */ - public static java.util.Map of( - K k1, - V v1, - K k2, - V v2, - K k3, - V v3, - K k4, - V v4, - K k5, - V v5, - K k6, - V v6, - K k7, - V v7, - K k8, - V v8 - ) { - return java.util.Map.of(k1, v1, k2, v2, k3, v3, k4, v4, k5, v5, k6, v6, k7, v7, k8, v8); - } - - /** - * Delegates to the Java9 {@code Map.of()} method. 
- */ - public static java.util.Map of( - K k1, - V v1, - K k2, - V v2, - K k3, - V v3, - K k4, - V v4, - K k5, - V v5, - K k6, - V v6, - K k7, - V v7, - K k8, - V v8, - K k9, - V v9 - ) { - return java.util.Map.of(k1, v1, k2, v2, k3, v3, k4, v4, k5, v5, k6, v6, k7, v7, k8, v8, k9, v9); - } - - /** - * Delegates to the Java9 {@code Map.of()} method. - */ - public static java.util.Map of( - K k1, - V v1, - K k2, - V v2, - K k3, - V v3, - K k4, - V v4, - K k5, - V v5, - K k6, - V v6, - K k7, - V v7, - K k8, - V v8, - K k9, - V v9, - K k10, - V v10 - ) { - return java.util.Map.of(k1, v1, k2, v2, k3, v3, k4, v4, k5, v5, k6, v6, k7, v7, k8, v8, k9, v9, k10, v10); - } - - /** - * Delegates to the Java9 {@code Map.ofEntries()} method. - */ - @SafeVarargs - @SuppressWarnings("varargs") - public static java.util.Map ofEntries(java.util.Map.Entry... entries) { - return java.util.Map.ofEntries(entries); - } - - /** - * Delegates to the Java9 {@code Map.entry()} method. - */ - public static java.util.Map.Entry entry(K k, V v) { - return java.util.Map.entry(k, v); - } - - /** - * Delegates to the Java10 {@code Map.copyOf()} method. - */ - public static java.util.Map copyOf(java.util.Map map) { - return java.util.Map.copyOf(map); - } - -} diff --git a/libs/core/src/main/java11/org/opensearch/common/collect/Set.java b/libs/core/src/main/java11/org/opensearch/common/collect/Set.java deleted file mode 100644 index 0350023e4e894..0000000000000 --- a/libs/core/src/main/java11/org/opensearch/common/collect/Set.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.common.collect; - -import java.util.Collection; - -public class Set { - - /** - * Delegates to the Java9 {@code Set.of()} method. - * - * @param the {@code Set}'s element type - * @return an empty {@code Set} - */ - public static java.util.Set of() { - return java.util.Set.of(); - } - - /** - * Delegates to the Java9 {@code Set.of()} method. - * - * @param the {@code Set}'s element type - * @param e1 the single element - * @return a {@code Set} containing the specified element - */ - public static java.util.Set of(T e1) { - return java.util.Set.of(e1); - } - - /** - * Delegates to the Java9 {@code Set.of()} method. - * - * @param the {@code Set}'s element type - * @param e1 the first element - * @param e2 the second element - * @return a {@code Set} containing the specified element - */ - public static java.util.Set of(T e1, T e2) { - return java.util.Set.of(e1, e2); - } - - /** - * Delegates to the Java9 {@code Set.of()} method. - * - * @param entries the elements to be contained in the set - * @param the {@code Set}'s element type - * @return an unmodifiable set containing the specified elements. - */ - @SafeVarargs - @SuppressWarnings("varargs") - public static java.util.Set of(T... 
entries) { - return java.util.Set.of(entries); - } - - /** - * Delegates to the Java10 {@code Set.copyOf} method. - * - * @param the {@code Set}'s element type - * @param coll a {@code Collection} from which elements are drawn, must be non-null - * @return a {@code Set} containing the elements of the given {@code Collection} - */ - public static java.util.Set copyOf(Collection coll) { - return java.util.Set.copyOf(coll); - } -} diff --git a/libs/core/src/main/java11/org/opensearch/core/internal/io/Streams.java b/libs/core/src/main/java11/org/opensearch/core/internal/io/Streams.java deleted file mode 100644 index 67765392b1d46..0000000000000 --- a/libs/core/src/main/java11/org/opensearch/core/internal/io/Streams.java +++ /dev/null @@ -1,101 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
- */ - -package org.opensearch.core.internal.io; - -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; - -/** - * Simple utility methods for file and stream copying. - * All copy methods close all affected streams when done. - *

- * Mainly for use within the framework, - * but also useful for application code. - */ -public abstract class Streams { - - private static final ThreadLocal buffer = ThreadLocal.withInitial(() -> new byte[8 * 1024]); - - /** - * Copy the contents of the given InputStream to the given OutputStream. Optionally, closes both streams when done. - * - * @param in the stream to copy from - * @param out the stream to copy to - * @param close whether to close both streams after copying - * @param buffer buffer to use for copying - * @return the number of bytes copied - * @throws IOException in case of I/O errors - */ - public static long copy(final InputStream in, final OutputStream out, byte[] buffer, boolean close) throws IOException { - Exception err = null; - try { - long byteCount = 0; - int bytesRead; - while ((bytesRead = in.read(buffer)) != -1) { - out.write(buffer, 0, bytesRead); - byteCount += bytesRead; - } - out.flush(); - return byteCount; - } catch (IOException | RuntimeException e) { - err = e; - throw e; - } finally { - if (close) { - IOUtils.close(err, in, out); - } - } - } - - /** - * @see #copy(InputStream, OutputStream, byte[], boolean) - */ - public static long copy(final InputStream in, final OutputStream out, boolean close) throws IOException { - return copy(in, out, buffer.get(), close); - } - - /** - * @see #copy(InputStream, OutputStream, byte[], boolean) - */ - public static long copy(final InputStream in, final OutputStream out, byte[] buffer) throws IOException { - return copy(in, out, buffer, true); - } - - /** - * @see #copy(InputStream, OutputStream, byte[], boolean) - */ - public static long copy(final InputStream in, final OutputStream out) throws IOException { - return copy(in, out, buffer.get(), true); - } -} diff --git a/server/src/main/java/org/opensearch/monitor/jvm/JvmPid.java b/server/src/main/java/org/opensearch/monitor/jvm/JvmPid.java index bf9b5f6fe4f3f..9a2fbfbd27c68 100644 --- 
a/server/src/main/java/org/opensearch/monitor/jvm/JvmPid.java +++ b/server/src/main/java/org/opensearch/monitor/jvm/JvmPid.java @@ -32,31 +32,10 @@ package org.opensearch.monitor.jvm; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.message.ParameterizedMessage; - -import java.lang.management.ManagementFactory; - class JvmPid { - private static final long PID; - static long getPid() { - return PID; - } - - static { - PID = initializePid(); - } - - private static long initializePid() { - final String name = ManagementFactory.getRuntimeMXBean().getName(); - try { - return Long.parseLong(name.split("@")[0]); - } catch (final NumberFormatException e) { - LogManager.getLogger(JvmPid.class).debug(new ParameterizedMessage("failed parsing PID from [{}]", name), e); - return -1; - } + return ProcessHandle.current().pid(); } } diff --git a/server/src/main/java11/org/opensearch/monitor/jvm/JvmPid.java b/server/src/main/java11/org/opensearch/monitor/jvm/JvmPid.java deleted file mode 100644 index 9a2fbfbd27c68..0000000000000 --- a/server/src/main/java11/org/opensearch/monitor/jvm/JvmPid.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.monitor.jvm; - -class JvmPid { - - static long getPid() { - return ProcessHandle.current().pid(); - } - -} From a36a1e395c2f535b4c78be1a14241b2e05a4a52c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Apr 2022 11:17:21 -0700 Subject: [PATCH 088/653] Bump jna from 5.10.0 to 5.11.0 in /buildSrc (#2946) Bumps [jna](https://github.com/java-native-access/jna) from 5.10.0 to 5.11.0. - [Release notes](https://github.com/java-native-access/jna/releases) - [Changelog](https://github.com/java-native-access/jna/blob/master/CHANGES.md) - [Commits](https://github.com/java-native-access/jna/compare/5.10.0...5.11.0) --- updated-dependencies: - dependency-name: net.java.dev.jna:jna dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- buildSrc/build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index d478a1fd45e80..077064e33187c 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -110,7 +110,7 @@ dependencies { api 'com.netflix.nebula:gradle-info-plugin:11.3.3' api 'org.apache.rat:apache-rat:0.13' api 'commons-io:commons-io:2.7' - api "net.java.dev.jna:jna:5.10.0" + api "net.java.dev.jna:jna:5.11.0" api 'gradle.plugin.com.github.johnrengelman:shadow:7.1.2' api 'de.thetaphi:forbiddenapis:3.3' api 'com.avast.gradle:gradle-docker-compose-plugin:0.15.2' From ab026025d711ab6f4e807c0ad40cae3d9ee055ac Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Apr 2022 11:18:02 -0700 Subject: [PATCH 089/653] Bump reactor-core from 3.4.15 to 3.4.17 in /plugins/repository-azure (#2947) * Bump reactor-core from 3.4.15 to 3.4.17 in /plugins/repository-azure Bumps [reactor-core](https://github.com/reactor/reactor-core) from 3.4.15 to 3.4.17. - [Release notes](https://github.com/reactor/reactor-core/releases) - [Commits](https://github.com/reactor/reactor-core/compare/v3.4.15...v3.4.17) --- updated-dependencies: - dependency-name: io.projectreactor:reactor-core dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- plugins/repository-azure/build.gradle | 2 +- plugins/repository-azure/licenses/reactor-core-3.4.15.jar.sha1 | 1 - plugins/repository-azure/licenses/reactor-core-3.4.17.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-azure/licenses/reactor-core-3.4.15.jar.sha1 create mode 100644 plugins/repository-azure/licenses/reactor-core-3.4.17.jar.sha1 diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index da644d77eb488..a18f18cea185e 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -56,7 +56,7 @@ dependencies { implementation project(':modules:transport-netty4') api 'com.azure:azure-storage-blob:12.15.0' api 'org.reactivestreams:reactive-streams:1.0.3' - api 'io.projectreactor:reactor-core:3.4.15' + api 'io.projectreactor:reactor-core:3.4.17' api 'io.projectreactor.netty:reactor-netty:1.0.17' api 'io.projectreactor.netty:reactor-netty-core:1.0.16' api 'io.projectreactor.netty:reactor-netty-http:1.0.16' diff --git a/plugins/repository-azure/licenses/reactor-core-3.4.15.jar.sha1 b/plugins/repository-azure/licenses/reactor-core-3.4.15.jar.sha1 deleted file mode 100644 index a89de48b20b51..0000000000000 --- a/plugins/repository-azure/licenses/reactor-core-3.4.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -28ccf513fe64709c8ded30ea3f387fc718db9626 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-core-3.4.17.jar.sha1 b/plugins/repository-azure/licenses/reactor-core-3.4.17.jar.sha1 new file mode 100644 index 0000000000000..3803458775631 --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-core-3.4.17.jar.sha1 @@ -0,0 +1 @@ +52176b50d2191bc32a8a235124e7aff7f291754b \ No newline at end of file From 
002e614ecf9357a2636aaf6a4db98cec8af33129 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Apr 2022 11:27:19 -0700 Subject: [PATCH 090/653] Bump hadoop-minicluster from 3.3.1 to 3.3.2 in /test/fixtures/hdfs-fixture (#2381) * Bump hadoop-minicluster in /test/fixtures/hdfs-fixture Bumps hadoop-minicluster from 3.3.1 to 3.3.2. --- updated-dependencies: - dependency-name: org.apache.hadoop:hadoop-minicluster dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Fixing gradle check Signed-off-by: Vacha Shah Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Vacha Shah --- test/fixtures/hdfs-fixture/build.gradle | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index c56cc6d196b63..2ff444c03b123 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -33,7 +33,7 @@ apply plugin: 'opensearch.java' group = 'hdfs' dependencies { - api "org.apache.hadoop:hadoop-minicluster:3.3.1" + api "org.apache.hadoop:hadoop-minicluster:3.3.2" api "org.apache.commons:commons-compress:1.21" api "commons-codec:commons-codec:${versions.commonscodec}" api "org.apache.logging.log4j:log4j-core:${versions.log4j}" @@ -43,4 +43,5 @@ dependencies { api "com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:${versions.jackson}" api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" api 'net.minidev:json-smart:2.4.8' + api "org.mockito:mockito-core:${versions.mockito}" } From bb19f627f090b04cd60213e4b00ee38b31f6de63 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Apr 2022 11:30:36 -0700 Subject: [PATCH 091/653] Bump guava from 30.1.1-jre to 31.1-jre in /plugins/repository-hdfs (#2948) * Bump 
guava from 30.1.1-jre to 31.1-jre in /plugins/repository-hdfs Bumps [guava](https://github.com/google/guava) from 30.1.1-jre to 31.1-jre. - [Release notes](https://github.com/google/guava/releases) - [Commits](https://github.com/google/guava/commits) --- updated-dependencies: - dependency-name: com.google.guava:guava dependency-type: direct:production ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- plugins/repository-hdfs/build.gradle | 2 +- plugins/repository-hdfs/licenses/guava-30.1.1-jre.jar.sha1 | 1 - plugins/repository-hdfs/licenses/guava-31.1-jre.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-hdfs/licenses/guava-30.1.1-jre.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/guava-31.1-jre.jar.sha1 diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index d8811ded8d092..02ac822f94995 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -66,7 +66,7 @@ dependencies { api 'org.apache.avro:avro:1.10.2' api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" api 'com.google.code.gson:gson:2.9.0' - runtimeOnly 'com.google.guava:guava:30.1.1-jre' + runtimeOnly 'com.google.guava:guava:31.1-jre' api 'com.google.protobuf:protobuf-java:3.20.0' api "commons-logging:commons-logging:${versions.commonslogging}" api 'commons-cli:commons-cli:1.2' diff --git a/plugins/repository-hdfs/licenses/guava-30.1.1-jre.jar.sha1 b/plugins/repository-hdfs/licenses/guava-30.1.1-jre.jar.sha1 deleted file mode 100644 index 39e641fc7834f..0000000000000 --- a/plugins/repository-hdfs/licenses/guava-30.1.1-jre.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -87e0fd1df874ea3cbe577702fe6f17068b790fd8 \ No newline at end of file diff --git 
a/plugins/repository-hdfs/licenses/guava-31.1-jre.jar.sha1 b/plugins/repository-hdfs/licenses/guava-31.1-jre.jar.sha1 new file mode 100644 index 0000000000000..e57390ebe1299 --- /dev/null +++ b/plugins/repository-hdfs/licenses/guava-31.1-jre.jar.sha1 @@ -0,0 +1 @@ +60458f877d055d0c9114d9e1a2efb737b4bc282c \ No newline at end of file From ca102ea209d142b73f5c037ff0624ab704e00c04 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Apr 2022 12:25:14 -0700 Subject: [PATCH 092/653] Bump grpc-context from 1.29.0 to 1.45.1 in /plugins/repository-gcs (#2944) * Bump grpc-context from 1.29.0 to 1.45.1 in /plugins/repository-gcs Bumps [grpc-context](https://github.com/grpc/grpc-java) from 1.29.0 to 1.45.1. - [Release notes](https://github.com/grpc/grpc-java/releases) - [Commits](https://github.com/grpc/grpc-java/compare/v1.29.0...v1.45.1) --- updated-dependencies: - dependency-name: io.grpc:grpc-context dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- plugins/repository-gcs/build.gradle | 2 +- plugins/repository-gcs/licenses/grpc-context-1.29.0.jar.sha1 | 1 - plugins/repository-gcs/licenses/grpc-context-1.45.1.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-gcs/licenses/grpc-context-1.29.0.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/grpc-context-1.45.1.jar.sha1 diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index 0e1ed06879f91..241cd70eba071 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -75,7 +75,7 @@ dependencies { api 'com.google.http-client:google-http-client-jackson2:1.35.0' api 'com.google.http-client:google-http-client-gson:1.41.4' api 'com.google.api:gax-httpjson:0.62.0' - api 'io.grpc:grpc-context:1.29.0' + api 'io.grpc:grpc-context:1.45.1' api 'io.opencensus:opencensus-api:0.18.0' api 'io.opencensus:opencensus-contrib-http-util:0.18.0' api 'com.google.apis:google-api-services-storage:v1-rev20200814-1.30.10' diff --git a/plugins/repository-gcs/licenses/grpc-context-1.29.0.jar.sha1 b/plugins/repository-gcs/licenses/grpc-context-1.29.0.jar.sha1 deleted file mode 100644 index a549827edd283..0000000000000 --- a/plugins/repository-gcs/licenses/grpc-context-1.29.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1d8a441110f86f8927543dc3007639080441ea3c \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/grpc-context-1.45.1.jar.sha1 b/plugins/repository-gcs/licenses/grpc-context-1.45.1.jar.sha1 new file mode 100644 index 0000000000000..eb7e4fcd78e97 --- /dev/null +++ b/plugins/repository-gcs/licenses/grpc-context-1.45.1.jar.sha1 @@ -0,0 +1 @@ +485a08c019cc78914a477b1dfc7052820b8d822c \ No newline at end of file From 
25d81c5fd5f680da2ca3db1cbbda66b68078c2fb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Apr 2022 15:22:12 -0700 Subject: [PATCH 093/653] Bump google-oauth-client from 1.33.2 to 1.33.3 in /plugins/discovery-gce (#2943) * Bump google-oauth-client from 1.33.2 to 1.33.3 in /plugins/discovery-gce Bumps [google-oauth-client](https://github.com/googleapis/google-oauth-java-client) from 1.33.2 to 1.33.3. - [Release notes](https://github.com/googleapis/google-oauth-java-client/releases) - [Changelog](https://github.com/googleapis/google-oauth-java-client/blob/main/CHANGELOG.md) - [Commits](https://github.com/googleapis/google-oauth-java-client/compare/v1.33.2...v1.33.3) --- updated-dependencies: - dependency-name: com.google.oauth-client:google-oauth-client dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Fixing precommit failures Signed-off-by: Vacha Shah Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] Co-authored-by: Vacha Shah --- plugins/discovery-gce/build.gradle | 14 +++++++++++--- .../licenses/google-oauth-client-1.33.2.jar.sha1 | 1 - .../licenses/google-oauth-client-1.33.3.jar.sha1 | 1 + 3 files changed, 12 insertions(+), 4 deletions(-) delete mode 100644 plugins/discovery-gce/licenses/google-oauth-client-1.33.2.jar.sha1 create mode 100644 plugins/discovery-gce/licenses/google-oauth-client-1.33.3.jar.sha1 diff --git a/plugins/discovery-gce/build.gradle b/plugins/discovery-gce/build.gradle index eb695f84b2bd0..beae0d84685a4 100644 --- a/plugins/discovery-gce/build.gradle +++ b/plugins/discovery-gce/build.gradle @@ -24,7 +24,7 @@ versions << [ dependencies { api "com.google.apis:google-api-services-compute:v1-rev160-${versions.google}" api "com.google.api-client:google-api-client:${versions.google}" - api 
"com.google.oauth-client:google-oauth-client:1.33.2" + api "com.google.oauth-client:google-oauth-client:1.33.3" api "com.google.http-client:google-http-client:${versions.google}" api "com.google.http-client:google-http-client-jackson2:${versions.google}" api 'com.google.code.findbugs:jsr305:3.0.2' @@ -58,13 +58,21 @@ test { thirdPartyAudit.ignoreMissingClasses( // classes are missing 'javax.jms.Message', - 'com.google.common.base.Splitter', - 'com.google.common.collect.Lists', 'javax.servlet.ServletContextEvent', 'javax.servlet.ServletContextListener', 'org.apache.avalon.framework.logger.Logger', 'org.apache.log.Hierarchy', 'org.apache.log.Logger', + 'com.google.api.client.json.gson.GsonFactory', + 'com.google.common.base.Preconditions', + 'com.google.common.base.Splitter', + 'com.google.common.cache.CacheBuilder', + 'com.google.common.cache.CacheLoader', + 'com.google.common.cache.LoadingCache', + 'com.google.common.collect.ImmutableMap', + 'com.google.common.collect.ImmutableMap$Builder', + 'com.google.common.collect.ImmutableSet', + 'com.google.common.collect.Lists', 'com.google.common.collect.Multiset', 'com.google.common.collect.SortedMultiset', 'com.google.common.collect.TreeMultiset', diff --git a/plugins/discovery-gce/licenses/google-oauth-client-1.33.2.jar.sha1 b/plugins/discovery-gce/licenses/google-oauth-client-1.33.2.jar.sha1 deleted file mode 100644 index 289e8e8261fd3..0000000000000 --- a/plugins/discovery-gce/licenses/google-oauth-client-1.33.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2810fb515fe110295dc6867fc9f70c401b66daf3 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/google-oauth-client-1.33.3.jar.sha1 b/plugins/discovery-gce/licenses/google-oauth-client-1.33.3.jar.sha1 new file mode 100644 index 0000000000000..f2afaa1bc2dba --- /dev/null +++ b/plugins/discovery-gce/licenses/google-oauth-client-1.33.3.jar.sha1 @@ -0,0 +1 @@ +9d445a8649b0de731922b9a3ebf1552b5403611d \ No newline at end of file From 
b2f2658f8d342340ce6047ccd3027aba04a88f07 Mon Sep 17 00:00:00 2001 From: Kunal Kotwani Date: Mon, 18 Apr 2022 15:50:21 -0700 Subject: [PATCH 094/653] Override toString for usable logs (#2895) Signed-off-by: Kunal Kotwani --- .../opensearch/client/cluster/ProxyModeInfo.java | 16 ++++++++++++++++ .../client/cluster/RemoteConnectionInfo.java | 16 ++++++++++++++++ .../opensearch/client/cluster/SniffModeInfo.java | 12 ++++++++++++ 3 files changed, 44 insertions(+) diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/cluster/ProxyModeInfo.java b/client/rest-high-level/src/main/java/org/opensearch/client/cluster/ProxyModeInfo.java index fe5b767a7f68f..1b6838d455182 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/cluster/ProxyModeInfo.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/cluster/ProxyModeInfo.java @@ -94,4 +94,20 @@ public boolean equals(Object o) { public int hashCode() { return Objects.hash(address, serverName, maxSocketConnections, numSocketsConnected); } + + @Override + public String toString() { + return "ProxyModeInfo{" + + "address='" + + address + + '\'' + + ", serverName='" + + serverName + + '\'' + + ", maxSocketConnections=" + + maxSocketConnections + + ", numSocketsConnected=" + + numSocketsConnected + + '}'; + } } diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/cluster/RemoteConnectionInfo.java b/client/rest-high-level/src/main/java/org/opensearch/client/cluster/RemoteConnectionInfo.java index 4f91d32452d26..ee51a5e58f26e 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/cluster/RemoteConnectionInfo.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/cluster/RemoteConnectionInfo.java @@ -142,6 +142,22 @@ public int hashCode() { return Objects.hash(modeInfo, initialConnectionTimeoutString, clusterAlias, skipUnavailable); } + @Override + public String toString() { + return "RemoteConnectionInfo{" + + "modeInfo=" + + modeInfo 
+ + ", initialConnectionTimeoutString='" + + initialConnectionTimeoutString + + '\'' + + ", clusterAlias='" + + clusterAlias + + '\'' + + ", skipUnavailable=" + + skipUnavailable + + '}'; + } + public interface ModeInfo { boolean isConnected(); diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/cluster/SniffModeInfo.java b/client/rest-high-level/src/main/java/org/opensearch/client/cluster/SniffModeInfo.java index 63c53ab4f69d5..96eddb72189b9 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/cluster/SniffModeInfo.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/cluster/SniffModeInfo.java @@ -86,4 +86,16 @@ public boolean equals(Object o) { public int hashCode() { return Objects.hash(seedNodes, maxConnectionsPerCluster, numNodesConnected); } + + @Override + public String toString() { + return "SniffModeInfo{" + + "seedNodes=" + + seedNodes + + ", maxConnectionsPerCluster=" + + maxConnectionsPerCluster + + ", numNodesConnected=" + + numNodesConnected + + '}'; + } } From 8033183c469fbc2a97ee32095b3dcd46f3dfc983 Mon Sep 17 00:00:00 2001 From: Vacha Shah Date: Mon, 18 Apr 2022 16:14:35 -0700 Subject: [PATCH 095/653] Adding reta to codeowners (#2967) Signed-off-by: Vacha Shah --- .github/CODEOWNERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 5dfa9099866d1..8b63b291a8a54 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,3 +1,3 @@ # This should match the owning team set up in https://github.com/orgs/opensearch-project/teams -* @opensearch-project/opensearch-core +* @opensearch-project/opensearch-core @reta From 8bfb082ee136aa65063d17ec1e41fb10ed759d1f Mon Sep 17 00:00:00 2001 From: Matt Weber Date: Mon, 18 Apr 2022 21:34:54 -0700 Subject: [PATCH 096/653] Use G1GC on JDK11+ (#2964) Update default jvm settings to use G1GC by default for JDK11 and greater. 
Signed-off-by: Matt Weber --- distribution/src/config/jvm.options | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/distribution/src/config/jvm.options b/distribution/src/config/jvm.options index a20baf1be0906..ef1035489c9fc 100644 --- a/distribution/src/config/jvm.options +++ b/distribution/src/config/jvm.options @@ -33,19 +33,19 @@ ################################################################ ## GC configuration -8-13:-XX:+UseConcMarkSweepGC -8-13:-XX:CMSInitiatingOccupancyFraction=75 -8-13:-XX:+UseCMSInitiatingOccupancyOnly +8-10:-XX:+UseConcMarkSweepGC +8-10:-XX:CMSInitiatingOccupancyFraction=75 +8-10:-XX:+UseCMSInitiatingOccupancyOnly ## G1GC Configuration # NOTE: G1 GC is only supported on JDK version 10 or later # to use G1GC, uncomment the next two lines and update the version on the # following three lines to your version of the JDK -# 10-13:-XX:-UseConcMarkSweepGC -# 10-13:-XX:-UseCMSInitiatingOccupancyOnly -14-:-XX:+UseG1GC -14-:-XX:G1ReservePercent=25 -14-:-XX:InitiatingHeapOccupancyPercent=30 +# 10:-XX:-UseConcMarkSweepGC +# 10:-XX:-UseCMSInitiatingOccupancyOnly +11-:-XX:+UseG1GC +11-:-XX:G1ReservePercent=25 +11-:-XX:InitiatingHeapOccupancyPercent=30 ## JVM temporary directory -Djava.io.tmpdir=${OPENSEARCH_TMPDIR} From d8c815c6be80d592cf22f4a1c5060c2ca1857c2a Mon Sep 17 00:00:00 2001 From: Yevhen Tienkaiev Date: Tue, 19 Apr 2022 07:38:54 +0300 Subject: [PATCH 097/653] Add `positive_score_impact` support for `rank_features` (#2725) Adds positive_score_impact support for rank_features field mapper. 
Signed-off-by: Yevhen Tienkaiev --- .../index/mapper/RankFeaturesFieldMapper.java | 41 ++++++++++++++++--- .../mapper/RankFeaturesFieldMapperTests.java | 31 +++++++++++++- .../mapper/RankFeaturesFieldTypeTests.java | 2 +- .../test/rank_feature/10_basic.yml | 21 ++++++++++ .../test/rank_features/10_basic.yml | 40 ++++++++++++++++++ 5 files changed, 126 insertions(+), 9 deletions(-) diff --git a/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/RankFeaturesFieldMapper.java b/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/RankFeaturesFieldMapper.java index 43853eb40f432..21a0acd508a39 100644 --- a/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/RankFeaturesFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/RankFeaturesFieldMapper.java @@ -42,7 +42,6 @@ import org.opensearch.search.lookup.SearchLookup; import java.io.IOException; -import java.util.Collections; import java.util.List; import java.util.Map; import java.util.function.Supplier; @@ -55,8 +54,18 @@ public class RankFeaturesFieldMapper extends ParametrizedFieldMapper { public static final String CONTENT_TYPE = "rank_features"; + private static RankFeaturesFieldType ft(FieldMapper in) { + return ((RankFeaturesFieldMapper) in).fieldType(); + } + public static class Builder extends ParametrizedFieldMapper.Builder { + private final Parameter positiveScoreImpact = Parameter.boolParam( + "positive_score_impact", + false, + m -> ft(m).positiveScoreImpact, + true + ); private final Parameter> meta = Parameter.metaParam(); public Builder(String name) { @@ -66,16 +75,17 @@ public Builder(String name) { @Override protected List> getParameters() { - return Collections.singletonList(meta); + return List.of(meta, positiveScoreImpact); } @Override public RankFeaturesFieldMapper build(BuilderContext context) { return new RankFeaturesFieldMapper( name, - new RankFeaturesFieldType(buildFullName(context), meta.getValue()), + new 
RankFeaturesFieldType(buildFullName(context), meta.getValue(), positiveScoreImpact.getValue()), multiFieldsBuilder.build(this, context), - copyTo.build() + copyTo.build(), + positiveScoreImpact.getValue() ); } } @@ -84,9 +94,12 @@ public RankFeaturesFieldMapper build(BuilderContext context) { public static final class RankFeaturesFieldType extends MappedFieldType { - public RankFeaturesFieldType(String name, Map meta) { + private final boolean positiveScoreImpact; + + public RankFeaturesFieldType(String name, Map meta, boolean positiveScoreImpact) { super(name, false, false, false, TextSearchInfo.NONE, meta); setIndexAnalyzer(Lucene.KEYWORD_ANALYZER); + this.positiveScoreImpact = positiveScoreImpact; } @Override @@ -94,6 +107,10 @@ public String typeName() { return CONTENT_TYPE; } + public boolean positiveScoreImpact() { + return positiveScoreImpact; + } + @Override public Query existsQuery(QueryShardContext context) { throw new IllegalArgumentException("[rank_features] fields do not support [exists] queries"); @@ -115,9 +132,18 @@ public Query termQuery(Object value, QueryShardContext context) { } } - private RankFeaturesFieldMapper(String simpleName, MappedFieldType mappedFieldType, MultiFields multiFields, CopyTo copyTo) { + private final boolean positiveScoreImpact; + + private RankFeaturesFieldMapper( + String simpleName, + MappedFieldType mappedFieldType, + MultiFields multiFields, + CopyTo copyTo, + boolean positiveScoreImpact + ) { super(simpleName, mappedFieldType, multiFields, copyTo); assert fieldType.indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS) <= 0; + this.positiveScoreImpact = positiveScoreImpact; } @Override @@ -164,6 +190,9 @@ public void parse(ParseContext context) throws IOException { + "] in the same document" ); } + if (positiveScoreImpact == false) { + value = 1 / value; + } context.doc().addWithKey(key, new FeatureField(name(), feature, value)); } else { throw new IllegalArgumentException( diff --git 
a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeaturesFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeaturesFieldMapperTests.java index 129ba6b126237..55d825d1b53bb 100644 --- a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeaturesFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeaturesFieldMapperTests.java @@ -67,8 +67,8 @@ protected void minimalMapping(XContentBuilder b) throws IOException { } @Override - protected void registerParameters(ParameterChecker checker) { - // no parameters to configure + protected void registerParameters(ParameterChecker checker) throws IOException { + checker.registerConflictCheck("positive_score_impact", b -> b.field("positive_score_impact", false)); } @Override @@ -95,6 +95,33 @@ public void testDefaults() throws Exception { assertTrue(freq1 < freq2); } + public void testNegativeScoreImpact() throws Exception { + DocumentMapper mapper = createDocumentMapper( + fieldMapping(b -> b.field("type", "rank_features").field("positive_score_impact", false)) + ); + + ParsedDocument doc1 = mapper.parse(source(this::writeField)); + + IndexableField[] fields = doc1.rootDoc().getFields("field"); + assertEquals(2, fields.length); + assertThat(fields[0], Matchers.instanceOf(FeatureField.class)); + FeatureField featureField1 = null; + FeatureField featureField2 = null; + for (IndexableField field : fields) { + if (field.stringValue().equals("foo")) { + featureField1 = (FeatureField) field; + } else if (field.stringValue().equals("bar")) { + featureField2 = (FeatureField) field; + } else { + throw new UnsupportedOperationException(); + } + } + + int freq1 = RankFeatureFieldMapperTests.getFrequency(featureField1.tokenStream(null, null)); + int freq2 = RankFeatureFieldMapperTests.getFrequency(featureField2.tokenStream(null, null)); + assertTrue(freq1 > freq2); + } + public void testRejectMultiValuedFields() 
throws MapperParsingException, IOException { DocumentMapper mapper = createDocumentMapper(mapping(b -> { b.startObject("field").field("type", "rank_features").endObject(); diff --git a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeaturesFieldTypeTests.java b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeaturesFieldTypeTests.java index b8c653bc97ce7..8ece0d63f05ba 100644 --- a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeaturesFieldTypeTests.java +++ b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeaturesFieldTypeTests.java @@ -37,7 +37,7 @@ public class RankFeaturesFieldTypeTests extends FieldTypeTestCase { public void testIsNotAggregatable() { - MappedFieldType fieldType = new RankFeaturesFieldMapper.RankFeaturesFieldType("field", Collections.emptyMap()); + MappedFieldType fieldType = new RankFeaturesFieldMapper.RankFeaturesFieldType("field", Collections.emptyMap(), true); assertFalse(fieldType.isAggregatable()); } } diff --git a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/rank_feature/10_basic.yml b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/rank_feature/10_basic.yml index 6fea35eb21f4e..ac951263ca299 100644 --- a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/rank_feature/10_basic.yml +++ b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/rank_feature/10_basic.yml @@ -157,3 +157,24 @@ setup: - match: hits.hits.1._id: "1" + +--- +"Negative linear": + + - do: + search: + index: test + body: + query: + rank_feature: + field: url_length + linear: {} + + - match: + hits.total.value: 2 + + - match: + hits.hits.0._id: "2" + + - match: + hits.hits.1._id: "1" diff --git a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/rank_features/10_basic.yml b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/rank_features/10_basic.yml index 
d4d5d2a360406..2644b9e777f6a 100644 --- a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/rank_features/10_basic.yml +++ b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/rank_features/10_basic.yml @@ -9,6 +9,9 @@ setup: properties: tags: type: rank_features + negative_reviews: + type: rank_features + positive_score_impact: false - do: index: @@ -18,6 +21,9 @@ setup: tags: foo: 3 bar: 5 + negative_reviews: + 1star: 10 + 2star: 1 - do: index: @@ -27,6 +33,9 @@ setup: tags: bar: 6 quux: 10 + negative_reviews: + 1star: 1 + 2star: 10 - do: indices.refresh: {} @@ -97,3 +106,34 @@ setup: - match: hits.hits.1._id: "1" + +--- +"Linear negative impact": + + - do: + search: + index: test + body: + query: + rank_feature: + field: negative_reviews.1star + linear: {} + + - match: + hits.hits.0._id: "2" + - match: + hits.hits.1._id: "1" + + - do: + search: + index: test + body: + query: + rank_feature: + field: negative_reviews.2star + linear: {} + + - match: + hits.hits.0._id: "1" + - match: + hits.hits.1._id: "2" From 8c9078d134d6d4e9ec0d14d41c3565fc45ae7e33 Mon Sep 17 00:00:00 2001 From: Vacha Shah Date: Tue, 19 Apr 2022 09:19:59 -0700 Subject: [PATCH 098/653] Adding workflow to create documentation related issues in documentation-website repo (#2929) Signed-off-by: Vacha Shah --- .ci/documentation/issue.md | 11 +++++ .../workflows/create-documentation-issue.yml | 41 +++++++++++++++++++ 2 files changed, 52 insertions(+) create mode 100644 .ci/documentation/issue.md create mode 100644 .github/workflows/create-documentation-issue.yml diff --git a/.ci/documentation/issue.md b/.ci/documentation/issue.md new file mode 100644 index 0000000000000..c34905605b2f6 --- /dev/null +++ b/.ci/documentation/issue.md @@ -0,0 +1,11 @@ +**Is your feature request related to a problem?** +A new feature has been added. + +**What solution would you like?** +Document the usage of the new feature. 
+ +**What alternatives have you considered?** +N/A + +**Do you have any additional context?** +See please diff --git a/.github/workflows/create-documentation-issue.yml b/.github/workflows/create-documentation-issue.yml new file mode 100644 index 0000000000000..c81f7355a0d22 --- /dev/null +++ b/.github/workflows/create-documentation-issue.yml @@ -0,0 +1,41 @@ +name: Create Documentation Issue +on: + pull_request: + types: + - labeled +env: + PR_NUMBER: ${{ github.event.number }} + +jobs: + create-issue: + if: ${{ github.event.label.name == 'needs-documentation' }} + runs-on: ubuntu-latest + name: Create Documentation Issue + steps: + - name: GitHub App token + id: github_app_token + uses: tibdex/github-app-token@v1.5.0 + with: + app_id: ${{ secrets.APP_ID }} + private_key: ${{ secrets.APP_PRIVATE_KEY }} + installation_id: 22958780 + + - name: Checkout code + uses: actions/checkout@v2 + + - name: Edit the issue template + run: | + echo "https://github.com/opensearch-project/OpenSearch/pull/${{ env.PR_NUMBER }}." 
>> ./ci/documentation/issue.md + + - name: Create Issue From File + id: create-issue + uses: peter-evans/create-issue-from-file@v4 + with: + title: Add documentation related to new feature + content-filepath: ./ci/documentation/issue.md + labels: documentation + repository: opensearch-project/documentation-website + token: ${{ steps.github_app_token.outputs.token }} + + - name: Print Issue + run: echo Created related documentation issue ${{ steps.create-issue.outputs.issue-number }} From 896b97e54df4e192a15994a1076a9d1d04312f74 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Tue, 19 Apr 2022 12:34:42 -0400 Subject: [PATCH 099/653] Remove binary (class) files from the repository (#2969) Signed-off-by: Andriy Redko --- .../missingdoclet/MissingDoclet.class | Bin 14156 -> 0 bytes 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 doc-tools/missing-doclet/bin/main/org/opensearch/missingdoclet/MissingDoclet.class diff --git a/doc-tools/missing-doclet/bin/main/org/opensearch/missingdoclet/MissingDoclet.class b/doc-tools/missing-doclet/bin/main/org/opensearch/missingdoclet/MissingDoclet.class deleted file mode 100644 index 8729def18e4b3eda7184b1e3e49f1bf055d152db..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 14156 zcmbta34B!5)j#KEd6UTlhOlT*aeyetJXqnwXLEAqD9eG+uHWi?weigVi&vE(nYJ4?|<%lZKq+q?oGXiS2dq0V|fYLW!QhZ13X7?aV_*tEX!q827@Wt!3yk0mi8wJ{VOvnnHFkALg_(-mTe{ZHS zRvL5x_&zff!%Vu6Rsr9kaJVrFW-hPH!cGQ}4ON#Iv>NA}{4pz*c&U+^gqGhVYBfVEL#m#T_Ss2VNw~*D5WcDt3e^Aipk71sfR$kQZT%$si2v_ z=o9KP*>0OjGijD=*K1OQu7XHn5l2-xDsH|bF%u{`St@GMY&uDnikTD_g&+dh8taJ; zhOMxx4~6A+n3N#MvnK+59!kZb(`V;MWvkQPP)dUYQw3dKmcg&vYtk;-4VuQRU7+bq zcZF=$HRzo{giOr=-cgBl4p3e|e~n51MgI+Z)6;A9^g;94EHcrw1`(h^y3VBQ>D^%3 z-cT~2L4i8G1a)T@rumvzCtL~|1BSGxuRS#2=)6e*&{|3#pqmW35r%8bDYfNt@ov%w z>1J3jki;<^!EH?KlR4%1&^s$Rdw^OWrjHnO3)8|ez&OaL%bjA{d z7O}#CP(K7}UOuLC7m*>@?RL7uppU^J7&`l|E(CX>=bLv$E<^5NHqWH|YQkfg^BC z+x%8ebUFu}GP1!-pP>f~`m88||AUKVyh#tz=R|)d!jV`AmIkcYYS0(p3ynLyJsuv6 
zTF$1W^m%&Nq=UlHC6VNYSPU2p%c&kU>5vGBsRg7d-Y-s&bQ~5R;eB$>5tAOH$8EW9 zf!xQF8My}zT>;g6*%nErK*FQ6!=NYN)o4VfdrENZV3?7e zBgwAau~e^>ioly3>~Gg@*CZ7K?Dz{zCynNg)q{~}*h+Zl+f1jn`orDR0E-;EmaPIEM@%=_ zvmGcJJoFQw#J}50Etb`ODy#i$)FSSFWBA`)L}uP0czemDU(&Czpp{67Mn88Fs%6Zo zP^nDbvIdV$vT)bc?MWP$WqY z{TX7QyMx<=H(xdBuk^QU-h@N#>;mDMggj~v`@2d15WXoCz7bH<0Eyvy&7{|bcf66L zYxlhLhG?lbb99C6Sb6C!z(^;+3lrIClbI12O~>(J+#SJ_XoP9~Byc>&2?)n~QkX3< zP1a%!rCi9x1{ZdMwB^mzeC;CNYw1k(dDd!KTSm z1UEBbCR<~@RszO9ybh9$<&z$sh8__Ysrf?*%kRXZvfOl&%XkKq8TJx+XwBwn$4NDK z7Hr`7kPE`VCw!90Co>`s1My@u)B_~Wu54<`UeIBA8GMS#r;6<;MEKQT!gJYg@aZlk zZDpnXKa=MXEPi2UFbExTH!m=`g3o{+O2ymaS6hjuP!b!R=}2eR#D;M;F7dELTU#Nu zecF8iTKqViqjS01|44mz%L2+XiDgB(3W}ydCKYrQjy;`MlC##ALQQ*4WQk*5nI$ z6^wo;w!1x)>gg5dW8yX8s`F}YHn<7!%I>@BQt@~+S=Ss1A=XdAWK?K#%q_eIpaz27 zEjn*8{?G;wuSEdoh=5;*(*CW3DgUbAPH5Cf*z$+1o+uIGE{o)a8G(?7QX33Ap<&s%4A)(@N8~s|RT~ZNKt5^w zsk^M6!IXsbj?OKEO6Wu!z+FtSw-vTd0N?0Ma>PrA4FCWfu>oEO!_wG6g+8g<>*b62 z5`#A*9W&-Insprb*F>TaIFm2s%it;5IT`4+jFjiB>H=bmzIJ^$NUs{aWg^F}8{w|A z9sGO_nY@*Iuv^^W2#EW2E-mwsYYaNx4(i4qn*|nf10IHcSU#3-ZIt-au)?>A!<$NE zaW9-n7z3BTaTbW+Xy3}oe5y2SX5Wnsw zMG&h!dnR-=vyaTQBG1Zpwr;{0PqKHj(-5wSCmb`_90yS^n23(@9+U#t4-OunAq4KnwKK;{*B_fJn4e|s;#hu>%L zdma65AHadoN!Y=Fnf!kK0FDbY51EGlkptl9;Tpc1O#UDv5I1`4{3+8}*)WcolkFfl zu1*NlbX&N zcG2%KgFLm_`ZCJm7I|#@8<*X9-3QltI^ql>}8%S zft(o)2Hy`D^26Hf82N6Tuat-QvnD?vaZ5=k>6!wN@+1fcx><$mVgacI7`@Q@t{*A%Eb}w!8{LVqp z3kgAfnSTo!!;4BH7=dH%s_pDi6&vmD1~31fUorR(V1J>8?NB&Nxr0ppqt3f+v-Dja zGEDqulZxmmi4$Hm`L7ZN6t#4A20O)NKQ12m-_!RH<3ta6{cOP9ZzYqs`a^iRZ7>>b zcWaJt=D`%Q^ZdFbCFcT3{Dz6t3Gxx(>`TWdnZsAMF2t<2Og;Wdp-dgW=R@=5E|wqR3ZTh69srkWvL z#qGi74K^IJOf_4Yih|v1TO=!}PBsav%GVrIouY?#wloDhB|)f8Gu2#a^4U#D5w>>( z11$j!kKa`Dq;q;_OIL71Cm_u}@d8s-NN*`l(cRjVKE;`)S}5&P1#er@M>kXz)0_-z z1>#+UJ-urpRy5oqVX{GUVP2x4{;lCq#aT>?ZCS}HXrn`H_~)f+u{z68bs+e-%4GC3 zQ!P#lk^;;yiGu~&_wP~(HpcI0m54`L)Rhw!DYa_aIn~*Zk*51xcJGfel zw;@q0S7cd4l^RI4!V5}!gM$<6cWQ#n3+_Kr@I zPhBAkhD_C@nlbrGpK3Ij5QP|LaRRJyv 
zAcfa%$?K0K%_GP?-u##}2Zpm4HA9{Rkq7mXthyKV(yY3SpORIdidlHGf?tx`dR^j1 zU6;7()+O$#b&1y~*k=yD6`g`#yh+K_ee1+xqIEjjPA5Np=b_e#v5fNM>ccd@y7&-P zR3D);4^gdMEPfb$iq69CY@CB=ITg}MoS+HNG-Kc8v;?iDy#_6%WvH<{9>FXdlI41z zyD(3oZ}sDJ&gR3&In-do^G2va(WaWmsc|!^O(V3%uC9}6Te{jV)u2;-2&)umh~{X# ztOZV6fsu7IA2;Yra6P(;j%cW6+dDc?d8nQ`G)CUxKy0^k0$!H)5BRk)QeacAy7mxl zsII9!Oq()Tn5z4Z0vAt!Zcplc{0^n<(+f5>(q~&@%ixx0aLQ+K>aWnbC|A+fvY=e;U<3Qh%M8G@6tixi_W-K*V!fH9PEaKk8iWgx zE3xb#(Ny%|PSk<_I@E--tEwN>puS%L)DH=~w;`5B*YDET2g!-8H z0@(IjAlA33g1(ytT2&gfDh&{B3;@n2=#vg;9^yBt!ocmAlE)1;IN+%wS(WD!e<1@B?bL_=IYyr!p@)EedZfB$m>wCT z2b}UgDTfR1rG2^qpd1mLd{NVIb%usO4q!Y;PwuCM?)TIC=@j?-nf)}){r>6!^5oF` z@gZu^`xM0y)Hw5X096i6e>|2NFJ1!!=U-si+dU2S3K1{z59=7FXqt+Wg zqcFWh1GwfKq+dgFUZ!hsGkGoj32!j|Odq1Z&=BqzAECd2%a7C7fuJAYvhYQ;{|48B zzoQfMI&k|IF7p_Xs-KH9q~v)nDe)z|#aL@Oc5%6EJ(avc;TgsW=qQ`j_Smd;g3far z4BDf)?*zy@YD_7?^#*_Lf!>-6ZomyUwh-aH#bM56z`$=@o~$kV-7x)OADOjff5OLJ znvLGSNUx^E@dH#;_+UYXO|_=usbumrIIBLY|u!1Ngy_t?v+V3Gk7Y$F zsmxl4St6Ik_fiSe+b~yUSfdjXM|>Jh;kh)G7vT66bUL4j$C3-Fo~!VjubP^;me%uP zyoRfzEBP$yZey+&KLeeN{cZgJ37iP?*=mckRN zX_)^>U`W)^R5GUe-lRDO7ka!)rywJX+g1mQuVO7P#vH8~VFVB(43G0M?1wK{lM7DW zOS5pi4F|aqhrMWoTfsjM);z_*y>wH}F%AZ*7d^(X0riFDg(KX}R9`$q+slhxHCta) zUZkb>2yfB?T<;m83(7rqKL_iLMI(GUG%{aNUxJ*Kx{o^Oo{f>)cstO&0vFo7909r;aj$t5 z_W`M!>0*vTMvG`SE#`jo8ko0$V;qOY2gbCWx5p1)yb!8~ir)m7C&*8REt70;&;U%1 z@SaX3Iqi;E>)!KDVBP%FN~pk@hE0^@_W-nZC|H~;!eI-r1<}nQ=@sn|0oX`~6|NRm z*rmV}?~<=;6;$yJBYY#IgN_D3JTQz9-~Ot8j6Wp9J|e?zt1rY-cgj+CK}++!2dLI= zIL1RG{25`E^1|}sBmAHqTwYjRUOdbXITi6uY^v6Sg^5E{#9Kkx9`bP*RJEvrw?Ut5 zrL(vfRE>FFeLaNBBv>;+JGF9E1AeMZ*2! 
z-^mm$1<#5!j__Bq)@wTs!rqN%g*VWh{2n@l`jh;DEO|PXrp7Tx zo`54qeNBT`@GbaV1yn~1`P=**?A%Tz{9UNDVi@ zQSk{{K*cumL;V?e$ft#6sR)Z>2%UD=;Ie!}-=Ibr>_`v>V__xlnH&P&20*0kxHt@l z`3Hd<-{}GL5cT7~82_;G)Zlzygi;to%2KB^ol<N9>CkO2k9mL9BwndkY(bIr+MUfnn#ZFKeT7y zp;!2y{4a;ni0a;?4uhUAJ;VD3b@_ZA1Zel!6jIk<5HQc@9^}_HKTCxV;mrSj;)~w( z)d%@MaBz*_5EX8!KB&ao_04i?1*jD}6?$5_Ot*F%wIa9jRq1jEsv@a*-M(j}mR{c@ zwW&^@Dn-rXR^WZy?<;YqJt?(Pw{N=C%G?@UlKXu=r^fKX?k{w^PLi(llI2pH;r5*> zwV7`1bg4~uYxAWx#jTwowP|*3^CSG)L-25Ykj7J0C6Kz1s!`S;Dm@qF3Pe6FDA!Ps zI#G7f<#Z*=5JH3)$~dgwyHH+3@1q-0-bA<29VqXlPtksq2k0Ss1m&ZM0G~qnG<}1f zNBIK%h+ahbQ+UyTK=}&1<=0TY4)?8CRl+Pz=b0#H;my^2lnbCa&O*P}Xoz zWYGn?sAjF9s!vFmhEjeisZD;6;2QI#J2F3+A(^&Ob4PV#sy?q|ifmSdC9yLEv#Lkb zB4}d;>p4T6J*>`wFP(9k^#S0*ECJwlI$)NnSE4ywMMvbc_>?4_oaO7Wyd2nv_*c9t z7}F88l7VD5@nEmKK5J#4Eo1-_i7IEE%?_Al)fv-m6hca}TrGR0aMns?s!^@ZfzfRd z0u+(*tzvA0l$Nv28kC|Ez16NmkjY2b%UfB?d&eyAwiK!stfJH!CCc<{DpCO|P(jtH zy7X7K+N3VlUz^os>T>Qnvj09LD*8dz1X ecBrHpEG~hEXaB8p%67X{J5hSTe~rkUwEr(LO30D` From ede26d945f06522947e1077c8d0b30c1537ae688 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Tue, 19 Apr 2022 14:16:13 -0400 Subject: [PATCH 100/653] Add Gradle 7.4 Aggregated Test Reports (#2821) Signed-off-by: Andriy Redko --- build.gradle | 24 +++++++++++ gradle/code-coverage.gradle | 83 +++++++------------------------------ 2 files changed, 38 insertions(+), 69 deletions(-) diff --git a/build.gradle b/build.gradle index 7949872a85b86..ae2247bb865d1 100644 --- a/build.gradle +++ b/build.gradle @@ -50,6 +50,8 @@ plugins { id 'opensearch.global-build-info' id "com.diffplug.spotless" version "6.4.2" apply false id "org.gradle.test-retry" version "1.3.2" apply false + id "test-report-aggregation" + id 'jacoco-report-aggregation' } apply from: 'gradle/build-complete.gradle' @@ -385,6 +387,15 @@ gradle.projectsEvaluated { } } } + + dependencies { + subprojects.findAll { it.pluginManager.hasPlugin('java') }.forEach { + testReportAggregation it + } + subprojects.findAll { it.pluginManager.hasPlugin('jacoco') 
}.forEach { + jacocoAggregation it + } + } } // test retry configuration @@ -402,6 +413,7 @@ subprojects { // eclipse configuration allprojects { apply plugin: 'eclipse' + // Name all the non-root projects after their path so that paths get grouped together when imported into eclipse. if (path != ':') { eclipse.project.name = path @@ -558,3 +570,15 @@ subprojects { } } } + +reporting { + reports { + testAggregateTestReport(AggregateTestReport) { + testType = TestSuiteType.UNIT_TEST + } + } +} + +tasks.named(JavaBasePlugin.CHECK_TASK_NAME) { + dependsOn tasks.named('testAggregateTestReport', TestReport) +} diff --git a/gradle/code-coverage.gradle b/gradle/code-coverage.gradle index de041eae7b72d..61719282c1ca2 100644 --- a/gradle/code-coverage.gradle +++ b/gradle/code-coverage.gradle @@ -10,92 +10,37 @@ apply plugin: 'jacoco' repositories { mavenCentral() + gradlePluginPortal() } allprojects { plugins.withId('jacoco') { - // The default JaCoCo version in Gradle 6.6.1 is 0.8.5, but at least version 0.8.6 officially supports Java 14 - jacoco.toolVersion = '0.8.7' + jacoco.toolVersion = '0.8.8' } } -def codeCoverageReportTask = tasks.register("codeCoverageReport", JacocoReport) { - description = 'Generates aggregate report from all subprojects.' - executionData.setFrom fileTree(dir: '.', include: '**/build/jacoco/*.exec') - dependsOn subprojects.findAll(s -> s.tasks.findByName('check') != null).check -} - -tasks.register("codeCoverageReportForUnitTest", JacocoReport) { - description = 'Generates aggregate report from all subprojects for unit test.' - executionData.setFrom fileTree(dir: '.', include: '**/build/jacoco/test.exec') -} - -tasks.register("codeCoverageReportForIntegrationTest", JacocoReport) { - description = 'Generates aggregate report from all subprojects for integration test.' 
- // These kinds of tests are integration test, and the tests can be ran by Gradle tasks with the same name - def integrationTestExecPathList = ['**/build/jacoco/integTest.exec', - '**/build/jacoco/internalClusterTest.exec', - '**/build/jacoco/javaRestTest.exec', - '**/build/jacoco/yamlRestTest.exec' ] - executionData.setFrom fileTree(dir: '.', include: integrationTestExecPathList) -} - tasks.withType(JacocoReport).configureEach { group = JavaBasePlugin.VERIFICATION_GROUP - // Select projects with corresponding tests in order to run proper tests and select proper classes to generate the report - def projectsWithJavaPlugin = subprojects.findAll { it.pluginManager.hasPlugin('java') } - def projectsWithUnitTest = projectsWithJavaPlugin.findAll { it.tasks.findByName('test').enabled } - def projectsWithIntegTest = projectsWithJavaPlugin.findAll {it.tasks.findByName('integTest')} - def projectsWithAsyncIntegTest = projectsWithJavaPlugin.findAll {it.tasks.findByName('asyncIntegTest')} - def projectsWithInternalClusterTest = projectsWithJavaPlugin.findAll {it.tasks.findByName('internalClusterTest')} - def projectsWithPooledInternalClusterTest = projectsWithJavaPlugin.findAll {it.tasks.findByName('pooledInternalClusterTest')} - def projectsWithJavaRestTest = projectsWithJavaPlugin.findAll {it.tasks.findByName('javaRestTest')} - def projectsWithYamlRestTest = projectsWithJavaPlugin.findAll {it.tasks.findByName('yamlRestTest')} - def projectsWithIntegrationTest = projectsWithIntegTest + projectsWithAsyncIntegTest + projectsWithInternalClusterTest + projectsWithPooledInternalClusterTest + projectsWithJavaRestTest + projectsWithYamlRestTest - def projectsWithTest = projectsWithUnitTest + projectsWithIntegrationTest - - def selectedProjects - switch (name) { - case "codeCoverageReportForUnitTest": - dependsOn projectsWithUnitTest.test - selectedProjects = projectsWithUnitTest - break - case "codeCoverageReportForIntegrationTest": - dependsOn projectsWithIntegTest.integTest - 
dependsOn projectsWithAsyncIntegTest.asyncIntegTest - dependsOn projectsWithInternalClusterTest.internalClusterTest - dependsOn projectsWithPooledInternalClusterTest.pooledInternalClusterTest - dependsOn projectsWithJavaRestTest.javaRestTest - dependsOn projectsWithYamlRestTest.yamlRestTest - selectedProjects = projectsWithIntegrationTest - break - default: - dependsOn projectsWithUnitTest.test - dependsOn projectsWithIntegTest.integTest - dependsOn projectsWithAsyncIntegTest.asyncIntegTest - dependsOn projectsWithInternalClusterTest.internalClusterTest - dependsOn projectsWithPooledInternalClusterTest.pooledInternalClusterTest - dependsOn projectsWithJavaRestTest.javaRestTest - dependsOn projectsWithYamlRestTest.yamlRestTest - selectedProjects = projectsWithJavaPlugin - break - } - - sourceDirectories.setFrom files(selectedProjects.sourceSets.main.allSource.srcDirs) - classDirectories.setFrom files(selectedProjects.sourceSets.main.output) - reports { // Code coverage report in HTML and CSV formats are on demand, in case they take extra disk space. 
- xml.getRequired().set(System.getProperty('tests.coverage.report.xml', 'true').toBoolean()) - html.getRequired().set(System.getProperty('tests.coverage.report.html', 'false').toBoolean()) - csv.getRequired().set(System.getProperty('tests.coverage.report.csv', 'false').toBoolean()) + xml.required = System.getProperty('tests.coverage.report.xml', 'true').toBoolean() + html.required = System.getProperty('tests.coverage.report.html', 'false').toBoolean() + csv.required = System.getProperty('tests.coverage.report.csv', 'false').toBoolean() } } if (System.getProperty("tests.coverage")) { + reporting { + reports { + testCodeCoverageReport(JacocoCoverageReport) { + testType = TestSuiteType.UNIT_TEST + } + } + } + // Attach code coverage report task to Gradle check task project.getTasks().named(JavaBasePlugin.CHECK_TASK_NAME).configure { - dependsOn codeCoverageReportTask + dependsOn tasks.named('testCodeCoverageReport', JacocoReport) } } From 18f4495f4d0ad5f6c6f0fa68447f43fb7544514d Mon Sep 17 00:00:00 2001 From: Tianli Feng Date: Tue, 19 Apr 2022 11:52:02 -0700 Subject: [PATCH 101/653] Replace parameter 'master_timeout' with 'cluster_manager_tiemout' in RequestConverters of High-Level-Rest-Client (#2683) The REST API request parameter "master_timeout" will be deprecated in server version 3.0, and removed in 4.0. The alternative parameter "cluster_manager_timeout" is added in server 2.0 . With the change in this commit, High-Level-Rest-Client will not be compatible with OpenSearch server 1.x (and below). - Use parameter `cluster_manager_timeout` instead of `master_timeout` in High-Level-Rest-Client `RequestConverters` class for building REST requests - Modify corresponding unit tests - Change lots of "master timeout" in internal method and class names. 
Signed-off-by: Tianli Feng --- .../client/ClusterRequestConverters.java | 14 +++--- .../client/IndicesRequestConverters.java | 46 +++++++++---------- .../client/IngestRequestConverters.java | 6 +-- .../opensearch/client/RequestConverters.java | 14 ++++-- .../client/SnapshotRequestConverters.java | 22 ++++----- .../client/ClusterRequestConvertersTests.java | 14 +++--- .../client/IndicesRequestConvertersTests.java | 34 +++++++------- .../client/IngestRequestConvertersTests.java | 6 +-- .../client/RequestConvertersTests.java | 34 +++++++------- .../SnapshotRequestConvertersTests.java | 20 ++++---- 10 files changed, 109 insertions(+), 101 deletions(-) diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/ClusterRequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/ClusterRequestConverters.java index 1cf52ac4169ba..da90521512dea 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/ClusterRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/ClusterRequestConverters.java @@ -58,7 +58,7 @@ static Request clusterPutSettings(ClusterUpdateSettingsRequest clusterUpdateSett RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withTimeout(clusterUpdateSettingsRequest.timeout()); - parameters.withMasterTimeout(clusterUpdateSettingsRequest.masterNodeTimeout()); + parameters.withClusterManagerTimeout(clusterUpdateSettingsRequest.masterNodeTimeout()); request.addParameters(parameters.asMap()); request.setEntity(RequestConverters.createEntity(clusterUpdateSettingsRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); return request; @@ -69,7 +69,7 @@ static Request clusterGetSettings(ClusterGetSettingsRequest clusterGetSettingsRe RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withLocal(clusterGetSettingsRequest.local()); parameters.withIncludeDefaults(clusterGetSettingsRequest.includeDefaults()); - 
parameters.withMasterTimeout(clusterGetSettingsRequest.masterNodeTimeout()); + parameters.withClusterManagerTimeout(clusterGetSettingsRequest.masterNodeTimeout()); request.addParameters(parameters.asMap()); return request; } @@ -88,7 +88,7 @@ static Request clusterHealth(ClusterHealthRequest healthRequest) { .withWaitForNodes(healthRequest.waitForNodes()) .withWaitForEvents(healthRequest.waitForEvents()) .withTimeout(healthRequest.timeout()) - .withMasterTimeout(healthRequest.masterNodeTimeout()) + .withClusterManagerTimeout(healthRequest.masterNodeTimeout()) .withLocal(healthRequest.local()) .withLevel(healthRequest.level()); request.addParameters(params.asMap()); @@ -105,7 +105,7 @@ static Request putComponentTemplate(PutComponentTemplateRequest putComponentTemp .build(); Request request = new Request(HttpPut.METHOD_NAME, endpoint); RequestConverters.Params params = new RequestConverters.Params(); - params.withMasterTimeout(putComponentTemplateRequest.masterNodeTimeout()); + params.withClusterManagerTimeout(putComponentTemplateRequest.masterNodeTimeout()); if (putComponentTemplateRequest.create()) { params.putParam("create", Boolean.TRUE.toString()); } @@ -124,7 +124,7 @@ static Request getComponentTemplates(GetComponentTemplatesRequest getComponentTe final Request request = new Request(HttpGet.METHOD_NAME, endpoint); final RequestConverters.Params params = new RequestConverters.Params(); params.withLocal(getComponentTemplatesRequest.isLocal()); - params.withMasterTimeout(getComponentTemplatesRequest.getMasterNodeTimeout()); + params.withClusterManagerTimeout(getComponentTemplatesRequest.getMasterNodeTimeout()); request.addParameters(params.asMap()); return request; } @@ -136,7 +136,7 @@ static Request componentTemplatesExist(ComponentTemplatesExistRequest componentT final Request request = new Request(HttpHead.METHOD_NAME, endpoint); final RequestConverters.Params params = new RequestConverters.Params(); params.withLocal(componentTemplatesRequest.isLocal()); - 
params.withMasterTimeout(componentTemplatesRequest.getMasterNodeTimeout()); + params.withClusterManagerTimeout(componentTemplatesRequest.getMasterNodeTimeout()); request.addParameters(params.asMap()); return request; } @@ -146,7 +146,7 @@ static Request deleteComponentTemplate(DeleteComponentTemplateRequest deleteComp String endpoint = new RequestConverters.EndpointBuilder().addPathPartAsIs("_component_template").addPathPart(name).build(); Request request = new Request(HttpDelete.METHOD_NAME, endpoint); RequestConverters.Params params = new RequestConverters.Params(); - params.withMasterTimeout(deleteComponentTemplateRequest.masterNodeTimeout()); + params.withClusterManagerTimeout(deleteComponentTemplateRequest.masterNodeTimeout()); request.addParameters(params.asMap()); return request; } diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/IndicesRequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/IndicesRequestConverters.java index c50ea58982e4e..4bd2f57e6b998 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/IndicesRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/IndicesRequestConverters.java @@ -119,7 +119,7 @@ static Request deleteIndex(DeleteIndexRequest deleteIndexRequest) { RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withTimeout(deleteIndexRequest.timeout()); - parameters.withMasterTimeout(deleteIndexRequest.masterNodeTimeout()); + parameters.withClusterManagerTimeout(deleteIndexRequest.masterNodeTimeout()); parameters.withIndicesOptions(deleteIndexRequest.indicesOptions()); request.addParameters(parameters.asMap()); return request; @@ -131,7 +131,7 @@ static Request openIndex(OpenIndexRequest openIndexRequest) { RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withTimeout(openIndexRequest.timeout()); - parameters.withMasterTimeout(openIndexRequest.masterNodeTimeout()); + 
parameters.withClusterManagerTimeout(openIndexRequest.masterNodeTimeout()); parameters.withWaitForActiveShards(openIndexRequest.waitForActiveShards()); parameters.withIndicesOptions(openIndexRequest.indicesOptions()); request.addParameters(parameters.asMap()); @@ -144,7 +144,7 @@ static Request closeIndex(CloseIndexRequest closeIndexRequest) { RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withTimeout(closeIndexRequest.timeout()); - parameters.withMasterTimeout(closeIndexRequest.masterNodeTimeout()); + parameters.withClusterManagerTimeout(closeIndexRequest.masterNodeTimeout()); parameters.withIndicesOptions(closeIndexRequest.indicesOptions()); request.addParameters(parameters.asMap()); return request; @@ -156,7 +156,7 @@ static Request createIndex(CreateIndexRequest createIndexRequest) throws IOExcep RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withTimeout(createIndexRequest.timeout()); - parameters.withMasterTimeout(createIndexRequest.masterNodeTimeout()); + parameters.withClusterManagerTimeout(createIndexRequest.masterNodeTimeout()); parameters.withWaitForActiveShards(createIndexRequest.waitForActiveShards()); request.addParameters(parameters.asMap()); request.setEntity(RequestConverters.createEntity(createIndexRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); @@ -168,7 +168,7 @@ static Request updateAliases(IndicesAliasesRequest indicesAliasesRequest) throws RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withTimeout(indicesAliasesRequest.timeout()); - parameters.withMasterTimeout(indicesAliasesRequest.masterNodeTimeout()); + parameters.withClusterManagerTimeout(indicesAliasesRequest.masterNodeTimeout()); request.addParameters(parameters.asMap()); request.setEntity(RequestConverters.createEntity(indicesAliasesRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); return request; @@ -179,7 +179,7 @@ static Request putMapping(PutMappingRequest 
putMappingRequest) throws IOExceptio RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withTimeout(putMappingRequest.timeout()); - parameters.withMasterTimeout(putMappingRequest.masterNodeTimeout()); + parameters.withClusterManagerTimeout(putMappingRequest.masterNodeTimeout()); parameters.withIndicesOptions(putMappingRequest.indicesOptions()); request.addParameters(parameters.asMap()); request.setEntity(RequestConverters.createEntity(putMappingRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); @@ -192,7 +192,7 @@ static Request getMappings(GetMappingsRequest getMappingsRequest) { Request request = new Request(HttpGet.METHOD_NAME, RequestConverters.endpoint(indices, "_mapping")); RequestConverters.Params parameters = new RequestConverters.Params(); - parameters.withMasterTimeout(getMappingsRequest.masterNodeTimeout()); + parameters.withClusterManagerTimeout(getMappingsRequest.masterNodeTimeout()); parameters.withIndicesOptions(getMappingsRequest.indicesOptions()); parameters.withLocal(getMappingsRequest.local()); request.addParameters(parameters.asMap()); @@ -332,7 +332,7 @@ private static Request resize(ResizeRequest resizeRequest, ResizeType type) thro RequestConverters.Params params = new RequestConverters.Params(); params.withTimeout(resizeRequest.timeout()); - params.withMasterTimeout(resizeRequest.masterNodeTimeout()); + params.withClusterManagerTimeout(resizeRequest.masterNodeTimeout()); params.withWaitForActiveShards(resizeRequest.getWaitForActiveShards()); request.addParameters(params.asMap()); request.setEntity(RequestConverters.createEntity(resizeRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); @@ -349,7 +349,7 @@ private static Request resize(org.opensearch.action.admin.indices.shrink.ResizeR RequestConverters.Params params = new RequestConverters.Params(); params.withTimeout(resizeRequest.timeout()); - params.withMasterTimeout(resizeRequest.masterNodeTimeout()); + 
params.withClusterManagerTimeout(resizeRequest.masterNodeTimeout()); params.withWaitForActiveShards(resizeRequest.getTargetIndexRequest().waitForActiveShards()); request.addParameters(params.asMap()); request.setEntity(RequestConverters.createEntity(resizeRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); @@ -365,7 +365,7 @@ static Request rollover(RolloverRequest rolloverRequest) throws IOException { RequestConverters.Params params = new RequestConverters.Params(); params.withTimeout(rolloverRequest.timeout()); - params.withMasterTimeout(rolloverRequest.masterNodeTimeout()); + params.withClusterManagerTimeout(rolloverRequest.masterNodeTimeout()); params.withWaitForActiveShards(rolloverRequest.getCreateIndexRequest().waitForActiveShards()); if (rolloverRequest.isDryRun()) { params.putParam("dry_run", Boolean.TRUE.toString()); @@ -386,7 +386,7 @@ static Request getSettings(GetSettingsRequest getSettingsRequest) { params.withIndicesOptions(getSettingsRequest.indicesOptions()); params.withLocal(getSettingsRequest.local()); params.withIncludeDefaults(getSettingsRequest.includeDefaults()); - params.withMasterTimeout(getSettingsRequest.masterNodeTimeout()); + params.withClusterManagerTimeout(getSettingsRequest.masterNodeTimeout()); request.addParameters(params.asMap()); return request; } @@ -402,7 +402,7 @@ static Request getIndex(GetIndexRequest getIndexRequest) { params.withLocal(getIndexRequest.local()); params.withIncludeDefaults(getIndexRequest.includeDefaults()); params.withHuman(getIndexRequest.humanReadable()); - params.withMasterTimeout(getIndexRequest.masterNodeTimeout()); + params.withClusterManagerTimeout(getIndexRequest.masterNodeTimeout()); request.addParameters(params.asMap()); return request; } @@ -429,7 +429,7 @@ static Request indexPutSettings(UpdateSettingsRequest updateSettingsRequest) thr RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withTimeout(updateSettingsRequest.timeout()); - 
parameters.withMasterTimeout(updateSettingsRequest.masterNodeTimeout()); + parameters.withClusterManagerTimeout(updateSettingsRequest.masterNodeTimeout()); parameters.withIndicesOptions(updateSettingsRequest.indicesOptions()); parameters.withPreserveExisting(updateSettingsRequest.isPreserveExisting()); request.addParameters(parameters.asMap()); @@ -443,7 +443,7 @@ static Request putTemplate(PutIndexTemplateRequest putIndexTemplateRequest) thro .build(); Request request = new Request(HttpPut.METHOD_NAME, endpoint); RequestConverters.Params params = new RequestConverters.Params(); - params.withMasterTimeout(putIndexTemplateRequest.masterNodeTimeout()); + params.withClusterManagerTimeout(putIndexTemplateRequest.masterNodeTimeout()); if (putIndexTemplateRequest.create()) { params.putParam("create", Boolean.TRUE.toString()); } @@ -461,7 +461,7 @@ static Request putIndexTemplate(PutComposableIndexTemplateRequest putIndexTempla .build(); Request request = new Request(HttpPut.METHOD_NAME, endpoint); RequestConverters.Params params = new RequestConverters.Params(); - params.withMasterTimeout(putIndexTemplateRequest.masterNodeTimeout()); + params.withClusterManagerTimeout(putIndexTemplateRequest.masterNodeTimeout()); if (putIndexTemplateRequest.create()) { params.putParam("create", Boolean.TRUE.toString()); } @@ -479,7 +479,7 @@ static Request simulateIndexTemplate(SimulateIndexTemplateRequest simulateIndexT .build(); Request request = new Request(HttpPost.METHOD_NAME, endpoint); RequestConverters.Params params = new RequestConverters.Params(); - params.withMasterTimeout(simulateIndexTemplateRequest.masterNodeTimeout()); + params.withClusterManagerTimeout(simulateIndexTemplateRequest.masterNodeTimeout()); PutComposableIndexTemplateRequest putComposableIndexTemplateRequest = simulateIndexTemplateRequest.indexTemplateV2Request(); if (putComposableIndexTemplateRequest != null) { if (putComposableIndexTemplateRequest.create()) { @@ -529,7 +529,7 @@ static Request 
getTemplates(GetIndexTemplatesRequest getIndexTemplatesRequest) { final Request request = new Request(HttpGet.METHOD_NAME, endpoint); final RequestConverters.Params params = new RequestConverters.Params(); params.withLocal(getIndexTemplatesRequest.isLocal()); - params.withMasterTimeout(getIndexTemplatesRequest.getMasterNodeTimeout()); + params.withClusterManagerTimeout(getIndexTemplatesRequest.getMasterNodeTimeout()); request.addParameters(params.asMap()); return request; } @@ -541,7 +541,7 @@ static Request getIndexTemplates(GetComposableIndexTemplateRequest getIndexTempl final Request request = new Request(HttpGet.METHOD_NAME, endpoint); final RequestConverters.Params params = new RequestConverters.Params(); params.withLocal(getIndexTemplatesRequest.isLocal()); - params.withMasterTimeout(getIndexTemplatesRequest.getMasterNodeTimeout()); + params.withClusterManagerTimeout(getIndexTemplatesRequest.getMasterNodeTimeout()); request.addParameters(params.asMap()); return request; } @@ -553,7 +553,7 @@ static Request templatesExist(IndexTemplatesExistRequest indexTemplatesExistRequ final Request request = new Request(HttpHead.METHOD_NAME, endpoint); final RequestConverters.Params params = new RequestConverters.Params(); params.withLocal(indexTemplatesExistRequest.isLocal()); - params.withMasterTimeout(indexTemplatesExistRequest.getMasterNodeTimeout()); + params.withClusterManagerTimeout(indexTemplatesExistRequest.getMasterNodeTimeout()); request.addParameters(params.asMap()); return request; } @@ -565,7 +565,7 @@ static Request templatesExist(ComposableIndexTemplateExistRequest indexTemplates final Request request = new Request(HttpHead.METHOD_NAME, endpoint); final RequestConverters.Params params = new RequestConverters.Params(); params.withLocal(indexTemplatesExistRequest.isLocal()); - params.withMasterTimeout(indexTemplatesExistRequest.getMasterNodeTimeout()); + params.withClusterManagerTimeout(indexTemplatesExistRequest.getMasterNodeTimeout()); 
request.addParameters(params.asMap()); return request; } @@ -587,7 +587,7 @@ static Request deleteTemplate(DeleteIndexTemplateRequest deleteIndexTemplateRequ String endpoint = new RequestConverters.EndpointBuilder().addPathPartAsIs("_template").addPathPart(name).build(); Request request = new Request(HttpDelete.METHOD_NAME, endpoint); RequestConverters.Params params = new RequestConverters.Params(); - params.withMasterTimeout(deleteIndexTemplateRequest.masterNodeTimeout()); + params.withClusterManagerTimeout(deleteIndexTemplateRequest.masterNodeTimeout()); request.addParameters(params.asMap()); return request; } @@ -597,7 +597,7 @@ static Request deleteIndexTemplate(DeleteComposableIndexTemplateRequest deleteIn String endpoint = new RequestConverters.EndpointBuilder().addPathPartAsIs("_index_template").addPathPart(name).build(); Request request = new Request(HttpDelete.METHOD_NAME, endpoint); RequestConverters.Params params = new RequestConverters.Params(); - params.withMasterTimeout(deleteIndexTemplateRequest.masterNodeTimeout()); + params.withClusterManagerTimeout(deleteIndexTemplateRequest.masterNodeTimeout()); request.addParameters(params.asMap()); return request; } @@ -610,7 +610,7 @@ static Request deleteAlias(DeleteAliasRequest deleteAliasRequest) { Request request = new Request(HttpDelete.METHOD_NAME, endpoint); RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withTimeout(deleteAliasRequest.timeout()); - parameters.withMasterTimeout(deleteAliasRequest.masterNodeTimeout()); + parameters.withClusterManagerTimeout(deleteAliasRequest.masterNodeTimeout()); request.addParameters(parameters.asMap()); return request; } diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/IngestRequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/IngestRequestConverters.java index e2ede61f38ee9..829f6cf0bbba4 100644 --- 
a/client/rest-high-level/src/main/java/org/opensearch/client/IngestRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/IngestRequestConverters.java @@ -54,7 +54,7 @@ static Request getPipeline(GetPipelineRequest getPipelineRequest) { Request request = new Request(HttpGet.METHOD_NAME, endpoint); RequestConverters.Params parameters = new RequestConverters.Params(); - parameters.withMasterTimeout(getPipelineRequest.masterNodeTimeout()); + parameters.withClusterManagerTimeout(getPipelineRequest.masterNodeTimeout()); request.addParameters(parameters.asMap()); return request; } @@ -67,7 +67,7 @@ static Request putPipeline(PutPipelineRequest putPipelineRequest) throws IOExcep RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withTimeout(putPipelineRequest.timeout()); - parameters.withMasterTimeout(putPipelineRequest.masterNodeTimeout()); + parameters.withClusterManagerTimeout(putPipelineRequest.masterNodeTimeout()); request.addParameters(parameters.asMap()); request.setEntity(RequestConverters.createEntity(putPipelineRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); return request; @@ -81,7 +81,7 @@ static Request deletePipeline(DeletePipelineRequest deletePipelineRequest) { RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withTimeout(deletePipelineRequest.timeout()); - parameters.withMasterTimeout(deletePipelineRequest.masterNodeTimeout()); + parameters.withClusterManagerTimeout(deletePipelineRequest.masterNodeTimeout()); request.addParameters(parameters.asMap()); return request; } diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java index 3e43963db519f..7a6227a7c2ec2 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java +++ 
b/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java @@ -702,7 +702,7 @@ static Request putScript(PutStoredScriptRequest putStoredScriptRequest) throws I Request request = new Request(HttpPost.METHOD_NAME, endpoint); Params params = new Params(); params.withTimeout(putStoredScriptRequest.timeout()); - params.withMasterTimeout(putStoredScriptRequest.masterNodeTimeout()); + params.withClusterManagerTimeout(putStoredScriptRequest.masterNodeTimeout()); if (Strings.hasText(putStoredScriptRequest.context())) { params.putParam("context", putStoredScriptRequest.context()); } @@ -757,7 +757,7 @@ static Request getScript(GetStoredScriptRequest getStoredScriptRequest) { String endpoint = new EndpointBuilder().addPathPartAsIs("_scripts").addPathPart(getStoredScriptRequest.id()).build(); Request request = new Request(HttpGet.METHOD_NAME, endpoint); Params params = new Params(); - params.withMasterTimeout(getStoredScriptRequest.masterNodeTimeout()); + params.withClusterManagerTimeout(getStoredScriptRequest.masterNodeTimeout()); request.addParameters(params.asMap()); return request; } @@ -767,7 +767,7 @@ static Request deleteScript(DeleteStoredScriptRequest deleteStoredScriptRequest) Request request = new Request(HttpDelete.METHOD_NAME, endpoint); Params params = new Params(); params.withTimeout(deleteStoredScriptRequest.timeout()); - params.withMasterTimeout(deleteStoredScriptRequest.masterNodeTimeout()); + params.withClusterManagerTimeout(deleteStoredScriptRequest.masterNodeTimeout()); request.addParameters(params.asMap()); return request; } @@ -891,10 +891,18 @@ Params withFields(String[] fields) { return this; } + /** + * @deprecated As of 2.0, because supporting inclusive language, replaced by {@link #withClusterManagerTimeout(TimeValue)} + */ + @Deprecated Params withMasterTimeout(TimeValue masterTimeout) { return putParam("master_timeout", masterTimeout); } + Params withClusterManagerTimeout(TimeValue clusterManagerTimeout) { + return 
putParam("cluster_manager_timeout", clusterManagerTimeout); + } + Params withPipeline(String pipeline) { return putParam("pipeline", pipeline); } diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/SnapshotRequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/SnapshotRequestConverters.java index 3c92bb5ec2ab8..3b2c72266a30b 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/SnapshotRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/SnapshotRequestConverters.java @@ -63,7 +63,7 @@ static Request getRepositories(GetRepositoriesRequest getRepositoriesRequest) { Request request = new Request(HttpGet.METHOD_NAME, endpoint); RequestConverters.Params parameters = new RequestConverters.Params(); - parameters.withMasterTimeout(getRepositoriesRequest.masterNodeTimeout()); + parameters.withClusterManagerTimeout(getRepositoriesRequest.masterNodeTimeout()); parameters.withLocal(getRepositoriesRequest.local()); request.addParameters(parameters.asMap()); return request; @@ -74,7 +74,7 @@ static Request createRepository(PutRepositoryRequest putRepositoryRequest) throw Request request = new Request(HttpPut.METHOD_NAME, endpoint); RequestConverters.Params parameters = new RequestConverters.Params(); - parameters.withMasterTimeout(putRepositoryRequest.masterNodeTimeout()); + parameters.withClusterManagerTimeout(putRepositoryRequest.masterNodeTimeout()); parameters.withTimeout(putRepositoryRequest.timeout()); if (putRepositoryRequest.verify() == false) { parameters.putParam("verify", "false"); @@ -91,7 +91,7 @@ static Request deleteRepository(DeleteRepositoryRequest deleteRepositoryRequest) Request request = new Request(HttpDelete.METHOD_NAME, endpoint); RequestConverters.Params parameters = new RequestConverters.Params(); - parameters.withMasterTimeout(deleteRepositoryRequest.masterNodeTimeout()); + 
parameters.withClusterManagerTimeout(deleteRepositoryRequest.masterNodeTimeout()); parameters.withTimeout(deleteRepositoryRequest.timeout()); request.addParameters(parameters.asMap()); return request; @@ -105,7 +105,7 @@ static Request verifyRepository(VerifyRepositoryRequest verifyRepositoryRequest) Request request = new Request(HttpPost.METHOD_NAME, endpoint); RequestConverters.Params parameters = new RequestConverters.Params(); - parameters.withMasterTimeout(verifyRepositoryRequest.masterNodeTimeout()); + parameters.withClusterManagerTimeout(verifyRepositoryRequest.masterNodeTimeout()); parameters.withTimeout(verifyRepositoryRequest.timeout()); request.addParameters(parameters.asMap()); return request; @@ -119,7 +119,7 @@ static Request cleanupRepository(CleanupRepositoryRequest cleanupRepositoryReque Request request = new Request(HttpPost.METHOD_NAME, endpoint); RequestConverters.Params parameters = new RequestConverters.Params(); - parameters.withMasterTimeout(cleanupRepositoryRequest.masterNodeTimeout()); + parameters.withClusterManagerTimeout(cleanupRepositoryRequest.masterNodeTimeout()); parameters.withTimeout(cleanupRepositoryRequest.timeout()); request.addParameters(parameters.asMap()); return request; @@ -132,7 +132,7 @@ static Request createSnapshot(CreateSnapshotRequest createSnapshotRequest) throw .build(); Request request = new Request(HttpPut.METHOD_NAME, endpoint); RequestConverters.Params params = new RequestConverters.Params(); - params.withMasterTimeout(createSnapshotRequest.masterNodeTimeout()); + params.withClusterManagerTimeout(createSnapshotRequest.masterNodeTimeout()); params.withWaitForCompletion(createSnapshotRequest.waitForCompletion()); request.addParameters(params.asMap()); request.setEntity(RequestConverters.createEntity(createSnapshotRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); @@ -148,7 +148,7 @@ static Request cloneSnapshot(CloneSnapshotRequest cloneSnapshotRequest) throws I .build(); Request request = new 
Request(HttpPut.METHOD_NAME, endpoint); RequestConverters.Params params = new RequestConverters.Params(); - params.withMasterTimeout(cloneSnapshotRequest.masterNodeTimeout()); + params.withClusterManagerTimeout(cloneSnapshotRequest.masterNodeTimeout()); request.addParameters(params.asMap()); request.setEntity(RequestConverters.createEntity(cloneSnapshotRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); return request; @@ -167,7 +167,7 @@ static Request getSnapshots(GetSnapshotsRequest getSnapshotsRequest) { Request request = new Request(HttpGet.METHOD_NAME, endpoint); RequestConverters.Params parameters = new RequestConverters.Params(); - parameters.withMasterTimeout(getSnapshotsRequest.masterNodeTimeout()); + parameters.withClusterManagerTimeout(getSnapshotsRequest.masterNodeTimeout()); parameters.putParam("ignore_unavailable", Boolean.toString(getSnapshotsRequest.ignoreUnavailable())); parameters.putParam("verbose", Boolean.toString(getSnapshotsRequest.verbose())); request.addParameters(parameters.asMap()); @@ -183,7 +183,7 @@ static Request snapshotsStatus(SnapshotsStatusRequest snapshotsStatusRequest) { Request request = new Request(HttpGet.METHOD_NAME, endpoint); RequestConverters.Params parameters = new RequestConverters.Params(); - parameters.withMasterTimeout(snapshotsStatusRequest.masterNodeTimeout()); + parameters.withClusterManagerTimeout(snapshotsStatusRequest.masterNodeTimeout()); parameters.withIgnoreUnavailable(snapshotsStatusRequest.ignoreUnavailable()); request.addParameters(parameters.asMap()); return request; @@ -197,7 +197,7 @@ static Request restoreSnapshot(RestoreSnapshotRequest restoreSnapshotRequest) th .build(); Request request = new Request(HttpPost.METHOD_NAME, endpoint); RequestConverters.Params parameters = new RequestConverters.Params(); - parameters.withMasterTimeout(restoreSnapshotRequest.masterNodeTimeout()); + parameters.withClusterManagerTimeout(restoreSnapshotRequest.masterNodeTimeout()); 
parameters.withWaitForCompletion(restoreSnapshotRequest.waitForCompletion()); request.addParameters(parameters.asMap()); request.setEntity(RequestConverters.createEntity(restoreSnapshotRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); @@ -212,7 +212,7 @@ static Request deleteSnapshot(DeleteSnapshotRequest deleteSnapshotRequest) { Request request = new Request(HttpDelete.METHOD_NAME, endpoint); RequestConverters.Params parameters = new RequestConverters.Params(); - parameters.withMasterTimeout(deleteSnapshotRequest.masterNodeTimeout()); + parameters.withClusterManagerTimeout(deleteSnapshotRequest.masterNodeTimeout()); request.addParameters(parameters.asMap()); return request; } diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/ClusterRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/ClusterRequestConvertersTests.java index 2af164a51dbab..ed0a973081b62 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/ClusterRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/ClusterRequestConvertersTests.java @@ -61,7 +61,7 @@ public class ClusterRequestConvertersTests extends OpenSearchTestCase { public void testClusterPutSettings() throws IOException { ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest(); Map expectedParams = new HashMap<>(); - RequestConvertersTests.setRandomMasterTimeout(request, expectedParams); + RequestConvertersTests.setRandomClusterManagerTimeout(request, expectedParams); RequestConvertersTests.setRandomTimeout(request::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); Request expectedRequest = ClusterRequestConverters.clusterPutSettings(request); @@ -73,7 +73,7 @@ public void testClusterPutSettings() throws IOException { public void testClusterGetSettings() throws IOException { ClusterGetSettingsRequest request = new ClusterGetSettingsRequest(); Map expectedParams = new HashMap<>(); - 
RequestConvertersTests.setRandomMasterTimeout(request, expectedParams); + RequestConvertersTests.setRandomClusterManagerTimeout(request, expectedParams); request.includeDefaults(OpenSearchTestCase.randomBoolean()); if (request.includeDefaults()) { expectedParams.put("include_defaults", String.valueOf(true)); @@ -96,23 +96,23 @@ public void testClusterHealth() { case "timeout": healthRequest.timeout(timeout); expectedParams.put("timeout", timeout); - // If Master Timeout wasn't set it uses the same value as Timeout - expectedParams.put("master_timeout", timeout); + // If Cluster Manager Timeout wasn't set it uses the same value as Timeout + expectedParams.put("cluster_manager_timeout", timeout); break; case "masterTimeout": expectedParams.put("timeout", "30s"); healthRequest.masterNodeTimeout(masterTimeout); - expectedParams.put("master_timeout", masterTimeout); + expectedParams.put("cluster_manager_timeout", masterTimeout); break; case "both": healthRequest.timeout(timeout); expectedParams.put("timeout", timeout); healthRequest.masterNodeTimeout(timeout); - expectedParams.put("master_timeout", timeout); + expectedParams.put("cluster_manager_timeout", timeout); break; case "none": expectedParams.put("timeout", "30s"); - expectedParams.put("master_timeout", "30s"); + expectedParams.put("cluster_manager_timeout", "30s"); break; default: throw new UnsupportedOperationException(); diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java index f853378e789fa..bf6d6c922fdd7 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java @@ -144,7 +144,7 @@ public void testCreateIndex() throws IOException { Map expectedParams = new HashMap<>(); 
RequestConvertersTests.setRandomTimeout(createIndexRequest, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); - RequestConvertersTests.setRandomMasterTimeout(createIndexRequest, expectedParams); + RequestConvertersTests.setRandomClusterManagerTimeout(createIndexRequest, expectedParams); RequestConvertersTests.setRandomWaitForActiveShards(createIndexRequest::waitForActiveShards, expectedParams); Request request = IndicesRequestConverters.createIndex(createIndexRequest); @@ -166,7 +166,7 @@ public void testUpdateAliases() throws IOException { Map expectedParams = new HashMap<>(); RequestConvertersTests.setRandomTimeout(indicesAliasesRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); - RequestConvertersTests.setRandomMasterTimeout(indicesAliasesRequest, expectedParams); + RequestConvertersTests.setRandomClusterManagerTimeout(indicesAliasesRequest, expectedParams); Request request = IndicesRequestConverters.updateAliases(indicesAliasesRequest); Assert.assertEquals("/_aliases", request.getEndpoint()); @@ -180,7 +180,7 @@ public void testPutMapping() throws IOException { Map expectedParams = new HashMap<>(); RequestConvertersTests.setRandomTimeout(putMappingRequest, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); - RequestConvertersTests.setRandomMasterTimeout(putMappingRequest, expectedParams); + RequestConvertersTests.setRandomClusterManagerTimeout(putMappingRequest, expectedParams); RequestConvertersTests.setRandomIndicesOptions( putMappingRequest::indicesOptions, putMappingRequest::indicesOptions, @@ -219,7 +219,7 @@ public void testGetMapping() { getMappingRequest::indicesOptions, expectedParams ); - RequestConvertersTests.setRandomMasterTimeout(getMappingRequest, expectedParams); + RequestConvertersTests.setRandomClusterManagerTimeout(getMappingRequest, expectedParams); RequestConvertersTests.setRandomLocal(getMappingRequest::local, expectedParams); Request request = IndicesRequestConverters.getMappings(getMappingRequest); 
@@ -313,7 +313,7 @@ public void testDeleteIndex() { Map expectedParams = new HashMap<>(); RequestConvertersTests.setRandomTimeout(deleteIndexRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); - RequestConvertersTests.setRandomMasterTimeout(deleteIndexRequest, expectedParams); + RequestConvertersTests.setRandomClusterManagerTimeout(deleteIndexRequest, expectedParams); RequestConvertersTests.setRandomIndicesOptions( deleteIndexRequest::indicesOptions, @@ -334,7 +334,7 @@ public void testGetSettings() throws IOException { GetSettingsRequest getSettingsRequest = new GetSettingsRequest().indices(indicesUnderTest); Map expectedParams = new HashMap<>(); - RequestConvertersTests.setRandomMasterTimeout(getSettingsRequest, expectedParams); + RequestConvertersTests.setRandomClusterManagerTimeout(getSettingsRequest, expectedParams); RequestConvertersTests.setRandomIndicesOptions( getSettingsRequest::indicesOptions, getSettingsRequest::indicesOptions, @@ -385,7 +385,7 @@ public void testGetIndex() throws IOException { GetIndexRequest getIndexRequest = new GetIndexRequest(indicesUnderTest); Map expectedParams = new HashMap<>(); - RequestConvertersTests.setRandomMasterTimeout(getIndexRequest, expectedParams); + RequestConvertersTests.setRandomClusterManagerTimeout(getIndexRequest, expectedParams); RequestConvertersTests.setRandomIndicesOptions(getIndexRequest::indicesOptions, getIndexRequest::indicesOptions, expectedParams); RequestConvertersTests.setRandomLocal(getIndexRequest::local, expectedParams); RequestConvertersTests.setRandomHumanReadable(getIndexRequest::humanReadable, expectedParams); @@ -425,7 +425,7 @@ public void testOpenIndex() { Map expectedParams = new HashMap<>(); RequestConvertersTests.setRandomTimeout(openIndexRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); - RequestConvertersTests.setRandomMasterTimeout(openIndexRequest, expectedParams); + 
RequestConvertersTests.setRandomClusterManagerTimeout(openIndexRequest, expectedParams); RequestConvertersTests.setRandomIndicesOptions(openIndexRequest::indicesOptions, openIndexRequest::indicesOptions, expectedParams); RequestConvertersTests.setRandomWaitForActiveShards(openIndexRequest::waitForActiveShards, expectedParams); @@ -453,7 +453,7 @@ public void testCloseIndex() { AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams ); - RequestConvertersTests.setRandomMasterTimeout(closeIndexRequest, expectedParams); + RequestConvertersTests.setRandomClusterManagerTimeout(closeIndexRequest, expectedParams); RequestConvertersTests.setRandomIndicesOptions( closeIndexRequest::indicesOptions, closeIndexRequest::indicesOptions, @@ -680,7 +680,7 @@ private void resizeTest(ResizeType resizeType, CheckedFunction expectedParams = new HashMap<>(); - RequestConvertersTests.setRandomMasterTimeout(resizeRequest, expectedParams); + RequestConvertersTests.setRandomClusterManagerTimeout(resizeRequest, expectedParams); RequestConvertersTests.setRandomTimeout( s -> resizeRequest.setTimeout(TimeValue.parseTimeValue(s, "timeout")), resizeRequest.timeout(), @@ -723,7 +723,7 @@ public void testRollover() throws IOException { ); Map expectedParams = new HashMap<>(); RequestConvertersTests.setRandomTimeout(rolloverRequest, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); - RequestConvertersTests.setRandomMasterTimeout(rolloverRequest, expectedParams); + RequestConvertersTests.setRandomClusterManagerTimeout(rolloverRequest, expectedParams); if (OpenSearchTestCase.randomBoolean()) { rolloverRequest.dryRun(OpenSearchTestCase.randomBoolean()); if (rolloverRequest.isDryRun()) { @@ -796,7 +796,7 @@ public void testIndexPutSettings() throws IOException { String[] indices = OpenSearchTestCase.randomBoolean() ? 
null : RequestConvertersTests.randomIndicesNames(0, 2); UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest(indices); Map expectedParams = new HashMap<>(); - RequestConvertersTests.setRandomMasterTimeout(updateSettingsRequest, expectedParams); + RequestConvertersTests.setRandomClusterManagerTimeout(updateSettingsRequest, expectedParams); RequestConvertersTests.setRandomTimeout(updateSettingsRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); RequestConvertersTests.setRandomIndicesOptions( updateSettingsRequest::indicesOptions, @@ -866,7 +866,7 @@ public void testPutTemplateRequest() throws Exception { putTemplateRequest.cause(cause); expectedParams.put("cause", cause); } - RequestConvertersTests.setRandomMasterTimeout(putTemplateRequest, expectedParams); + RequestConvertersTests.setRandomClusterManagerTimeout(putTemplateRequest, expectedParams); Request request = IndicesRequestConverters.putTemplate(putTemplateRequest); Assert.assertThat(request.getEndpoint(), equalTo("/_template/" + names.get(putTemplateRequest.name()))); @@ -917,7 +917,7 @@ public void testGetTemplateRequest() throws Exception { List names = OpenSearchTestCase.randomSubsetOf(1, encodes.keySet()); GetIndexTemplatesRequest getTemplatesRequest = new GetIndexTemplatesRequest(names); Map expectedParams = new HashMap<>(); - RequestConvertersTests.setRandomMasterTimeout(getTemplatesRequest::setMasterNodeTimeout, expectedParams); + RequestConvertersTests.setRandomClusterManagerTimeout(getTemplatesRequest::setMasterNodeTimeout, expectedParams); RequestConvertersTests.setRandomLocal(getTemplatesRequest::setLocal, expectedParams); Request request = IndicesRequestConverters.getTemplates(getTemplatesRequest); @@ -946,7 +946,7 @@ public void testTemplatesExistRequest() { ); final Map expectedParams = new HashMap<>(); final IndexTemplatesExistRequest indexTemplatesExistRequest = new IndexTemplatesExistRequest(names); - 
RequestConvertersTests.setRandomMasterTimeout(indexTemplatesExistRequest::setMasterNodeTimeout, expectedParams); + RequestConvertersTests.setRandomClusterManagerTimeout(indexTemplatesExistRequest::setMasterNodeTimeout, expectedParams); RequestConvertersTests.setRandomLocal(indexTemplatesExistRequest::setLocal, expectedParams); assertThat(indexTemplatesExistRequest.names(), equalTo(names)); @@ -973,7 +973,7 @@ public void testDeleteTemplateRequest() { encodes.put("foo^bar", "foo%5Ebar"); DeleteIndexTemplateRequest deleteTemplateRequest = new DeleteIndexTemplateRequest().name(randomFrom(encodes.keySet())); Map expectedParams = new HashMap<>(); - RequestConvertersTests.setRandomMasterTimeout(deleteTemplateRequest, expectedParams); + RequestConvertersTests.setRandomClusterManagerTimeout(deleteTemplateRequest, expectedParams); Request request = IndicesRequestConverters.deleteTemplate(deleteTemplateRequest); Assert.assertThat(request.getMethod(), equalTo(HttpDelete.METHOD_NAME)); Assert.assertThat(request.getEndpoint(), equalTo("/_template/" + encodes.get(deleteTemplateRequest.name()))); @@ -985,7 +985,7 @@ public void testDeleteAlias() { DeleteAliasRequest deleteAliasRequest = new DeleteAliasRequest(randomAlphaOfLength(4), randomAlphaOfLength(4)); Map expectedParams = new HashMap<>(); - RequestConvertersTests.setRandomMasterTimeout(deleteAliasRequest, expectedParams); + RequestConvertersTests.setRandomClusterManagerTimeout(deleteAliasRequest, expectedParams); RequestConvertersTests.setRandomTimeout(deleteAliasRequest, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); Request request = IndicesRequestConverters.deleteAlias(deleteAliasRequest); diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/IngestRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/IngestRequestConvertersTests.java index 0d95b3e7fddc0..e0c7f69325f87 100644 --- 
a/client/rest-high-level/src/test/java/org/opensearch/client/IngestRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/IngestRequestConvertersTests.java @@ -62,7 +62,7 @@ public void testPutPipeline() throws IOException { XContentType.JSON ); Map expectedParams = new HashMap<>(); - RequestConvertersTests.setRandomMasterTimeout(request, expectedParams); + RequestConvertersTests.setRandomClusterManagerTimeout(request, expectedParams); RequestConvertersTests.setRandomTimeout(request::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); Request expectedRequest = IngestRequestConverters.putPipeline(request); @@ -78,7 +78,7 @@ public void testGetPipeline() { String pipelineId = "some_pipeline_id"; Map expectedParams = new HashMap<>(); GetPipelineRequest request = new GetPipelineRequest("some_pipeline_id"); - RequestConvertersTests.setRandomMasterTimeout(request, expectedParams); + RequestConvertersTests.setRandomClusterManagerTimeout(request, expectedParams); Request expectedRequest = IngestRequestConverters.getPipeline(request); StringJoiner endpoint = new StringJoiner("/", "/", ""); endpoint.add("_ingest/pipeline"); @@ -92,7 +92,7 @@ public void testDeletePipeline() { String pipelineId = "some_pipeline_id"; Map expectedParams = new HashMap<>(); DeletePipelineRequest request = new DeletePipelineRequest(pipelineId); - RequestConvertersTests.setRandomMasterTimeout(request, expectedParams); + RequestConvertersTests.setRandomClusterManagerTimeout(request, expectedParams); RequestConvertersTests.setRandomTimeout(request::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); Request expectedRequest = IngestRequestConverters.deletePipeline(request); StringJoiner endpoint = new StringJoiner("/", "/", ""); diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java index 
32c6cde0725b4..66581fdc42c2b 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java @@ -1663,7 +1663,7 @@ public void testPutScript() throws Exception { } Map expectedParams = new HashMap<>(); - setRandomMasterTimeout(putStoredScriptRequest, expectedParams); + setRandomClusterManagerTimeout(putStoredScriptRequest, expectedParams); setRandomTimeout(putStoredScriptRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); if (randomBoolean()) { @@ -1694,7 +1694,7 @@ public void testAnalyzeRequest() throws Exception { public void testGetScriptRequest() { GetStoredScriptRequest getStoredScriptRequest = new GetStoredScriptRequest("x-script"); Map expectedParams = new HashMap<>(); - setRandomMasterTimeout(getStoredScriptRequest, expectedParams); + setRandomClusterManagerTimeout(getStoredScriptRequest, expectedParams); Request request = RequestConverters.getScript(getStoredScriptRequest); assertThat(request.getEndpoint(), equalTo("/_scripts/" + getStoredScriptRequest.id())); @@ -1708,7 +1708,7 @@ public void testDeleteScriptRequest() { Map expectedParams = new HashMap<>(); setRandomTimeout(deleteStoredScriptRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); - setRandomMasterTimeout(deleteStoredScriptRequest, expectedParams); + setRandomClusterManagerTimeout(deleteStoredScriptRequest, expectedParams); Request request = RequestConverters.deleteScript(deleteStoredScriptRequest); assertThat(request.getEndpoint(), equalTo("/_scripts/" + deleteStoredScriptRequest.id())); @@ -2105,34 +2105,34 @@ static void setRandomTimeoutTimeValue(Consumer setter, TimeValue defa } } - static void setRandomMasterTimeout(MasterNodeRequest request, Map expectedParams) { - setRandomMasterTimeout(request::masterNodeTimeout, expectedParams); + static void setRandomClusterManagerTimeout(MasterNodeRequest request, Map 
expectedParams) { + setRandomClusterManagerTimeout(request::masterNodeTimeout, expectedParams); } - static void setRandomMasterTimeout(TimedRequest request, Map expectedParams) { - setRandomMasterTimeout( + static void setRandomClusterManagerTimeout(TimedRequest request, Map expectedParams) { + setRandomClusterManagerTimeout( s -> request.setMasterTimeout(TimeValue.parseTimeValue(s, request.getClass().getName() + ".masterNodeTimeout")), expectedParams ); } - static void setRandomMasterTimeout(Consumer setter, Map expectedParams) { + static void setRandomClusterManagerTimeout(Consumer setter, Map expectedParams) { if (randomBoolean()) { - String masterTimeout = randomTimeValue(); - setter.accept(masterTimeout); - expectedParams.put("master_timeout", masterTimeout); + String clusterManagerTimeout = randomTimeValue(); + setter.accept(clusterManagerTimeout); + expectedParams.put("cluster_manager_timeout", clusterManagerTimeout); } else { - expectedParams.put("master_timeout", MasterNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT.getStringRep()); + expectedParams.put("cluster_manager_timeout", MasterNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT.getStringRep()); } } - static void setRandomMasterTimeout(Consumer setter, TimeValue defaultTimeout, Map expectedParams) { + static void setRandomClusterManagerTimeout(Consumer setter, TimeValue defaultTimeout, Map expectedParams) { if (randomBoolean()) { - TimeValue masterTimeout = TimeValue.parseTimeValue(randomTimeValue(), "random_master_timeout"); - setter.accept(masterTimeout); - expectedParams.put("master_timeout", masterTimeout.getStringRep()); + TimeValue clusterManagerTimeout = TimeValue.parseTimeValue(randomTimeValue(), "random_master_timeout"); + setter.accept(clusterManagerTimeout); + expectedParams.put("cluster_manager_timeout", clusterManagerTimeout.getStringRep()); } else { - expectedParams.put("master_timeout", defaultTimeout.getStringRep()); + expectedParams.put("cluster_manager_timeout", defaultTimeout.getStringRep()); } } 
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotRequestConvertersTests.java index f18679127bf2b..c75f0cff171f2 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotRequestConvertersTests.java @@ -70,7 +70,7 @@ public void testGetRepositories() { StringBuilder endpoint = new StringBuilder("/_snapshot"); GetRepositoriesRequest getRepositoriesRequest = new GetRepositoriesRequest(); - RequestConvertersTests.setRandomMasterTimeout(getRepositoriesRequest, expectedParams); + RequestConvertersTests.setRandomClusterManagerTimeout(getRepositoriesRequest, expectedParams); RequestConvertersTests.setRandomLocal(getRepositoriesRequest::local, expectedParams); if (randomBoolean()) { @@ -121,7 +121,7 @@ public void testDeleteRepository() { DeleteRepositoryRequest deleteRepositoryRequest = new DeleteRepositoryRequest(); deleteRepositoryRequest.name(repository); - RequestConvertersTests.setRandomMasterTimeout(deleteRepositoryRequest, expectedParams); + RequestConvertersTests.setRandomClusterManagerTimeout(deleteRepositoryRequest, expectedParams); RequestConvertersTests.setRandomTimeout(deleteRepositoryRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); Request request = SnapshotRequestConverters.deleteRepository(deleteRepositoryRequest); @@ -137,7 +137,7 @@ public void testVerifyRepository() { String endpoint = "/_snapshot/" + repository + "/_verify"; VerifyRepositoryRequest verifyRepositoryRequest = new VerifyRepositoryRequest(repository); - RequestConvertersTests.setRandomMasterTimeout(verifyRepositoryRequest, expectedParams); + RequestConvertersTests.setRandomClusterManagerTimeout(verifyRepositoryRequest, expectedParams); RequestConvertersTests.setRandomTimeout(verifyRepositoryRequest::timeout, 
AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); Request request = SnapshotRequestConverters.verifyRepository(verifyRepositoryRequest); @@ -153,7 +153,7 @@ public void testCreateSnapshot() throws IOException { String endpoint = "/_snapshot/" + repository + "/" + snapshot; CreateSnapshotRequest createSnapshotRequest = new CreateSnapshotRequest(repository, snapshot); - RequestConvertersTests.setRandomMasterTimeout(createSnapshotRequest, expectedParams); + RequestConvertersTests.setRandomClusterManagerTimeout(createSnapshotRequest, expectedParams); Boolean waitForCompletion = randomBoolean(); createSnapshotRequest.waitForCompletion(waitForCompletion); @@ -177,7 +177,7 @@ public void testGetSnapshots() { GetSnapshotsRequest getSnapshotsRequest = new GetSnapshotsRequest(); getSnapshotsRequest.repository(repository); getSnapshotsRequest.snapshots(Arrays.asList(snapshot1, snapshot2).toArray(new String[0])); - RequestConvertersTests.setRandomMasterTimeout(getSnapshotsRequest, expectedParams); + RequestConvertersTests.setRandomClusterManagerTimeout(getSnapshotsRequest, expectedParams); if (randomBoolean()) { boolean ignoreUnavailable = randomBoolean(); @@ -209,7 +209,7 @@ public void testGetAllSnapshots() { String endpoint = String.format(Locale.ROOT, "/_snapshot/%s/_all", repository); GetSnapshotsRequest getSnapshotsRequest = new GetSnapshotsRequest(repository); - RequestConvertersTests.setRandomMasterTimeout(getSnapshotsRequest, expectedParams); + RequestConvertersTests.setRandomClusterManagerTimeout(getSnapshotsRequest, expectedParams); boolean ignoreUnavailable = randomBoolean(); getSnapshotsRequest.ignoreUnavailable(ignoreUnavailable); @@ -238,7 +238,7 @@ public void testSnapshotsStatus() { String endpoint = "/_snapshot/" + repository + "/" + snapshotNames.toString() + "/_status"; SnapshotsStatusRequest snapshotsStatusRequest = new SnapshotsStatusRequest(repository, snapshots); - RequestConvertersTests.setRandomMasterTimeout(snapshotsStatusRequest, 
expectedParams); + RequestConvertersTests.setRandomClusterManagerTimeout(snapshotsStatusRequest, expectedParams); snapshotsStatusRequest.ignoreUnavailable(ignoreUnavailable); expectedParams.put("ignore_unavailable", Boolean.toString(ignoreUnavailable)); @@ -256,7 +256,7 @@ public void testRestoreSnapshot() throws IOException { String endpoint = String.format(Locale.ROOT, "/_snapshot/%s/%s/_restore", repository, snapshot); RestoreSnapshotRequest restoreSnapshotRequest = new RestoreSnapshotRequest(repository, snapshot); - RequestConvertersTests.setRandomMasterTimeout(restoreSnapshotRequest, expectedParams); + RequestConvertersTests.setRandomClusterManagerTimeout(restoreSnapshotRequest, expectedParams); boolean waitForCompletion = randomBoolean(); restoreSnapshotRequest.waitForCompletion(waitForCompletion); expectedParams.put("wait_for_completion", Boolean.toString(waitForCompletion)); @@ -264,7 +264,7 @@ public void testRestoreSnapshot() throws IOException { if (randomBoolean()) { String timeout = randomTimeValue(); restoreSnapshotRequest.masterNodeTimeout(timeout); - expectedParams.put("master_timeout", timeout); + expectedParams.put("cluster_manager_timeout", timeout); } Request request = SnapshotRequestConverters.restoreSnapshot(restoreSnapshotRequest); @@ -284,7 +284,7 @@ public void testDeleteSnapshot() { DeleteSnapshotRequest deleteSnapshotRequest = new DeleteSnapshotRequest(); deleteSnapshotRequest.repository(repository); deleteSnapshotRequest.snapshots(snapshot); - RequestConvertersTests.setRandomMasterTimeout(deleteSnapshotRequest, expectedParams); + RequestConvertersTests.setRandomClusterManagerTimeout(deleteSnapshotRequest, expectedParams); Request request = SnapshotRequestConverters.deleteSnapshot(deleteSnapshotRequest); assertThat(request.getEndpoint(), equalTo(endpoint)); From b45bfc9afc5ae02978d578da543e9a2630e1965e Mon Sep 17 00:00:00 2001 From: Tianli Feng Date: Tue, 19 Apr 2022 13:39:42 -0700 Subject: [PATCH 102/653] Revert "Make 
High-Level-Rest-Client tests allow deprecation warning temporarily, during deprecation of request parameter 'master_timeout' (#2702)" (#2744) This reverts commit 6a2a33d1872850b04562164c39621698cb99d7b8. During the process of deprecating REST API request parameter master_timeout and adding alternative parameter cluster_manager_timeout, I made High-Level-Rest-Client tests allow deprecation warning temporarily, by changing the argument of `setStrictDeprecationMode()` to false when building `RestClient` for tests, in the above commit / PR https://github.com/opensearch-project/OpenSearch/pull/2702, This PR sets the High-Level-Rest-Client tests back to treating warning header as a failure. Signed-off-by: Tianli Feng --- .../java/org/opensearch/test/rest/OpenSearchRestTestCase.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java b/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java index f976b3619102a..9624a9d3d0554 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java @@ -824,8 +824,7 @@ protected String getProtocol() { protected RestClient buildClient(Settings settings, HttpHost[] hosts) throws IOException { RestClientBuilder builder = RestClient.builder(hosts); configureClient(builder, settings); - // TODO: set the method argument to 'true' after PR https://github.com/opensearch-project/OpenSearch/pull/2683 merged. - builder.setStrictDeprecationMode(false); + builder.setStrictDeprecationMode(true); return builder.build(); } From 6915df94c3a322a4fb7b6808e1f63a743a3af6be Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Wed, 20 Apr 2022 15:05:13 -0500 Subject: [PATCH 103/653] [Remove] remaining AllFieldMapper references (#3007) AllFieldMapper was deprecated in legacy 6x. 
The remaining references are removed, along with the field mapper and corresponding tests. Signed-off-by: Nicholas Walter Knize --- .../index/mapper/AllFieldMapper.java | 134 ------------------ .../indices/mapper/MapperRegistry.java | 10 +- .../index/mapper/AllFieldMapperTests.java | 63 -------- .../indices/IndicesModuleTests.java | 14 -- 4 files changed, 1 insertion(+), 220 deletions(-) delete mode 100644 server/src/main/java/org/opensearch/index/mapper/AllFieldMapper.java delete mode 100644 server/src/test/java/org/opensearch/index/mapper/AllFieldMapperTests.java diff --git a/server/src/main/java/org/opensearch/index/mapper/AllFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/AllFieldMapper.java deleted file mode 100644 index 634424d6f45a4..0000000000000 --- a/server/src/main/java/org/opensearch/index/mapper/AllFieldMapper.java +++ /dev/null @@ -1,134 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. 
See - * GitHub history for details. - */ - -package org.opensearch.index.mapper; - -import org.apache.lucene.document.FieldType; -import org.apache.lucene.index.IndexOptions; -import org.apache.lucene.search.MatchNoDocsQuery; -import org.apache.lucene.search.Query; -import org.opensearch.common.Explicit; -import org.opensearch.index.query.QueryShardContext; -import org.opensearch.search.lookup.SearchLookup; - -import java.util.Collections; -import java.util.List; - -/** - * Noop mapper that ensures that mappings created in 6x that explicitly disable the _all field - * can be restored in this version. - * - * TODO: Remove in 8 - */ -public class AllFieldMapper extends MetadataFieldMapper { - public static final String NAME = "_all"; - public static final String CONTENT_TYPE = "_all"; - - public static class Defaults { - public static final FieldType FIELD_TYPE = new FieldType(); - - static { - FIELD_TYPE.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS); - FIELD_TYPE.setTokenized(true); - FIELD_TYPE.freeze(); - } - } - - private static AllFieldMapper toType(FieldMapper in) { - return (AllFieldMapper) in; - } - - public static class Builder extends MetadataFieldMapper.Builder { - - private final Parameter> enabled = updateableBoolParam("enabled", m -> toType(m).enabled, false); - - public Builder() { - super(NAME); - } - - @Override - protected List> getParameters() { - return Collections.singletonList(enabled); - } - - @Override - public AllFieldMapper build(BuilderContext context) { - if (enabled.getValue().value()) { - throw new IllegalArgumentException("[_all] is disabled in this version."); - } - return new AllFieldMapper(enabled.getValue()); - } - } - - public static final TypeParser PARSER = new ConfigurableTypeParser( - c -> new AllFieldMapper(new Explicit<>(false, false)), - c -> new Builder() - ); - - static final class AllFieldType extends StringFieldType { - AllFieldType() { - super(NAME, false, false, false, TextSearchInfo.NONE, 
Collections.emptyMap()); - } - - @Override - public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { - throw new UnsupportedOperationException(); - } - - @Override - public String typeName() { - return CONTENT_TYPE; - } - - @Override - public Query existsQuery(QueryShardContext context) { - return new MatchNoDocsQuery(); - } - } - - private final Explicit enabled; - - private AllFieldMapper(Explicit enabled) { - super(new AllFieldType()); - this.enabled = enabled; - } - - @Override - protected String contentType() { - return CONTENT_TYPE; - } - - @Override - public ParametrizedFieldMapper.Builder getMergeBuilder() { - return new Builder().init(this); - } -} diff --git a/server/src/main/java/org/opensearch/indices/mapper/MapperRegistry.java b/server/src/main/java/org/opensearch/indices/mapper/MapperRegistry.java index d37f82c7a484f..f56b2f98f0f6e 100644 --- a/server/src/main/java/org/opensearch/indices/mapper/MapperRegistry.java +++ b/server/src/main/java/org/opensearch/indices/mapper/MapperRegistry.java @@ -32,9 +32,7 @@ package org.opensearch.indices.mapper; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; -import org.opensearch.index.mapper.AllFieldMapper; import org.opensearch.index.mapper.Mapper; import org.opensearch.index.mapper.MetadataFieldMapper; import org.opensearch.plugins.MapperPlugin; @@ -52,7 +50,6 @@ public final class MapperRegistry { private final Map mapperParsers; private final Map metadataMapperParsers; - private final Map metadataMapperParsers6x; private final Function> fieldFilter; public MapperRegistry( @@ -62,11 +59,6 @@ public MapperRegistry( ) { this.mapperParsers = Collections.unmodifiableMap(new LinkedHashMap<>(mapperParsers)); this.metadataMapperParsers = Collections.unmodifiableMap(new LinkedHashMap<>(metadataMapperParsers)); - // add the _all field mapper for indices created in 6x - Map metadata6x = new LinkedHashMap<>(); - metadata6x.put(AllFieldMapper.NAME, 
AllFieldMapper.PARSER); - metadata6x.putAll(metadataMapperParsers); - this.metadataMapperParsers6x = Collections.unmodifiableMap(metadata6x); this.fieldFilter = fieldFilter; } @@ -83,7 +75,7 @@ public Map getMapperParsers() { * returned map uses the name of the field as a key. */ public Map getMetadataMapperParsers(Version indexCreatedVersion) { - return indexCreatedVersion.onOrAfter(LegacyESVersion.V_7_0_0) ? metadataMapperParsers : metadataMapperParsers6x; + return metadataMapperParsers; } /** diff --git a/server/src/test/java/org/opensearch/index/mapper/AllFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/AllFieldMapperTests.java deleted file mode 100644 index 625cfbb81f8bc..0000000000000 --- a/server/src/test/java/org/opensearch/index/mapper/AllFieldMapperTests.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
- */ - -package org.opensearch.index.mapper; - -import org.opensearch.common.Strings; -import org.opensearch.common.compress.CompressedXContent; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.index.IndexService; -import org.opensearch.index.mapper.MapperService.MergeReason; -import org.opensearch.test.OpenSearchSingleNodeTestCase; - -public class AllFieldMapperTests extends OpenSearchSingleNodeTestCase { - - @Override - protected boolean forbidPrivateIndexSettings() { - return false; - } - - public void testUpdateDefaultSearchAnalyzer() throws Exception { - IndexService indexService = createIndex( - "test", - Settings.builder() - .put("index.analysis.analyzer.default_search.type", "custom") - .put("index.analysis.analyzer.default_search.tokenizer", "standard") - .build() - ); - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc").endObject().endObject()); - indexService.mapperService().merge("_doc", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); - assertEquals(mapping, indexService.mapperService().documentMapper().mapping().toString()); - } - -} diff --git a/server/src/test/java/org/opensearch/indices/IndicesModuleTests.java b/server/src/test/java/org/opensearch/indices/IndicesModuleTests.java index ef78b24be4c08..c2298f60e4a2b 100644 --- a/server/src/test/java/org/opensearch/indices/IndicesModuleTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesModuleTests.java @@ -33,7 +33,6 @@ package org.opensearch.indices; import org.opensearch.Version; -import org.opensearch.index.mapper.AllFieldMapper; import org.opensearch.index.mapper.DataStreamFieldMapper; import org.opensearch.index.mapper.FieldNamesFieldMapper; import org.opensearch.index.mapper.IdFieldMapper; @@ -101,19 +100,6 @@ public Map getMetadataMappers() { SeqNoFieldMapper.NAME, FieldNamesFieldMapper.NAME }; - private static String[] 
EXPECTED_METADATA_FIELDS_6x = new String[] { - AllFieldMapper.NAME, - IgnoredFieldMapper.NAME, - IdFieldMapper.NAME, - RoutingFieldMapper.NAME, - IndexFieldMapper.NAME, - DataStreamFieldMapper.NAME, - SourceFieldMapper.NAME, - TypeFieldMapper.NAME, - VersionFieldMapper.NAME, - SeqNoFieldMapper.NAME, - FieldNamesFieldMapper.NAME }; - public void testBuiltinMappers() { IndicesModule module = new IndicesModule(Collections.emptyList()); { From 3b7e6547572b23eda7bb7313850d4a3cb049a9be Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Wed, 20 Apr 2022 17:14:02 -0400 Subject: [PATCH 104/653] Remove JavaVersion, use builtin Runtime.Version to deal with runtime versions (#3006) Signed-off-by: Andriy Redko --- .../org/opensearch/bootstrap/JarHell.java | 17 +- .../org/opensearch/bootstrap/JavaVersion.java | 146 ------------------ .../opensearch/bootstrap/JarHellTests.java | 11 +- .../core/internal/net/NetUtilsTests.java | 5 +- .../common/ssl/SslConfigurationLoader.java | 40 ++--- .../ingest/common/DateFormatTests.java | 9 -- .../org/opensearch/painless/ArrayTests.java | 7 +- .../netty4/SimpleNetty4TransportTests.java | 6 +- .../AzureDiscoveryClusterFormationTests.java | 12 +- .../ingest/attachment/TikaImpl.java | 9 -- ...eCloudStorageBlobStoreRepositoryTests.java | 18 --- ...CloudStorageBlobContainerRetriesTests.java | 17 -- .../repositories/hdfs/HdfsTests.java | 2 - .../nio/SimpleNioTransportTests.java | 6 +- .../aggregations/bucket/DateHistogramIT.java | 10 +- .../search/query/SearchQueryIT.java | 2 - .../org/opensearch/bootstrap/Bootstrap.java | 10 -- .../opensearch/bootstrap/BootstrapChecks.java | 57 ------- .../bootstrap/BootstrapChecksTests.java | 76 --------- .../bootstrap/JavaVersionTests.java | 87 ++++------- .../common/LocalTimeOffsetTests.java | 4 +- .../joda/JavaJodaTimeDuellingTests.java | 22 --- .../common/time/DateFormattersTests.java | 6 - .../common/time/JavaDateMathParserTests.java | 6 - .../index/mapper/DateFieldMapperTests.java | 3 - 
.../opensearch/monitor/jvm/JvmInfoTests.java | 4 +- .../plugins/IndexStorePluginTests.java | 42 ++--- .../plugins/PluginsServiceTests.java | 2 +- .../test/OpenSearchIntegTestCase.java | 4 +- .../opensearch/test/OpenSearchTestCase.java | 13 +- .../junit/listeners/ReproduceInfoPrinter.java | 3 +- .../opensearch/test/rest/yaml/Features.java | 4 +- 32 files changed, 92 insertions(+), 568 deletions(-) delete mode 100644 libs/core/src/main/java/org/opensearch/bootstrap/JavaVersion.java diff --git a/libs/core/src/main/java/org/opensearch/bootstrap/JarHell.java b/libs/core/src/main/java/org/opensearch/bootstrap/JarHell.java index 843a6b982d7ff..d945697b21c0b 100644 --- a/libs/core/src/main/java/org/opensearch/bootstrap/JarHell.java +++ b/libs/core/src/main/java/org/opensearch/bootstrap/JarHell.java @@ -36,6 +36,7 @@ import org.opensearch.common.io.PathUtils; import java.io.IOException; +import java.lang.Runtime.Version; import java.net.MalformedURLException; import java.net.URISyntaxException; import java.net.URL; @@ -250,7 +251,9 @@ private static void checkManifest(Manifest manifest, Path jar) { } public static void checkVersionFormat(String targetVersion) { - if (!JavaVersion.isValid(targetVersion)) { + try { + Version.parse(targetVersion); + } catch (final IllegalArgumentException ex) { throw new IllegalStateException( String.format( Locale.ROOT, @@ -267,16 +270,10 @@ public static void checkVersionFormat(String targetVersion) { * required by {@code resource} is compatible with the current installation. 
*/ public static void checkJavaVersion(String resource, String targetVersion) { - JavaVersion version = JavaVersion.parse(targetVersion); - if (JavaVersion.current().compareTo(version) < 0) { + Version version = Version.parse(targetVersion); + if (Runtime.version().compareTo(version) < 0) { throw new IllegalStateException( - String.format( - Locale.ROOT, - "%s requires Java %s:, your system: %s", - resource, - targetVersion, - JavaVersion.current().toString() - ) + String.format(Locale.ROOT, "%s requires Java %s:, your system: %s", resource, targetVersion, Runtime.version().toString()) ); } } diff --git a/libs/core/src/main/java/org/opensearch/bootstrap/JavaVersion.java b/libs/core/src/main/java/org/opensearch/bootstrap/JavaVersion.java deleted file mode 100644 index 236563bf8bd89..0000000000000 --- a/libs/core/src/main/java/org/opensearch/bootstrap/JavaVersion.java +++ /dev/null @@ -1,146 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. 
See - * GitHub history for details. - */ - -package org.opensearch.bootstrap; - -import java.math.BigInteger; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Objects; -import java.util.stream.Collectors; - -public class JavaVersion implements Comparable { - - private final List version; - private final String prePart; - - public List getVersion() { - return version; - } - - private JavaVersion(List version, String prePart) { - this.prePart = prePart; - if (version.size() >= 2 && version.get(0) == 1 && version.get(1) == 8) { - // for Java 8 there is ambiguity since both 1.8 and 8 are supported, - // so we rewrite the former to the latter - version = new ArrayList<>(version.subList(1, version.size())); - } - this.version = Collections.unmodifiableList(version); - } - - /** - * Parses the Java version as it can be retrieved as the value of java.version or - * java.specification.version according to JEP 223. - * - * @param value The version String - */ - public static JavaVersion parse(String value) { - Objects.requireNonNull(value); - String prePart = null; - if (!isValid(value)) { - throw new IllegalArgumentException("Java version string [" + value + "] could not be parsed."); - } - List version = new ArrayList<>(); - String[] parts = value.split("-"); - String[] numericComponents; - if (parts.length == 1) { - numericComponents = value.split("\\."); - } else if (parts.length == 2) { - numericComponents = parts[0].split("\\."); - prePart = parts[1]; - } else { - throw new IllegalArgumentException("Java version string [" + value + "] could not be parsed."); - } - - for (String component : numericComponents) { - version.add(Integer.valueOf(component)); - } - return new JavaVersion(version, prePart); - } - - public static boolean isValid(String value) { - return value.matches("^0*[0-9]+(\\.[0-9]+)*(-[a-zA-Z0-9]+)?$"); - } - - private static final JavaVersion CURRENT = 
parse(System.getProperty("java.specification.version")); - - public static JavaVersion current() { - return CURRENT; - } - - @Override - public int compareTo(JavaVersion o) { - int len = Math.max(version.size(), o.version.size()); - for (int i = 0; i < len; i++) { - int d = (i < version.size() ? version.get(i) : 0); - int s = (i < o.version.size() ? o.version.get(i) : 0); - if (s < d) return 1; - if (s > d) return -1; - } - if (prePart != null && o.prePart == null) { - return -1; - } else if (prePart == null && o.prePart != null) { - return 1; - } else if (prePart != null && o.prePart != null) { - return comparePrePart(prePart, o.prePart); - } - return 0; - } - - private int comparePrePart(String prePart, String otherPrePart) { - if (prePart.matches("\\d+")) { - return otherPrePart.matches("\\d+") ? (new BigInteger(prePart)).compareTo(new BigInteger(otherPrePart)) : -1; - } else { - return otherPrePart.matches("\\d+") ? 1 : prePart.compareTo(otherPrePart); - } - } - - @Override - public boolean equals(Object o) { - if (o == null || o.getClass() != getClass()) { - return false; - } - return compareTo((JavaVersion) o) == 0; - } - - @Override - public int hashCode() { - return version.hashCode(); - } - - @Override - public String toString() { - final String versionString = version.stream().map(v -> Integer.toString(v)).collect(Collectors.joining(".")); - return prePart != null ? 
versionString + "-" + prePart : versionString; - } -} diff --git a/libs/core/src/test/java/org/opensearch/bootstrap/JarHellTests.java b/libs/core/src/test/java/org/opensearch/bootstrap/JarHellTests.java index d0e411ae8e3c2..57f5f393ce49f 100644 --- a/libs/core/src/test/java/org/opensearch/bootstrap/JarHellTests.java +++ b/libs/core/src/test/java/org/opensearch/bootstrap/JarHellTests.java @@ -37,6 +37,7 @@ import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; +import java.lang.Runtime.Version; import java.net.URL; import java.nio.file.Files; import java.nio.file.Path; @@ -156,12 +157,12 @@ public void testXmlBeansLeniency() throws Exception { public void testRequiredJDKVersionTooOld() throws Exception { Path dir = createTempDir(); - List current = JavaVersion.current().getVersion(); + List current = Runtime.version().version(); List target = new ArrayList<>(current.size()); for (int i = 0; i < current.size(); i++) { target.add(current.get(i) + 1); } - JavaVersion targetVersion = JavaVersion.parse(Strings.collectionToDelimitedString(target, ".")); + Version targetVersion = Version.parse(Strings.collectionToDelimitedString(target, ".")); Manifest manifest = new Manifest(); Attributes attributes = manifest.getMainAttributes(); @@ -173,7 +174,7 @@ public void testRequiredJDKVersionTooOld() throws Exception { fail("did not get expected exception"); } catch (IllegalStateException e) { assertTrue(e.getMessage().contains("requires Java " + targetVersion.toString())); - assertTrue(e.getMessage().contains("your system: " + JavaVersion.current().toString())); + assertTrue(e.getMessage().contains("your system: " + Runtime.version().toString())); } } @@ -209,7 +210,7 @@ public void testRequiredJDKVersionIsOK() throws Exception { } public void testValidVersions() { - String[] versions = new String[] { "1.7", "1.7.0", "0.1.7", "1.7.0.80" }; + String[] versions = new String[] { "12-ea", "13.0.2.3-ea", "14-something", "11.0.2-21002", "11.0.14.1+1", "17.0.2+8" }; 
for (String version : versions) { try { JarHell.checkVersionFormat(version); @@ -220,7 +221,7 @@ public void testValidVersions() { } public void testInvalidVersions() { - String[] versions = new String[] { "", "1.7.0_80", "1.7." }; + String[] versions = new String[] { "", "1.7.0_80", "1.7.", "11.2+something-else" }; for (String version : versions) { try { JarHell.checkVersionFormat(version); diff --git a/libs/core/src/test/java/org/opensearch/core/internal/net/NetUtilsTests.java b/libs/core/src/test/java/org/opensearch/core/internal/net/NetUtilsTests.java index 91236b4f8fb84..448c39731def5 100644 --- a/libs/core/src/test/java/org/opensearch/core/internal/net/NetUtilsTests.java +++ b/libs/core/src/test/java/org/opensearch/core/internal/net/NetUtilsTests.java @@ -33,15 +33,16 @@ package org.opensearch.core.internal.net; import org.apache.lucene.util.Constants; -import org.opensearch.bootstrap.JavaVersion; import org.opensearch.core.internal.io.IOUtils; import org.opensearch.test.OpenSearchTestCase; +import java.lang.Runtime.Version; + public class NetUtilsTests extends OpenSearchTestCase { public void testExtendedSocketOptions() { assumeTrue("JDK possibly not supported", Constants.JVM_NAME.contains("HotSpot") || Constants.JVM_NAME.contains("OpenJDK")); - assumeTrue("JDK version not supported", JavaVersion.current().compareTo(JavaVersion.parse("11")) >= 0); + assumeTrue("JDK version not supported", Runtime.version().compareTo(Version.parse("11")) >= 0); assumeTrue("Platform possibly not supported", IOUtils.LINUX || IOUtils.MAC_OS_X); assertNotNull(NetUtils.getTcpKeepIdleSocketOptionOrNull()); assertNotNull(NetUtils.getTcpKeepIntervalSocketOptionOrNull()); diff --git a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslConfigurationLoader.java b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslConfigurationLoader.java index 6f2670d285e84..2cd9f4f31fc7f 100644 --- a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslConfigurationLoader.java +++ 
b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslConfigurationLoader.java @@ -32,8 +32,6 @@ package org.opensearch.common.ssl; -import org.opensearch.bootstrap.JavaVersion; - import javax.crypto.Cipher; import javax.net.ssl.KeyManagerFactory; import javax.net.ssl.TrustManagerFactory; @@ -361,7 +359,6 @@ private List resolveListSetting(String key, Function parser, L private static List loadDefaultCiphers() { final boolean has256BitAES = has256BitAES(); - final boolean useGCM = JavaVersion.current().compareTo(JavaVersion.parse("11")) >= 0; final boolean tlsV13Supported = DEFAULT_PROTOCOLS.contains("TLSv1.3"); List ciphers = new ArrayList<>(); if (tlsV13Supported) { // TLSv1.3 cipher has PFS, AEAD, hardware support @@ -370,19 +367,18 @@ private static List loadDefaultCiphers() { } ciphers.add("TLS_AES_128_GCM_SHA256"); } - if (useGCM) { // PFS, AEAD, hardware support - if (has256BitAES) { - ciphers.addAll( - Arrays.asList( - "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", - "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", - "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", - "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256" - ) - ); - } else { - ciphers.addAll(Arrays.asList("TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256")); - } + // use GCM: PFS, AEAD, hardware support + if (has256BitAES) { + ciphers.addAll( + Arrays.asList( + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256" + ) + ); + } else { + ciphers.addAll(Arrays.asList("TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256")); } // PFS, hardware support @@ -410,13 +406,11 @@ private static List loadDefaultCiphers() { ); } - // AEAD, hardware support - if (useGCM) { - if (has256BitAES) { - ciphers.addAll(Arrays.asList("TLS_RSA_WITH_AES_256_GCM_SHA384", "TLS_RSA_WITH_AES_128_GCM_SHA256")); - } else { - ciphers.add("TLS_RSA_WITH_AES_128_GCM_SHA256"); 
- } + // use GCM: AEAD, hardware support + if (has256BitAES) { + ciphers.addAll(Arrays.asList("TLS_RSA_WITH_AES_256_GCM_SHA384", "TLS_RSA_WITH_AES_128_GCM_SHA256")); + } else { + ciphers.add("TLS_RSA_WITH_AES_128_GCM_SHA256"); } // hardware support diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/DateFormatTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/DateFormatTests.java index 951b93deb6e8b..04900fe6f7496 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/DateFormatTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/DateFormatTests.java @@ -32,7 +32,6 @@ package org.opensearch.ingest.common; -import org.opensearch.bootstrap.JavaVersion; import org.opensearch.common.time.DateFormatter; import org.opensearch.common.time.DateUtils; import org.opensearch.test.OpenSearchTestCase; @@ -96,10 +95,6 @@ public void testParseJavaDefaultYear() { } public void testParseWeekBased() { - assumeFalse( - "won't work in jdk8 " + "because SPI mechanism is not looking at classpath - needs ISOCalendarDataProvider in jre's ext/libs", - JavaVersion.current().equals(JavaVersion.parse("8")) - ); String format = randomFrom("YYYY-ww"); ZoneId timezone = DateUtils.of("Europe/Amsterdam"); Function javaFunction = DateFormat.Java.getFunction(format, timezone, Locale.ROOT); @@ -108,10 +103,6 @@ public void testParseWeekBased() { } public void testParseWeekBasedWithLocale() { - assumeFalse( - "won't work in jdk8 " + "because SPI mechanism is not looking at classpath - needs ISOCalendarDataProvider in jre's ext/libs", - JavaVersion.current().equals(JavaVersion.parse("8")) - ); String format = randomFrom("YYYY-ww"); ZoneId timezone = DateUtils.of("Europe/Amsterdam"); Function javaFunction = DateFormat.Java.getFunction(format, timezone, Locale.US); diff --git a/modules/lang-painless/src/test/java/org/opensearch/painless/ArrayTests.java 
b/modules/lang-painless/src/test/java/org/opensearch/painless/ArrayTests.java index 7563ab87fd5e6..0b83a4c558ef6 100644 --- a/modules/lang-painless/src/test/java/org/opensearch/painless/ArrayTests.java +++ b/modules/lang-painless/src/test/java/org/opensearch/painless/ArrayTests.java @@ -33,7 +33,6 @@ package org.opensearch.painless; import org.apache.lucene.util.Constants; -import org.opensearch.bootstrap.JavaVersion; import org.hamcrest.Matcher; import java.lang.invoke.MethodHandle; @@ -55,11 +54,7 @@ protected String valueCtorCall(String valueType, int size) { @Override protected Matcher outOfBoundsExceptionMessageMatcher(int index, int size) { - if (JavaVersion.current().compareTo(JavaVersion.parse("11")) < 0) { - return equalTo(Integer.toString(index)); - } else { - return equalTo("Index " + Integer.toString(index) + " out of bounds for length " + Integer.toString(size)); - } + return equalTo("Index " + Integer.toString(index) + " out of bounds for length " + Integer.toString(size)); } public void testArrayLengthHelper() throws Throwable { diff --git a/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/SimpleNetty4TransportTests.java b/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/SimpleNetty4TransportTests.java index b690ba9e35e35..848819ee5b840 100644 --- a/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/SimpleNetty4TransportTests.java +++ b/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/SimpleNetty4TransportTests.java @@ -34,7 +34,6 @@ import org.opensearch.Version; import org.opensearch.action.ActionListener; -import org.opensearch.bootstrap.JavaVersion; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.network.NetworkService; @@ -121,10 +120,7 @@ public void testConnectException() throws UnknownHostException { } public void testDefaultKeepAliveSettings() throws IOException { 
- assumeTrue( - "setting default keepalive options not supported on this platform", - (IOUtils.LINUX || IOUtils.MAC_OS_X) && JavaVersion.current().compareTo(JavaVersion.parse("11")) >= 0 - ); + assumeTrue("setting default keepalive options not supported on this platform", (IOUtils.LINUX || IOUtils.MAC_OS_X)); try ( MockTransportService serviceC = buildService("TS_C", Version.CURRENT, Settings.EMPTY); MockTransportService serviceD = buildService("TS_D", Version.CURRENT, Settings.EMPTY) diff --git a/plugins/discovery-azure-classic/src/internalClusterTest/java/org/opensearch/discovery/azure/classic/AzureDiscoveryClusterFormationTests.java b/plugins/discovery-azure-classic/src/internalClusterTest/java/org/opensearch/discovery/azure/classic/AzureDiscoveryClusterFormationTests.java index c4f533fd2ee36..570aa98cd9f55 100644 --- a/plugins/discovery-azure-classic/src/internalClusterTest/java/org/opensearch/discovery/azure/classic/AzureDiscoveryClusterFormationTests.java +++ b/plugins/discovery-azure-classic/src/internalClusterTest/java/org/opensearch/discovery/azure/classic/AzureDiscoveryClusterFormationTests.java @@ -38,7 +38,6 @@ import com.sun.net.httpserver.HttpsConfigurator; import com.sun.net.httpserver.HttpsServer; import org.apache.logging.log4j.LogManager; -import org.opensearch.bootstrap.JavaVersion; import org.opensearch.cloud.azure.classic.management.AzureComputeService; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.io.FileSystemUtils; @@ -67,6 +66,7 @@ import java.io.InputStream; import java.io.OutputStream; import java.io.StringWriter; +import java.lang.Runtime.Version; import java.net.InetAddress; import java.net.InetSocketAddress; import java.nio.charset.StandardCharsets; @@ -295,15 +295,13 @@ private static SSLContext getSSLContext() throws Exception { * 12.0.1 so we pin to TLSv1.2 when running on an earlier JDK */ private static String getProtocol() { - if (JavaVersion.current().compareTo(JavaVersion.parse("11")) < 0) { - 
return "TLS"; - } else if (JavaVersion.current().compareTo(JavaVersion.parse("12")) < 0) { + if (Runtime.version().compareTo(Version.parse("12")) < 0) { return "TLSv1.2"; } else { - JavaVersion full = AccessController.doPrivileged( - (PrivilegedAction) () -> JavaVersion.parse(System.getProperty("java.version")) + Version full = AccessController.doPrivileged( + (PrivilegedAction) () -> Version.parse(System.getProperty("java.version")) ); - if (full.compareTo(JavaVersion.parse("12.0.1")) < 0) { + if (full.compareTo(Version.parse("12.0.1")) < 0) { return "TLSv1.2"; } } diff --git a/plugins/ingest-attachment/src/main/java/org/opensearch/ingest/attachment/TikaImpl.java b/plugins/ingest-attachment/src/main/java/org/opensearch/ingest/attachment/TikaImpl.java index 2451eee8e984b..aad490924d311 100644 --- a/plugins/ingest-attachment/src/main/java/org/opensearch/ingest/attachment/TikaImpl.java +++ b/plugins/ingest-attachment/src/main/java/org/opensearch/ingest/attachment/TikaImpl.java @@ -42,7 +42,6 @@ import org.opensearch.SpecialPermission; import org.opensearch.bootstrap.FilePermissionUtils; import org.opensearch.bootstrap.JarHell; -import org.opensearch.bootstrap.JavaVersion; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.io.PathUtils; @@ -181,14 +180,6 @@ static PermissionCollection getRestrictedPermissions() { perms.add(new RuntimePermission("accessClassInPackage.sun.java2d.cmm.kcms")); // xmlbeans, use by POI, needs to get the context classloader perms.add(new RuntimePermission("getClassLoader")); - // ZipFile needs accessDeclaredMembers on JDK 10; cf. 
https://bugs.openjdk.java.net/browse/JDK-8187485 - if (JavaVersion.current().compareTo(JavaVersion.parse("10")) >= 0) { - if (JavaVersion.current().compareTo(JavaVersion.parse("11")) < 0) { - // TODO remove this and from plugin-security.policy when JDK 11 is the only one we support - // this is needed pre 11, but it's fixed in 11 : https://bugs.openjdk.java.net/browse/JDK-8187485 - perms.add(new RuntimePermission("accessDeclaredMembers")); - } - } perms.setReadOnly(); return perms; } diff --git a/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java b/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java index 4bc59b6ae6553..274a416d57431 100644 --- a/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java +++ b/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java @@ -44,7 +44,6 @@ import org.apache.lucene.util.BytesRefBuilder; import org.opensearch.action.ActionRunnable; import org.opensearch.action.support.PlainActionFuture; -import org.opensearch.bootstrap.JavaVersion; import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.SuppressForbidden; @@ -67,7 +66,6 @@ import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.repositories.blobstore.OpenSearchMockAPIBasedRepositoryIntegTestCase; -import org.junit.BeforeClass; import org.threeten.bp.Duration; import java.io.IOException; @@ -88,22 +86,6 @@ @SuppressForbidden(reason = "this test uses a HttpServer to emulate a Google Cloud Storage endpoint") public class GoogleCloudStorageBlobStoreRepositoryTests extends OpenSearchMockAPIBasedRepositoryIntegTestCase { - - public static void assumeNotJava8() 
{ - assumeFalse( - "This test is flaky on jdk8 - we suspect a JDK bug to trigger some assertion in the HttpServer implementation used " - + "to emulate the server side logic of Google Cloud Storage. See https://bugs.openjdk.java.net/browse/JDK-8180754, " - + "https://github.com/elastic/elasticsearch/pull/51933 and https://github.com/elastic/elasticsearch/issues/52906 " - + "for more background on this issue.", - JavaVersion.current().equals(JavaVersion.parse("8")) - ); - } - - @BeforeClass - public static void skipJava8() { - assumeNotJava8(); - } - @Override protected String repositoryType() { return GoogleCloudStorageRepository.TYPE; diff --git a/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java b/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java index 6a589126a9466..616a1ae9feb4f 100644 --- a/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java +++ b/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java @@ -39,7 +39,6 @@ import fixture.gcs.FakeOAuth2HttpHandler; import org.apache.http.HttpStatus; -import org.opensearch.bootstrap.JavaVersion; import org.opensearch.common.Nullable; import org.opensearch.common.Strings; import org.opensearch.common.SuppressForbidden; @@ -62,7 +61,6 @@ import org.opensearch.repositories.blobstore.OpenSearchMockAPIBasedRepositoryIntegTestCase; import org.opensearch.rest.RestStatus; import org.opensearch.rest.RestUtils; -import org.junit.BeforeClass; import org.threeten.bp.Duration; import java.io.IOException; @@ -107,21 +105,6 @@ private String httpServerUrl() { return "http://" + InetAddresses.toUriString(address.getAddress()) + ":" + address.getPort(); } - public static void assumeNotJava8() { - assumeFalse( - "This test is flaky on jdk8 - we suspect a JDK bug to trigger 
some assertion in the HttpServer implementation used " - + "to emulate the server side logic of Google Cloud Storage. See https://bugs.openjdk.java.net/browse/JDK-8180754, " - + "https://github.com/elastic/elasticsearch/pull/51933 and https://github.com/elastic/elasticsearch/issues/52906 " - + "for more background on this issue.", - JavaVersion.current().equals(JavaVersion.parse("8")) - ); - } - - @BeforeClass - public static void skipJava8() { - assumeNotJava8(); - } - @Override protected String downloadStorageEndpoint(String blob) { return "/download/storage/v1/b/bucket/o/" + blob; diff --git a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsTests.java b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsTests.java index 46d97f41b604f..d46d0b2092d2a 100644 --- a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsTests.java +++ b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsTests.java @@ -36,7 +36,6 @@ import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.opensearch.action.support.master.AcknowledgedResponse; -import org.opensearch.bootstrap.JavaVersion; import org.opensearch.client.Client; import org.opensearch.cluster.ClusterState; import org.opensearch.common.settings.Settings; @@ -63,7 +62,6 @@ protected Collection> getPlugins() { } public void testSimpleWorkflow() { - assumeFalse("https://github.com/elastic/elasticsearch/issues/31498", JavaVersion.current().equals(JavaVersion.parse("11"))); Client client = client(); AcknowledgedResponse putRepositoryResponse = client.admin() diff --git a/plugins/transport-nio/src/test/java/org/opensearch/transport/nio/SimpleNioTransportTests.java b/plugins/transport-nio/src/test/java/org/opensearch/transport/nio/SimpleNioTransportTests.java index c8b9fa63383bf..230f89bbafe9f 100644 --- a/plugins/transport-nio/src/test/java/org/opensearch/transport/nio/SimpleNioTransportTests.java 
+++ b/plugins/transport-nio/src/test/java/org/opensearch/transport/nio/SimpleNioTransportTests.java @@ -34,7 +34,6 @@ import org.opensearch.Version; import org.opensearch.action.ActionListener; -import org.opensearch.bootstrap.JavaVersion; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.network.NetworkService; @@ -122,10 +121,7 @@ public void testConnectException() throws UnknownHostException { } public void testDefaultKeepAliveSettings() throws IOException { - assumeTrue( - "setting default keepalive options not supported on this platform", - (IOUtils.LINUX || IOUtils.MAC_OS_X) && JavaVersion.current().compareTo(JavaVersion.parse("11")) >= 0 - ); + assumeTrue("setting default keepalive options not supported on this platform", (IOUtils.LINUX || IOUtils.MAC_OS_X)); try ( MockTransportService serviceC = buildService("TS_C", Version.CURRENT, Settings.EMPTY); MockTransportService serviceD = buildService("TS_D", Version.CURRENT, Settings.EMPTY) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramIT.java index 782bcde39ce8d..617c5745c9bba 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramIT.java @@ -35,7 +35,6 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; -import org.opensearch.bootstrap.JavaVersion; import org.opensearch.common.Strings; import org.opensearch.common.settings.Settings; import org.opensearch.common.time.DateFormatter; @@ -386,9 +385,6 @@ public void testSingleValued_timeZone_epoch() throws Exception { ZonedDateTime expectedKey = 
keyIterator.next(); String bucketKey = bucket.getKeyAsString(); String expectedBucketName = Long.toString(expectedKey.toInstant().toEpochMilli() / millisDivider); - if (JavaVersion.current().getVersion().get(0) == 8 && bucket.getKeyAsString().endsWith(".0")) { - expectedBucketName = expectedBucketName + ".0"; - } assertThat(bucketKey, equalTo(expectedBucketName)); assertThat(((ZonedDateTime) bucket.getKey()), equalTo(expectedKey)); assertThat(bucket.getDocCount(), equalTo(1L)); @@ -1509,11 +1505,7 @@ public void testRewriteTimeZone_EpochMillisFormat() throws InterruptedException, assertSearchResponse(response); Histogram histo = response.getAggregations().get("histo"); assertThat(histo.getBuckets().size(), equalTo(1)); - if (JavaVersion.current().getVersion().get(0) == 8 && histo.getBuckets().get(0).getKeyAsString().endsWith(".0")) { - assertThat(histo.getBuckets().get(0).getKeyAsString(), equalTo("1477954800000.0")); - } else { - assertThat(histo.getBuckets().get(0).getKeyAsString(), equalTo("1477954800000")); - } + assertThat(histo.getBuckets().get(0).getKeyAsString(), equalTo("1477954800000")); assertThat(histo.getBuckets().get(0).getDocCount(), equalTo(1L)); response = client().prepareSearch(index) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java index fed5561c1df64..c51043f02174d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java @@ -44,7 +44,6 @@ import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.SearchType; -import org.opensearch.bootstrap.JavaVersion; import org.opensearch.common.document.DocumentField; import org.opensearch.common.lucene.search.SpanBooleanQueryRewriteWithMaxClause; import 
org.opensearch.common.regex.Regex; @@ -1860,7 +1859,6 @@ public void testRangeQueryWithTimeZone() throws Exception { * on "Configuring IDEs And Running Tests". */ public void testRangeQueryWithLocaleMapping() throws Exception { - assumeTrue("need java 9 for testing ", JavaVersion.current().compareTo(JavaVersion.parse("9")) >= 0); assert ("SPI,COMPAT".equals(System.getProperty("java.locale.providers"))) : "`-Djava.locale.providers=SPI,COMPAT` needs to be set"; assertAcked( diff --git a/server/src/main/java/org/opensearch/bootstrap/Bootstrap.java b/server/src/main/java/org/opensearch/bootstrap/Bootstrap.java index 58ca3cdf78033..c0c0251538d01 100644 --- a/server/src/main/java/org/opensearch/bootstrap/Bootstrap.java +++ b/server/src/main/java/org/opensearch/bootstrap/Bootstrap.java @@ -48,7 +48,6 @@ import org.opensearch.common.PidFile; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.inject.CreationException; -import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.logging.LogConfigurator; import org.opensearch.common.logging.Loggers; import org.opensearch.common.network.IfConfig; @@ -78,7 +77,6 @@ import java.security.NoSuchAlgorithmException; import java.util.Collections; import java.util.List; -import java.util.Locale; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -374,14 +372,6 @@ static void init(final boolean foreground, final Path pidFile, final boolean qui } catch (IOException e) { throw new BootstrapException(e); } - if (JavaVersion.current().compareTo(JavaVersion.parse("11")) < 0) { - final String message = String.format( - Locale.ROOT, - "future versions of OpenSearch will require Java 11; " + "your Java version from [%s] does not meet this requirement", - System.getProperty("java.home") - ); - DeprecationLogger.getLogger(Bootstrap.class).deprecate("java_version_11_required", message); - } if (environment.pidFile() != null) { try { 
PidFile.create(environment.pidFile(), true); diff --git a/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java b/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java index 79019a73c69e3..6b75f2306431c 100644 --- a/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java +++ b/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java @@ -59,8 +59,6 @@ import java.util.List; import java.util.Locale; import java.util.function.Predicate; -import java.util.regex.Matcher; -import java.util.regex.Pattern; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -224,7 +222,6 @@ static List checks() { checks.add(new OnErrorCheck()); checks.add(new OnOutOfMemoryErrorCheck()); checks.add(new EarlyAccessCheck()); - checks.add(new G1GCCheck()); checks.add(new AllPermissionCheck()); checks.add(new DiscoveryConfiguredCheck()); return Collections.unmodifiableList(checks); @@ -683,60 +680,6 @@ String javaVersion() { } - /** - * Bootstrap check for versions of HotSpot that are known to have issues that can lead to index corruption when G1GC is enabled. 
- */ - static class G1GCCheck implements BootstrapCheck { - - @Override - public BootstrapCheckResult check(BootstrapContext context) { - if ("Oracle Corporation".equals(jvmVendor()) && isJava8() && isG1GCEnabled()) { - final String jvmVersion = jvmVersion(); - // HotSpot versions on Java 8 match this regular expression; note that this changes with Java 9 after JEP-223 - final Pattern pattern = Pattern.compile("(\\d+)\\.(\\d+)-b\\d+"); - final Matcher matcher = pattern.matcher(jvmVersion); - final boolean matches = matcher.matches(); - assert matches : jvmVersion; - final int major = Integer.parseInt(matcher.group(1)); - final int update = Integer.parseInt(matcher.group(2)); - // HotSpot versions for Java 8 have major version 25, the bad versions are all versions prior to update 40 - if (major == 25 && update < 40) { - final String message = String.format( - Locale.ROOT, - "JVM version [%s] can cause data corruption when used with G1GC; upgrade to at least Java 8u40", - jvmVersion - ); - return BootstrapCheckResult.failure(message); - } - } - return BootstrapCheckResult.success(); - } - - // visible for testing - String jvmVendor() { - return Constants.JVM_VENDOR; - } - - // visible for testing - boolean isG1GCEnabled() { - assert "Oracle Corporation".equals(jvmVendor()); - return JvmInfo.jvmInfo().useG1GC().equals("true"); - } - - // visible for testing - String jvmVersion() { - assert "Oracle Corporation".equals(jvmVendor()); - return Constants.JVM_VERSION; - } - - // visible for testing - boolean isJava8() { - assert "Oracle Corporation".equals(jvmVendor()); - return JavaVersion.current().equals(JavaVersion.parse("1.8")); - } - - } - static class AllPermissionCheck implements BootstrapCheck { @Override diff --git a/server/src/test/java/org/opensearch/bootstrap/BootstrapChecksTests.java b/server/src/test/java/org/opensearch/bootstrap/BootstrapChecksTests.java index c59ca1dd60dc7..88f2047ffaa0f 100644 --- 
a/server/src/test/java/org/opensearch/bootstrap/BootstrapChecksTests.java +++ b/server/src/test/java/org/opensearch/bootstrap/BootstrapChecksTests.java @@ -52,7 +52,6 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; -import java.util.Locale; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; @@ -654,81 +653,6 @@ String javaVersion() { } - public void testG1GCCheck() throws NodeValidationException { - final AtomicBoolean isG1GCEnabled = new AtomicBoolean(true); - final AtomicBoolean isJava8 = new AtomicBoolean(true); - final AtomicReference jvmVersion = new AtomicReference<>( - String.format(Locale.ROOT, "25.%d-b%d", randomIntBetween(0, 39), randomIntBetween(1, 128)) - ); - final BootstrapChecks.G1GCCheck g1GCCheck = new BootstrapChecks.G1GCCheck() { - - @Override - String jvmVendor() { - return "Oracle Corporation"; - } - - @Override - boolean isG1GCEnabled() { - return isG1GCEnabled.get(); - } - - @Override - String jvmVersion() { - return jvmVersion.get(); - } - - @Override - boolean isJava8() { - return isJava8.get(); - } - - }; - - final NodeValidationException e = expectThrows( - NodeValidationException.class, - () -> BootstrapChecks.check(emptyContext, true, Collections.singletonList(g1GCCheck)) - ); - assertThat( - e.getMessage(), - containsString( - "JVM version [" + jvmVersion.get() + "] can cause data corruption when used with G1GC; upgrade to at least Java 8u40" - ) - ); - - // if G1GC is disabled, nothing should happen - isG1GCEnabled.set(false); - BootstrapChecks.check(emptyContext, true, Collections.singletonList(g1GCCheck)); - - // if on or after update 40, nothing should happen independent of whether or not G1GC is enabled - isG1GCEnabled.set(randomBoolean()); - jvmVersion.set(String.format(Locale.ROOT, "25.%d-b%d", randomIntBetween(40, 112), randomIntBetween(1, 128))); - BootstrapChecks.check(emptyContext, true, 
Collections.singletonList(g1GCCheck)); - - final BootstrapChecks.G1GCCheck nonOracleCheck = new BootstrapChecks.G1GCCheck() { - - @Override - String jvmVendor() { - return randomAlphaOfLength(8); - } - - }; - - // if not on an Oracle JVM, nothing should happen - BootstrapChecks.check(emptyContext, true, Collections.singletonList(nonOracleCheck)); - - final BootstrapChecks.G1GCCheck nonJava8Check = new BootstrapChecks.G1GCCheck() { - - @Override - boolean isJava8() { - return false; - } - - }; - - // if not Java 8, nothing should happen - BootstrapChecks.check(emptyContext, true, Collections.singletonList(nonJava8Check)); - } - public void testAllPermissionCheck() throws NodeValidationException { final AtomicBoolean isAllPermissionGranted = new AtomicBoolean(true); final BootstrapChecks.AllPermissionCheck allPermissionCheck = new BootstrapChecks.AllPermissionCheck() { diff --git a/server/src/test/java/org/opensearch/bootstrap/JavaVersionTests.java b/server/src/test/java/org/opensearch/bootstrap/JavaVersionTests.java index 24d78e0986342..b651afd253d11 100644 --- a/server/src/test/java/org/opensearch/bootstrap/JavaVersionTests.java +++ b/server/src/test/java/org/opensearch/bootstrap/JavaVersionTests.java @@ -34,6 +34,7 @@ import org.opensearch.test.OpenSearchTestCase; +import java.lang.Runtime.Version; import java.util.List; import static org.hamcrest.CoreMatchers.equalTo; @@ -41,29 +42,22 @@ public class JavaVersionTests extends OpenSearchTestCase { public void testParse() { - JavaVersion javaVersion = JavaVersion.parse("1.7.0"); - List version = javaVersion.getVersion(); - assertThat(version.size(), is(3)); - assertThat(version.get(0), is(1)); - assertThat(version.get(1), is(7)); - assertThat(version.get(2), is(0)); - - JavaVersion javaVersionEarlyAccess = JavaVersion.parse("14.0.1-ea"); - List version14 = javaVersionEarlyAccess.getVersion(); + Version javaVersionEarlyAccess = Version.parse("14.0.1-ea"); + List version14 = javaVersionEarlyAccess.version(); 
assertThat(version14.size(), is(3)); assertThat(version14.get(0), is(14)); assertThat(version14.get(1), is(0)); assertThat(version14.get(2), is(1)); - JavaVersion javaVersionOtherPrePart = JavaVersion.parse("13.2.4-somethingElseHere"); - List version13 = javaVersionOtherPrePart.getVersion(); + Version javaVersionOtherPrePart = Version.parse("13.2.4-somethingElseHere"); + List version13 = javaVersionOtherPrePart.version(); assertThat(version13.size(), is(3)); assertThat(version13.get(0), is(13)); assertThat(version13.get(1), is(2)); assertThat(version13.get(2), is(4)); - JavaVersion javaVersionNumericPrePart = JavaVersion.parse("13.2.4-something124443"); - List version11 = javaVersionNumericPrePart.getVersion(); + Version javaVersionNumericPrePart = Version.parse("13.2.4-something124443"); + List version11 = javaVersionNumericPrePart.version(); assertThat(version11.size(), is(3)); assertThat(version11.get(0), is(13)); assertThat(version11.get(1), is(2)); @@ -71,51 +65,36 @@ public void testParse() { } public void testParseInvalidVersions() { - final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> JavaVersion.parse("11.2-something-else")); - assertThat(e.getMessage(), equalTo("Java version string [11.2-something-else] could not be parsed.")); - final IllegalArgumentException e1 = expectThrows(IllegalArgumentException.class, () -> JavaVersion.parse("11.0.")); - assertThat(e1.getMessage(), equalTo("Java version string [11.0.] 
could not be parsed.")); - final IllegalArgumentException e2 = expectThrows(IllegalArgumentException.class, () -> JavaVersion.parse("11.a.3")); - assertThat(e2.getMessage(), equalTo("Java version string [11.a.3] could not be parsed.")); + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> Version.parse("11.2+something-else")); + assertThat(e.getMessage(), equalTo("Invalid version string: '11.2+something-else'")); + final IllegalArgumentException e1 = expectThrows(IllegalArgumentException.class, () -> Version.parse("11.0.")); + assertThat(e1.getMessage(), equalTo("Invalid version string: '11.0.'")); + final IllegalArgumentException e2 = expectThrows(IllegalArgumentException.class, () -> Version.parse("11.a.3")); + assertThat(e2.getMessage(), equalTo("Invalid version string: '11.a.3'")); } public void testToString() { - JavaVersion javaVersion170 = JavaVersion.parse("1.7.0"); - assertThat(javaVersion170.toString(), is("1.7.0")); - JavaVersion javaVersion9 = JavaVersion.parse("9"); + Version javaVersion9 = Version.parse("9"); assertThat(javaVersion9.toString(), is("9")); - JavaVersion javaVersion11 = JavaVersion.parse("11.0.1-something09random"); + Version javaVersion11 = Version.parse("11.0.1-something09random"); assertThat(javaVersion11.toString(), is("11.0.1-something09random")); - JavaVersion javaVersion12 = JavaVersion.parse("12.2-2019"); + Version javaVersion12 = Version.parse("12.2-2019"); assertThat(javaVersion12.toString(), is("12.2-2019")); - JavaVersion javaVersion13ea = JavaVersion.parse("13.1-ea"); + Version javaVersion13ea = Version.parse("13.1-ea"); assertThat(javaVersion13ea.toString(), is("13.1-ea")); } public void testCompare() { - JavaVersion onePointSix = JavaVersion.parse("1.6"); - JavaVersion onePointSeven = JavaVersion.parse("1.7"); - JavaVersion onePointSevenPointZero = JavaVersion.parse("1.7.0"); - JavaVersion onePointSevenPointOne = JavaVersion.parse("1.7.1"); - JavaVersion onePointSevenPointTwo = 
JavaVersion.parse("1.7.2"); - JavaVersion onePointSevenPointOnePointOne = JavaVersion.parse("1.7.1.1"); - JavaVersion onePointSevenPointTwoPointOne = JavaVersion.parse("1.7.2.1"); - JavaVersion thirteen = JavaVersion.parse("13"); - JavaVersion thirteenPointTwoPointOne = JavaVersion.parse("13.2.1"); - JavaVersion thirteenPointTwoPointOneTwoThousand = JavaVersion.parse("13.2.1-2000"); - JavaVersion thirteenPointTwoPointOneThreeThousand = JavaVersion.parse("13.2.1-3000"); - JavaVersion thirteenPointTwoPointOneA = JavaVersion.parse("13.2.1-aaa"); - JavaVersion thirteenPointTwoPointOneB = JavaVersion.parse("13.2.1-bbb"); - JavaVersion fourteen = JavaVersion.parse("14"); - JavaVersion fourteenPointTwoPointOne = JavaVersion.parse("14.2.1"); - JavaVersion fourteenPointTwoPointOneEarlyAccess = JavaVersion.parse("14.2.1-ea"); - - assertTrue(onePointSix.compareTo(onePointSeven) < 0); - assertTrue(onePointSeven.compareTo(onePointSix) > 0); - assertTrue(onePointSix.compareTo(onePointSix) == 0); - assertTrue(onePointSeven.compareTo(onePointSevenPointZero) == 0); - assertTrue(onePointSevenPointOnePointOne.compareTo(onePointSevenPointOne) > 0); - assertTrue(onePointSevenPointTwo.compareTo(onePointSevenPointTwoPointOne) < 0); + Version thirteen = Version.parse("13"); + Version thirteenPointTwoPointOne = Version.parse("13.2.1"); + Version thirteenPointTwoPointOneTwoThousand = Version.parse("13.2.1-2000"); + Version thirteenPointTwoPointOneThreeThousand = Version.parse("13.2.1-3000"); + Version thirteenPointTwoPointOneA = Version.parse("13.2.1-aaa"); + Version thirteenPointTwoPointOneB = Version.parse("13.2.1-bbb"); + Version fourteen = Version.parse("14"); + Version fourteenPointTwoPointOne = Version.parse("14.2.1"); + Version fourteenPointTwoPointOneEarlyAccess = Version.parse("14.2.1-ea"); + assertTrue(thirteen.compareTo(thirteenPointTwoPointOne) < 0); assertTrue(thirteen.compareTo(fourteen) < 0); 
assertTrue(thirteenPointTwoPointOneThreeThousand.compareTo(thirteenPointTwoPointOneTwoThousand) > 0); @@ -129,20 +108,16 @@ public void testCompare() { } public void testValidVersions() { - String[] versions = new String[] { "1.7", "1.7.0", "0.1.7", "1.7.0.80", "12-ea", "13.0.2.3-ea", "14-something", "11.0.2-21002" }; + String[] versions = new String[] { "12-ea", "13.0.2.3-ea", "14-something", "11.0.2-21002", "11.0.14.1+1", "17.0.2+8" }; for (String version : versions) { - assertTrue(JavaVersion.isValid(version)); + assertNotNull(Version.parse(version)); } } public void testInvalidVersions() { - String[] versions = new String[] { "", "1.7.0_80", "1.7.", "11.2-something-else" }; + String[] versions = new String[] { "", "1.7.0_80", "1.7.", "11.2+something-else" }; for (String version : versions) { - assertFalse(JavaVersion.isValid(version)); + assertThrows(IllegalArgumentException.class, () -> Version.parse(version)); } } - - public void testJava8Compat() { - assertEquals(JavaVersion.parse("1.8"), JavaVersion.parse("8")); - } } diff --git a/server/src/test/java/org/opensearch/common/LocalTimeOffsetTests.java b/server/src/test/java/org/opensearch/common/LocalTimeOffsetTests.java index 12810241e3904..b032e27397f2d 100644 --- a/server/src/test/java/org/opensearch/common/LocalTimeOffsetTests.java +++ b/server/src/test/java/org/opensearch/common/LocalTimeOffsetTests.java @@ -32,12 +32,12 @@ package org.opensearch.common; -import org.opensearch.bootstrap.JavaVersion; import org.opensearch.common.LocalTimeOffset.Gap; import org.opensearch.common.LocalTimeOffset.Overlap; import org.opensearch.common.time.DateFormatter; import org.opensearch.test.OpenSearchTestCase; +import java.lang.Runtime.Version; import java.time.Instant; import java.time.ZoneId; import java.time.ZoneOffset; @@ -278,7 +278,7 @@ public void testKnownMovesBackToPreviousDay() { assertKnownMovesBacktoPreviousDay("America/Moncton", "2005-10-29T03:01:00"); assertKnownMovesBacktoPreviousDay("America/St_Johns", 
"2010-11-07T02:31:00"); assertKnownMovesBacktoPreviousDay("Canada/Newfoundland", "2010-11-07T02:31:00"); - if (JavaVersion.current().compareTo(JavaVersion.parse("11")) > 0) { + if (Runtime.version().compareTo(Version.parse("11")) > 0) { // Added in java 12 assertKnownMovesBacktoPreviousDay("Pacific/Guam", "1969-01-25T13:01:00"); assertKnownMovesBacktoPreviousDay("Pacific/Saipan", "1969-01-25T13:01:00"); diff --git a/server/src/test/java/org/opensearch/common/joda/JavaJodaTimeDuellingTests.java b/server/src/test/java/org/opensearch/common/joda/JavaJodaTimeDuellingTests.java index 30fcf4bb32989..94ddfd7e7f100 100644 --- a/server/src/test/java/org/opensearch/common/joda/JavaJodaTimeDuellingTests.java +++ b/server/src/test/java/org/opensearch/common/joda/JavaJodaTimeDuellingTests.java @@ -33,7 +33,6 @@ package org.opensearch.common.joda; import org.opensearch.OpenSearchParseException; -import org.opensearch.bootstrap.JavaVersion; import org.opensearch.common.time.DateFormatter; import org.opensearch.common.time.DateFormatters; import org.opensearch.common.time.DateMathParser; @@ -43,7 +42,6 @@ import org.joda.time.DateTimeZone; import org.joda.time.format.DateTimeFormat; import org.joda.time.format.ISODateTimeFormat; -import org.junit.BeforeClass; import java.time.ZoneId; import java.time.ZoneOffset; @@ -62,18 +60,6 @@ protected boolean enableWarningsCheck() { return false; } - @BeforeClass - public static void checkJvmProperties() { - boolean runtimeJdk8 = JavaVersion.current().getVersion().get(0) == 8; - assert (runtimeJdk8 && ("SPI,JRE".equals(System.getProperty("java.locale.providers")))) - || (false == runtimeJdk8 && ("SPI,COMPAT".equals(System.getProperty("java.locale.providers")))) - : "`-Djava.locale.providers` needs to be set"; - assumeFalse( - "won't work in jdk8 " + "because SPI mechanism is not looking at classpath - needs ISOCalendarDataProvider in jre's ext/libs", - runtimeJdk8 - ); - } - public void testTimezoneParsing() { /** this testcase won't work in 
joda. See comment in {@link #testPartialTimeParsing()} * assertSameDateAs("2016-11-30T+01", "strict_date_optional_time", "strict_date_optional_time"); @@ -906,14 +892,6 @@ private void assertSamePrinterOutput( String jodaTimeOut = jodaDateFormatter.formatJoda(jodaDate); assertThat(jodaDate.getMillis(), is(javaDate.toInstant().toEpochMilli())); - - if (JavaVersion.current().getVersion().get(0) == 8 - && javaTimeOut.endsWith(".0") - && (format.equals("epoch_second") || format.equals("epoch_millis"))) { - // java 8 has a bug in DateTimeFormatter usage when printing dates that rely on isSupportedBy for fields, which is - // what we use for epoch time. This change accounts for that bug. It should be removed when java 8 support is removed - jodaTimeOut += ".0"; - } String message = String.format( Locale.ROOT, "expected string representation to be equal for format [%s]: joda [%s], java [%s]", diff --git a/server/src/test/java/org/opensearch/common/time/DateFormattersTests.java b/server/src/test/java/org/opensearch/common/time/DateFormattersTests.java index 1e57f9fe88d9c..681daf1755890 100644 --- a/server/src/test/java/org/opensearch/common/time/DateFormattersTests.java +++ b/server/src/test/java/org/opensearch/common/time/DateFormattersTests.java @@ -32,7 +32,6 @@ package org.opensearch.common.time; -import org.opensearch.bootstrap.JavaVersion; import org.opensearch.common.joda.Joda; import org.opensearch.test.OpenSearchTestCase; @@ -56,11 +55,6 @@ public class DateFormattersTests extends OpenSearchTestCase { public void testWeekBasedDates() { - assumeFalse( - "won't work in jdk8 " + "because SPI mechanism is not looking at classpath - needs ISOCalendarDataProvider in jre's ext/libs", - JavaVersion.current().equals(JavaVersion.parse("8")) - ); - // as per WeekFields.ISO first week starts on Monday and has minimum 4 days DateFormatter dateFormatter = DateFormatters.forPattern("YYYY-ww"); diff --git 
a/server/src/test/java/org/opensearch/common/time/JavaDateMathParserTests.java b/server/src/test/java/org/opensearch/common/time/JavaDateMathParserTests.java index a26a3a298b360..504741f56efed 100644 --- a/server/src/test/java/org/opensearch/common/time/JavaDateMathParserTests.java +++ b/server/src/test/java/org/opensearch/common/time/JavaDateMathParserTests.java @@ -33,7 +33,6 @@ package org.opensearch.common.time; import org.opensearch.OpenSearchParseException; -import org.opensearch.bootstrap.JavaVersion; import org.opensearch.test.OpenSearchTestCase; import java.time.Instant; @@ -111,11 +110,6 @@ public void testOverridingLocaleOrZoneAndCompositeRoundUpParser() { } public void testWeekDates() { - assumeFalse( - "won't work in jdk8 " + "because SPI mechanism is not looking at classpath - needs ISOCalendarDataProvider in jre's ext/libs", - JavaVersion.current().equals(JavaVersion.parse("8")) - ); - DateFormatter formatter = DateFormatter.forPattern("YYYY-ww"); assertDateMathEquals(formatter.toDateMathParser(), "2016-01", "2016-01-04T23:59:59.999Z", 0, true, ZoneOffset.UTC); diff --git a/server/src/test/java/org/opensearch/index/mapper/DateFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/DateFieldMapperTests.java index cdc5c9567e581..918b86761fe86 100644 --- a/server/src/test/java/org/opensearch/index/mapper/DateFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/DateFieldMapperTests.java @@ -34,7 +34,6 @@ import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.IndexableField; -import org.opensearch.bootstrap.JavaVersion; import org.opensearch.common.collect.List; import org.opensearch.common.time.DateFormatter; import org.opensearch.common.xcontent.XContentBuilder; @@ -186,8 +185,6 @@ public void testChangeFormat() throws IOException { } public void testChangeLocale() throws IOException { - assumeTrue("need java 9 for testing ", JavaVersion.current().compareTo(JavaVersion.parse("9")) >= 0); - 
DocumentMapper mapper = createDocumentMapper( fieldMapping(b -> b.field("type", "date").field("format", "E, d MMM yyyy HH:mm:ss Z").field("locale", "de")) ); diff --git a/server/src/test/java/org/opensearch/monitor/jvm/JvmInfoTests.java b/server/src/test/java/org/opensearch/monitor/jvm/JvmInfoTests.java index d86971adcc992..3d02a4797497e 100644 --- a/server/src/test/java/org/opensearch/monitor/jvm/JvmInfoTests.java +++ b/server/src/test/java/org/opensearch/monitor/jvm/JvmInfoTests.java @@ -33,7 +33,6 @@ package org.opensearch.monitor.jvm; import org.apache.lucene.util.Constants; -import org.opensearch.bootstrap.JavaVersion; import org.opensearch.test.OpenSearchTestCase; public class JvmInfoTests extends OpenSearchTestCase { @@ -53,14 +52,13 @@ private boolean isG1GCEnabled() { final String argline = System.getProperty("tests.jvm.argline"); final boolean g1GCEnabled = flagIsEnabled(argline, "UseG1GC"); // for JDK 9 the default collector when no collector is specified is G1 GC - final boolean versionIsAtLeastJava9 = JavaVersion.current().compareTo(JavaVersion.parse("9")) >= 0; final boolean noOtherCollectorSpecified = argline == null || (!flagIsEnabled(argline, "UseParNewGC") && !flagIsEnabled(argline, "UseParallelGC") && !flagIsEnabled(argline, "UseParallelOldGC") && !flagIsEnabled(argline, "UseSerialGC") && !flagIsEnabled(argline, "UseConcMarkSweepGC")); - return g1GCEnabled || (versionIsAtLeastJava9 && noOtherCollectorSpecified); + return g1GCEnabled || noOtherCollectorSpecified; } private boolean flagIsEnabled(String argline, String flag) { diff --git a/server/src/test/java/org/opensearch/plugins/IndexStorePluginTests.java b/server/src/test/java/org/opensearch/plugins/IndexStorePluginTests.java index 165e3aaf3f171..5fd76fc90568c 100644 --- a/server/src/test/java/org/opensearch/plugins/IndexStorePluginTests.java +++ b/server/src/test/java/org/opensearch/plugins/IndexStorePluginTests.java @@ -32,7 +32,6 @@ package org.opensearch.plugins; -import 
org.opensearch.bootstrap.JavaVersion; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.settings.Settings; @@ -134,25 +133,16 @@ public void testDuplicateIndexStoreFactories() { IllegalStateException.class, () -> new MockNode(settings, Arrays.asList(BarStorePlugin.class, FooStorePlugin.class)) ); - if (JavaVersion.current().compareTo(JavaVersion.parse("9")) >= 0) { - assertThat( - e, - hasToString( - matches( - "java.lang.IllegalStateException: Duplicate key store \\(attempted merging values " - + "org.opensearch.index.store.FsDirectoryFactory@[\\w\\d]+ " - + "and org.opensearch.index.store.FsDirectoryFactory@[\\w\\d]+\\)" - ) - ) - ); - } else { - assertThat( - e, - hasToString( - matches("java.lang.IllegalStateException: Duplicate key org.opensearch.index.store.FsDirectoryFactory@[\\w\\d]+") + assertThat( + e, + hasToString( + matches( + "java.lang.IllegalStateException: Duplicate key store \\(attempted merging values " + + "org.opensearch.index.store.FsDirectoryFactory@[\\w\\d]+ " + + "and org.opensearch.index.store.FsDirectoryFactory@[\\w\\d]+\\)" ) - ); - } + ) + ); } public void testDuplicateIndexStoreRecoveryStateFactories() { @@ -161,18 +151,6 @@ public void testDuplicateIndexStoreRecoveryStateFactories() { IllegalStateException.class, () -> new MockNode(settings, Arrays.asList(FooCustomRecoveryStore.class, BarCustomRecoveryStore.class)) ); - if (JavaVersion.current().compareTo(JavaVersion.parse("9")) >= 0) { - assertThat(e.getMessage(), containsString("Duplicate key recovery-type")); - } else { - assertThat( - e, - hasToString( - matches( - "java.lang.IllegalStateException: Duplicate key " - + "org.opensearch.plugins.IndexStorePluginTests\\$RecoveryFactory@[\\w\\d]+" - ) - ) - ); - } + assertThat(e.getMessage(), containsString("Duplicate key recovery-type")); } } diff --git a/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java 
b/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java index d22776cf01f0e..de8adf3539fe6 100644 --- a/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java +++ b/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java @@ -723,7 +723,7 @@ public void testIncompatibleJavaVersion() throws Exception { "desc", "1.0", Version.CURRENT, - "1000000.0", + "1000000", "FakePlugin", Collections.emptyList(), false diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index 1c09fb2ff8c04..8a3a5bcb5bb50 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -70,7 +70,6 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.bootstrap.JavaVersion; import org.opensearch.client.AdminClient; import org.opensearch.client.Client; import org.opensearch.client.ClusterAdminClient; @@ -166,6 +165,7 @@ import org.junit.BeforeClass; import java.io.IOException; +import java.lang.Runtime.Version; import java.lang.annotation.Annotation; import java.lang.annotation.ElementType; import java.lang.annotation.Inherited; @@ -2403,7 +2403,7 @@ protected boolean willSufferDebian8MemoryProblem() { final boolean anyDebian8Nodes = response.getNodes() .stream() .anyMatch(ni -> ni.getInfo(OsInfo.class).getPrettyName().equals("Debian GNU/Linux 8 (jessie)")); - boolean java15Plus = JavaVersion.current().compareTo(JavaVersion.parse("15")) >= 0; + boolean java15Plus = Runtime.version().compareTo(Version.parse("15")) >= 0; return anyDebian8Nodes && java15Plus == false; } } diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java 
b/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java index 96698036fca55..ecf0de521f36a 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java @@ -62,7 +62,6 @@ import org.apache.lucene.tests.util.TimeUnits; import org.opensearch.Version; import org.opensearch.bootstrap.BootstrapForTesting; -import org.opensearch.bootstrap.JavaVersion; import org.opensearch.client.Requests; import org.opensearch.cluster.ClusterModule; import org.opensearch.cluster.metadata.IndexMetadata; @@ -967,17 +966,7 @@ public static TimeZone randomTimeZone() { * generate a random TimeZone from the ones available in java.time */ public static ZoneId randomZone() { - // work around a JDK bug, where java 8 cannot parse the timezone GMT0 back into a temporal accessor - // see https://bugs.openjdk.java.net/browse/JDK-8138664 - if (JavaVersion.current().getVersion().get(0) == 8) { - ZoneId timeZone; - do { - timeZone = ZoneId.of(randomJodaAndJavaSupportedTimezone(JAVA_ZONE_IDS)); - } while (timeZone.equals(ZoneId.of("GMT0"))); - return timeZone; - } else { - return ZoneId.of(randomJodaAndJavaSupportedTimezone(JAVA_ZONE_IDS)); - } + return ZoneId.of(randomJodaAndJavaSupportedTimezone(JAVA_ZONE_IDS)); } /** diff --git a/test/framework/src/main/java/org/opensearch/test/junit/listeners/ReproduceInfoPrinter.java b/test/framework/src/main/java/org/opensearch/test/junit/listeners/ReproduceInfoPrinter.java index 3d5a906e50836..b2d0705b937e2 100644 --- a/test/framework/src/main/java/org/opensearch/test/junit/listeners/ReproduceInfoPrinter.java +++ b/test/framework/src/main/java/org/opensearch/test/junit/listeners/ReproduceInfoPrinter.java @@ -35,7 +35,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.Constants; -import org.opensearch.bootstrap.JavaVersion; import org.opensearch.common.Strings; import 
org.opensearch.common.SuppressForbidden; import org.opensearch.test.OpenSearchIntegTestCase; @@ -192,7 +191,7 @@ private ReproduceErrorMessageBuilder appendESProperties() { } appendOpt("tests.locale", Locale.getDefault().toLanguageTag()); appendOpt("tests.timezone", TimeZone.getDefault().getID()); - appendOpt("runtime.java", Integer.toString(JavaVersion.current().getVersion().get(0))); + appendOpt("runtime.java", Integer.toString(Runtime.version().version().get(0))); appendOpt(OpenSearchTestCase.FIPS_SYSPROP, System.getProperty(OpenSearchTestCase.FIPS_SYSPROP)); return this; } diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/Features.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/Features.java index 6e59e35dcb0b4..10fb1e52259a9 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/Features.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/Features.java @@ -32,8 +32,6 @@ package org.opensearch.test.rest.yaml; -import org.opensearch.bootstrap.JavaVersion; - import java.util.Arrays; import java.util.List; @@ -86,7 +84,7 @@ public static boolean areAllSupported(List features) { } private static boolean isSupported(String feature) { - if (feature.equals(SPI_ON_CLASSPATH_SINCE_JDK_9) && JavaVersion.current().compareTo(JavaVersion.parse("9")) >= 0) { + if (feature.equals(SPI_ON_CLASSPATH_SINCE_JDK_9)) { return true; } return SUPPORTED.contains(feature); From c7c410a06311ea4b1ad61ca1a214ec89c898335a Mon Sep 17 00:00:00 2001 From: Kartik Ganesh Date: Wed, 20 Apr 2022 15:02:25 -0700 Subject: [PATCH 105/653] Refactoring GatedAutoCloseable and moving RecoveryState.Timer (#2965) * Refactoring GatedAutoCloseable to AutoCloseableRefCounted This is a part of the process of merging our feature branch - feature/segment-replication - back into main by re-PRing our changes from the feature branch. GatedAutoCloseable currently wraps a subclass of RefCounted. 
Segment replication adds another subclass, but this also wraps RefCounted. Both subclasses have the same shutdown hook - decRef. This change makes the superclass less generic to increase code convergence. The breakdown of the plan to merge segment-replication to main is detailed in #2355 Segment replication design proposal - #2229 Signed-off-by: Kartik Ganesh * Minor refactoring in RecoveryState This change makes two minor updates to RecoveryState - 1. The readRecoveryState API is removed because it can be replaced by an invocation of the constructor 2. The class members of the Timer inner class are changed to private, and accesses are only through the public APIs Signed-off-by: Kartik Ganesh * Update RecoveryTargetTests to test Timer subclasses deterministically This change removes the use of RandomBoolean in testing the Timer classes and creates a dedicated unit test for each. The common test logic is shared via a private method. Signed-off-by: Kartik Ganesh * Move the RecoveryState.Timer class to a top-level class This will eventually be reused across both replication use-cases - peer recovery and segment replication. Signed-off-by: Kartik Ganesh * Further update of timer tests in RecoveryTargetTests Removes a non-deterministic code path around stopping the timer, and avoids assertThat (deprecated) Signed-off-by: Kartik Ganesh * Rename to ReplicationTimer Signed-off-by: Kartik Ganesh * Remove RecoveryTargetTests assert on a running timer Trying to serialize and deserialize a running Timer instance, and then checking for equality leads to flaky test failures when the ser/deser takes time. 
Signed-off-by: Kartik Ganesh --- .../recovery/TransportRecoveryAction.java | 2 +- ...able.java => AutoCloseableRefCounted.java} | 15 +- .../common/concurrent/GatedCloseable.java | 2 +- .../recovery/PeerRecoveryTargetService.java | 7 +- .../recovery/RecoveriesCollection.java | 6 +- .../indices/recovery/RecoveryState.java | 96 ++----------- .../replication/common/ReplicationTimer.java | 97 +++++++++++++ ...java => AutoCloseableRefCountedTests.java} | 21 +-- .../indices/recovery/RecoveryTargetTests.java | 129 +++++++++--------- .../action/cat/RestRecoveryActionTests.java | 3 +- 10 files changed, 206 insertions(+), 172 deletions(-) rename server/src/main/java/org/opensearch/common/concurrent/{GatedAutoCloseable.java => AutoCloseableRefCounted.java} (57%) create mode 100644 server/src/main/java/org/opensearch/indices/replication/common/ReplicationTimer.java rename server/src/test/java/org/opensearch/common/concurrent/{GatedAutoCloseableTests.java => AutoCloseableRefCountedTests.java} (50%) diff --git a/server/src/main/java/org/opensearch/action/admin/indices/recovery/TransportRecoveryAction.java b/server/src/main/java/org/opensearch/action/admin/indices/recovery/TransportRecoveryAction.java index dd5ae31c01e56..7c3666e44f093 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/recovery/TransportRecoveryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/recovery/TransportRecoveryAction.java @@ -87,7 +87,7 @@ public TransportRecoveryAction( @Override protected RecoveryState readShardResult(StreamInput in) throws IOException { - return RecoveryState.readRecoveryState(in); + return new RecoveryState(in); } @Override diff --git a/server/src/main/java/org/opensearch/common/concurrent/GatedAutoCloseable.java b/server/src/main/java/org/opensearch/common/concurrent/AutoCloseableRefCounted.java similarity index 57% rename from server/src/main/java/org/opensearch/common/concurrent/GatedAutoCloseable.java rename to 
server/src/main/java/org/opensearch/common/concurrent/AutoCloseableRefCounted.java index cb819c0320e91..795d352542881 100644 --- a/server/src/main/java/org/opensearch/common/concurrent/GatedAutoCloseable.java +++ b/server/src/main/java/org/opensearch/common/concurrent/AutoCloseableRefCounted.java @@ -13,20 +13,19 @@ package org.opensearch.common.concurrent; +import org.opensearch.common.util.concurrent.RefCounted; + /** - * Decorator class that wraps an object reference with a {@link Runnable} that is - * invoked when {@link #close()} is called. The internal {@link OneWayGate} instance ensures - * that this is invoked only once. See also {@link GatedCloseable} + * Adapter class that enables a {@link RefCounted} implementation to function like an {@link AutoCloseable}. + * The {@link #close()} API invokes {@link RefCounted#decRef()} and ensures idempotency using a {@link OneWayGate}. */ -public class GatedAutoCloseable implements AutoCloseable { +public class AutoCloseableRefCounted implements AutoCloseable { private final T ref; - private final Runnable onClose; private final OneWayGate gate; - public GatedAutoCloseable(T ref, Runnable onClose) { + public AutoCloseableRefCounted(T ref) { this.ref = ref; - this.onClose = onClose; gate = new OneWayGate(); } @@ -37,7 +36,7 @@ public T get() { @Override public void close() { if (gate.close()) { - onClose.run(); + ref.decRef(); } } } diff --git a/server/src/main/java/org/opensearch/common/concurrent/GatedCloseable.java b/server/src/main/java/org/opensearch/common/concurrent/GatedCloseable.java index d98e4cca8d561..467b5e4cfb3ea 100644 --- a/server/src/main/java/org/opensearch/common/concurrent/GatedCloseable.java +++ b/server/src/main/java/org/opensearch/common/concurrent/GatedCloseable.java @@ -21,7 +21,7 @@ /** * Decorator class that wraps an object reference with a {@link CheckedRunnable} that is * invoked when {@link #close()} is called. 
The internal {@link OneWayGate} instance ensures - * that this is invoked only once. See also {@link GatedAutoCloseable} + * that this is invoked only once. See also {@link AutoCloseableRefCounted} */ public class GatedCloseable implements Closeable { diff --git a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java index d7c3421b1de93..9348988f8edcc 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java @@ -70,6 +70,7 @@ import org.opensearch.index.translog.Translog; import org.opensearch.index.translog.TranslogCorruptedException; import org.opensearch.indices.recovery.RecoveriesCollection.RecoveryRef; +import org.opensearch.indices.replication.common.ReplicationTimer; import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.ConnectTransportException; @@ -215,7 +216,7 @@ private void doRecovery(final long recoveryId, final StartRecoveryRequest preExi final String actionName; final TransportRequest requestToSend; final StartRecoveryRequest startRequest; - final RecoveryState.Timer timer; + final ReplicationTimer timer; try (RecoveryRef recoveryRef = onGoingRecoveries.getRecovery(recoveryId)) { if (recoveryRef == null) { logger.trace("not running recovery with id [{}] - can not find it (probably finished)", recoveryId); @@ -622,9 +623,9 @@ private class RecoveryResponseHandler implements TransportResponseHandler { + public static class RecoveryRef extends AutoCloseableRefCounted { /** * Important: {@link RecoveryTarget#tryIncRef()} should * be *successfully* called on status before */ public RecoveryRef(RecoveryTarget status) { - super(status, status::decRef); + super(status); status.setLastAccessTime(); } } diff --git 
a/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java index d89d59e2f2c1b..9f57a0ebd4d0f 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java @@ -50,6 +50,7 @@ import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.ShardId; import org.opensearch.index.store.StoreStats; +import org.opensearch.indices.replication.common.ReplicationTimer; import java.io.IOException; import java.util.ArrayList; @@ -122,7 +123,7 @@ public static Stage fromId(byte id) { private final Index index; private final Translog translog; private final VerifyIndex verifyIndex; - private final Timer timer; + private final ReplicationTimer timer; private RecoverySource recoverySource; private ShardId shardId; @@ -149,12 +150,12 @@ public RecoveryState(ShardRouting shardRouting, DiscoveryNode targetNode, @Nulla this.index = index; translog = new Translog(); verifyIndex = new VerifyIndex(); - timer = new Timer(); + timer = new ReplicationTimer(); timer.start(); } public RecoveryState(StreamInput in) throws IOException { - timer = new Timer(in); + timer = new ReplicationTimer(in); stage = Stage.fromId(in.readByte()); shardId = new ShardId(in); recoverySource = RecoverySource.readFrom(in); @@ -256,7 +257,7 @@ public Translog getTranslog() { return translog; } - public Timer getTimer() { + public ReplicationTimer getTimer() { return timer; } @@ -280,10 +281,6 @@ public boolean getPrimary() { return primary; } - public static RecoveryState readRecoveryState(StreamInput in) throws IOException { - return new RecoveryState(in); - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { @@ -291,9 +288,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(Fields.TYPE, 
recoverySource.getType()); builder.field(Fields.STAGE, stage.toString()); builder.field(Fields.PRIMARY, primary); - builder.timeField(Fields.START_TIME_IN_MILLIS, Fields.START_TIME, timer.startTime); - if (timer.stopTime > 0) { - builder.timeField(Fields.STOP_TIME_IN_MILLIS, Fields.STOP_TIME, timer.stopTime); + builder.timeField(Fields.START_TIME_IN_MILLIS, Fields.START_TIME, timer.startTime()); + if (timer.stopTime() > 0) { + builder.timeField(Fields.STOP_TIME_IN_MILLIS, Fields.STOP_TIME, timer.stopTime()); } builder.humanReadableField(Fields.TOTAL_TIME_IN_MILLIS, Fields.TOTAL_TIME, new TimeValue(timer.time())); @@ -375,78 +372,7 @@ static final class Fields { static final String TARGET_THROTTLE_TIME_IN_MILLIS = "target_throttle_time_in_millis"; } - public static class Timer implements Writeable { - protected long startTime = 0; - protected long startNanoTime = 0; - protected long time = -1; - protected long stopTime = 0; - - public Timer() {} - - public Timer(StreamInput in) throws IOException { - startTime = in.readVLong(); - startNanoTime = in.readVLong(); - stopTime = in.readVLong(); - time = in.readVLong(); - } - - @Override - public synchronized void writeTo(StreamOutput out) throws IOException { - out.writeVLong(startTime); - out.writeVLong(startNanoTime); - out.writeVLong(stopTime); - // write a snapshot of current time, which is not per se the time field - out.writeVLong(time()); - } - - public synchronized void start() { - assert startTime == 0 : "already started"; - startTime = System.currentTimeMillis(); - startNanoTime = System.nanoTime(); - } - - /** Returns start time in millis */ - public synchronized long startTime() { - return startTime; - } - - /** Returns elapsed time in millis, or 0 if timer was not started */ - public synchronized long time() { - if (startNanoTime == 0) { - return 0; - } - if (time >= 0) { - return time; - } - return Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startNanoTime)); - } - - /** Returns stop time in millis 
*/ - public synchronized long stopTime() { - return stopTime; - } - - public synchronized void stop() { - assert stopTime == 0 : "already stopped"; - stopTime = Math.max(System.currentTimeMillis(), startTime); - time = TimeValue.nsecToMSec(System.nanoTime() - startNanoTime); - assert time >= 0; - } - - public synchronized void reset() { - startTime = 0; - startNanoTime = 0; - time = -1; - stopTime = 0; - } - - // for tests - public long getStartNanoTime() { - return startNanoTime; - } - } - - public static class VerifyIndex extends Timer implements ToXContentFragment, Writeable { + public static class VerifyIndex extends ReplicationTimer implements ToXContentFragment, Writeable { private volatile long checkIndexTime; public VerifyIndex() {} @@ -483,7 +409,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } } - public static class Translog extends Timer implements ToXContentFragment, Writeable { + public static class Translog extends ReplicationTimer implements ToXContentFragment, Writeable { public static final int UNKNOWN = -1; private int recovered; @@ -819,7 +745,7 @@ public boolean isComplete() { } } - public static class Index extends Timer implements ToXContentFragment, Writeable { + public static class Index extends ReplicationTimer implements ToXContentFragment, Writeable { private final RecoveryFilesDetails fileDetails; public static final long UNKNOWN = -1L; diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTimer.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTimer.java new file mode 100644 index 0000000000000..976df28265d9a --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTimer.java @@ -0,0 +1,97 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.indices.replication.common; + +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.io.stream.Writeable; +import org.opensearch.common.unit.TimeValue; + +import java.io.IOException; + +/** + * A serializable timer that is used to measure the time taken for + * file replication operations like recovery. + */ +public class ReplicationTimer implements Writeable { + private long startTime = 0; + private long startNanoTime = 0; + private long time = -1; + private long stopTime = 0; + + public ReplicationTimer() {} + + public ReplicationTimer(StreamInput in) throws IOException { + startTime = in.readVLong(); + startNanoTime = in.readVLong(); + stopTime = in.readVLong(); + time = in.readVLong(); + } + + @Override + public synchronized void writeTo(StreamOutput out) throws IOException { + out.writeVLong(startTime); + out.writeVLong(startNanoTime); + out.writeVLong(stopTime); + // write a snapshot of current time, which is not per se the time field + out.writeVLong(time()); + } + + public synchronized void start() { + assert startTime == 0 : "already started"; + startTime = System.currentTimeMillis(); + startNanoTime = System.nanoTime(); + } + + /** + * Returns start time in millis + */ + public synchronized long startTime() { + return startTime; + } + + /** + * Returns elapsed time in millis, or 0 if timer was not started + */ + public synchronized long time() { + if (startNanoTime == 0) { + return 0; + } + if (time >= 0) { + return time; + } + return Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startNanoTime)); + } + + /** + * Returns stop time in millis + */ + public synchronized long stopTime() { + return stopTime; + } + + public synchronized void stop() { + assert stopTime == 0 : "already stopped"; + stopTime = Math.max(System.currentTimeMillis(), startTime); + time = TimeValue.nsecToMSec(System.nanoTime() - startNanoTime); + assert time >= 0; + } + 
+ public synchronized void reset() { + startTime = 0; + startNanoTime = 0; + time = -1; + stopTime = 0; + } + + // only used in tests + public long getStartNanoTime() { + return startNanoTime; + } +} diff --git a/server/src/test/java/org/opensearch/common/concurrent/GatedAutoCloseableTests.java b/server/src/test/java/org/opensearch/common/concurrent/AutoCloseableRefCountedTests.java similarity index 50% rename from server/src/test/java/org/opensearch/common/concurrent/GatedAutoCloseableTests.java rename to server/src/test/java/org/opensearch/common/concurrent/AutoCloseableRefCountedTests.java index 63058da8f163a..344368988f5ff 100644 --- a/server/src/test/java/org/opensearch/common/concurrent/GatedAutoCloseableTests.java +++ b/server/src/test/java/org/opensearch/common/concurrent/AutoCloseableRefCountedTests.java @@ -14,33 +14,36 @@ package org.opensearch.common.concurrent; import org.junit.Before; +import org.opensearch.common.util.concurrent.RefCounted; import org.opensearch.test.OpenSearchTestCase; -import java.util.concurrent.atomic.AtomicInteger; +import static org.mockito.Mockito.atMostOnce; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; -public class GatedAutoCloseableTests extends OpenSearchTestCase { +public class AutoCloseableRefCountedTests extends OpenSearchTestCase { - private AtomicInteger testRef; - private GatedAutoCloseable testObject; + private RefCounted mockRefCounted; + private AutoCloseableRefCounted testObject; @Before public void setup() { - testRef = new AtomicInteger(0); - testObject = new GatedAutoCloseable<>(testRef, testRef::incrementAndGet); + mockRefCounted = mock(RefCounted.class); + testObject = new AutoCloseableRefCounted<>(mockRefCounted); } public void testGet() { - assertEquals(0, testObject.get().get()); + assertEquals(mockRefCounted, testObject.get()); } public void testClose() { testObject.close(); - assertEquals(1, testObject.get().get()); + verify(mockRefCounted, atMostOnce()).decRef(); 
} public void testIdempotent() { testObject.close(); testObject.close(); - assertEquals(1, testObject.get().get()); + verify(mockRefCounted, atMostOnce()).decRef(); } } diff --git a/server/src/test/java/org/opensearch/indices/recovery/RecoveryTargetTests.java b/server/src/test/java/org/opensearch/indices/recovery/RecoveryTargetTests.java index 5d0d9bca8b3fb..dd4b17fbac5de 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/RecoveryTargetTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/RecoveryTargetTests.java @@ -44,9 +44,9 @@ import org.opensearch.indices.recovery.RecoveryState.FileDetail; import org.opensearch.indices.recovery.RecoveryState.Index; import org.opensearch.indices.recovery.RecoveryState.Stage; -import org.opensearch.indices.recovery.RecoveryState.Timer; import org.opensearch.indices.recovery.RecoveryState.Translog; import org.opensearch.indices.recovery.RecoveryState.VerifyIndex; +import org.opensearch.indices.replication.common.ReplicationTimer; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; @@ -63,9 +63,7 @@ import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.startsWith; @@ -124,72 +122,81 @@ public void run() { } } - public void testTimers() throws Throwable { - final Timer timer; - Streamer streamer; + public void testTimer() throws Throwable { AtomicBoolean stop = new AtomicBoolean(); - if (randomBoolean()) { - timer = new Timer(); - streamer = new Streamer(stop, timer) { - @Override - Timer createObj(StreamInput in) throws IOException { - return new Timer(in); - } - }; - } else if (randomBoolean()) { - timer = new Index(); - streamer = new Streamer(stop, timer) 
{ - @Override - Timer createObj(StreamInput in) throws IOException { - return new Index(in); - } - }; - } else if (randomBoolean()) { - timer = new VerifyIndex(); - streamer = new Streamer(stop, timer) { - @Override - Timer createObj(StreamInput in) throws IOException { - return new VerifyIndex(in); - } - }; - } else { - timer = new Translog(); - streamer = new Streamer(stop, timer) { - @Override - Timer createObj(StreamInput in) throws IOException { - return new Translog(in); - } - }; - } + final ReplicationTimer timer = new ReplicationTimer(); + Streamer streamer = new Streamer<>(stop, timer) { + @Override + ReplicationTimer createObj(StreamInput in) throws IOException { + return new ReplicationTimer(in); + } + }; + doTimerTest(timer, streamer); + } + + public void testIndexTimer() throws Throwable { + AtomicBoolean stop = new AtomicBoolean(); + Index index = new Index(); + Streamer streamer = new Streamer<>(stop, index) { + @Override + Index createObj(StreamInput in) throws IOException { + return new Index(in); + } + }; + doTimerTest(index, streamer); + } + public void testVerifyIndexTimer() throws Throwable { + AtomicBoolean stop = new AtomicBoolean(); + VerifyIndex verifyIndex = new VerifyIndex(); + Streamer streamer = new Streamer<>(stop, verifyIndex) { + @Override + VerifyIndex createObj(StreamInput in) throws IOException { + return new VerifyIndex(in); + } + }; + doTimerTest(verifyIndex, streamer); + } + + public void testTranslogTimer() throws Throwable { + AtomicBoolean stop = new AtomicBoolean(); + Translog translog = new Translog(); + Streamer streamer = new Streamer<>(stop, translog) { + @Override + Translog createObj(StreamInput in) throws IOException { + return new Translog(in); + } + }; + doTimerTest(translog, streamer); + } + + private void doTimerTest(ReplicationTimer timer, Streamer streamer) throws Exception { timer.start(); - assertThat(timer.startTime(), greaterThan(0L)); - assertThat(timer.stopTime(), equalTo(0L)); - Timer lastRead = 
streamer.serializeDeserialize(); + assertTrue(timer.startTime() > 0); + assertEquals(0, timer.stopTime()); + ReplicationTimer lastRead = streamer.serializeDeserialize(); final long time = lastRead.time(); - assertThat(time, lessThanOrEqualTo(timer.time())); - assertBusy(() -> assertThat("timer timer should progress compared to captured one ", time, lessThan(timer.time()))); - assertThat("captured time shouldn't change", lastRead.time(), equalTo(time)); + assertBusy(() -> assertTrue("timer timer should progress compared to captured one ", time < timer.time())); + assertEquals("captured time shouldn't change", time, lastRead.time()); - if (randomBoolean()) { - timer.stop(); - assertThat(timer.stopTime(), greaterThanOrEqualTo(timer.startTime())); - assertThat(timer.time(), greaterThan(0L)); - lastRead = streamer.serializeDeserialize(); - assertThat(lastRead.startTime(), equalTo(timer.startTime())); - assertThat(lastRead.time(), equalTo(timer.time())); - assertThat(lastRead.stopTime(), equalTo(timer.stopTime())); - } + timer.stop(); + assertTrue(timer.stopTime() >= timer.startTime()); + assertTrue(timer.time() > 0); + // validate captured time + lastRead = streamer.serializeDeserialize(); + assertEquals(timer.startTime(), lastRead.startTime()); + assertEquals(timer.time(), lastRead.time()); + assertEquals(timer.stopTime(), lastRead.stopTime()); timer.reset(); - assertThat(timer.startTime(), equalTo(0L)); - assertThat(timer.time(), equalTo(0L)); - assertThat(timer.stopTime(), equalTo(0L)); + assertEquals(0, timer.startTime()); + assertEquals(0, timer.time()); + assertEquals(0, timer.stopTime()); + // validate captured time lastRead = streamer.serializeDeserialize(); - assertThat(lastRead.startTime(), equalTo(0L)); - assertThat(lastRead.time(), equalTo(0L)); - assertThat(lastRead.stopTime(), equalTo(0L)); - + assertEquals(0, lastRead.startTime()); + assertEquals(0, lastRead.time()); + assertEquals(0, lastRead.stopTime()); } public void testIndex() throws Throwable { diff 
--git a/server/src/test/java/org/opensearch/rest/action/cat/RestRecoveryActionTests.java b/server/src/test/java/org/opensearch/rest/action/cat/RestRecoveryActionTests.java index 7966d2961c29a..e7eb9cbf24015 100644 --- a/server/src/test/java/org/opensearch/rest/action/cat/RestRecoveryActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/cat/RestRecoveryActionTests.java @@ -45,6 +45,7 @@ import org.opensearch.index.Index; import org.opensearch.index.shard.ShardId; import org.opensearch.indices.recovery.RecoveryState; +import org.opensearch.indices.replication.common.ReplicationTimer; import org.opensearch.test.OpenSearchTestCase; import java.util.ArrayList; @@ -72,7 +73,7 @@ public void testRestRecoveryAction() { for (int i = 0; i < successfulShards; i++) { final RecoveryState state = mock(RecoveryState.class); when(state.getShardId()).thenReturn(new ShardId(new Index("index", "_na_"), i)); - final RecoveryState.Timer timer = mock(RecoveryState.Timer.class); + final ReplicationTimer timer = mock(ReplicationTimer.class); final long startTime = randomLongBetween(0, new Date().getTime()); when(timer.startTime()).thenReturn(startTime); final long time = randomLongBetween(1000000, 10 * 1000000); From a34d11f15a2c0f14ac8ebda4d85da92000719b7f Mon Sep 17 00:00:00 2001 From: Wenjun Ruan Date: Thu, 21 Apr 2022 11:14:44 +0800 Subject: [PATCH 106/653] Remove usages of MultiTermQuery.setRewriteMethod (#2997) Remove usages of MultiTermQuery.setRewriteMethod which is removed in latest versions of Lucene.
Signed-off-by: ruanwenjun --- .../mapper/SearchAsYouTypeFieldMapper.java | 4 +- .../search/query/QueryPhaseTests.java | 12 ++--- .../lucene/search/AutomatonQueries.java | 48 +++++++++++++++---- .../index/mapper/StringFieldType.java | 44 ++++++++--------- .../index/mapper/TextFieldMapper.java | 5 +- .../index/query/RegexpQueryBuilder.java | 13 ++--- .../index/search/QueryStringQueryParser.java | 11 +++-- .../deps/lucene/VectorHighlighterTests.java | 2 +- .../search/query/QueryPhaseTests.java | 15 +++--- 9 files changed, 89 insertions(+), 65 deletions(-) diff --git a/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapper.java b/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapper.java index 7394993448bbf..68b887c4c4a43 100644 --- a/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapper.java @@ -60,6 +60,7 @@ import org.apache.lucene.util.automaton.Automaton; import org.apache.lucene.util.automaton.Operations; import org.opensearch.common.collect.Iterators; +import org.opensearch.common.lucene.search.AutomatonQueries; import org.opensearch.index.analysis.AnalyzerScope; import org.opensearch.index.analysis.IndexAnalyzers; import org.opensearch.index.analysis.NamedAnalyzer; @@ -431,8 +432,7 @@ public Query prefixQuery(String value, MultiTermQuery.RewriteMethod method, bool automata.add(Automata.makeAnyChar()); } Automaton automaton = Operations.concatenate(automata); - AutomatonQuery query = new AutomatonQuery(new Term(name(), value + "*"), automaton); - query.setRewriteMethod(method); + AutomatonQuery query = AutomatonQueries.createAutomatonQuery(new Term(name(), value + "*"), automaton, method); return new BooleanQuery.Builder().add(query, BooleanClause.Occur.SHOULD) .add(new TermQuery(new Term(parentField, value)), BooleanClause.Occur.SHOULD) .build(); diff 
--git a/sandbox/plugins/concurrent-search/src/test/java/org/opensearch/search/query/QueryPhaseTests.java b/sandbox/plugins/concurrent-search/src/test/java/org/opensearch/search/query/QueryPhaseTests.java index 83a0a63a6a5c8..74cd4754efe44 100644 --- a/sandbox/plugins/concurrent-search/src/test/java/org/opensearch/search/query/QueryPhaseTests.java +++ b/sandbox/plugins/concurrent-search/src/test/java/org/opensearch/search/query/QueryPhaseTests.java @@ -33,7 +33,6 @@ package org.opensearch.search.query; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; - import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field.Store; @@ -52,6 +51,8 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.Term; +import org.apache.lucene.queries.spans.SpanNearQuery; +import org.apache.lucene.queries.spans.SpanTermQuery; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; @@ -82,8 +83,6 @@ import org.apache.lucene.search.grouping.CollapseTopFieldDocs; import org.apache.lucene.search.join.BitSetProducer; import org.apache.lucene.search.join.ScoreMode; -import org.apache.lucene.queries.spans.SpanNearQuery; -import org.apache.lucene.queries.spans.SpanTermQuery; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.BytesRef; @@ -122,7 +121,6 @@ import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; -import static org.opensearch.search.query.TopDocsCollectorContext.hasInfMaxScore; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -130,8 +128,9 @@ import static org.hamcrest.Matchers.instanceOf; import static 
org.hamcrest.Matchers.lessThanOrEqualTo; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; +import static org.opensearch.search.query.TopDocsCollectorContext.hasInfMaxScore; public class QueryPhaseTests extends IndexShardTestCase { @@ -1114,8 +1113,7 @@ public void testCancellationDuringPreprocess() throws IOException { indexShard, newContextSearcher(reader, executor) ); - PrefixQuery prefixQuery = new PrefixQuery(new Term("foo", "a")); - prefixQuery.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_REWRITE); + PrefixQuery prefixQuery = new PrefixQuery(new Term("foo", "a"), MultiTermQuery.SCORING_BOOLEAN_REWRITE); context.parsedQuery(new ParsedQuery(prefixQuery)); SearchShardTask task = mock(SearchShardTask.class); when(task.isCancelled()).thenReturn(true); diff --git a/server/src/main/java/org/opensearch/common/lucene/search/AutomatonQueries.java b/server/src/main/java/org/opensearch/common/lucene/search/AutomatonQueries.java index 9755a99fecc08..12dec26090b48 100644 --- a/server/src/main/java/org/opensearch/common/lucene/search/AutomatonQueries.java +++ b/server/src/main/java/org/opensearch/common/lucene/search/AutomatonQueries.java @@ -34,6 +34,7 @@ import org.apache.lucene.index.Term; import org.apache.lucene.search.AutomatonQuery; +import org.apache.lucene.search.MultiTermQuery; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.automaton.Automata; import org.apache.lucene.util.automaton.Automaton; @@ -63,29 +64,58 @@ public static Automaton caseInsensitivePrefix(String s) { return a; } - /** Build an automaton query accepting all terms with the specified prefix, ASCII case insensitive. */ + /** + * Build an automaton query accepting all terms with the specified prefix, ASCII case insensitive. 
+ */ public static AutomatonQuery caseInsensitivePrefixQuery(Term prefix) { - return new AutomatonQuery(prefix, caseInsensitivePrefix(prefix.text())); + return caseInsensitivePrefixQuery(prefix, MultiTermQuery.CONSTANT_SCORE_REWRITE); } - /** Build an automaton accepting all terms ASCII case insensitive. */ + /** + * Build an automaton query accepting all terms with the specified prefix, ASCII case insensitive. + */ + public static AutomatonQuery caseInsensitivePrefixQuery(Term prefix, MultiTermQuery.RewriteMethod method) { + return createAutomatonQuery(prefix, caseInsensitivePrefix(prefix.text()), method); + } + + /** + * Build an automaton accepting all terms ASCII case insensitive. + */ public static AutomatonQuery caseInsensitiveTermQuery(Term term) { BytesRef prefix = term.bytes(); return new AutomatonQuery(term, toCaseInsensitiveString(prefix, Integer.MAX_VALUE)); } - /** Build an automaton matching a wildcard pattern, ASCII case insensitive. */ - public static AutomatonQuery caseInsensitiveWildcardQuery(Term wildcardquery) { - return new AutomatonQuery(wildcardquery, toCaseInsensitiveWildcardAutomaton(wildcardquery, Integer.MAX_VALUE)); + /** + * Build an automaton matching a wildcard pattern, ASCII case insensitive, if the method is null, then will use {@link MultiTermQuery#CONSTANT_SCORE_REWRITE}. + */ + public static AutomatonQuery caseInsensitiveWildcardQuery(Term wildcardquery, MultiTermQuery.RewriteMethod method) { + return createAutomatonQuery(wildcardquery, toCaseInsensitiveWildcardAutomaton(wildcardquery, Integer.MAX_VALUE), method); + } + + /** + * Build an automaton matching a given pattern with rewrite method, if the rewrite method is null, then will use {@link MultiTermQuery#CONSTANT_SCORE_REWRITE}. 
+ */ + public static AutomatonQuery createAutomatonQuery(Term term, Automaton automaton, MultiTermQuery.RewriteMethod method) { + if (method == null) { + method = MultiTermQuery.CONSTANT_SCORE_REWRITE; + } + return new AutomatonQuery(term, automaton, Operations.DEFAULT_DETERMINIZE_WORK_LIMIT, false, method); } - /** String equality with support for wildcards */ + /** + * String equality with support for wildcards + */ public static final char WILDCARD_STRING = '*'; - /** Char equality with support for wildcards */ + /** + * Char equality with support for wildcards + */ public static final char WILDCARD_CHAR = '?'; - /** Escape character */ + /** + * Escape character + */ public static final char WILDCARD_ESCAPE = '\\'; /** diff --git a/server/src/main/java/org/opensearch/index/mapper/StringFieldType.java b/server/src/main/java/org/opensearch/index/mapper/StringFieldType.java index 42bd52dddf520..9aa6bf09a1176 100644 --- a/server/src/main/java/org/opensearch/index/mapper/StringFieldType.java +++ b/server/src/main/java/org/opensearch/index/mapper/StringFieldType.java @@ -34,7 +34,6 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.index.Term; -import org.apache.lucene.search.AutomatonQuery; import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.MultiTermQuery; import org.apache.lucene.search.PrefixQuery; @@ -44,12 +43,12 @@ import org.apache.lucene.search.WildcardQuery; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; +import org.apache.lucene.util.automaton.Operations; import org.opensearch.OpenSearchException; import org.opensearch.common.lucene.BytesRefs; import org.opensearch.common.lucene.search.AutomatonQueries; import org.opensearch.common.unit.Fuzziness; import org.opensearch.index.query.QueryShardContext; -import org.opensearch.index.query.support.QueryParsers; import java.util.Map; import java.util.regex.Matcher; @@ -111,19 +110,13 @@ public Query prefixQuery(String value, 
MultiTermQuery.RewriteMethod method, bool ); } failIfNotIndexed(); - if (caseInsensitive) { - AutomatonQuery query = AutomatonQueries.caseInsensitivePrefixQuery((new Term(name(), indexedValueForSearch(value)))); - if (method != null) { - query.setRewriteMethod(method); - } - return query; - + if (method == null) { + method = MultiTermQuery.CONSTANT_SCORE_REWRITE; } - PrefixQuery query = new PrefixQuery(new Term(name(), indexedValueForSearch(value))); - if (method != null) { - query.setRewriteMethod(method); + if (caseInsensitive) { + return AutomatonQueries.caseInsensitivePrefixQuery((new Term(name(), indexedValueForSearch(value))), method); } - return query; + return new PrefixQuery(new Term(name(), indexedValueForSearch(value)), method); } public static final String normalizeWildcardPattern(String fieldname, String value, Analyzer normalizer) { @@ -173,13 +166,12 @@ public Query wildcardQuery(String value, MultiTermQuery.RewriteMethod method, bo term = new Term(name(), indexedValueForSearch(value)); } if (caseInsensitive) { - AutomatonQuery query = AutomatonQueries.caseInsensitiveWildcardQuery(term); - QueryParsers.setRewriteMethod(query, method); - return query; + return AutomatonQueries.caseInsensitiveWildcardQuery(term, method); } - WildcardQuery query = new WildcardQuery(term); - QueryParsers.setRewriteMethod(query, method); - return query; + if (method == null) { + method = MultiTermQuery.CONSTANT_SCORE_REWRITE; + } + return new WildcardQuery(term, Operations.DEFAULT_DETERMINIZE_WORK_LIMIT, method); } @Override @@ -197,11 +189,17 @@ public Query regexpQuery( ); } failIfNotIndexed(); - RegexpQuery query = new RegexpQuery(new Term(name(), indexedValueForSearch(value)), syntaxFlags, matchFlags, maxDeterminizedStates); - if (method != null) { - query.setRewriteMethod(method); + if (method == null) { + method = MultiTermQuery.CONSTANT_SCORE_REWRITE; } - return query; + return new RegexpQuery( + new Term(name(), indexedValueForSearch(value)), + syntaxFlags, + 
matchFlags, + RegexpQuery.DEFAULT_PROVIDER, + maxDeterminizedStates, + method + ); } @Override diff --git a/server/src/main/java/org/opensearch/index/mapper/TextFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/TextFieldMapper.java index 4b2c20586834d..360240eb9d59c 100644 --- a/server/src/main/java/org/opensearch/index/mapper/TextFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/TextFieldMapper.java @@ -582,10 +582,7 @@ public Query prefixQuery(String value, MultiTermQuery.RewriteMethod method, bool automata.add(Automata.makeAnyChar()); } Automaton automaton = Operations.concatenate(automata); - AutomatonQuery query = new AutomatonQuery(new Term(name(), value + "*"), automaton); - if (method != null) { - query.setRewriteMethod(method); - } + AutomatonQuery query = AutomatonQueries.createAutomatonQuery(new Term(name(), value + "*"), automaton, method); return new BooleanQuery.Builder().add(query, BooleanClause.Occur.SHOULD) .add(new TermQuery(new Term(parentField.name(), value)), BooleanClause.Occur.SHOULD) .build(); diff --git a/server/src/main/java/org/opensearch/index/query/RegexpQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/RegexpQueryBuilder.java index dc6546a3fd3a2..c8192557ef266 100644 --- a/server/src/main/java/org/opensearch/index/query/RegexpQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/RegexpQueryBuilder.java @@ -309,16 +309,17 @@ protected Query doToQuery(QueryShardContext context) throws QueryShardException, query = fieldType.regexpQuery(value, sanitisedSyntaxFlag, matchFlagsValue, maxDeterminizedStates, method, context); } if (query == null) { - RegexpQuery regexpQuery = new RegexpQuery( + if (method == null) { + method = MultiTermQuery.CONSTANT_SCORE_REWRITE; + } + query = new RegexpQuery( new Term(fieldName, BytesRefs.toBytesRef(value)), sanitisedSyntaxFlag, matchFlagsValue, - maxDeterminizedStates + RegexpQuery.DEFAULT_PROVIDER, + maxDeterminizedStates, + method 
); - if (method != null) { - regexpQuery.setRewriteMethod(method); - } - query = regexpQuery; } return query; } diff --git a/server/src/main/java/org/opensearch/index/search/QueryStringQueryParser.java b/server/src/main/java/org/opensearch/index/search/QueryStringQueryParser.java index e9437f5704851..980a42163c9c9 100644 --- a/server/src/main/java/org/opensearch/index/search/QueryStringQueryParser.java +++ b/server/src/main/java/org/opensearch/index/search/QueryStringQueryParser.java @@ -70,7 +70,6 @@ import org.opensearch.index.query.ExistsQueryBuilder; import org.opensearch.index.query.MultiMatchQueryBuilder; import org.opensearch.index.query.QueryShardContext; -import org.opensearch.index.query.support.QueryParsers; import java.io.IOException; import java.time.ZoneId; @@ -110,7 +109,7 @@ public class QueryStringQueryParser extends XQueryParser { private ZoneId timeZone; private Fuzziness fuzziness = Fuzziness.AUTO; private int fuzzyMaxExpansions = FuzzyQuery.defaultMaxExpansions; - private MultiTermQuery.RewriteMethod fuzzyRewriteMethod; + private MultiTermQuery.RewriteMethod fuzzyRewriteMethod = MultiTermQuery.CONSTANT_SCORE_REWRITE; private boolean fuzzyTranspositions = FuzzyQuery.defaultTranspositions; /** @@ -527,9 +526,11 @@ private Query getFuzzyQuerySingle(String field, String termStr, float minSimilar @Override protected Query newFuzzyQuery(Term term, float minimumSimilarity, int prefixLength) { int numEdits = Fuzziness.build(minimumSimilarity).asDistance(term.text()); - FuzzyQuery query = new FuzzyQuery(term, numEdits, prefixLength, fuzzyMaxExpansions, fuzzyTranspositions); - QueryParsers.setRewriteMethod(query, fuzzyRewriteMethod); - return query; + if (fuzzyRewriteMethod != null) { + return new FuzzyQuery(term, numEdits, prefixLength, fuzzyMaxExpansions, fuzzyTranspositions, fuzzyRewriteMethod); + } else { + return new FuzzyQuery(term, numEdits, prefixLength, fuzzyMaxExpansions, fuzzyTranspositions); + } } @Override diff --git 
a/server/src/test/java/org/opensearch/deps/lucene/VectorHighlighterTests.java b/server/src/test/java/org/opensearch/deps/lucene/VectorHighlighterTests.java index e3a4c8a3e890d..e91da4f5ee46e 100644 --- a/server/src/test/java/org/opensearch/deps/lucene/VectorHighlighterTests.java +++ b/server/src/test/java/org/opensearch/deps/lucene/VectorHighlighterTests.java @@ -121,7 +121,7 @@ public void testVectorHighlighterPrefixQuery() throws Exception { ); assertThat(fragment, nullValue()); - prefixQuery.setRewriteMethod(PrefixQuery.SCORING_BOOLEAN_REWRITE); + prefixQuery = new PrefixQuery(new Term("content", "ba"), PrefixQuery.SCORING_BOOLEAN_REWRITE); Query rewriteQuery = prefixQuery.rewrite(reader); fragment = highlighter.getBestFragment(highlighter.getFieldQuery(rewriteQuery), reader, topDocs.scoreDocs[0].doc, "content", 30); assertThat(fragment, notNullValue()); diff --git a/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java b/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java index 1232347edea64..2234c8a980923 100644 --- a/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java +++ b/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java @@ -49,9 +49,7 @@ import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.NoMergePolicy; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.index.Term; -import org.opensearch.lucene.queries.MinDocQuery; import org.apache.lucene.queries.spans.SpanNearQuery; import org.apache.lucene.queries.spans.SpanTermQuery; import org.apache.lucene.search.BooleanClause.Occur; @@ -82,6 +80,7 @@ import org.apache.lucene.search.join.BitSetProducer; import org.apache.lucene.search.join.ScoreMode; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.FixedBitSet; import 
org.opensearch.action.search.SearchShardTask; @@ -97,6 +96,7 @@ import org.opensearch.index.search.OpenSearchToParentBlockJoinQuery; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; +import org.opensearch.lucene.queries.MinDocQuery; import org.opensearch.search.DocValueFormat; import org.opensearch.search.collapse.CollapseBuilder; import org.opensearch.search.internal.ContextIndexSearcher; @@ -111,16 +111,16 @@ import java.util.Collections; import java.util.List; -import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.instanceOf; -import static org.opensearch.search.query.TopDocsCollectorContext.hasInfMaxScore; +import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; +import static org.opensearch.search.query.TopDocsCollectorContext.hasInfMaxScore; public class QueryPhaseTests extends IndexShardTestCase { @@ -1069,8 +1069,7 @@ public void testCancellationDuringPreprocess() throws IOException { try (IndexReader reader = DirectoryReader.open(dir)) { TestSearchContext context = new TestSearchContextWithRewriteAndCancellation(null, indexShard, newContextSearcher(reader)); - PrefixQuery prefixQuery = new PrefixQuery(new Term("foo", "a")); - prefixQuery.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_REWRITE); + PrefixQuery prefixQuery = new PrefixQuery(new Term("foo", "a"), MultiTermQuery.SCORING_BOOLEAN_REWRITE); context.parsedQuery(new ParsedQuery(prefixQuery)); SearchShardTask task = mock(SearchShardTask.class); when(task.isCancelled()).thenReturn(true); From 
dbdee30a376b2dcbb938790ad99959338bfa1c8e Mon Sep 17 00:00:00 2001 From: Suraj Singh Date: Wed, 20 Apr 2022 20:22:11 -0700 Subject: [PATCH 107/653] [Type Removal] Remove TypeFieldMapper usage, remove support of `_type` in searches and from LeafFieldsLookup (#3016) Removes TypeFieldMapper and _type support from searches Signed-off-by: Suraj Singh --- .../PercolatorFieldMapperTests.java | 4 +- .../test/search/160_exists_query.yml | 13 --- .../document/DocumentActionsIT.java | 9 +- .../aggregations/metrics/TopHitsIT.java | 4 +- .../search/fields/SearchFieldsIT.java | 36 ------ .../index/mapper/DocumentMapper.java | 1 - .../index/mapper/MapperService.java | 5 - .../index/mapper/TypeFieldMapper.java | 15 +-- .../org/opensearch/indices/IndicesModule.java | 2 - .../search/lookup/LeafFieldsLookup.java | 24 +--- .../index/engine/InternalEngineTests.java | 2 +- .../opensearch/index/get/GetResultTests.java | 6 +- .../index/mapper/TypeFieldMapperTests.java | 106 ------------------ .../index/mapper/TypeFieldTypeTests.java | 66 ----------- .../index/query/TermQueryBuilderTests.java | 7 -- .../index/query/TermsQueryBuilderTests.java | 7 -- .../query/WildcardQueryBuilderTests.java | 7 -- .../indices/IndicesModuleTests.java | 2 - .../support/ValuesSourceConfigTests.java | 20 ---- .../fetch/subphase/FieldFetcherTests.java | 5 + 20 files changed, 21 insertions(+), 320 deletions(-) delete mode 100644 server/src/test/java/org/opensearch/index/mapper/TypeFieldMapperTests.java delete mode 100644 server/src/test/java/org/opensearch/index/mapper/TypeFieldTypeTests.java diff --git a/modules/percolator/src/test/java/org/opensearch/percolator/PercolatorFieldMapperTests.java b/modules/percolator/src/test/java/org/opensearch/percolator/PercolatorFieldMapperTests.java index ca6f3a78b27d7..fe9c486b68166 100644 --- a/modules/percolator/src/test/java/org/opensearch/percolator/PercolatorFieldMapperTests.java +++ 
b/modules/percolator/src/test/java/org/opensearch/percolator/PercolatorFieldMapperTests.java @@ -862,7 +862,7 @@ public void testUnsupportedQueries() { PercolatorFieldMapper.verifyQuery(rangeQuery1); PercolatorFieldMapper.verifyQuery(rangeQuery2); - HasChildQueryBuilder hasChildQuery = new HasChildQueryBuilder("_type", new MatchAllQueryBuilder(), ScoreMode.None); + HasChildQueryBuilder hasChildQuery = new HasChildQueryBuilder("parent", new MatchAllQueryBuilder(), ScoreMode.None); expectThrows(IllegalArgumentException.class, () -> PercolatorFieldMapper.verifyQuery(new BoolQueryBuilder().must(hasChildQuery))); expectThrows(IllegalArgumentException.class, () -> PercolatorFieldMapper.verifyQuery(new DisMaxQueryBuilder().add(hasChildQuery))); PercolatorFieldMapper.verifyQuery(new ConstantScoreQueryBuilder((rangeQuery1))); @@ -881,7 +881,7 @@ public void testUnsupportedQueries() { expectThrows(IllegalArgumentException.class, () -> PercolatorFieldMapper.verifyQuery(hasChildQuery)); expectThrows(IllegalArgumentException.class, () -> PercolatorFieldMapper.verifyQuery(new BoolQueryBuilder().must(hasChildQuery))); - HasParentQueryBuilder hasParentQuery = new HasParentQueryBuilder("_type", new MatchAllQueryBuilder(), false); + HasParentQueryBuilder hasParentQuery = new HasParentQueryBuilder("parent", new MatchAllQueryBuilder(), false); expectThrows(IllegalArgumentException.class, () -> PercolatorFieldMapper.verifyQuery(hasParentQuery)); expectThrows(IllegalArgumentException.class, () -> PercolatorFieldMapper.verifyQuery(new BoolQueryBuilder().must(hasParentQuery))); } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/160_exists_query.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/160_exists_query.yml index 201e456be2cdd..be97930d41eb9 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/160_exists_query.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/160_exists_query.yml @@ -561,19 +561,6 @@ setup: 
- match: {hits.total: 4} ---- -"Test exists query on _type field": - - do: - search: - rest_total_hits_as_int: true - index: test - body: - query: - exists: - field: _type - - - match: {hits.total: 4} - --- "Test exists query on _routing field": - do: diff --git a/server/src/internalClusterTest/java/org/opensearch/document/DocumentActionsIT.java b/server/src/internalClusterTest/java/org/opensearch/document/DocumentActionsIT.java index 1e40cc14bbb36..fa94d5c1c5024 100644 --- a/server/src/internalClusterTest/java/org/opensearch/document/DocumentActionsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/document/DocumentActionsIT.java @@ -47,7 +47,6 @@ import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentType; -import org.opensearch.index.mapper.MapperService; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.hamcrest.OpenSearchAssertions; @@ -58,7 +57,7 @@ import static org.opensearch.client.Requests.getRequest; import static org.opensearch.client.Requests.indexRequest; import static org.opensearch.client.Requests.refreshRequest; -import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; import static org.hamcrest.Matchers.equalTo; @@ -181,11 +180,7 @@ public void testIndexActions() throws Exception { // check count for (int i = 0; i < 5; i++) { // test successful - SearchResponse countResponse = client().prepareSearch("test") - .setSize(0) - .setQuery(termQuery("_type", MapperService.SINGLE_MAPPING_NAME)) - .execute() - .actionGet(); + SearchResponse countResponse = client().prepareSearch("test").setSize(0).setQuery(matchAllQuery()).execute().actionGet(); assertNoFailures(countResponse); assertThat(countResponse.getHits().getTotalHits().value, equalTo(2L)); 
assertThat(countResponse.getSuccessfulShards(), equalTo(numShards.numPrimaries)); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TopHitsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TopHitsIT.java index c3240c5eef7c5..4c5c42b773e93 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TopHitsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TopHitsIT.java @@ -1386,7 +1386,7 @@ public void testWithRescore() { SearchResponse response = client().prepareSearch("idx") .addRescorer(new QueryRescorerBuilder(new MatchAllQueryBuilder().boost(3.0f))) .addAggregation( - terms("terms").field(TERMS_AGGS_FIELD).subAggregation(topHits("hits").sort(SortBuilders.fieldSort("_type"))) + terms("terms").field(TERMS_AGGS_FIELD).subAggregation(topHits("hits").sort(SortBuilders.fieldSort("_index"))) ) .get(); Terms terms = response.getAggregations().get("terms"); @@ -1403,7 +1403,7 @@ public void testWithRescore() { .addRescorer(new QueryRescorerBuilder(new MatchAllQueryBuilder().boost(3.0f))) .addAggregation( terms("terms").field(TERMS_AGGS_FIELD) - .subAggregation(topHits("hits").sort(SortBuilders.scoreSort()).sort(SortBuilders.fieldSort("_type"))) + .subAggregation(topHits("hits").sort(SortBuilders.scoreSort()).sort(SortBuilders.fieldSort("_index"))) ) .get(); Terms terms = response.getAggregations().get("terms"); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java index 25782f8dc18db..941f4982af9cc 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java @@ -471,42 +471,6 @@ public void testIdBasedScriptFields() throws Exception { assertThat(fields, 
equalTo(singleton("id"))); assertThat(response.getHits().getAt(i).getFields().get("id").getValue(), equalTo(Integer.toString(i))); } - - response = client().prepareSearch() - .setQuery(matchAllQuery()) - .addSort("num1", SortOrder.ASC) - .setSize(numDocs) - .addScriptField("type", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_fields._type.value", Collections.emptyMap())) - .get(); - - assertNoFailures(response); - - assertThat(response.getHits().getTotalHits().value, equalTo((long) numDocs)); - for (int i = 0; i < numDocs; i++) { - assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); - Set fields = new HashSet<>(response.getHits().getAt(i).getFields().keySet()); - assertThat(fields, equalTo(singleton("type"))); - assertThat(response.getHits().getAt(i).getFields().get("type").getValue(), equalTo(MapperService.SINGLE_MAPPING_NAME)); - } - - response = client().prepareSearch() - .setQuery(matchAllQuery()) - .addSort("num1", SortOrder.ASC) - .setSize(numDocs) - .addScriptField("id", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_fields._id.value", Collections.emptyMap())) - .addScriptField("type", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_fields._type.value", Collections.emptyMap())) - .get(); - - assertNoFailures(response); - - assertThat(response.getHits().getTotalHits().value, equalTo((long) numDocs)); - for (int i = 0; i < numDocs; i++) { - assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); - Set fields = new HashSet<>(response.getHits().getAt(i).getFields().keySet()); - assertThat(fields, equalTo(newHashSet("type", "id"))); - assertThat(response.getHits().getAt(i).getFields().get("type").getValue(), equalTo(MapperService.SINGLE_MAPPING_NAME)); - assertThat(response.getHits().getAt(i).getFields().get("id").getValue(), equalTo(Integer.toString(i))); - } } public void testScriptFieldUsingSource() throws Exception { diff --git 
a/server/src/main/java/org/opensearch/index/mapper/DocumentMapper.java b/server/src/main/java/org/opensearch/index/mapper/DocumentMapper.java index 0ee0a3cb9a180..0bebfa024e185 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DocumentMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/DocumentMapper.java @@ -161,7 +161,6 @@ public DocumentMapper(MapperService mapperService, Mapping mapping) { final Collection deleteTombstoneMetadataFields = Arrays.asList( VersionFieldMapper.NAME, IdFieldMapper.NAME, - TypeFieldMapper.NAME, SeqNoFieldMapper.NAME, SeqNoFieldMapper.PRIMARY_TERM_NAME, SeqNoFieldMapper.TOMBSTONE_NAME diff --git a/server/src/main/java/org/opensearch/index/mapper/MapperService.java b/server/src/main/java/org/opensearch/index/mapper/MapperService.java index a92647929ff08..819df4a6f396e 100644 --- a/server/src/main/java/org/opensearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/opensearch/index/mapper/MapperService.java @@ -576,11 +576,6 @@ public DocumentMapperForType documentMapperWithAutoCreate() { * Given the full name of a field, returns its {@link MappedFieldType}. */ public MappedFieldType fieldType(String fullName) { - if (fullName.equals(TypeFieldMapper.NAME)) { - String type = mapper == null ? null : mapper.type(); - return new TypeFieldMapper.TypeFieldType(type); - } - return this.mapper == null ? 
null : this.mapper.fieldTypes().get(fullName); } diff --git a/server/src/main/java/org/opensearch/index/mapper/TypeFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/TypeFieldMapper.java index 9adb1430b3df0..8d3f1df677040 100644 --- a/server/src/main/java/org/opensearch/index/mapper/TypeFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/TypeFieldMapper.java @@ -41,7 +41,6 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.opensearch.common.geo.ShapeRelation; -import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.regex.Regex; import org.opensearch.common.time.DateMathParser; import org.opensearch.index.fielddata.IndexFieldData; @@ -55,17 +54,9 @@ import java.util.Objects; import java.util.function.Supplier; +// Todo: Remove TypeFieldMapper once we have NestedFieldMapper implementation public class TypeFieldMapper extends MetadataFieldMapper { - private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(TypeFieldType.class); - - public static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Using the _type field " - + "in queries and aggregations is deprecated, prefer to use a field instead."; - - public static void emitTypesDeprecationWarning() { - deprecationLogger.deprecate("query_with_types", TYPES_DEPRECATION_MESSAGE); - } - public static final String NAME = "_type"; public static final String CONTENT_TYPE = "_type"; @@ -101,7 +92,6 @@ public String typeName() { @Override public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, Supplier searchLookup) { - emitTypesDeprecationWarning(); return new ConstantIndexFieldData.Builder(type, name(), CoreValuesSourceType.BYTES); } @@ -112,13 +102,11 @@ public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup lookup, @Override public Query existsQuery(QueryShardContext context) { - emitTypesDeprecationWarning(); return new 
MatchAllDocsQuery(); } @Override protected boolean matches(String pattern, boolean caseInsensitive, QueryShardContext context) { - emitTypesDeprecationWarning(); if (type == null) { return false; } @@ -136,7 +124,6 @@ public Query rangeQuery( DateMathParser parser, QueryShardContext context ) { - emitTypesDeprecationWarning(); BytesRef lower = (BytesRef) lowerTerm; BytesRef upper = (BytesRef) upperTerm; if (includeLower) { diff --git a/server/src/main/java/org/opensearch/indices/IndicesModule.java b/server/src/main/java/org/opensearch/indices/IndicesModule.java index e685ea52aa5ca..9a7b91f020e36 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesModule.java +++ b/server/src/main/java/org/opensearch/indices/IndicesModule.java @@ -64,7 +64,6 @@ import org.opensearch.index.mapper.SeqNoFieldMapper; import org.opensearch.index.mapper.SourceFieldMapper; import org.opensearch.index.mapper.TextFieldMapper; -import org.opensearch.index.mapper.TypeFieldMapper; import org.opensearch.index.mapper.VersionFieldMapper; import org.opensearch.index.seqno.RetentionLeaseBackgroundSyncAction; import org.opensearch.index.seqno.RetentionLeaseSyncAction; @@ -185,7 +184,6 @@ private static Map initBuiltInMetadataMa builtInMetadataMappers.put(IndexFieldMapper.NAME, IndexFieldMapper.PARSER); builtInMetadataMappers.put(DataStreamFieldMapper.NAME, DataStreamFieldMapper.PARSER); builtInMetadataMappers.put(SourceFieldMapper.NAME, SourceFieldMapper.PARSER); - builtInMetadataMappers.put(TypeFieldMapper.NAME, TypeFieldMapper.PARSER); builtInMetadataMappers.put(VersionFieldMapper.NAME, VersionFieldMapper.PARSER); builtInMetadataMappers.put(SeqNoFieldMapper.NAME, SeqNoFieldMapper.PARSER); // _field_names must be added last so that it has a chance to see all the other mappers diff --git a/server/src/main/java/org/opensearch/search/lookup/LeafFieldsLookup.java b/server/src/main/java/org/opensearch/search/lookup/LeafFieldsLookup.java index 14c5dade52c87..62b040dfdc8d7 100644 --- 
a/server/src/main/java/org/opensearch/search/lookup/LeafFieldsLookup.java +++ b/server/src/main/java/org/opensearch/search/lookup/LeafFieldsLookup.java @@ -34,10 +34,8 @@ import org.apache.lucene.index.LeafReader; import org.opensearch.OpenSearchParseException; import org.opensearch.index.fieldvisitor.SingleFieldsVisitor; -import org.opensearch.index.mapper.DocumentMapper; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.MapperService; -import org.opensearch.index.mapper.TypeFieldMapper; import java.io.IOException; import java.util.ArrayList; @@ -147,22 +145,12 @@ private FieldLookup loadFieldData(String name) { cachedFieldData.put(name, data); } if (data.fields() == null) { - List values; - if (TypeFieldMapper.NAME.equals(data.fieldType().name())) { - TypeFieldMapper.emitTypesDeprecationWarning(); - values = new ArrayList<>(1); - final DocumentMapper mapper = mapperService.documentMapper(); - if (mapper != null) { - values.add(mapper.type()); - } - } else { - values = new ArrayList(2); - SingleFieldsVisitor visitor = new SingleFieldsVisitor(data.fieldType(), values); - try { - reader.document(docId, visitor); - } catch (IOException e) { - throw new OpenSearchParseException("failed to load field [{}]", e, name); - } + List values = new ArrayList<>(2); + SingleFieldsVisitor visitor = new SingleFieldsVisitor(data.fieldType(), values); + try { + reader.document(docId, visitor); + } catch (IOException e) { + throw new OpenSearchParseException("failed to load field [{}]", e, name); } data.fields(singletonMap(data.fieldType().name(), values)); } diff --git a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java index c33adf3bcb558..cbae55a047a1e 100644 --- a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java @@ -381,7 +381,7 @@ public 
void testSegmentsWithMergeFlag() throws Exception { } public void testSegmentsWithIndexSort() throws Exception { - Sort indexSort = new Sort(new SortedSetSortField("_type", false)); + Sort indexSort = new Sort(new SortedSetSortField("field", false)); try ( Store store = createStore(); Engine engine = createEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE, null, null, null, indexSort, null) diff --git a/server/src/test/java/org/opensearch/index/get/GetResultTests.java b/server/src/test/java/org/opensearch/index/get/GetResultTests.java index 9519b83fa54b1..03621f83e8af2 100644 --- a/server/src/test/java/org/opensearch/index/get/GetResultTests.java +++ b/server/src/test/java/org/opensearch/index/get/GetResultTests.java @@ -46,7 +46,6 @@ import org.opensearch.index.mapper.IndexFieldMapper; import org.opensearch.index.mapper.SeqNoFieldMapper; import org.opensearch.index.mapper.SourceFieldMapper; -import org.opensearch.index.mapper.TypeFieldMapper; import org.opensearch.index.mapper.VersionFieldMapper; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.RandomObjects; @@ -372,9 +371,8 @@ public static Tuple, Map> rand Map fields = new HashMap<>(numFields); Map expectedFields = new HashMap<>(numFields); // As we are using this to construct a GetResult object that already contains - // index, type, id, version, seqNo, and source fields, we need to exclude them from random fields - Predicate excludeMetaFieldFilter = field -> field.equals(TypeFieldMapper.NAME) - || field.equals(IndexFieldMapper.NAME) + // index, id, version, seqNo, and source fields, we need to exclude them from random fields + Predicate excludeMetaFieldFilter = field -> field.equals(IndexFieldMapper.NAME) || field.equals(IdFieldMapper.NAME) || field.equals(VersionFieldMapper.NAME) || field.equals(SourceFieldMapper.NAME) diff --git a/server/src/test/java/org/opensearch/index/mapper/TypeFieldMapperTests.java 
b/server/src/test/java/org/opensearch/index/mapper/TypeFieldMapperTests.java deleted file mode 100644 index 89eee655ca9d4..0000000000000 --- a/server/src/test/java/org/opensearch/index/mapper/TypeFieldMapperTests.java +++ /dev/null @@ -1,106 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
- */ - -package org.opensearch.index.mapper; - -import org.apache.lucene.index.DirectoryReader; -import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.index.IndexableField; -import org.apache.lucene.index.SortedSetDocValues; -import org.apache.lucene.store.Directory; -import org.apache.lucene.util.BytesRef; -import org.opensearch.common.bytes.BytesArray; -import org.opensearch.common.compress.CompressedXContent; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.index.IndexService; -import org.opensearch.index.fielddata.IndexFieldDataCache; -import org.opensearch.index.fielddata.IndexOrdinalsFieldData; -import org.opensearch.index.fielddata.LeafOrdinalsFieldData; -import org.opensearch.index.mapper.MapperService.MergeReason; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; -import org.opensearch.plugins.Plugin; -import org.opensearch.test.OpenSearchSingleNodeTestCase; -import org.opensearch.test.InternalSettingsPlugin; - -import java.io.IOException; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.function.Function; - -public class TypeFieldMapperTests extends OpenSearchSingleNodeTestCase { - - @Override - protected Collection> getPlugins() { - return pluginList(InternalSettingsPlugin.class); - } - - public void testDocValuesSingleType() throws Exception { - testDocValues(this::createIndex); - assertWarnings("[types removal] Using the _type field in queries and aggregations is deprecated, prefer to use a field instead."); - } - - public static void testDocValues(Function createIndex) throws IOException { - MapperService mapperService = createIndex.apply("test").mapperService(); - DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE); - ParsedDocument document = mapper.parse(new SourceToParse("index", "id", new BytesArray("{}"), 
XContentType.JSON)); - - Directory dir = newDirectory(); - IndexWriter w = new IndexWriter(dir, newIndexWriterConfig()); - w.addDocument(document.rootDoc()); - DirectoryReader r = DirectoryReader.open(w); - w.close(); - - MappedFieldType ft = mapperService.fieldType(TypeFieldMapper.NAME); - IndexOrdinalsFieldData fd = (IndexOrdinalsFieldData) ft.fielddataBuilder( - "test", - () -> { throw new UnsupportedOperationException(); } - ).build(new IndexFieldDataCache.None(), new NoneCircuitBreakerService()); - LeafOrdinalsFieldData afd = fd.load(r.leaves().get(0)); - SortedSetDocValues values = afd.getOrdinalsValues(); - assertTrue(values.advanceExact(0)); - assertEquals(0, values.nextOrd()); - assertEquals(SortedSetDocValues.NO_MORE_ORDS, values.nextOrd()); - assertEquals(new BytesRef("type"), values.lookupOrd(0)); - r.close(); - dir.close(); - } - - public void testDefaults() throws IOException { - Settings indexSettings = Settings.EMPTY; - MapperService mapperService = createIndex("test", indexSettings).mapperService(); - DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE); - ParsedDocument document = mapper.parse(new SourceToParse("index", "id", new BytesArray("{}"), XContentType.JSON)); - assertEquals(Collections.emptyList(), Arrays.asList(document.rootDoc().getFields(TypeFieldMapper.NAME))); - } -} diff --git a/server/src/test/java/org/opensearch/index/mapper/TypeFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/TypeFieldTypeTests.java deleted file mode 100644 index 66377a16b90d4..0000000000000 --- a/server/src/test/java/org/opensearch/index/mapper/TypeFieldTypeTests.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.index.mapper; - -import org.apache.lucene.search.MatchAllDocsQuery; -import org.apache.lucene.search.MatchNoDocsQuery; -import org.apache.lucene.search.Query; -import org.opensearch.index.query.QueryShardContext; -import org.opensearch.test.OpenSearchTestCase; -import org.mockito.Mockito; - -import java.util.Arrays; - -public class TypeFieldTypeTests extends OpenSearchTestCase { - - public void testTermsQuery() { - QueryShardContext context = Mockito.mock(QueryShardContext.class); - - TypeFieldMapper.TypeFieldType ft = new TypeFieldMapper.TypeFieldType("_doc"); - Query query = ft.termQuery("my_type", context); - assertEquals(new MatchNoDocsQuery(), query); - - query = ft.termQuery("_doc", context); - assertEquals(new MatchAllDocsQuery(), query); - - query = ft.termsQuery(Arrays.asList("_doc", "type", "foo"), context); - assertEquals(new MatchAllDocsQuery(), query); - - query = ft.termsQuery(Arrays.asList("type", "foo"), context); - assertEquals(new MatchNoDocsQuery(), query); - - query = ft.termQueryCaseInsensitive("_DOC", context); - 
assertEquals(new MatchAllDocsQuery(), query); - - assertWarnings("[types removal] Using the _type field in queries and aggregations is deprecated, prefer to use a field instead."); - } -} diff --git a/server/src/test/java/org/opensearch/index/query/TermQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/TermQueryBuilderTests.java index 9cac88a256a0a..cc877c7590c6a 100644 --- a/server/src/test/java/org/opensearch/index/query/TermQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/TermQueryBuilderTests.java @@ -41,7 +41,6 @@ import org.apache.lucene.search.TermQuery; import org.opensearch.common.ParsingException; import org.opensearch.index.mapper.MappedFieldType; -import org.opensearch.index.mapper.TypeFieldMapper; import java.io.IOException; @@ -198,12 +197,6 @@ public void testParseAndSerializeBigInteger() throws IOException { assertSerialization(parsedQuery); } - public void testTypeField() throws IOException { - TermQueryBuilder builder = QueryBuilders.termQuery("_type", "value1"); - builder.doToQuery(createShardContext()); - assertWarnings(TypeFieldMapper.TYPES_DEPRECATION_MESSAGE); - } - public void testRewriteIndexQueryToMatchNone() throws IOException { TermQueryBuilder query = QueryBuilders.termQuery("_index", "does_not_exist"); QueryShardContext queryShardContext = createShardContext(); diff --git a/server/src/test/java/org/opensearch/index/query/TermsQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/TermsQueryBuilderTests.java index ea93d7a65b951..e9a285208f1a6 100644 --- a/server/src/test/java/org/opensearch/index/query/TermsQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/TermsQueryBuilderTests.java @@ -50,7 +50,6 @@ import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentParser; import org.opensearch.index.get.GetResult; -import org.opensearch.index.mapper.TypeFieldMapper; import 
org.opensearch.indices.TermsLookup; import org.opensearch.test.AbstractQueryTestCase; import org.hamcrest.CoreMatchers; @@ -351,12 +350,6 @@ public void testConversion() { assertEquals(Arrays.asList(5, 42d), TermsQueryBuilder.convertBack(TermsQueryBuilder.convert(list))); } - public void testTypeField() throws IOException { - TermsQueryBuilder builder = QueryBuilders.termsQuery("_type", "value1", "value2"); - builder.doToQuery(createShardContext()); - assertWarnings(TypeFieldMapper.TYPES_DEPRECATION_MESSAGE); - } - public void testRewriteIndexQueryToMatchNone() throws IOException { TermsQueryBuilder query = new TermsQueryBuilder("_index", "does_not_exist", "also_does_not_exist"); QueryShardContext queryShardContext = createShardContext(); diff --git a/server/src/test/java/org/opensearch/index/query/WildcardQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/WildcardQueryBuilderTests.java index c8a4207f21c25..9e99b7667f3e0 100644 --- a/server/src/test/java/org/opensearch/index/query/WildcardQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/WildcardQueryBuilderTests.java @@ -36,7 +36,6 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.WildcardQuery; import org.opensearch.common.ParsingException; -import org.opensearch.index.mapper.TypeFieldMapper; import org.opensearch.test.AbstractQueryTestCase; import java.io.IOException; @@ -150,12 +149,6 @@ public void testParseFailsWithMultipleFields() throws IOException { assertEquals("[wildcard] query doesn't support multiple fields, found [user1] and [user2]", e.getMessage()); } - public void testTypeField() throws IOException { - WildcardQueryBuilder builder = QueryBuilders.wildcardQuery("_type", "doc*"); - builder.doToQuery(createShardContext()); - assertWarnings(TypeFieldMapper.TYPES_DEPRECATION_MESSAGE); - } - public void testRewriteIndexQueryToMatchNone() throws IOException { WildcardQueryBuilder query = new WildcardQueryBuilder("_index", 
"does_not_exist"); QueryShardContext queryShardContext = createShardContext(); diff --git a/server/src/test/java/org/opensearch/indices/IndicesModuleTests.java b/server/src/test/java/org/opensearch/indices/IndicesModuleTests.java index c2298f60e4a2b..8123f044798bd 100644 --- a/server/src/test/java/org/opensearch/indices/IndicesModuleTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesModuleTests.java @@ -45,7 +45,6 @@ import org.opensearch.index.mapper.SeqNoFieldMapper; import org.opensearch.index.mapper.SourceFieldMapper; import org.opensearch.index.mapper.TextFieldMapper; -import org.opensearch.index.mapper.TypeFieldMapper; import org.opensearch.index.mapper.VersionFieldMapper; import org.opensearch.indices.mapper.MapperRegistry; import org.opensearch.plugins.MapperPlugin; @@ -95,7 +94,6 @@ public Map getMetadataMappers() { IndexFieldMapper.NAME, DataStreamFieldMapper.NAME, SourceFieldMapper.NAME, - TypeFieldMapper.NAME, VersionFieldMapper.NAME, SeqNoFieldMapper.NAME, FieldNamesFieldMapper.NAME }; diff --git a/server/src/test/java/org/opensearch/search/aggregations/support/ValuesSourceConfigTests.java b/server/src/test/java/org/opensearch/search/aggregations/support/ValuesSourceConfigTests.java index f866d817a7c43..33d9a63f61a35 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/support/ValuesSourceConfigTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/support/ValuesSourceConfigTests.java @@ -40,7 +40,6 @@ import org.opensearch.index.IndexService; import org.opensearch.index.engine.Engine; import org.opensearch.index.fielddata.SortedBinaryDocValues; -import org.opensearch.index.mapper.TypeFieldMapper; import org.opensearch.index.query.QueryShardContext; import org.opensearch.test.OpenSearchSingleNodeTestCase; @@ -310,25 +309,6 @@ public void testUnmappedBoolean() throws Exception { } } - public void testTypeFieldDeprecation() { - IndexService indexService = createIndex("index", Settings.EMPTY, "type"); - 
try (Engine.Searcher searcher = indexService.getShard(0).acquireSearcher("test")) { - QueryShardContext context = indexService.newQueryShardContext(0, searcher, () -> 42L, null); - - ValuesSourceConfig config = ValuesSourceConfig.resolve( - context, - null, - TypeFieldMapper.NAME, - null, - null, - null, - null, - CoreValuesSourceType.BYTES - ); - assertWarnings(TypeFieldMapper.TYPES_DEPRECATION_MESSAGE); - } - } - public void testFieldAlias() throws Exception { IndexService indexService = createIndex("index", Settings.EMPTY, "type", "field", "type=keyword", "alias", "type=alias,path=field"); client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); diff --git a/server/src/test/java/org/opensearch/search/fetch/subphase/FieldFetcherTests.java b/server/src/test/java/org/opensearch/search/fetch/subphase/FieldFetcherTests.java index 8147d1afb8c15..0aff1efff88ef 100644 --- a/server/src/test/java/org/opensearch/search/fetch/subphase/FieldFetcherTests.java +++ b/server/src/test/java/org/opensearch/search/fetch/subphase/FieldFetcherTests.java @@ -117,6 +117,11 @@ public void testMetadataFields() throws IOException { Map fields = fetchFields(mapperService, source, "_routing"); assertTrue(fields.isEmpty()); + + // The _type field was deprecated in 7.x and is not supported in 2.0. So the behavior + // should be the same as if the field didn't exist. 
+ fields = fetchFields(mapperService, source, "_type"); + assertTrue(fields.isEmpty()); } public void testFetchAllFields() throws IOException { From e9ad90b9f6bce1fafc9eac35ad74fd719a051c01 Mon Sep 17 00:00:00 2001 From: Owais Kazi Date: Wed, 20 Apr 2022 20:36:58 -0700 Subject: [PATCH 108/653] Removed binary file from linelint (#3015) Signed-off-by: Owais Kazi --- .gitignore | 3 +++ .linelint.yml | 1 - 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index e2cb6d8d37a82..8ea328ce2f1e9 100644 --- a/.gitignore +++ b/.gitignore @@ -56,3 +56,6 @@ testfixtures_shared/ # These are generated from .ci/jobs.t .ci/jobs/ + +# build files generated +doc-tools/missing-doclet/bin/ diff --git a/.linelint.yml b/.linelint.yml index 7b7bc162eef28..6240c8b3d7a96 100644 --- a/.linelint.yml +++ b/.linelint.yml @@ -12,7 +12,6 @@ ignore: - 'buildSrc/src/testKit/opensearch.build/NOTICE' - 'server/licenses/apache-log4j-extras-DEPENDENCIES' # Empty files - - 'doc-tools/missing-doclet/bin/main/org/opensearch/missingdoclet/MissingDoclet.class' - 'buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/build.gradle' - 'buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/distribution/archives/oss-darwin-tar/build.gradle' - 'buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/distribution/bwc/bugfix/build.gradle' From 6517eeca507943757475fbe4427305bfc10b3d17 Mon Sep 17 00:00:00 2001 From: Tushar Kharbanda Date: Thu, 21 Apr 2022 19:21:44 +0530 Subject: [PATCH 109/653] Support task resource tracking in OpenSearch (#2639) * Add Task id in Thread Context Signed-off-by: Tushar Kharbanda * Add resource tracking update support for tasks Signed-off-by: Tushar Kharbanda * List tasks action support for task resource refresh Signed-off-by: Tushar Kharbanda * Handle task unregistration case on same thread Signed-off-by: Tushar Kharbanda * Add lazy initialisation for 
RunnableTaskExecutionListener Signed-off-by: Tushar Kharbanda * Segregate resource tracking logic to a separate service. Signed-off-by: Tushar Kharbanda * Check for running threads during task unregister Signed-off-by: Tushar Kharbanda * Moved thread context logic to resource tracking service Signed-off-by: Tushar Kharbanda * preserve task id in thread context even after stash Signed-off-by: Tushar Kharbanda * Add null check for resource tracking service Signed-off-by: Tushar Kharbanda * Tracking service tests and minor refactoring Signed-off-by: Tushar Kharbanda * Preserve task id fix with test Signed-off-by: Tushar Kharbanda * Minor test changes and Task tracking call update Signed-off-by: Tushar Kharbanda * Fix Auto Queue executor method's signature Signed-off-by: Tushar Kharbanda * Make task runnable task listener factory implement consumer Signed-off-by: Tushar Kharbanda * Use reflection for ThreadMXBean Signed-off-by: Tushar Kharbanda * Formatting Signed-off-by: Tushar Kharbanda * Replace RunnableTaskExecutionListenerFactory with AtomicReference Signed-off-by: Tushar Kharbanda * Revert "Use reflection for ThreadMXBean" This reverts commit cbcf3c525bf516fb7164f0221491a7b25c1f96ec. 
Signed-off-by: Tushar Kharbanda * Suppress Warning related to ThreadMXBean Signed-off-by: Tushar Kharbanda * Add separate method for task resource tracking supported check Signed-off-by: Tushar Kharbanda * Enabled setting by default Signed-off-by: Tushar Kharbanda * Add debug logs for stale context id Signed-off-by: Tushar Kharbanda * Remove hardcoded task overhead in tests Signed-off-by: Tushar Kharbanda * Bump stale task id in thread context log level to warn Signed-off-by: Tushar Kharbanda * Improve assertions and logging Signed-off-by: Tushar Kharbanda Co-authored-by: Tushar Kharbanda --- .../admin/cluster/node/tasks/TasksIT.java | 6 + .../tasks/list/TransportListTasksAction.java | 13 +- .../action/search/SearchShardTask.java | 5 + .../opensearch/action/search/SearchTask.java | 5 + .../action/support/TransportAction.java | 78 ++- .../org/opensearch/cluster/ClusterModule.java | 2 + .../common/settings/ClusterSettings.java | 4 +- .../util/concurrent/OpenSearchExecutors.java | 52 +- .../common/util/concurrent/ThreadContext.java | 16 +- .../main/java/org/opensearch/node/Node.java | 13 +- .../main/java/org/opensearch/tasks/Task.java | 17 +- .../org/opensearch/tasks/TaskManager.java | 27 +- .../tasks/TaskResourceTrackingService.java | 255 +++++++ .../opensearch/tasks/ThreadResourceInfo.java | 10 +- .../AutoQueueAdjustingExecutorBuilder.java | 19 +- .../RunnableTaskExecutionListener.java | 33 + .../threadpool/TaskAwareRunnable.java | 90 +++ .../org/opensearch/threadpool/ThreadPool.java | 22 +- .../transport/RequestHandlerRegistry.java | 4 + .../tasks/RecordingTaskManagerListener.java | 3 + .../node/tasks/ResourceAwareTasksTests.java | 633 ++++++++++++++++++ .../node/tasks/TaskManagerTestCase.java | 17 +- .../bulk/TransportBulkActionIngestTests.java | 3 +- .../util/concurrent/ThreadContextTests.java | 10 + .../snapshots/SnapshotResiliencyTests.java | 3 + .../opensearch/tasks/TaskManagerTests.java | 6 +- .../TaskResourceTrackingServiceTests.java | 97 +++ 
.../test/tasks/MockTaskManager.java | 16 + .../test/tasks/MockTaskManagerListener.java | 3 + .../opensearch/threadpool/TestThreadPool.java | 20 +- 30 files changed, 1421 insertions(+), 61 deletions(-) create mode 100644 server/src/main/java/org/opensearch/tasks/TaskResourceTrackingService.java create mode 100644 server/src/main/java/org/opensearch/threadpool/RunnableTaskExecutionListener.java create mode 100644 server/src/main/java/org/opensearch/threadpool/TaskAwareRunnable.java create mode 100644 server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java create mode 100644 server/src/test/java/org/opensearch/tasks/TaskResourceTrackingServiceTests.java diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java index ac0ae44eb732e..c74f992970545 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java @@ -470,6 +470,9 @@ public void onTaskUnregistered(Task task) {} @Override public void waitForTaskCompletion(Task task) {} + + @Override + public void taskExecutionStarted(Task task, Boolean closeableInvoked) {} }); } // Need to run the task in a separate thread because node client's .execute() is blocked by our task listener @@ -651,6 +654,9 @@ public void waitForTaskCompletion(Task task) { waitForWaitingToStart.countDown(); } + @Override + public void taskExecutionStarted(Task task, Boolean closeableInvoked) {} + @Override public void onTaskRegistered(Task task) {} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java index b7875c5f99774..df448d2665434 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java @@ -42,6 +42,7 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskInfo; +import org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -60,8 +61,15 @@ public static long waitForCompletionTimeout(TimeValue timeout) { private static final TimeValue DEFAULT_WAIT_FOR_COMPLETION_TIMEOUT = timeValueSeconds(30); + private final TaskResourceTrackingService taskResourceTrackingService; + @Inject - public TransportListTasksAction(ClusterService clusterService, TransportService transportService, ActionFilters actionFilters) { + public TransportListTasksAction( + ClusterService clusterService, + TransportService transportService, + ActionFilters actionFilters, + TaskResourceTrackingService taskResourceTrackingService + ) { super( ListTasksAction.NAME, clusterService, @@ -72,6 +80,7 @@ public TransportListTasksAction(ClusterService clusterService, TransportService TaskInfo::new, ThreadPool.Names.MANAGEMENT ); + this.taskResourceTrackingService = taskResourceTrackingService; } @Override @@ -101,6 +110,8 @@ protected void processTasks(ListTasksRequest request, Consumer operation) } taskManager.waitForTaskCompletion(task, timeoutNanos); }); + } else { + operation = operation.andThen(taskResourceTrackingService::refreshResourceStats); } super.processTasks(request, operation); } diff --git a/server/src/main/java/org/opensearch/action/search/SearchShardTask.java b/server/src/main/java/org/opensearch/action/search/SearchShardTask.java index 2e506c6fe181b..f09701c7769eb 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchShardTask.java +++ 
b/server/src/main/java/org/opensearch/action/search/SearchShardTask.java @@ -49,6 +49,11 @@ public SearchShardTask(long id, String type, String action, String description, super(id, type, action, description, parentTaskId, headers); } + @Override + public boolean supportsResourceTracking() { + return true; + } + @Override public boolean shouldCancelChildrenOnCancellation() { return false; diff --git a/server/src/main/java/org/opensearch/action/search/SearchTask.java b/server/src/main/java/org/opensearch/action/search/SearchTask.java index 7f80f7836be6c..bf6f141a3e829 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchTask.java +++ b/server/src/main/java/org/opensearch/action/search/SearchTask.java @@ -78,6 +78,11 @@ public final String getDescription() { return descriptionSupplier.get(); } + @Override + public boolean supportsResourceTracking() { + return true; + } + /** * Attach a {@link SearchProgressListener} to this task. */ diff --git a/server/src/main/java/org/opensearch/action/support/TransportAction.java b/server/src/main/java/org/opensearch/action/support/TransportAction.java index 84ece8cfec530..83fca715c7e28 100644 --- a/server/src/main/java/org/opensearch/action/support/TransportAction.java +++ b/server/src/main/java/org/opensearch/action/support/TransportAction.java @@ -40,6 +40,7 @@ import org.opensearch.action.ActionResponse; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; +import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskCancelledException; import org.opensearch.tasks.TaskId; @@ -88,31 +89,39 @@ public final Task execute(Request request, ActionListener listener) { */ final Releasable unregisterChildNode = registerChildNode(request.getParentTask()); final Task task; + try { task = taskManager.register("transport", actionName, request); } catch (TaskCancelledException e) { unregisterChildNode.close(); throw e; } 
- execute(task, request, new ActionListener() { - @Override - public void onResponse(Response response) { - try { - Releasables.close(unregisterChildNode, () -> taskManager.unregister(task)); - } finally { - listener.onResponse(response); + + ThreadContext.StoredContext storedContext = taskManager.taskExecutionStarted(task); + try { + execute(task, request, new ActionListener() { + @Override + public void onResponse(Response response) { + try { + Releasables.close(unregisterChildNode, () -> taskManager.unregister(task)); + } finally { + listener.onResponse(response); + } } - } - @Override - public void onFailure(Exception e) { - try { - Releasables.close(unregisterChildNode, () -> taskManager.unregister(task)); - } finally { - listener.onFailure(e); + @Override + public void onFailure(Exception e) { + try { + Releasables.close(unregisterChildNode, () -> taskManager.unregister(task)); + } finally { + listener.onFailure(e); + } } - } - }); + }); + } finally { + storedContext.close(); + } + return task; } @@ -129,25 +138,30 @@ public final Task execute(Request request, TaskListener listener) { unregisterChildNode.close(); throw e; } - execute(task, request, new ActionListener() { - @Override - public void onResponse(Response response) { - try { - Releasables.close(unregisterChildNode, () -> taskManager.unregister(task)); - } finally { - listener.onResponse(task, response); + ThreadContext.StoredContext storedContext = taskManager.taskExecutionStarted(task); + try { + execute(task, request, new ActionListener() { + @Override + public void onResponse(Response response) { + try { + Releasables.close(unregisterChildNode, () -> taskManager.unregister(task)); + } finally { + listener.onResponse(task, response); + } } - } - @Override - public void onFailure(Exception e) { - try { - Releasables.close(unregisterChildNode, () -> taskManager.unregister(task)); - } finally { - listener.onFailure(task, e); + @Override + public void onFailure(Exception e) { + try { + 
Releasables.close(unregisterChildNode, () -> taskManager.unregister(task)); + } finally { + listener.onFailure(task, e); + } } - } - }); + }); + } finally { + storedContext.close(); + } return task; } diff --git a/server/src/main/java/org/opensearch/cluster/ClusterModule.java b/server/src/main/java/org/opensearch/cluster/ClusterModule.java index c85691b80d7c3..b9f3a2a99f0b7 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterModule.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterModule.java @@ -94,6 +94,7 @@ import org.opensearch.script.ScriptMetadata; import org.opensearch.snapshots.SnapshotsInfoService; import org.opensearch.tasks.Task; +import org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.tasks.TaskResultsService; import java.util.ArrayList; @@ -394,6 +395,7 @@ protected void configure() { bind(NodeMappingRefreshAction.class).asEagerSingleton(); bind(MappingUpdatedAction.class).asEagerSingleton(); bind(TaskResultsService.class).asEagerSingleton(); + bind(TaskResourceTrackingService.class).asEagerSingleton(); bind(AllocationDeciders.class).toInstance(allocationDeciders); bind(ShardsAllocator.class).toInstance(shardsAllocator); } diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index c758b7d2918e7..4cacc3bcf37eb 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -40,6 +40,7 @@ import org.opensearch.index.ShardIndexingPressureMemoryManager; import org.opensearch.index.ShardIndexingPressureSettings; import org.opensearch.index.ShardIndexingPressureStore; +import org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.watcher.ResourceWatcherService; import org.opensearch.action.admin.cluster.configuration.TransportAddVotingConfigExclusionsAction; import 
org.opensearch.action.admin.indices.close.TransportCloseIndexAction; @@ -568,7 +569,8 @@ public void apply(Settings value, Settings current, Settings previous) { ShardIndexingPressureMemoryManager.THROUGHPUT_DEGRADATION_LIMITS, ShardIndexingPressureMemoryManager.SUCCESSFUL_REQUEST_ELAPSED_TIMEOUT, ShardIndexingPressureMemoryManager.MAX_OUTSTANDING_REQUESTS, - IndexingPressure.MAX_INDEXING_BYTES + IndexingPressure.MAX_INDEXING_BYTES, + TaskResourceTrackingService.TASK_RESOURCE_TRACKING_ENABLED ) ) ); diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/OpenSearchExecutors.java b/server/src/main/java/org/opensearch/common/util/concurrent/OpenSearchExecutors.java index 5a967528a6ae2..9e28bb2b795c3 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/OpenSearchExecutors.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/OpenSearchExecutors.java @@ -40,6 +40,8 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.node.Node; +import org.opensearch.threadpool.RunnableTaskExecutionListener; +import org.opensearch.threadpool.TaskAwareRunnable; import java.util.List; import java.util.Optional; @@ -55,6 +57,7 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; public class OpenSearchExecutors { @@ -172,14 +175,39 @@ public static OpenSearchThreadPoolExecutor newFixed( ); } + public static OpenSearchThreadPoolExecutor newAutoQueueFixed( + String name, + int size, + int initialQueueCapacity, + int minQueueSize, + int maxQueueSize, + int frameSize, + TimeValue targetedResponseTime, + ThreadFactory threadFactory, + ThreadContext contextHolder + ) { + return newAutoQueueFixed( + name, + size, + initialQueueCapacity, + minQueueSize, + maxQueueSize, + frameSize, + targetedResponseTime, + 
threadFactory, + contextHolder, + null + ); + } + /** * Return a new executor that will automatically adjust the queue size based on queue throughput. * - * @param size number of fixed threads to use for executing tasks + * @param size number of fixed threads to use for executing tasks * @param initialQueueCapacity initial size of the executor queue - * @param minQueueSize minimum queue size that the queue can be adjusted to - * @param maxQueueSize maximum queue size that the queue can be adjusted to - * @param frameSize number of tasks during which stats are collected before adjusting queue size + * @param minQueueSize minimum queue size that the queue can be adjusted to + * @param maxQueueSize maximum queue size that the queue can be adjusted to + * @param frameSize number of tasks during which stats are collected before adjusting queue size */ public static OpenSearchThreadPoolExecutor newAutoQueueFixed( String name, @@ -190,7 +218,8 @@ public static OpenSearchThreadPoolExecutor newAutoQueueFixed( int frameSize, TimeValue targetedResponseTime, ThreadFactory threadFactory, - ThreadContext contextHolder + ThreadContext contextHolder, + AtomicReference runnableTaskListener ) { if (initialQueueCapacity <= 0) { throw new IllegalArgumentException( @@ -201,6 +230,17 @@ public static OpenSearchThreadPoolExecutor newAutoQueueFixed( ConcurrentCollections.newBlockingQueue(), initialQueueCapacity ); + + Function runnableWrapper; + if (runnableTaskListener != null) { + runnableWrapper = (runnable) -> { + TaskAwareRunnable taskAwareRunnable = new TaskAwareRunnable(contextHolder, runnable, runnableTaskListener); + return new TimedRunnable(taskAwareRunnable); + }; + } else { + runnableWrapper = TimedRunnable::new; + } + return new QueueResizingOpenSearchThreadPoolExecutor( name, size, @@ -210,7 +250,7 @@ public static OpenSearchThreadPoolExecutor newAutoQueueFixed( queue, minQueueSize, maxQueueSize, - TimedRunnable::new, + runnableWrapper, frameSize, targetedResponseTime, 
threadFactory, diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java index d844a8f158ea4..35d7d925ce106 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java @@ -66,6 +66,7 @@ import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_COUNT; import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_SIZE; +import static org.opensearch.tasks.TaskResourceTrackingService.TASK_ID; /** * A ThreadContext is a map of string headers and a transient map of keyed objects that are associated with @@ -134,16 +135,23 @@ public StoredContext stashContext() { * This is needed so the DeprecationLogger in another thread can see the value of X-Opaque-ID provided by a user. * Otherwise when context is stash, it should be empty. 
*/ + + ThreadContextStruct threadContextStruct = DEFAULT_CONTEXT; + if (context.requestHeaders.containsKey(Task.X_OPAQUE_ID)) { - ThreadContextStruct threadContextStruct = DEFAULT_CONTEXT.putHeaders( + threadContextStruct = threadContextStruct.putHeaders( MapBuilder.newMapBuilder() .put(Task.X_OPAQUE_ID, context.requestHeaders.get(Task.X_OPAQUE_ID)) .immutableMap() ); - threadLocal.set(threadContextStruct); - } else { - threadLocal.set(DEFAULT_CONTEXT); } + + if (context.transientHeaders.containsKey(TASK_ID)) { + threadContextStruct = threadContextStruct.putTransient(TASK_ID, context.transientHeaders.get(TASK_ID)); + } + + threadLocal.set(threadContextStruct); + return () -> { // If the node and thus the threadLocal get closed while this task // is still executing, we don't want this runnable to fail with an diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 46400e5c8d269..c929c7c013b13 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -37,6 +37,8 @@ import org.apache.lucene.util.Constants; import org.apache.lucene.util.SetOnce; import org.opensearch.index.IndexingPressureService; +import org.opensearch.tasks.TaskResourceTrackingService; +import org.opensearch.threadpool.RunnableTaskExecutionListener; import org.opensearch.watcher.ResourceWatcherService; import org.opensearch.Assertions; import org.opensearch.Build; @@ -213,6 +215,7 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; import java.util.function.UnaryOperator; import java.util.stream.Collectors; @@ -324,6 +327,7 @@ public static class DiscoverySettings { private final LocalNodeFactory localNodeFactory; private final NodeService nodeService; final NamedWriteableRegistry namedWriteableRegistry; + 
private final AtomicReference runnableTaskListener; public Node(Environment environment) { this(environment, Collections.emptyList(), true); @@ -433,7 +437,8 @@ protected Node( final List> executorBuilders = pluginsService.getExecutorBuilders(settings); - final ThreadPool threadPool = new ThreadPool(settings, executorBuilders.toArray(new ExecutorBuilder[0])); + runnableTaskListener = new AtomicReference<>(); + final ThreadPool threadPool = new ThreadPool(settings, runnableTaskListener, executorBuilders.toArray(new ExecutorBuilder[0])); resourcesToClose.add(() -> ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS)); final ResourceWatcherService resourceWatcherService = new ResourceWatcherService(settings, threadPool); resourcesToClose.add(resourceWatcherService); @@ -1057,6 +1062,11 @@ public Node start() throws NodeValidationException { TransportService transportService = injector.getInstance(TransportService.class); transportService.getTaskManager().setTaskResultsService(injector.getInstance(TaskResultsService.class)); transportService.getTaskManager().setTaskCancellationService(new TaskCancellationService(transportService)); + + TaskResourceTrackingService taskResourceTrackingService = injector.getInstance(TaskResourceTrackingService.class); + transportService.getTaskManager().setTaskResourceTrackingService(taskResourceTrackingService); + runnableTaskListener.set(taskResourceTrackingService); + transportService.start(); assert localNodeFactory.getNode() != null; assert transportService.getLocalNode().equals(localNodeFactory.getNode()) @@ -1490,4 +1500,5 @@ DiscoveryNode getNode() { return localNode.get(); } } + } diff --git a/server/src/main/java/org/opensearch/tasks/Task.java b/server/src/main/java/org/opensearch/tasks/Task.java index 62453d08724ce..a51af17ae8ea2 100644 --- a/server/src/main/java/org/opensearch/tasks/Task.java +++ b/server/src/main/java/org/opensearch/tasks/Task.java @@ -32,8 +32,6 @@ package org.opensearch.tasks; -import 
org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.opensearch.action.ActionResponse; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.io.stream.NamedWriteable; @@ -53,8 +51,6 @@ */ public class Task { - private static final Logger logger = LogManager.getLogger(Task.class); - /** * The request header to mark tasks with specific ids */ @@ -289,7 +285,7 @@ public void startThreadResourceTracking(long threadId, ResourceStatsType statsTy ); } } - threadResourceInfoList.add(new ThreadResourceInfo(statsType, resourceUsageMetrics)); + threadResourceInfoList.add(new ThreadResourceInfo(threadId, statsType, resourceUsageMetrics)); } /** @@ -336,6 +332,17 @@ public void stopThreadResourceTracking(long threadId, ResourceStatsType statsTyp throw new IllegalStateException("cannot update final values if active thread resource entry is not present"); } + /** + * Individual tasks can override this if they want to support task resource tracking. We just need to make sure that + * the ThreadPool on which the task runs on have runnable wrapper similar to + * {@link org.opensearch.common.util.concurrent.OpenSearchExecutors#newAutoQueueFixed} + * + * @return true if resource tracking is supported by the task + */ + public boolean supportsResourceTracking() { + return false; + } + /** * Report of the internal status of a task. 
These can vary wildly from task * to task because each task is implemented differently but we should try diff --git a/server/src/main/java/org/opensearch/tasks/TaskManager.java b/server/src/main/java/org/opensearch/tasks/TaskManager.java index 1f6169768f245..37c10dfc0e6ab 100644 --- a/server/src/main/java/org/opensearch/tasks/TaskManager.java +++ b/server/src/main/java/org/opensearch/tasks/TaskManager.java @@ -89,7 +89,9 @@ public class TaskManager implements ClusterStateApplier { private static final TimeValue WAIT_FOR_COMPLETION_POLL = timeValueMillis(100); - /** Rest headers that are copied to the task */ + /** + * Rest headers that are copied to the task + */ private final List taskHeaders; private final ThreadPool threadPool; @@ -103,6 +105,7 @@ public class TaskManager implements ClusterStateApplier { private final Map banedParents = new ConcurrentHashMap<>(); private TaskResultsService taskResultsService; + private final SetOnce taskResourceTrackingService = new SetOnce<>(); private volatile DiscoveryNodes lastDiscoveryNodes = DiscoveryNodes.EMPTY_NODES; @@ -125,6 +128,10 @@ public void setTaskCancellationService(TaskCancellationService taskCancellationS this.cancellationService.set(taskCancellationService); } + public void setTaskResourceTrackingService(TaskResourceTrackingService taskResourceTrackingService) { + this.taskResourceTrackingService.set(taskResourceTrackingService); + } + /** * Registers a task without parent task */ @@ -202,6 +209,11 @@ public void cancel(CancellableTask task, String reason, Runnable listener) { */ public Task unregister(Task task) { logger.trace("unregister task for id: {}", task.getId()); + + if (taskResourceTrackingService.get() != null && task.supportsResourceTracking()) { + taskResourceTrackingService.get().stopTracking(task); + } + if (task instanceof CancellableTask) { CancellableTaskHolder holder = cancellableTasks.remove(task.getId()); if (holder != null) { @@ -361,6 +373,7 @@ public int getBanCount() { * Bans all 
tasks with the specified parent task from execution, cancels all tasks that are currently executing. *

* This method is called when a parent task that has children is cancelled. + * * @return a list of pending cancellable child tasks */ public List setBan(TaskId parentTaskId, String reason) { @@ -448,6 +461,18 @@ public void waitForTaskCompletion(Task task, long untilInNanos) { throw new OpenSearchTimeoutException("Timed out waiting for completion of [{}]", task); } + /** + * Takes actions when a task is registered and its execution starts + * + * @param task getting executed. + * @return AutoCloseable to free up resources (clean up thread context) when task execution block returns + */ + public ThreadContext.StoredContext taskExecutionStarted(Task task) { + if (taskResourceTrackingService.get() == null) return () -> {}; + + return taskResourceTrackingService.get().startTracking(task); + } + private static class CancellableTaskHolder { private final CancellableTask task; private boolean finished = false; diff --git a/server/src/main/java/org/opensearch/tasks/TaskResourceTrackingService.java b/server/src/main/java/org/opensearch/tasks/TaskResourceTrackingService.java new file mode 100644 index 0000000000000..71b829e023385 --- /dev/null +++ b/server/src/main/java/org/opensearch/tasks/TaskResourceTrackingService.java @@ -0,0 +1,255 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.tasks; + +import com.sun.management.ThreadMXBean; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.common.SuppressForbidden; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.common.util.concurrent.ConcurrentMapLong; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.threadpool.RunnableTaskExecutionListener; +import org.opensearch.threadpool.ThreadPool; + +import java.lang.management.ManagementFactory; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +import static org.opensearch.tasks.ResourceStatsType.WORKER_STATS; + +/** + * Service that helps track resource usage of tasks running on a node. 
+ */ +@SuppressForbidden(reason = "ThreadMXBean#getThreadAllocatedBytes") +public class TaskResourceTrackingService implements RunnableTaskExecutionListener { + + private static final Logger logger = LogManager.getLogger(TaskManager.class); + + public static final Setting TASK_RESOURCE_TRACKING_ENABLED = Setting.boolSetting( + "task_resource_tracking.enabled", + true, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + public static final String TASK_ID = "TASK_ID"; + + private static final ThreadMXBean threadMXBean = (ThreadMXBean) ManagementFactory.getThreadMXBean(); + + private final ConcurrentMapLong resourceAwareTasks = ConcurrentCollections.newConcurrentMapLongWithAggressiveConcurrency(); + private final ThreadPool threadPool; + private volatile boolean taskResourceTrackingEnabled; + + @Inject + public TaskResourceTrackingService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) { + this.taskResourceTrackingEnabled = TASK_RESOURCE_TRACKING_ENABLED.get(settings); + this.threadPool = threadPool; + clusterSettings.addSettingsUpdateConsumer(TASK_RESOURCE_TRACKING_ENABLED, this::setTaskResourceTrackingEnabled); + } + + public void setTaskResourceTrackingEnabled(boolean taskResourceTrackingEnabled) { + this.taskResourceTrackingEnabled = taskResourceTrackingEnabled; + } + + public boolean isTaskResourceTrackingEnabled() { + return taskResourceTrackingEnabled; + } + + public boolean isTaskResourceTrackingSupported() { + return threadMXBean.isThreadAllocatedMemorySupported() && threadMXBean.isThreadAllocatedMemoryEnabled(); + } + + /** + * Executes logic only if task supports resource tracking and resource tracking setting is enabled. + *

+ * 1. Starts tracking the task in map of resourceAwareTasks. + * 2. Adds Task Id in thread context to make sure it's available while task is processed across multiple threads. + * + * @param task for which resources needs to be tracked + * @return Autocloseable stored context to restore ThreadContext to the state before this method changed it. + */ + public ThreadContext.StoredContext startTracking(Task task) { + if (task.supportsResourceTracking() == false + || isTaskResourceTrackingEnabled() == false + || isTaskResourceTrackingSupported() == false) { + return () -> {}; + } + + logger.debug("Starting resource tracking for task: {}", task.getId()); + resourceAwareTasks.put(task.getId(), task); + return addTaskIdToThreadContext(task); + } + + /** + * Stops tracking task registered earlier for tracking. + *

+ * It doesn't have feature enabled check to avoid any issues if setting was disable while the task was in progress. + *

+ * It's also responsible to stop tracking the current thread's resources against this task if not already done. + * This happens when the thread executing the request logic itself calls the unregister method. So in this case unregister + * happens before runnable finishes. + * + * @param task task which has finished and doesn't need resource tracking. + */ + public void stopTracking(Task task) { + logger.debug("Stopping resource tracking for task: {}", task.getId()); + try { + if (isCurrentThreadWorkingOnTask(task)) { + taskExecutionFinishedOnThread(task.getId(), Thread.currentThread().getId()); + } + + List threadsWorkingOnTask = getThreadsWorkingOnTask(task); + if (threadsWorkingOnTask.size() > 0) { + logger.warn("No thread should be active when task finishes. Active threads: {}", threadsWorkingOnTask); + assert false : "No thread should be marked active when task finishes"; + } + } catch (Exception e) { + logger.warn("Failed while trying to mark the task execution on current thread completed.", e); + assert false; + } finally { + resourceAwareTasks.remove(task.getId()); + } + } + + /** + * Refreshes the resource stats for the tasks provided by looking into which threads are actively working on these + * and how much resources these have consumed till now. + * + * @param tasks for which resource stats needs to be refreshed. + */ + public void refreshResourceStats(Task... 
tasks) { + if (isTaskResourceTrackingEnabled() == false || isTaskResourceTrackingSupported() == false) { + return; + } + + for (Task task : tasks) { + if (task.supportsResourceTracking() && resourceAwareTasks.containsKey(task.getId())) { + refreshResourceStats(task); + } + } + } + + private void refreshResourceStats(Task resourceAwareTask) { + try { + logger.debug("Refreshing resource stats for Task: {}", resourceAwareTask.getId()); + List threadsWorkingOnTask = getThreadsWorkingOnTask(resourceAwareTask); + threadsWorkingOnTask.forEach( + threadId -> resourceAwareTask.updateThreadResourceStats(threadId, WORKER_STATS, getResourceUsageMetricsForThread(threadId)) + ); + } catch (IllegalStateException e) { + logger.debug("Resource stats already updated."); + } + + } + + /** + * Called when a thread starts working on a task's runnable. + * + * @param taskId of the task for which runnable is starting + * @param threadId of the thread which will be executing the runnable and we need to check resource usage for this + * thread + */ + @Override + public void taskExecutionStartedOnThread(long taskId, long threadId) { + try { + if (resourceAwareTasks.containsKey(taskId)) { + logger.debug("Task execution started on thread. Task: {}, Thread: {}", taskId, threadId); + + resourceAwareTasks.get(taskId) + .startThreadResourceTracking(threadId, WORKER_STATS, getResourceUsageMetricsForThread(threadId)); + } + } catch (Exception e) { + logger.warn(new ParameterizedMessage("Failed to mark thread execution started for task: [{}]", taskId), e); + assert false; + } + + } + + /** + * Called when a thread finishes working on a task's runnable. 
+ * + * @param taskId of the task for which runnable is complete + * @param threadId of the thread which executed the runnable and we need to check resource usage for this thread + */ + @Override + public void taskExecutionFinishedOnThread(long taskId, long threadId) { + try { + if (resourceAwareTasks.containsKey(taskId)) { + logger.debug("Task execution finished on thread. Task: {}, Thread: {}", taskId, threadId); + resourceAwareTasks.get(taskId) + .stopThreadResourceTracking(threadId, WORKER_STATS, getResourceUsageMetricsForThread(threadId)); + } + } catch (Exception e) { + logger.warn(new ParameterizedMessage("Failed to mark thread execution finished for task: [{}]", taskId), e); + assert false; + } + } + + public Map getResourceAwareTasks() { + return Collections.unmodifiableMap(resourceAwareTasks); + } + + private ResourceUsageMetric[] getResourceUsageMetricsForThread(long threadId) { + ResourceUsageMetric currentMemoryUsage = new ResourceUsageMetric( + ResourceStats.MEMORY, + threadMXBean.getThreadAllocatedBytes(threadId) + ); + ResourceUsageMetric currentCPUUsage = new ResourceUsageMetric(ResourceStats.CPU, threadMXBean.getThreadCpuTime(threadId)); + return new ResourceUsageMetric[] { currentMemoryUsage, currentCPUUsage }; + } + + private boolean isCurrentThreadWorkingOnTask(Task task) { + long threadId = Thread.currentThread().getId(); + List threadResourceInfos = task.getResourceStats().getOrDefault(threadId, Collections.emptyList()); + + for (ThreadResourceInfo threadResourceInfo : threadResourceInfos) { + if (threadResourceInfo.isActive()) { + return true; + } + } + return false; + } + + private List getThreadsWorkingOnTask(Task task) { + List activeThreads = new ArrayList<>(); + for (List threadResourceInfos : task.getResourceStats().values()) { + for (ThreadResourceInfo threadResourceInfo : threadResourceInfos) { + if (threadResourceInfo.isActive()) { + activeThreads.add(threadResourceInfo.getThreadId()); + } + } + } + return activeThreads; + } + + /** 
+ * Adds Task Id in the ThreadContext. + *

+ * Stashes the existing ThreadContext and preserves all the existing ThreadContext's data in the new ThreadContext + * as well. + * + * @param task for which Task Id needs to be added in ThreadContext. + * @return StoredContext reference to restore the ThreadContext from which we created a new one. + * Caller can call context.restore() to get the existing ThreadContext back. + */ + private ThreadContext.StoredContext addTaskIdToThreadContext(Task task) { + ThreadContext threadContext = threadPool.getThreadContext(); + ThreadContext.StoredContext storedContext = threadContext.newStoredContext(true, Collections.singletonList(TASK_ID)); + threadContext.putTransient(TASK_ID, task.getId()); + return storedContext; + } + +} diff --git a/server/src/main/java/org/opensearch/tasks/ThreadResourceInfo.java b/server/src/main/java/org/opensearch/tasks/ThreadResourceInfo.java index 8b45c38c8fb63..9ee683e3928f6 100644 --- a/server/src/main/java/org/opensearch/tasks/ThreadResourceInfo.java +++ b/server/src/main/java/org/opensearch/tasks/ThreadResourceInfo.java @@ -15,11 +15,13 @@ * for a specific stats type like worker_stats or response_stats etc., */ public class ThreadResourceInfo { + private final long threadId; private volatile boolean isActive = true; private final ResourceStatsType statsType; private final ResourceUsageInfo resourceUsageInfo; - public ThreadResourceInfo(ResourceStatsType statsType, ResourceUsageMetric... resourceUsageMetrics) { + public ThreadResourceInfo(long threadId, ResourceStatsType statsType, ResourceUsageMetric... 
resourceUsageMetrics) { + this.threadId = threadId; this.statsType = statsType; this.resourceUsageInfo = new ResourceUsageInfo(resourceUsageMetrics); } @@ -43,12 +45,16 @@ public ResourceStatsType getStatsType() { return statsType; } + public long getThreadId() { + return threadId; + } + public ResourceUsageInfo getResourceUsageInfo() { return resourceUsageInfo; } @Override public String toString() { - return resourceUsageInfo + ", stats_type=" + statsType + ", is_active=" + isActive; + return resourceUsageInfo + ", stats_type=" + statsType + ", is_active=" + isActive + ", threadId=" + threadId; } } diff --git a/server/src/main/java/org/opensearch/threadpool/AutoQueueAdjustingExecutorBuilder.java b/server/src/main/java/org/opensearch/threadpool/AutoQueueAdjustingExecutorBuilder.java index 2bac5eba9fc28..55b92c5d8bfcb 100644 --- a/server/src/main/java/org/opensearch/threadpool/AutoQueueAdjustingExecutorBuilder.java +++ b/server/src/main/java/org/opensearch/threadpool/AutoQueueAdjustingExecutorBuilder.java @@ -48,6 +48,7 @@ import java.util.Map; import java.util.concurrent.ExecutorService; import java.util.concurrent.ThreadFactory; +import java.util.concurrent.atomic.AtomicReference; /** * A builder for executors that automatically adjust the queue length as needed, depending on @@ -61,6 +62,7 @@ public final class AutoQueueAdjustingExecutorBuilder extends ExecutorBuilder maxQueueSizeSetting; private final Setting targetedResponseTimeSetting; private final Setting frameSizeSetting; + private final AtomicReference runnableTaskListener; AutoQueueAdjustingExecutorBuilder( final Settings settings, @@ -70,6 +72,19 @@ public final class AutoQueueAdjustingExecutorBuilder extends ExecutorBuilder runnableTaskListener ) { super(name); final String prefix = "thread_pool." 
+ name; @@ -184,6 +199,7 @@ public Iterator> settings() { Setting.Property.Deprecated, Setting.Property.Deprecated ); + this.runnableTaskListener = runnableTaskListener; } @Override @@ -230,7 +246,8 @@ ThreadPool.ExecutorHolder build(final AutoExecutorSettings settings, final Threa frameSize, targetedResponseTime, threadFactory, - threadContext + threadContext, + runnableTaskListener ); // TODO: in a subsequent change we hope to extend ThreadPool.Info to be more specific for the thread pool type final ThreadPool.Info info = new ThreadPool.Info( diff --git a/server/src/main/java/org/opensearch/threadpool/RunnableTaskExecutionListener.java b/server/src/main/java/org/opensearch/threadpool/RunnableTaskExecutionListener.java new file mode 100644 index 0000000000000..03cd66f80d044 --- /dev/null +++ b/server/src/main/java/org/opensearch/threadpool/RunnableTaskExecutionListener.java @@ -0,0 +1,33 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.threadpool; + +/** + * Listener for events when a runnable execution starts or finishes on a thread and is aware of the task for which the + * runnable is associated to. 
+ */ +public interface RunnableTaskExecutionListener { + + /** + * Sends an update when ever a task's execution start on a thread + * + * @param taskId of task which has started + * @param threadId of thread which is executing the task + */ + void taskExecutionStartedOnThread(long taskId, long threadId); + + /** + * + * Sends an update when task execution finishes on a thread + * + * @param taskId of task which has finished + * @param threadId of thread which executed the task + */ + void taskExecutionFinishedOnThread(long taskId, long threadId); +} diff --git a/server/src/main/java/org/opensearch/threadpool/TaskAwareRunnable.java b/server/src/main/java/org/opensearch/threadpool/TaskAwareRunnable.java new file mode 100644 index 0000000000000..183b9b2f4cf9a --- /dev/null +++ b/server/src/main/java/org/opensearch/threadpool/TaskAwareRunnable.java @@ -0,0 +1,90 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.threadpool; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.ExceptionsHelper; +import org.opensearch.common.util.concurrent.AbstractRunnable; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.util.concurrent.WrappedRunnable; +import org.opensearch.tasks.TaskManager; + +import java.util.Objects; +import java.util.concurrent.atomic.AtomicReference; + +import static java.lang.Thread.currentThread; +import static org.opensearch.tasks.TaskResourceTrackingService.TASK_ID; + +/** + * Responsible for wrapping the original task's runnable and sending updates on when it starts and finishes to + * entities listening to the events. + * + * It's able to associate runnable with a task with the help of task Id available in thread context. 
+ */ +public class TaskAwareRunnable extends AbstractRunnable implements WrappedRunnable { + + private static final Logger logger = LogManager.getLogger(TaskManager.class); + + private final Runnable original; + private final ThreadContext threadContext; + private final AtomicReference runnableTaskListener; + + public TaskAwareRunnable( + final ThreadContext threadContext, + final Runnable original, + final AtomicReference runnableTaskListener + ) { + this.original = original; + this.threadContext = threadContext; + this.runnableTaskListener = runnableTaskListener; + } + + @Override + public void onFailure(Exception e) { + ExceptionsHelper.reThrowIfNotNull(e); + } + + @Override + public boolean isForceExecution() { + return original instanceof AbstractRunnable && ((AbstractRunnable) original).isForceExecution(); + } + + @Override + public void onRejection(final Exception e) { + if (original instanceof AbstractRunnable) { + ((AbstractRunnable) original).onRejection(e); + } else { + ExceptionsHelper.reThrowIfNotNull(e); + } + } + + @Override + protected void doRun() throws Exception { + assert runnableTaskListener.get() != null : "Listener should be attached"; + Long taskId = threadContext.getTransient(TASK_ID); + if (Objects.nonNull(taskId)) { + runnableTaskListener.get().taskExecutionStartedOnThread(taskId, currentThread().getId()); + } else { + logger.debug("Task Id not available in thread context. Skipping update. 
Thread Info: {}", Thread.currentThread()); + } + try { + original.run(); + } finally { + if (Objects.nonNull(taskId)) { + runnableTaskListener.get().taskExecutionFinishedOnThread(taskId, currentThread().getId()); + } + } + } + + @Override + public Runnable unwrap() { + return original; + } +} diff --git a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java index c2530ccee5588..5e8f515f6c577 100644 --- a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java +++ b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java @@ -68,6 +68,7 @@ import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; import static java.util.Collections.unmodifiableMap; @@ -184,6 +185,14 @@ public Collection builders() { ); public ThreadPool(final Settings settings, final ExecutorBuilder... customBuilders) { + this(settings, null, customBuilders); + } + + public ThreadPool( + final Settings settings, + final AtomicReference runnableTaskListener, + final ExecutorBuilder... customBuilders + ) { assert Node.NODE_NAME_SETTING.exists(settings); final Map builders = new HashMap<>(); @@ -197,11 +206,20 @@ public ThreadPool(final Settings settings, final ExecutorBuilder... 
customBui builders.put(Names.ANALYZE, new FixedExecutorBuilder(settings, Names.ANALYZE, 1, 16)); builders.put( Names.SEARCH, - new AutoQueueAdjustingExecutorBuilder(settings, Names.SEARCH, searchThreadPoolSize(allocatedProcessors), 1000, 1000, 1000, 2000) + new AutoQueueAdjustingExecutorBuilder( + settings, + Names.SEARCH, + searchThreadPoolSize(allocatedProcessors), + 1000, + 1000, + 1000, + 2000, + runnableTaskListener + ) ); builders.put( Names.SEARCH_THROTTLED, - new AutoQueueAdjustingExecutorBuilder(settings, Names.SEARCH_THROTTLED, 1, 100, 100, 100, 200) + new AutoQueueAdjustingExecutorBuilder(settings, Names.SEARCH_THROTTLED, 1, 100, 100, 100, 200, runnableTaskListener) ); builders.put(Names.MANAGEMENT, new ScalingExecutorBuilder(Names.MANAGEMENT, 1, 5, TimeValue.timeValueMinutes(5))); // no queue as this means clients will need to handle rejections on listener queue even if the operation succeeded diff --git a/server/src/main/java/org/opensearch/transport/RequestHandlerRegistry.java b/server/src/main/java/org/opensearch/transport/RequestHandlerRegistry.java index dcb021531f0ac..73be6e5b601e9 100644 --- a/server/src/main/java/org/opensearch/transport/RequestHandlerRegistry.java +++ b/server/src/main/java/org/opensearch/transport/RequestHandlerRegistry.java @@ -37,6 +37,7 @@ import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; import org.opensearch.search.internal.ShardSearchRequest; +import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.tasks.CancellableTask; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskManager; @@ -81,6 +82,8 @@ public Request newRequest(StreamInput in) throws IOException { public void processMessageReceived(Request request, TransportChannel channel) throws Exception { final Task task = taskManager.register(channel.getChannelType(), action, request); + ThreadContext.StoredContext contextToRestore = taskManager.taskExecutionStarted(task); + Releasable 
unregisterTask = () -> taskManager.unregister(task); try { if (channel instanceof TcpTransportChannel && task instanceof CancellableTask) { @@ -99,6 +102,7 @@ public void processMessageReceived(Request request, TransportChannel channel) th unregisterTask = null; } finally { Releasables.close(unregisterTask); + contextToRestore.restore(); } } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/RecordingTaskManagerListener.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/RecordingTaskManagerListener.java index 7756eb12bb3f4..9bd44185baf24 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/RecordingTaskManagerListener.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/RecordingTaskManagerListener.java @@ -75,6 +75,9 @@ public synchronized void onTaskUnregistered(Task task) { @Override public void waitForTaskCompletion(Task task) {} + @Override + public void taskExecutionStarted(Task task, Boolean closeableInvoked) {} + public synchronized List> getEvents() { return Collections.unmodifiableList(new ArrayList<>(events)); } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java new file mode 100644 index 0000000000000..23877ac0b7395 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java @@ -0,0 +1,633 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.cluster.node.tasks; + +import com.sun.management.ThreadMXBean; +import org.opensearch.ExceptionsHelper; +import org.opensearch.action.ActionListener; +import org.opensearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; +import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksRequest; +import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse; +import org.opensearch.action.support.ActionTestUtils; +import org.opensearch.action.support.nodes.BaseNodeRequest; +import org.opensearch.action.support.nodes.BaseNodesRequest; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.SuppressForbidden; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.concurrent.AbstractRunnable; +import org.opensearch.tasks.CancellableTask; +import org.opensearch.tasks.Task; +import org.opensearch.tasks.TaskCancelledException; +import org.opensearch.tasks.TaskId; +import org.opensearch.tasks.TaskInfo; +import org.opensearch.test.tasks.MockTaskManager; +import org.opensearch.test.tasks.MockTaskManagerListener; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.lang.management.ManagementFactory; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; + +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.opensearch.tasks.TaskResourceTrackingService.TASK_ID; + +@SuppressForbidden(reason = "ThreadMXBean#getThreadAllocatedBytes") +public class 
ResourceAwareTasksTests extends TaskManagerTestCase { + + private static final ThreadMXBean threadMXBean = (ThreadMXBean) ManagementFactory.getThreadMXBean(); + + public static class ResourceAwareNodeRequest extends BaseNodeRequest { + protected String requestName; + + public ResourceAwareNodeRequest() { + super(); + } + + public ResourceAwareNodeRequest(StreamInput in) throws IOException { + super(in); + requestName = in.readString(); + } + + public ResourceAwareNodeRequest(NodesRequest request) { + requestName = request.requestName; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(requestName); + } + + @Override + public String getDescription() { + return "ResourceAwareNodeRequest[" + requestName + "]"; + } + + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new CancellableTask(id, type, action, getDescription(), parentTaskId, headers) { + @Override + public boolean shouldCancelChildrenOnCancellation() { + return false; + } + + @Override + public boolean supportsResourceTracking() { + return true; + } + }; + } + } + + public static class NodesRequest extends BaseNodesRequest { + private final String requestName; + + private NodesRequest(StreamInput in) throws IOException { + super(in); + requestName = in.readString(); + } + + public NodesRequest(String requestName, String... 
nodesIds) { + super(nodesIds); + this.requestName = requestName; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(requestName); + } + + @Override + public String getDescription() { + return "NodesRequest[" + requestName + "]"; + } + + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new CancellableTask(id, type, action, getDescription(), parentTaskId, headers) { + @Override + public boolean shouldCancelChildrenOnCancellation() { + return true; + } + }; + } + } + + /** + * Simulates a task which executes work on search executor. + */ + class ResourceAwareNodesAction extends AbstractTestNodesAction { + private final TaskTestContext taskTestContext; + private final boolean blockForCancellation; + + ResourceAwareNodesAction( + String actionName, + ThreadPool threadPool, + ClusterService clusterService, + TransportService transportService, + boolean shouldBlock, + TaskTestContext taskTestContext + ) { + super(actionName, threadPool, clusterService, transportService, NodesRequest::new, ResourceAwareNodeRequest::new); + this.taskTestContext = taskTestContext; + this.blockForCancellation = shouldBlock; + } + + @Override + protected ResourceAwareNodeRequest newNodeRequest(NodesRequest request) { + return new ResourceAwareNodeRequest(request); + } + + @Override + protected NodeResponse nodeOperation(ResourceAwareNodeRequest request, Task task) { + assert task.supportsResourceTracking(); + + AtomicLong threadId = new AtomicLong(); + Future result = threadPool.executor(ThreadPool.Names.SEARCH).submit(new AbstractRunnable() { + @Override + public void onFailure(Exception e) { + ExceptionsHelper.reThrowIfNotNull(e); + } + + @Override + @SuppressForbidden(reason = "ThreadMXBean#getThreadAllocatedBytes") + protected void doRun() { + taskTestContext.memoryConsumptionWhenExecutionStarts = threadMXBean.getThreadAllocatedBytes( + 
Thread.currentThread().getId() + ); + threadId.set(Thread.currentThread().getId()); + + if (taskTestContext.operationStartValidator != null) { + try { + taskTestContext.operationStartValidator.accept(threadId.get()); + } catch (AssertionError error) { + throw new RuntimeException(error); + } + } + + Object[] allocation1 = new Object[1000000]; // 4MB + + if (blockForCancellation) { + // Simulate a job that takes forever to finish + // Using periodic checks method to identify that the task was cancelled + try { + boolean taskCancelled = waitUntil(((CancellableTask) task)::isCancelled); + if (taskCancelled) { + throw new TaskCancelledException("Task Cancelled"); + } else { + fail("It should have thrown an exception"); + } + } catch (InterruptedException ex) { + Thread.currentThread().interrupt(); + } + + } + + Object[] allocation2 = new Object[1000000]; // 4MB + } + }); + + try { + result.get(); + } catch (InterruptedException | ExecutionException e) { + throw new RuntimeException(e.getCause()); + } finally { + if (taskTestContext.operationFinishedValidator != null) { + taskTestContext.operationFinishedValidator.accept(threadId.get()); + } + } + + return new NodeResponse(clusterService.localNode()); + } + + @Override + protected NodeResponse nodeOperation(ResourceAwareNodeRequest request) { + throw new UnsupportedOperationException("the task parameter is required"); + } + } + + private TaskTestContext startResourceAwareNodesAction( + TestNode node, + boolean blockForCancellation, + TaskTestContext taskTestContext, + ActionListener listener + ) { + NodesRequest request = new NodesRequest("Test Request", node.getNodeId()); + + taskTestContext.requestCompleteLatch = new CountDownLatch(1); + + ResourceAwareNodesAction action = new ResourceAwareNodesAction( + "internal:resourceAction", + threadPool, + node.clusterService, + node.transportService, + blockForCancellation, + taskTestContext + ); + taskTestContext.mainTask = action.execute(request, listener); + return 
taskTestContext; + } + + private static class TaskTestContext { + private Task mainTask; + private CountDownLatch requestCompleteLatch; + private Consumer operationStartValidator; + private Consumer operationFinishedValidator; + private long memoryConsumptionWhenExecutionStarts; + } + + public void testBasicTaskResourceTracking() throws Exception { + setup(true, false); + + final AtomicReference throwableReference = new AtomicReference<>(); + final AtomicReference responseReference = new AtomicReference<>(); + TaskTestContext taskTestContext = new TaskTestContext(); + + Map resourceTasks = testNodes[0].taskResourceTrackingService.getResourceAwareTasks(); + + taskTestContext.operationStartValidator = threadId -> { + Task task = resourceTasks.values().stream().findAny().get(); + + // One thread is currently working on task but not finished + assertEquals(1, resourceTasks.size()); + assertEquals(1, task.getResourceStats().size()); + assertEquals(1, task.getResourceStats().get(threadId).size()); + assertTrue(task.getResourceStats().get(threadId).get(0).isActive()); + assertEquals(0, task.getTotalResourceStats().getCpuTimeInNanos()); + assertEquals(0, task.getTotalResourceStats().getMemoryInBytes()); + }; + + taskTestContext.operationFinishedValidator = threadId -> { + Task task = resourceTasks.values().stream().findAny().get(); + + // Thread has finished working on the task's runnable + assertEquals(1, resourceTasks.size()); + assertEquals(1, task.getResourceStats().size()); + assertEquals(1, task.getResourceStats().get(threadId).size()); + assertFalse(task.getResourceStats().get(threadId).get(0).isActive()); + + long expectedArrayAllocationOverhead = 2 * 4012688; // Task's memory overhead due to array allocations + long actualTaskMemoryOverhead = task.getTotalResourceStats().getMemoryInBytes(); + + assertTrue(actualTaskMemoryOverhead - expectedArrayAllocationOverhead < taskTestContext.memoryConsumptionWhenExecutionStarts); + 
assertTrue(task.getTotalResourceStats().getCpuTimeInNanos() > 0); + }; + + startResourceAwareNodesAction(testNodes[0], false, taskTestContext, new ActionListener() { + @Override + public void onResponse(NodesResponse listTasksResponse) { + responseReference.set(listTasksResponse); + taskTestContext.requestCompleteLatch.countDown(); + } + + @Override + public void onFailure(Exception e) { + throwableReference.set(e); + taskTestContext.requestCompleteLatch.countDown(); + } + }); + + // Waiting for whole request to complete and return successfully till client + taskTestContext.requestCompleteLatch.await(); + + assertTasksRequestFinishedSuccessfully(resourceTasks.size(), responseReference.get(), throwableReference.get()); + } + + public void testTaskResourceTrackingDuringTaskCancellation() throws Exception { + setup(true, false); + + final AtomicReference throwableReference = new AtomicReference<>(); + final AtomicReference responseReference = new AtomicReference<>(); + TaskTestContext taskTestContext = new TaskTestContext(); + + Map resourceTasks = testNodes[0].taskResourceTrackingService.getResourceAwareTasks(); + + taskTestContext.operationStartValidator = threadId -> { + Task task = resourceTasks.values().stream().findAny().get(); + + // One thread is currently working on task but not finished + assertEquals(1, resourceTasks.size()); + assertEquals(1, task.getResourceStats().size()); + assertEquals(1, task.getResourceStats().get(threadId).size()); + assertTrue(task.getResourceStats().get(threadId).get(0).isActive()); + assertEquals(0, task.getTotalResourceStats().getCpuTimeInNanos()); + assertEquals(0, task.getTotalResourceStats().getMemoryInBytes()); + }; + + taskTestContext.operationFinishedValidator = threadId -> { + Task task = resourceTasks.values().stream().findAny().get(); + + // Thread has finished working on the task's runnable + assertEquals(1, resourceTasks.size()); + assertEquals(1, task.getResourceStats().size()); + assertEquals(1, 
task.getResourceStats().get(threadId).size()); + assertFalse(task.getResourceStats().get(threadId).get(0).isActive()); + + // allocations are completed before the task is cancelled + long expectedArrayAllocationOverhead = 4012688; // Task's memory overhead due to array allocations + long taskCancellationOverhead = 30000; // Task cancellation overhead ~ 30Kb + long actualTaskMemoryOverhead = task.getTotalResourceStats().getMemoryInBytes(); + + long expectedOverhead = expectedArrayAllocationOverhead + taskCancellationOverhead; + assertTrue(actualTaskMemoryOverhead - expectedOverhead < taskTestContext.memoryConsumptionWhenExecutionStarts); + assertTrue(task.getTotalResourceStats().getCpuTimeInNanos() > 0); + }; + + startResourceAwareNodesAction(testNodes[0], true, taskTestContext, new ActionListener() { + @Override + public void onResponse(NodesResponse listTasksResponse) { + responseReference.set(listTasksResponse); + taskTestContext.requestCompleteLatch.countDown(); + } + + @Override + public void onFailure(Exception e) { + throwableReference.set(e); + taskTestContext.requestCompleteLatch.countDown(); + } + }); + + // Cancel main task + CancelTasksRequest request = new CancelTasksRequest(); + request.setReason("Cancelling request to verify Task resource tracking behaviour"); + request.setTaskId(new TaskId(testNodes[0].getNodeId(), taskTestContext.mainTask.getId())); + ActionTestUtils.executeBlocking(testNodes[0].transportCancelTasksAction, request); + + // Waiting for whole request to complete and return successfully till client + taskTestContext.requestCompleteLatch.await(); + + assertEquals(0, resourceTasks.size()); + assertNull(throwableReference.get()); + assertNotNull(responseReference.get()); + assertEquals(1, responseReference.get().failureCount()); + assertEquals(TaskCancelledException.class, findActualException(responseReference.get().failures().get(0)).getClass()); + } + + public void testTaskResourceTrackingDisabled() throws Exception { + setup(false, 
false); + + final AtomicReference throwableReference = new AtomicReference<>(); + final AtomicReference responseReference = new AtomicReference<>(); + TaskTestContext taskTestContext = new TaskTestContext(); + + Map resourceTasks = testNodes[0].taskResourceTrackingService.getResourceAwareTasks(); + + taskTestContext.operationStartValidator = threadId -> { assertEquals(0, resourceTasks.size()); }; + + taskTestContext.operationFinishedValidator = threadId -> { assertEquals(0, resourceTasks.size()); }; + + startResourceAwareNodesAction(testNodes[0], false, taskTestContext, new ActionListener() { + @Override + public void onResponse(NodesResponse listTasksResponse) { + responseReference.set(listTasksResponse); + taskTestContext.requestCompleteLatch.countDown(); + } + + @Override + public void onFailure(Exception e) { + throwableReference.set(e); + taskTestContext.requestCompleteLatch.countDown(); + } + }); + + // Waiting for whole request to complete and return successfully till client + taskTestContext.requestCompleteLatch.await(); + + assertTasksRequestFinishedSuccessfully(resourceTasks.size(), responseReference.get(), throwableReference.get()); + } + + public void testTaskResourceTrackingDisabledWhileTaskInProgress() throws Exception { + setup(true, false); + + final AtomicReference throwableReference = new AtomicReference<>(); + final AtomicReference responseReference = new AtomicReference<>(); + TaskTestContext taskTestContext = new TaskTestContext(); + + Map resourceTasks = testNodes[0].taskResourceTrackingService.getResourceAwareTasks(); + + taskTestContext.operationStartValidator = threadId -> { + Task task = resourceTasks.values().stream().findAny().get(); + // One thread is currently working on task but not finished + assertEquals(1, resourceTasks.size()); + assertEquals(1, task.getResourceStats().size()); + assertEquals(1, task.getResourceStats().get(threadId).size()); + assertTrue(task.getResourceStats().get(threadId).get(0).isActive()); + assertEquals(0, 
task.getTotalResourceStats().getCpuTimeInNanos()); + assertEquals(0, task.getTotalResourceStats().getMemoryInBytes()); + + testNodes[0].taskResourceTrackingService.setTaskResourceTrackingEnabled(false); + }; + + taskTestContext.operationFinishedValidator = threadId -> { + Task task = resourceTasks.values().stream().findAny().get(); + // Thread has finished working on the task's runnable + assertEquals(1, resourceTasks.size()); + assertEquals(1, task.getResourceStats().size()); + assertEquals(1, task.getResourceStats().get(threadId).size()); + assertFalse(task.getResourceStats().get(threadId).get(0).isActive()); + + long expectedArrayAllocationOverhead = 2 * 4012688; // Task's memory overhead due to array allocations + long actualTaskMemoryOverhead = task.getTotalResourceStats().getMemoryInBytes(); + + assertTrue(actualTaskMemoryOverhead - expectedArrayAllocationOverhead < taskTestContext.memoryConsumptionWhenExecutionStarts); + assertTrue(task.getTotalResourceStats().getCpuTimeInNanos() > 0); + }; + + startResourceAwareNodesAction(testNodes[0], false, taskTestContext, new ActionListener() { + @Override + public void onResponse(NodesResponse listTasksResponse) { + responseReference.set(listTasksResponse); + taskTestContext.requestCompleteLatch.countDown(); + } + + @Override + public void onFailure(Exception e) { + throwableReference.set(e); + taskTestContext.requestCompleteLatch.countDown(); + } + }); + + // Waiting for whole request to complete and return successfully till client + taskTestContext.requestCompleteLatch.await(); + + assertTasksRequestFinishedSuccessfully(resourceTasks.size(), responseReference.get(), throwableReference.get()); + } + + public void testTaskResourceTrackingEnabledWhileTaskInProgress() throws Exception { + setup(false, false); + + final AtomicReference throwableReference = new AtomicReference<>(); + final AtomicReference responseReference = new AtomicReference<>(); + TaskTestContext taskTestContext = new TaskTestContext(); + + Map 
resourceTasks = testNodes[0].taskResourceTrackingService.getResourceAwareTasks(); + + taskTestContext.operationStartValidator = threadId -> { + assertEquals(0, resourceTasks.size()); + + testNodes[0].taskResourceTrackingService.setTaskResourceTrackingEnabled(true); + }; + + taskTestContext.operationFinishedValidator = threadId -> { assertEquals(0, resourceTasks.size()); }; + + startResourceAwareNodesAction(testNodes[0], false, taskTestContext, new ActionListener() { + @Override + public void onResponse(NodesResponse listTasksResponse) { + responseReference.set(listTasksResponse); + taskTestContext.requestCompleteLatch.countDown(); + } + + @Override + public void onFailure(Exception e) { + throwableReference.set(e); + taskTestContext.requestCompleteLatch.countDown(); + } + }); + + // Waiting for whole request to complete and return successfully till client + taskTestContext.requestCompleteLatch.await(); + + assertTasksRequestFinishedSuccessfully(resourceTasks.size(), responseReference.get(), throwableReference.get()); + } + + public void testOnDemandRefreshWhileFetchingTasks() throws InterruptedException { + setup(true, false); + + final AtomicReference throwableReference = new AtomicReference<>(); + final AtomicReference responseReference = new AtomicReference<>(); + + TaskTestContext taskTestContext = new TaskTestContext(); + + Map resourceTasks = testNodes[0].taskResourceTrackingService.getResourceAwareTasks(); + + taskTestContext.operationStartValidator = threadId -> { + ListTasksResponse listTasksResponse = ActionTestUtils.executeBlocking( + testNodes[0].transportListTasksAction, + new ListTasksRequest().setActions("internal:resourceAction*").setDetailed(true) + ); + + TaskInfo taskInfo = listTasksResponse.getTasks().get(1); + + assertNotNull(taskInfo.getResourceStats()); + assertNotNull(taskInfo.getResourceStats().getResourceUsageInfo()); + assertTrue(taskInfo.getResourceStats().getResourceUsageInfo().get("total").getCpuTimeInNanos() > 0); + 
assertTrue(taskInfo.getResourceStats().getResourceUsageInfo().get("total").getMemoryInBytes() > 0); + }; + + startResourceAwareNodesAction(testNodes[0], false, taskTestContext, new ActionListener() { + @Override + public void onResponse(NodesResponse listTasksResponse) { + responseReference.set(listTasksResponse); + taskTestContext.requestCompleteLatch.countDown(); + } + + @Override + public void onFailure(Exception e) { + throwableReference.set(e); + taskTestContext.requestCompleteLatch.countDown(); + } + }); + + // Waiting for whole request to complete and return successfully till client + taskTestContext.requestCompleteLatch.await(); + + assertTasksRequestFinishedSuccessfully(resourceTasks.size(), responseReference.get(), throwableReference.get()); + } + + public void testTaskIdPersistsInThreadContext() throws InterruptedException { + setup(true, true); + + final List taskIdsAddedToThreadContext = new ArrayList<>(); + final List taskIdsRemovedFromThreadContext = new ArrayList<>(); + AtomicLong actualTaskIdInThreadContext = new AtomicLong(-1); + AtomicLong expectedTaskIdInThreadContext = new AtomicLong(-2); + + ((MockTaskManager) testNodes[0].transportService.getTaskManager()).addListener(new MockTaskManagerListener() { + @Override + public void waitForTaskCompletion(Task task) {} + + @Override + public void taskExecutionStarted(Task task, Boolean closeableInvoked) { + if (closeableInvoked) { + taskIdsRemovedFromThreadContext.add(task.getId()); + } else { + taskIdsAddedToThreadContext.add(task.getId()); + } + } + + @Override + public void onTaskRegistered(Task task) {} + + @Override + public void onTaskUnregistered(Task task) { + if (task.getAction().equals("internal:resourceAction[n]")) { + expectedTaskIdInThreadContext.set(task.getId()); + actualTaskIdInThreadContext.set(threadPool.getThreadContext().getTransient(TASK_ID)); + } + } + }); + + TaskTestContext taskTestContext = new TaskTestContext(); + startResourceAwareNodesAction(testNodes[0], false, 
taskTestContext, new ActionListener() { + @Override + public void onResponse(NodesResponse listTasksResponse) { + taskTestContext.requestCompleteLatch.countDown(); + } + + @Override + public void onFailure(Exception e) { + taskTestContext.requestCompleteLatch.countDown(); + } + }); + + taskTestContext.requestCompleteLatch.await(); + + assertEquals(expectedTaskIdInThreadContext.get(), actualTaskIdInThreadContext.get()); + assertThat(taskIdsAddedToThreadContext, containsInAnyOrder(taskIdsRemovedFromThreadContext.toArray())); + } + + private void setup(boolean resourceTrackingEnabled, boolean useMockTaskManager) { + Settings settings = Settings.builder() + .put("task_resource_tracking.enabled", resourceTrackingEnabled) + .put(MockTaskManager.USE_MOCK_TASK_MANAGER_SETTING.getKey(), useMockTaskManager) + .build(); + setupTestNodes(settings); + connectNodes(testNodes[0]); + + runnableTaskListener.set(testNodes[0].taskResourceTrackingService); + } + + private Throwable findActualException(Exception e) { + Throwable throwable = e.getCause(); + while (throwable.getCause() != null) { + throwable = throwable.getCause(); + } + return throwable; + } + + private void assertTasksRequestFinishedSuccessfully(int activeResourceTasks, NodesResponse nodesResponse, Throwable throwable) { + assertEquals(0, activeResourceTasks); + assertNull(throwable); + assertNotNull(nodesResponse); + assertEquals(0, nodesResponse.failureCount()); + } + +} diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java index c8411b31e0709..51fc5d80f2de3 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java @@ -59,8 +59,10 @@ import org.opensearch.indices.breaker.NoneCircuitBreakerService; import 
org.opensearch.tasks.TaskCancellationService; import org.opensearch.tasks.TaskManager; +import org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.tasks.MockTaskManager; +import org.opensearch.threadpool.RunnableTaskExecutionListener; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -74,6 +76,7 @@ import java.util.List; import java.util.Set; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; import static java.util.Collections.emptyMap; @@ -89,10 +92,12 @@ public abstract class TaskManagerTestCase extends OpenSearchTestCase { protected ThreadPool threadPool; protected TestNode[] testNodes; protected int nodesCount; + protected AtomicReference runnableTaskListener; @Before public void setupThreadPool() { - threadPool = new TestThreadPool(TransportTasksActionTests.class.getSimpleName()); + runnableTaskListener = new AtomicReference<>(); + threadPool = new TestThreadPool(TransportTasksActionTests.class.getSimpleName(), runnableTaskListener); } public void setupTestNodes(Settings settings) { @@ -225,14 +230,22 @@ protected TaskManager createTaskManager(Settings settings, ThreadPool threadPool transportService.start(); clusterService = createClusterService(threadPool, discoveryNode.get()); clusterService.addStateApplier(transportService.getTaskManager()); + taskResourceTrackingService = new TaskResourceTrackingService(settings, clusterService.getClusterSettings(), threadPool); + transportService.getTaskManager().setTaskResourceTrackingService(taskResourceTrackingService); ActionFilters actionFilters = new ActionFilters(emptySet()); - transportListTasksAction = new TransportListTasksAction(clusterService, transportService, actionFilters); + transportListTasksAction = new TransportListTasksAction( + clusterService, + 
transportService, + actionFilters, + taskResourceTrackingService + ); transportCancelTasksAction = new TransportCancelTasksAction(clusterService, transportService, actionFilters); transportService.acceptIncomingRequests(); } public final ClusterService clusterService; public final TransportService transportService; + public final TaskResourceTrackingService taskResourceTrackingService; private final SetOnce discoveryNode = new SetOnce<>(); public final TransportListTasksAction transportListTasksAction; public final TransportCancelTasksAction transportCancelTasksAction; diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java index 4b98870422ce8..202f1b7dcb5b4 100644 --- a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java @@ -91,6 +91,7 @@ import static java.util.Collections.emptyMap; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.Answers.RETURNS_MOCKS; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyInt; import static org.mockito.Mockito.anyString; @@ -224,7 +225,7 @@ public void setupAction() { remoteResponseHandler = ArgumentCaptor.forClass(TransportResponseHandler.class); // setup services that will be called by action - transportService = mock(TransportService.class); + transportService = mock(TransportService.class, RETURNS_MOCKS); clusterService = mock(ClusterService.class); localIngest = true; // setup nodes for local and remote diff --git a/server/src/test/java/org/opensearch/common/util/concurrent/ThreadContextTests.java b/server/src/test/java/org/opensearch/common/util/concurrent/ThreadContextTests.java index 9c70accaca3e4..64286e47b4966 100644 --- 
a/server/src/test/java/org/opensearch/common/util/concurrent/ThreadContextTests.java +++ b/server/src/test/java/org/opensearch/common/util/concurrent/ThreadContextTests.java @@ -48,6 +48,7 @@ import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.sameInstance; +import static org.opensearch.tasks.TaskResourceTrackingService.TASK_ID; public class ThreadContextTests extends OpenSearchTestCase { @@ -154,6 +155,15 @@ public void testNewContextWithClearedTransients() { assertEquals(1, threadContext.getResponseHeaders().get("baz").size()); } + public void testStashContextWithPreservedTransients() { + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + threadContext.putTransient("foo", "bar"); + threadContext.putTransient(TASK_ID, 1); + threadContext.stashContext(); + assertNull(threadContext.getTransient("foo")); + assertEquals(1, (int) threadContext.getTransient(TASK_ID)); + } + public void testStashWithOrigin() { final String origin = randomAlphaOfLengthBetween(4, 16); final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index a896aab0f70c9..5f303bc774930 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -198,6 +198,7 @@ import org.opensearch.search.fetch.FetchPhase; import org.opensearch.search.query.QueryPhase; import org.opensearch.snapshots.mockstore.MockEventuallyConsistentRepository; +import org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.disruption.DisruptableMockTransport; import org.opensearch.threadpool.ThreadPool; @@ -1738,6 +1739,8 @@ public void onFailure(final Exception e) { final 
IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver( new ThreadContext(Settings.EMPTY) ); + transportService.getTaskManager() + .setTaskResourceTrackingService(new TaskResourceTrackingService(settings, clusterSettings, threadPool)); repositoriesService = new RepositoriesService( settings, clusterService, diff --git a/server/src/test/java/org/opensearch/tasks/TaskManagerTests.java b/server/src/test/java/org/opensearch/tasks/TaskManagerTests.java index 0f09b0de34206..ab49109eb8247 100644 --- a/server/src/test/java/org/opensearch/tasks/TaskManagerTests.java +++ b/server/src/test/java/org/opensearch/tasks/TaskManagerTests.java @@ -40,6 +40,7 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.RunnableTaskExecutionListener; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.FakeTcpChannel; @@ -59,6 +60,7 @@ import java.util.Set; import java.util.concurrent.Phaser; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.everyItem; @@ -67,10 +69,12 @@ public class TaskManagerTests extends OpenSearchTestCase { private ThreadPool threadPool; + private AtomicReference runnableTaskListener; @Before public void setupThreadPool() { - threadPool = new TestThreadPool(TransportTasksActionTests.class.getSimpleName()); + runnableTaskListener = new AtomicReference<>(); + threadPool = new TestThreadPool(TransportTasksActionTests.class.getSimpleName(), runnableTaskListener); } @After diff --git a/server/src/test/java/org/opensearch/tasks/TaskResourceTrackingServiceTests.java b/server/src/test/java/org/opensearch/tasks/TaskResourceTrackingServiceTests.java new file mode 100644 index 0000000000000..8ba23c5d3219c --- 
/dev/null +++ b/server/src/test/java/org/opensearch/tasks/TaskResourceTrackingServiceTests.java @@ -0,0 +1,97 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.tasks; + +import org.junit.After; +import org.junit.Before; +import org.opensearch.action.admin.cluster.node.tasks.TransportTasksActionTests; +import org.opensearch.action.search.SearchTask; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; + +import java.util.HashMap; +import java.util.concurrent.atomic.AtomicReference; + +import static org.opensearch.tasks.ResourceStats.MEMORY; +import static org.opensearch.tasks.TaskResourceTrackingService.TASK_ID; + +public class TaskResourceTrackingServiceTests extends OpenSearchTestCase { + + private ThreadPool threadPool; + private TaskResourceTrackingService taskResourceTrackingService; + + @Before + public void setup() { + threadPool = new TestThreadPool(TransportTasksActionTests.class.getSimpleName(), new AtomicReference<>()); + taskResourceTrackingService = new TaskResourceTrackingService( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool + ); + } + + @After + public void terminateThreadPool() { + terminate(threadPool); + } + + public void testThreadContextUpdateOnTrackingStart() { + taskResourceTrackingService.setTaskResourceTrackingEnabled(true); + + Task task = new SearchTask(1, "test", "test", () -> "Test", TaskId.EMPTY_TASK_ID, new HashMap<>()); + + String key = "KEY"; + String value = "VALUE"; + + // Prepare thread context + 
threadPool.getThreadContext().putHeader(key, value); + threadPool.getThreadContext().putTransient(key, value); + threadPool.getThreadContext().addResponseHeader(key, value); + + ThreadContext.StoredContext storedContext = taskResourceTrackingService.startTracking(task); + + // All headers should be preserved and Task Id should also be included in thread context + verifyThreadContextFixedHeaders(key, value); + assertEquals((long) threadPool.getThreadContext().getTransient(TASK_ID), task.getId()); + + storedContext.restore(); + + // Post restore only task id should be removed from the thread context + verifyThreadContextFixedHeaders(key, value); + assertNull(threadPool.getThreadContext().getTransient(TASK_ID)); + } + + public void testStopTrackingHandlesCurrentActiveThread() { + taskResourceTrackingService.setTaskResourceTrackingEnabled(true); + Task task = new SearchTask(1, "test", "test", () -> "Test", TaskId.EMPTY_TASK_ID, new HashMap<>()); + ThreadContext.StoredContext storedContext = taskResourceTrackingService.startTracking(task); + long threadId = Thread.currentThread().getId(); + taskResourceTrackingService.taskExecutionStartedOnThread(task.getId(), threadId); + + assertTrue(task.getResourceStats().get(threadId).get(0).isActive()); + assertEquals(0, task.getResourceStats().get(threadId).get(0).getResourceUsageInfo().getStatsInfo().get(MEMORY).getTotalValue()); + + taskResourceTrackingService.stopTracking(task); + + // Makes sure stop tracking marks the current active thread inactive and refreshes the resource stats before returning. 
+ assertFalse(task.getResourceStats().get(threadId).get(0).isActive()); + assertTrue(task.getResourceStats().get(threadId).get(0).getResourceUsageInfo().getStatsInfo().get(MEMORY).getTotalValue() > 0); + } + + private void verifyThreadContextFixedHeaders(String key, String value) { + assertEquals(threadPool.getThreadContext().getHeader(key), value); + assertEquals(threadPool.getThreadContext().getTransient(key), value); + assertEquals(threadPool.getThreadContext().getResponseHeaders().get(key).get(0), value); + } + +} diff --git a/test/framework/src/main/java/org/opensearch/test/tasks/MockTaskManager.java b/test/framework/src/main/java/org/opensearch/test/tasks/MockTaskManager.java index e60871f67ea54..677ec7a0a6600 100644 --- a/test/framework/src/main/java/org/opensearch/test/tasks/MockTaskManager.java +++ b/test/framework/src/main/java/org/opensearch/test/tasks/MockTaskManager.java @@ -39,6 +39,7 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskAwareRequest; import org.opensearch.tasks.TaskManager; @@ -127,6 +128,21 @@ public void waitForTaskCompletion(Task task, long untilInNanos) { super.waitForTaskCompletion(task, untilInNanos); } + @Override + public ThreadContext.StoredContext taskExecutionStarted(Task task) { + for (MockTaskManagerListener listener : listeners) { + listener.taskExecutionStarted(task, false); + } + + ThreadContext.StoredContext storedContext = super.taskExecutionStarted(task); + return () -> { + for (MockTaskManagerListener listener : listeners) { + listener.taskExecutionStarted(task, true); + } + storedContext.restore(); + }; + } + public void addListener(MockTaskManagerListener listener) { listeners.add(listener); } diff --git a/test/framework/src/main/java/org/opensearch/test/tasks/MockTaskManagerListener.java 
b/test/framework/src/main/java/org/opensearch/test/tasks/MockTaskManagerListener.java index eb8361ac552fc..f15f878995aa2 100644 --- a/test/framework/src/main/java/org/opensearch/test/tasks/MockTaskManagerListener.java +++ b/test/framework/src/main/java/org/opensearch/test/tasks/MockTaskManagerListener.java @@ -43,4 +43,7 @@ public interface MockTaskManagerListener { void onTaskUnregistered(Task task); void waitForTaskCompletion(Task task); + + void taskExecutionStarted(Task task, Boolean closeableInvoked); + } diff --git a/test/framework/src/main/java/org/opensearch/threadpool/TestThreadPool.java b/test/framework/src/main/java/org/opensearch/threadpool/TestThreadPool.java index 5f8611d99f0a0..2d97d5bffee01 100644 --- a/test/framework/src/main/java/org/opensearch/threadpool/TestThreadPool.java +++ b/test/framework/src/main/java/org/opensearch/threadpool/TestThreadPool.java @@ -40,6 +40,7 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.ThreadFactory; import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.atomic.AtomicReference; public class TestThreadPool extends ThreadPool { @@ -47,12 +48,29 @@ public class TestThreadPool extends ThreadPool { private volatile boolean returnRejectingExecutor = false; private volatile ThreadPoolExecutor rejectingExecutor; + public TestThreadPool( + String name, + AtomicReference runnableTaskListener, + ExecutorBuilder... customBuilders + ) { + this(name, Settings.EMPTY, runnableTaskListener, customBuilders); + } + public TestThreadPool(String name, ExecutorBuilder... customBuilders) { this(name, Settings.EMPTY, customBuilders); } public TestThreadPool(String name, Settings settings, ExecutorBuilder... 
customBuilders) { - super(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), name).put(settings).build(), customBuilders); + this(name, settings, null, customBuilders); + } + + public TestThreadPool( + String name, + Settings settings, + AtomicReference runnableTaskListener, + ExecutorBuilder... customBuilders + ) { + super(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), name).put(settings).build(), runnableTaskListener, customBuilders); } @Override From 03fbca3f500d8541b4b32c1456997a8493ebe4f5 Mon Sep 17 00:00:00 2001 From: Peng Huo Date: Thu, 21 Apr 2022 07:06:33 -0700 Subject: [PATCH 110/653] Add new multi_term aggregation (#2687) Adds a new multi_term aggregation. The current implementation focuses on adding new type aggregates. Performance (latency) is suboptimal in this iteration, mainly because of brute force encoding/decoding a list of values into bucket keys. A performance improvement change will be made as a follow on. Signed-off-by: Peng Huo --- .../client/RestHighLevelClient.java | 3 + .../search.aggregation/370_multi_terms.yml | 620 ++++++++++++ .../aggregations/bucket/MultiTermsIT.java | 167 ++++ .../bucket/terms/BaseStringTermsTestCase.java | 256 +++++ .../bucket/terms/StringTermsIT.java | 239 +---- .../org/opensearch/search/SearchModule.java | 9 + .../aggregations/AggregationBuilders.java | 8 + .../bucket/terms/InternalMultiTerms.java | 440 +++++++++ .../bucket/terms/InternalTerms.java | 59 +- .../terms/MultiTermsAggregationBuilder.java | 443 +++++++++ .../terms/MultiTermsAggregationFactory.java | 163 ++++ .../bucket/terms/MultiTermsAggregator.java | 438 +++++++++ .../bucket/terms/ParsedMultiTerms.java | 77 ++ .../bucket/terms/ParsedTerms.java | 7 +- .../BaseMultiValuesSourceFieldConfig.java | 216 +++++ .../support/MultiTermsValuesSourceConfig.java | 203 ++++ .../support/MultiValuesSourceFieldConfig.java | 160 +-- .../aggregations/AggregationsTests.java | 2 + .../bucket/terms/InternalMultiTermsTests.java | 116 +++ 
.../MultiTermsAggregationBuilderTests.java | 182 ++++ .../terms/MultiTermsAggregatorTests.java | 909 ++++++++++++++++++ .../MultiTermsValuesSourceConfigTests.java | 65 ++ .../test/InternalAggregationTestCase.java | 3 + 23 files changed, 4378 insertions(+), 407 deletions(-) create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/370_multi_terms.yml create mode 100644 server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MultiTermsIT.java create mode 100644 server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/BaseStringTermsTestCase.java create mode 100644 server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalMultiTerms.java create mode 100644 server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregationBuilder.java create mode 100644 server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregationFactory.java create mode 100644 server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregator.java create mode 100644 server/src/main/java/org/opensearch/search/aggregations/bucket/terms/ParsedMultiTerms.java create mode 100644 server/src/main/java/org/opensearch/search/aggregations/support/BaseMultiValuesSourceFieldConfig.java create mode 100644 server/src/main/java/org/opensearch/search/aggregations/support/MultiTermsValuesSourceConfig.java create mode 100644 server/src/test/java/org/opensearch/search/aggregations/bucket/terms/InternalMultiTermsTests.java create mode 100644 server/src/test/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregationBuilderTests.java create mode 100644 server/src/test/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregatorTests.java create mode 100644 server/src/test/java/org/opensearch/search/aggregations/support/MultiTermsValuesSourceConfigTests.java diff --git 
a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java index 3eebb361fd9c4..e69ca149d697d 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java @@ -139,7 +139,9 @@ import org.opensearch.search.aggregations.bucket.sampler.InternalSampler; import org.opensearch.search.aggregations.bucket.sampler.ParsedSampler; import org.opensearch.search.aggregations.bucket.terms.LongRareTerms; +import org.opensearch.search.aggregations.bucket.terms.MultiTermsAggregationBuilder; import org.opensearch.search.aggregations.bucket.terms.ParsedLongRareTerms; +import org.opensearch.search.aggregations.bucket.terms.ParsedMultiTerms; import org.opensearch.search.aggregations.bucket.terms.ParsedSignificantLongTerms; import org.opensearch.search.aggregations.bucket.terms.ParsedSignificantStringTerms; import org.opensearch.search.aggregations.bucket.terms.ParsedStringRareTerms; @@ -2140,6 +2142,7 @@ static List getDefaultNamedXContents() { map.put(IpRangeAggregationBuilder.NAME, (p, c) -> ParsedBinaryRange.fromXContent(p, (String) c)); map.put(TopHitsAggregationBuilder.NAME, (p, c) -> ParsedTopHits.fromXContent(p, (String) c)); map.put(CompositeAggregationBuilder.NAME, (p, c) -> ParsedComposite.fromXContent(p, (String) c)); + map.put(MultiTermsAggregationBuilder.NAME, (p, c) -> ParsedMultiTerms.fromXContent(p, (String) c)); List entries = map.entrySet() .stream() .map(entry -> new NamedXContentRegistry.Entry(Aggregation.class, new ParseField(entry.getKey()), entry.getValue())) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/370_multi_terms.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/370_multi_terms.yml new file mode 100644 index 0000000000000..a0e4762ea9b53 --- /dev/null +++ 
b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/370_multi_terms.yml @@ -0,0 +1,620 @@ +setup: + - do: + indices.create: + index: test_1 + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + str: + type: keyword + ip: + type: ip + boolean: + type: boolean + integer: + type: long + double: + type: double + number: + type: long + date: + type: date + + - do: + indices.create: + index: test_2 + body: + settings: + number_of_shards: 2 + number_of_replicas: 0 + mappings: + properties: + str: + type: keyword + integer: + type: long + boolean: + type: boolean + + - do: + cluster.health: + wait_for_status: green + +--- +"Basic test": + - skip: + version: "- 2.9.99" + reason: multi_terms aggregation is introduced in 3.0.0 + + - do: + bulk: + index: test_1 + refresh: true + body: + - '{"index": {}}' + - '{"str": "a", "integer": 1}' + - '{"index": {}}' + - '{"str": "a", "integer": 2}' + - '{"index": {}}' + - '{"str": "b", "integer": 1}' + - '{"index": {}}' + - '{"str": "b", "integer": 2}' + - '{"index": {}}' + - '{"str": "a", "integer": 1}' + - '{"index": {}}' + - '{"str": "b", "integer": 1}' + + - do: + search: + index: test_1 + size: 0 + body: + aggs: + m_terms: + multi_terms: + terms: + - field: str + - field: integer + + - length: { aggregations.m_terms.buckets: 4 } + - match: { aggregations.m_terms.buckets.0.key: ["a", 1] } + - match: { aggregations.m_terms.buckets.0.key_as_string: "a|1" } + - match: { aggregations.m_terms.buckets.0.doc_count: 2 } + - match: { aggregations.m_terms.buckets.1.key: ["b", 1] } + - match: { aggregations.m_terms.buckets.1.key_as_string: "b|1" } + - match: { aggregations.m_terms.buckets.1.doc_count: 2 } + - match: { aggregations.m_terms.buckets.2.key: ["a", 2] } + - match: { aggregations.m_terms.buckets.2.key_as_string: "a|2" } + - match: { aggregations.m_terms.buckets.2.doc_count: 1 } + - match: { aggregations.m_terms.buckets.3.key: ["b", 2] } + - match: { 
aggregations.m_terms.buckets.3.key_as_string: "b|2" } + - match: { aggregations.m_terms.buckets.3.doc_count: 1 } + +--- +"IP test": + - skip: + version: "- 2.9.99" + reason: multi_terms aggregation is introduced in 3.0.0 + + - do: + bulk: + index: test_1 + refresh: true + body: + - '{"index": {}}' + - '{"str": "a", "ip": "::1"}' + - '{"index": {}}' + - '{"str": "a", "ip": "127.0.0.1"}' + - '{"index": {}}' + - '{"str": "b", "ip": "::1"}' + - '{"index": {}}' + - '{"str": "b", "ip": "127.0.0.1"}' + - '{"index": {}}' + - '{"str": "a", "ip": "127.0.0.1"}' + - '{"index": {}}' + - '{"str": "b", "ip": "::1"}' + + - do: + search: + index: test_1 + size: 0 + body: + aggs: + m_terms: + multi_terms: + terms: + - field: str + - field: ip + + - length: { aggregations.m_terms.buckets: 4 } + - match: { aggregations.m_terms.buckets.0.key: ["a", "127.0.0.1"] } + - match: { aggregations.m_terms.buckets.0.key_as_string: "a|127.0.0.1" } + - match: { aggregations.m_terms.buckets.0.doc_count: 2 } + - match: { aggregations.m_terms.buckets.1.key: ["b", "::1"] } + - match: { aggregations.m_terms.buckets.1.key_as_string: "b|::1" } + - match: { aggregations.m_terms.buckets.1.doc_count: 2 } + - match: { aggregations.m_terms.buckets.2.key: ["a", "::1"] } + - match: { aggregations.m_terms.buckets.2.key_as_string: "a|::1" } + - match: { aggregations.m_terms.buckets.2.doc_count: 1 } + - match: { aggregations.m_terms.buckets.3.key: ["b", "127.0.0.1"] } + - match: { aggregations.m_terms.buckets.3.key_as_string: "b|127.0.0.1" } + - match: { aggregations.m_terms.buckets.3.doc_count: 1 } + +--- +"Boolean test": + - skip: + version: "- 2.9.99" + reason: multi_terms aggregation is introduced in 3.0.0 + + - do: + bulk: + index: test_1 + refresh: true + body: + - '{"index": {}}' + - '{"str": "a", "boolean": true}' + - '{"index": {}}' + - '{"str": "a", "boolean": false}' + - '{"index": {}}' + - '{"str": "b", "boolean": false}' + - '{"index": {}}' + - '{"str": "b", "boolean": true}' + - '{"index": {}}' + - 
'{"str": "a", "boolean": true}' + - '{"index": {}}' + - '{"str": "b", "boolean": false}' + + - do: + search: + index: test_1 + size: 0 + body: + aggs: + m_terms: + multi_terms: + terms: + - field: str + - field: boolean + + - length: { aggregations.m_terms.buckets: 4 } + - match: { aggregations.m_terms.buckets.0.key: ["a", true] } + - match: { aggregations.m_terms.buckets.0.key_as_string: "a|true" } + - match: { aggregations.m_terms.buckets.0.doc_count: 2 } + - match: { aggregations.m_terms.buckets.1.key: ["b", false] } + - match: { aggregations.m_terms.buckets.1.key_as_string: "b|false" } + - match: { aggregations.m_terms.buckets.1.doc_count: 2 } + - match: { aggregations.m_terms.buckets.2.key: ["a", false] } + - match: { aggregations.m_terms.buckets.2.key_as_string: "a|false" } + - match: { aggregations.m_terms.buckets.2.doc_count: 1 } + - match: { aggregations.m_terms.buckets.3.key: ["b", true] } + - match: { aggregations.m_terms.buckets.3.key_as_string: "b|true" } + - match: { aggregations.m_terms.buckets.3.doc_count: 1 } + +--- +"Double test": + - skip: + version: "- 2.9.99" + reason: multi_terms aggregation is introduced in 3.0.0 + + - do: + bulk: + index: test_1 + refresh: true + body: + - '{"index": {}}' + - '{"str": "a", "double": 1234.5}' + - '{"index": {}}' + - '{"str": "a", "double": 5678.5}' + - '{"index": {}}' + - '{"str": "b", "double": 1234.5}' + - '{"index": {}}' + - '{"str": "a", "double": 1234.5}' + + - do: + search: + index: test_1 + size: 0 + body: + aggs: + m_terms: + multi_terms: + terms: + - field: str + - field: double + + - length: { aggregations.m_terms.buckets: 3 } + - match: { aggregations.m_terms.buckets.0.key: ["a", 1234.5] } + - match: { aggregations.m_terms.buckets.0.key_as_string: "a|1234.5" } + - match: { aggregations.m_terms.buckets.0.doc_count: 2 } + - match: { aggregations.m_terms.buckets.1.key: ["a", 5678.5] } + - match: { aggregations.m_terms.buckets.1.key_as_string: "a|5678.5" } + - match: { 
aggregations.m_terms.buckets.1.doc_count: 1 } + - match: { aggregations.m_terms.buckets.2.key: ["b", 1234.5] } + - match: { aggregations.m_terms.buckets.2.key_as_string: "b|1234.5" } + - match: { aggregations.m_terms.buckets.2.doc_count: 1 } + +--- +"Date test": + - skip: + version: "- 2.9.99" + reason: multi_terms aggregation is introduced in 3.0.0 + + - do: + bulk: + index: test_1 + refresh: true + body: + - '{"index": {}}' + - '{"str": "a", "date": "2022-03-23"}' + - '{"index": {}}' + - '{"str": "a", "date": "2022-03-25"}' + - '{"index": {}}' + - '{"str": "b", "date": "2022-03-23"}' + - '{"index": {}}' + - '{"str": "a", "date": "2022-03-23"}' + + - do: + search: + index: test_1 + size: 0 + body: + aggs: + m_terms: + multi_terms: + terms: + - field: str + - field: date + + - length: { aggregations.m_terms.buckets: 3 } + - match: { aggregations.m_terms.buckets.0.key: ["a", "2022-03-23T00:00:00.000Z"] } + - match: { aggregations.m_terms.buckets.0.key_as_string: "a|2022-03-23T00:00:00.000Z" } + - match: { aggregations.m_terms.buckets.0.doc_count: 2 } + - match: { aggregations.m_terms.buckets.1.key: ["a", "2022-03-25T00:00:00.000Z"] } + - match: { aggregations.m_terms.buckets.1.key_as_string: "a|2022-03-25T00:00:00.000Z" } + - match: { aggregations.m_terms.buckets.1.doc_count: 1 } + - match: { aggregations.m_terms.buckets.2.key: ["b", "2022-03-23T00:00:00.000Z"] } + - match: { aggregations.m_terms.buckets.2.key_as_string: "b|2022-03-23T00:00:00.000Z" } + - match: { aggregations.m_terms.buckets.2.doc_count: 1 } + +--- +"Unmapped keywords": + - skip: + version: "- 2.9.99" + reason: multi_terms aggregation is introduced in 3.0.0 + + - do: + bulk: + index: test_1 + refresh: true + body: + - '{"index": {}}' + - '{"str": "a", "integer": 1}' + - '{"index": {}}' + - '{"str": "a", "integer": 2}' + - '{"index": {}}' + - '{"str": "b", "integer": 1}' + + - do: + search: + index: test_1 + size: 0 + body: + aggs: + m_terms: + multi_terms: + terms: + - field: str + - field: 
unmapped_string + value_type: string + missing: abc + + - length: { aggregations.m_terms.buckets: 2 } + - match: { aggregations.m_terms.buckets.0.key: ["a", "abc"] } + - match: { aggregations.m_terms.buckets.0.key_as_string: "a|abc" } + - match: { aggregations.m_terms.buckets.0.doc_count: 2 } + - match: { aggregations.m_terms.buckets.1.key: ["b", "abc"] } + - match: { aggregations.m_terms.buckets.1.key_as_string: "b|abc" } + - match: { aggregations.m_terms.buckets.1.doc_count: 1 } + +--- +"Null value": + - skip: + version: "- 2.9.99" + reason: multi_terms aggregation is introduced in 3.0.0 + + - do: + bulk: + index: test_1 + refresh: true + body: + - '{"index": {}}' + - '{"str": "a", "integer": null}' + - '{"index": {}}' + - '{"str": "a", "integer": 2}' + - '{"index": {}}' + - '{"str": null, "integer": 1}' + + - do: + search: + index: test_1 + size: 0 + body: + aggs: + m_terms: + multi_terms: + terms: + - field: str + - field: integer + + - length: { aggregations.m_terms.buckets: 1 } + - match: { aggregations.m_terms.buckets.0.key: ["a", 2] } + - match: { aggregations.m_terms.buckets.0.key_as_string: "a|2" } + - match: { aggregations.m_terms.buckets.0.doc_count: 1 } + +--- +"multiple multi_terms bucket": + - skip: + version: "- 2.9.99" + reason: multi_terms aggregation is introduced in 3.0.0 + + - do: + bulk: + index: test_1 + refresh: true + body: + - '{"index": {}}' + - '{"str": "a", "integer": 1, "double": 1234.5, "boolean": true}' + - '{"index": {}}' + - '{"str": "a", "integer": 1, "double": 5678.9, "boolean": false}' + - '{"index": {}}' + - '{"str": "a", "integer": 1, "double": 1234.5, "boolean": true}' + - '{"index": {}}' + - '{"str": "b", "integer": 1, "double": 1234.5, "boolean": true}' + + - do: + search: + index: test_1 + size: 0 + body: + aggs: + m_terms: + multi_terms: + terms: + - field: str + - field: integer + aggs: + n_terms: + multi_terms: + terms: + - field: double + - field: boolean + + - length: { aggregations.m_terms.buckets: 2 } + - match: { 
aggregations.m_terms.buckets.0.key: ["a", 1] } + - match: { aggregations.m_terms.buckets.0.key_as_string: "a|1" } + - match: { aggregations.m_terms.buckets.0.doc_count: 3 } + - match: { aggregations.m_terms.buckets.0.n_terms.buckets.0.key: [1234.5, true] } + - match: { aggregations.m_terms.buckets.0.n_terms.buckets.0.key_as_string: "1234.5|true" } + - match: { aggregations.m_terms.buckets.0.n_terms.buckets.0.doc_count: 2 } + - match: { aggregations.m_terms.buckets.0.n_terms.buckets.1.key: [5678.9, false] } + - match: { aggregations.m_terms.buckets.0.n_terms.buckets.1.key_as_string: "5678.9|false" } + - match: { aggregations.m_terms.buckets.0.n_terms.buckets.1.doc_count: 1 } + - match: { aggregations.m_terms.buckets.1.key: ["b", 1] } + - match: { aggregations.m_terms.buckets.1.key_as_string: "b|1" } + - match: { aggregations.m_terms.buckets.1.doc_count: 1 } + +--- +"ordered by metrics": + - skip: + version: "- 3.0.0" + reason: multi_terms aggregation is introduced in 3.0.0 + + - do: + bulk: + index: test_1 + refresh: true + body: + - '{"index": {}}' + - '{"str": "a", "double": 1234.5, "integer": 1}' + - '{"index": {}}' + - '{"str": "b", "double": 5678.9, "integer": 2}' + - '{"index": {}}' + - '{"str": "b", "double": 5678.9, "integer": 2}' + - '{"index": {}}' + - '{"str": "a", "double": 1234.5, "integer": 1}' + + - do: + search: + index: test_1 + size: 0 + body: + aggs: + m_terms: + multi_terms: + terms: + - field: str + - field: double + order: + the_int_sum: desc + aggs: + the_int_sum: + sum: + field: integer + + - length: { aggregations.m_terms.buckets: 2 } + - match: { aggregations.m_terms.buckets.0.key: ["b", 5678.9] } + - match: { aggregations.m_terms.buckets.0.key_as_string: "b|5678.9" } + - match: { aggregations.m_terms.buckets.0.the_int_sum.value: 4.0 } + - match: { aggregations.m_terms.buckets.0.doc_count: 2 } + - match: { aggregations.m_terms.buckets.1.key: ["a", 1234.5] } + - match: { aggregations.m_terms.buckets.1.key_as_string: "a|1234.5" } + - match: { 
aggregations.m_terms.buckets.1.the_int_sum.value: 2.0 } + - match: { aggregations.m_terms.buckets.1.doc_count: 2 } + +--- +"top 1 ordered by metrics ": + - skip: + version: "- 2.9.99" + reason: multi_terms aggregation is introduced in 3.0.0 + + - do: + bulk: + index: test_1 + refresh: true + body: + - '{"index": {}}' + - '{"str": "a", "double": 1234.5, "integer": 1}' + - '{"index": {}}' + - '{"str": "b", "double": 5678.9, "integer": 2}' + - '{"index": {}}' + - '{"str": "b", "double": 5678.9, "integer": 2}' + - '{"index": {}}' + - '{"str": "a", "double": 1234.5, "integer": 1}' + + - do: + search: + index: test_1 + size: 0 + body: + aggs: + m_terms: + multi_terms: + terms: + - field: str + - field: double + order: + the_int_sum: desc + size: 1 + aggs: + the_int_sum: + sum: + field: integer + + - length: { aggregations.m_terms.buckets: 1 } + - match: { aggregations.m_terms.buckets.0.key: ["b", 5678.9] } + - match: { aggregations.m_terms.buckets.0.key_as_string: "b|5678.9" } + - match: { aggregations.m_terms.buckets.0.the_int_sum.value: 4.0 } + - match: { aggregations.m_terms.buckets.0.doc_count: 2 } + +--- +"min_doc_count": + - skip: + version: "- 2.9.99" + reason: multi_terms aggregation is introduced in 3.0.0 + + - do: + bulk: + index: test_1 + refresh: true + body: + - '{"index": {}}' + - '{"str": "a", "integer": 1}' + - '{"index": {}}' + - '{"str": "a", "integer": 1}' + - '{"index": {}}' + - '{"str": "b", "integer": 1}' + - '{"index": {}}' + - '{"str": "c", "integer": 1}' + + - do: + search: + index: test_1 + body: + size: 0 + query: + simple_query_string: + fields: [str] + query: a b + minimum_should_match: 1 + aggs: + m_terms: + multi_terms: + terms: + - field: str + - field: integer + min_doc_count: 2 + + - length: { aggregations.m_terms.buckets: 1 } + - match: { aggregations.m_terms.buckets.0.key: ["a", 1] } + - match: { aggregations.m_terms.buckets.0.key_as_string: "a|1" } + - match: { aggregations.m_terms.buckets.0.doc_count: 2 } + + - do: + search: + index: 
test_1 + body: + size: 0 + query: + simple_query_string: + fields: [str] + query: a b + minimum_should_match: 1 + aggs: + m_terms: + multi_terms: + terms: + - field: str + - field: integer + min_doc_count: 0 + + - length: { aggregations.m_terms.buckets: 3 } + - match: { aggregations.m_terms.buckets.0.key: ["a", 1] } + - match: { aggregations.m_terms.buckets.0.key_as_string: "a|1" } + - match: { aggregations.m_terms.buckets.0.doc_count: 2 } + - match: { aggregations.m_terms.buckets.1.key: ["b", 1] } + - match: { aggregations.m_terms.buckets.1.key_as_string: "b|1" } + - match: { aggregations.m_terms.buckets.1.doc_count: 1 } + - match: { aggregations.m_terms.buckets.2.key: ["c", 1] } + - match: { aggregations.m_terms.buckets.2.key_as_string: "c|1" } + - match: { aggregations.m_terms.buckets.2.doc_count: 0 } + +--- +"sum_other_doc_count": + - skip: + version: "- 2.9.99" + reason: multi_terms aggregation is introduced in 3.0.0 + + - do: + bulk: + index: test_2 + refresh: true + body: + - '{"index": {"routing": "s1"}}' + - '{"str": "a", "integer": 1}' + - '{"index": {"routing": "s1"}}' + - '{"str": "a", "integer": 1}' + - '{"index": {"routing": "s1"}}' + - '{"str": "a", "integer": 1}' + - '{"index": {"routing": "s1"}}' + - '{"str": "a", "integer": 1}' + - '{"index": {"routing": "s2"}}' + - '{"str": "b", "integer": 1}' + - '{"index": {"routing": "s2"}}' + - '{"str": "b", "integer": 1}' + - '{"index": {"routing": "s2"}}' + - '{"str": "b", "integer": 1}' + - '{"index": {"routing": "s2"}}' + - '{"str": "a", "integer": 1}' + + - do: + search: + index: test_2 + size: 0 + body: + aggs: + m_terms: + multi_terms: + size: 1 + shard_size: 1 + terms: + - field: str + - field: integer + + - length: { aggregations.m_terms.buckets: 1 } + - match: { aggregations.m_terms.sum_other_doc_count: 4 } + - match: { aggregations.m_terms.buckets.0.key: ["a", 1] } + - match: { aggregations.m_terms.buckets.0.key_as_string: "a|1" } + - match: { aggregations.m_terms.buckets.0.doc_count: 4 } diff 
--git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MultiTermsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MultiTermsIT.java new file mode 100644 index 0000000000000..7d7f80c8ac758 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MultiTermsIT.java @@ -0,0 +1,167 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.aggregations.bucket; + +import org.opensearch.action.search.SearchResponse; +import org.opensearch.script.Script; +import org.opensearch.script.ScriptType; +import org.opensearch.search.aggregations.bucket.terms.BaseStringTermsTestCase; +import org.opensearch.search.aggregations.bucket.terms.StringTermsIT; +import org.opensearch.search.aggregations.bucket.terms.Terms; +import org.opensearch.search.aggregations.support.MultiTermsValuesSourceConfig; +import org.opensearch.search.aggregations.support.ValueType; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.util.Collections; + +import static java.util.Arrays.asList; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.core.IsNull.notNullValue; +import static org.opensearch.search.aggregations.AggregationBuilders.multiTerms; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; + +/** + * Extend {@link BaseStringTermsTestCase}. 
+ */ +@OpenSearchIntegTestCase.SuiteScopeTestCase +public class MultiTermsIT extends BaseStringTermsTestCase { + + // the main purpose of this test is to make sure we're not allocating 2GB of memory per shard + public void testSizeIsZero() { + final int minDocCount = randomInt(1); + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> client().prepareSearch("high_card_idx") + .addAggregation( + multiTerms("mterms").terms( + asList( + new MultiTermsValuesSourceConfig.Builder().setFieldName(SINGLE_VALUED_FIELD_NAME).build(), + new MultiTermsValuesSourceConfig.Builder().setFieldName(MULTI_VALUED_FIELD_NAME).build() + ) + ).minDocCount(minDocCount).size(0) + ) + .get() + ); + assertThat(exception.getMessage(), containsString("[size] must be greater than 0. Found [0] in [mterms]")); + } + + public void testSingleValuedFieldWithValueScript() throws Exception { + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + multiTerms("mterms").terms( + asList( + new MultiTermsValuesSourceConfig.Builder().setFieldName("i").build(), + new MultiTermsValuesSourceConfig.Builder().setFieldName(SINGLE_VALUED_FIELD_NAME) + .setScript( + new Script( + ScriptType.INLINE, + StringTermsIT.CustomScriptPlugin.NAME, + "'foo_' + _value", + Collections.emptyMap() + ) + ) + .build() + ) + ) + ) + .get(); + + assertSearchResponse(response); + + Terms terms = response.getAggregations().get("mterms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("mterms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + for (int i = 0; i < 5; i++) { + Terms.Bucket bucket = terms.getBucketByKey(i + "|foo_val" + i); + assertThat(bucket, notNullValue()); + assertThat(key(bucket), equalTo(i + "|foo_val" + i)); + assertThat(bucket.getDocCount(), equalTo(1L)); + } + } + + public void testSingleValuedFieldWithScript() throws Exception { + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + 
multiTerms("mterms").terms( + asList( + new MultiTermsValuesSourceConfig.Builder().setFieldName("i").build(), + new MultiTermsValuesSourceConfig.Builder().setScript( + new Script( + ScriptType.INLINE, + StringTermsIT.CustomScriptPlugin.NAME, + "doc['" + SINGLE_VALUED_FIELD_NAME + "'].value", + Collections.emptyMap() + ) + ).setUserValueTypeHint(ValueType.STRING).build() + ) + ) + ) + .get(); + + assertSearchResponse(response); + + Terms terms = response.getAggregations().get("mterms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("mterms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + for (int i = 0; i < 5; i++) { + Terms.Bucket bucket = terms.getBucketByKey(i + "|val" + i); + assertThat(bucket, notNullValue()); + assertThat(key(bucket), equalTo(i + "|val" + i)); + assertThat(bucket.getDocCount(), equalTo(1L)); + } + } + + public void testMultiValuedFieldWithValueScript() throws Exception { + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + multiTerms("mterms").terms( + asList( + new MultiTermsValuesSourceConfig.Builder().setFieldName("tag").build(), + new MultiTermsValuesSourceConfig.Builder().setFieldName(MULTI_VALUED_FIELD_NAME) + .setScript( + new Script( + ScriptType.INLINE, + StringTermsIT.CustomScriptPlugin.NAME, + "_value.substring(0,3)", + Collections.emptyMap() + ) + ) + .build() + ) + ) + ) + .get(); + + assertSearchResponse(response); + + Terms terms = response.getAggregations().get("mterms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("mterms")); + assertThat(terms.getBuckets().size(), equalTo(2)); + + Terms.Bucket bucket = terms.getBucketByKey("more|val"); + assertThat(bucket, notNullValue()); + assertThat(key(bucket), equalTo("more|val")); + assertThat(bucket.getDocCount(), equalTo(3L)); + + bucket = terms.getBucketByKey("less|val"); + assertThat(bucket, notNullValue()); + assertThat(key(bucket), equalTo("less|val")); + 
assertThat(bucket.getDocCount(), equalTo(2L)); + } + + private MultiTermsValuesSourceConfig field(String name) { + return new MultiTermsValuesSourceConfig.Builder().setFieldName(name).build(); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/BaseStringTermsTestCase.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/BaseStringTermsTestCase.java new file mode 100644 index 0000000000000..7775618ba5b13 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/BaseStringTermsTestCase.java @@ -0,0 +1,256 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.aggregations.bucket.terms; + +import org.junit.After; +import org.junit.Before; +import org.opensearch.action.index.IndexRequestBuilder; +import org.opensearch.common.Strings; +import org.opensearch.index.fielddata.ScriptDocValues; +import org.opensearch.plugins.Plugin; +import org.opensearch.search.aggregations.AggregationTestScriptsPlugin; +import org.opensearch.search.aggregations.bucket.AbstractTermsTestCase; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.Function; + +import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +@OpenSearchIntegTestCase.SuiteScopeTestCase +public class BaseStringTermsTestCase extends AbstractTermsTestCase { + + protected static final String SINGLE_VALUED_FIELD_NAME = "s_value"; + protected static final String MULTI_VALUED_FIELD_NAME = 
"s_values"; + protected static Map> expectedMultiSortBuckets; + + @Override + protected Collection> nodePlugins() { + return Collections.singleton(CustomScriptPlugin.class); + } + + @Before + public void randomizeOptimizations() { + TermsAggregatorFactory.COLLECT_SEGMENT_ORDS = randomBoolean(); + TermsAggregatorFactory.REMAP_GLOBAL_ORDS = randomBoolean(); + } + + @After + public void resetOptimizations() { + TermsAggregatorFactory.COLLECT_SEGMENT_ORDS = null; + TermsAggregatorFactory.REMAP_GLOBAL_ORDS = null; + } + + public static class CustomScriptPlugin extends AggregationTestScriptsPlugin { + + @Override + protected Map, Object>> pluginScripts() { + Map, Object>> scripts = super.pluginScripts(); + + scripts.put("'foo_' + _value", vars -> "foo_" + (String) vars.get("_value")); + scripts.put("_value.substring(0,3)", vars -> ((String) vars.get("_value")).substring(0, 3)); + + scripts.put("doc['" + MULTI_VALUED_FIELD_NAME + "']", vars -> { + Map doc = (Map) vars.get("doc"); + return doc.get(MULTI_VALUED_FIELD_NAME); + }); + + scripts.put("doc['" + SINGLE_VALUED_FIELD_NAME + "'].value", vars -> { + Map doc = (Map) vars.get("doc"); + ScriptDocValues.Strings value = (ScriptDocValues.Strings) doc.get(SINGLE_VALUED_FIELD_NAME); + return value.getValue(); + }); + + scripts.put("42", vars -> 42); + + return scripts; + } + + @Override + protected Map, Object>> nonDeterministicPluginScripts() { + Map, Object>> scripts = new HashMap<>(); + + scripts.put("Math.random()", vars -> randomDouble()); + + return scripts; + } + } + + @Override + public void setupSuiteScopeCluster() throws Exception { + assertAcked( + client().admin() + .indices() + .prepareCreate("idx") + .setMapping(SINGLE_VALUED_FIELD_NAME, "type=keyword", MULTI_VALUED_FIELD_NAME, "type=keyword", "tag", "type=keyword") + .get() + ); + List builders = new ArrayList<>(); + for (int i = 0; i < 5; i++) { + builders.add( + client().prepareIndex("idx") + .setSource( + jsonBuilder().startObject() + 
.field(SINGLE_VALUED_FIELD_NAME, "val" + i) + .field("i", i) + .field("constant", 1) + .field("tag", i < 5 / 2 + 1 ? "more" : "less") + .startArray(MULTI_VALUED_FIELD_NAME) + .value("val" + i) + .value("val" + (i + 1)) + .endArray() + .endObject() + ) + ); + } + + getMultiSortDocs(builders); + + assertAcked( + client().admin() + .indices() + .prepareCreate("high_card_idx") + .setMapping(SINGLE_VALUED_FIELD_NAME, "type=keyword", MULTI_VALUED_FIELD_NAME, "type=keyword", "tag", "type=keyword") + .get() + ); + for (int i = 0; i < 100; i++) { + builders.add( + client().prepareIndex("high_card_idx") + .setSource( + jsonBuilder().startObject() + .field(SINGLE_VALUED_FIELD_NAME, "val" + Strings.padStart(i + "", 3, '0')) + .startArray(MULTI_VALUED_FIELD_NAME) + .value("val" + Strings.padStart(i + "", 3, '0')) + .value("val" + Strings.padStart((i + 1) + "", 3, '0')) + .endArray() + .endObject() + ) + ); + } + prepareCreate("empty_bucket_idx").setMapping(SINGLE_VALUED_FIELD_NAME, "type=integer").get(); + + for (int i = 0; i < 2; i++) { + builders.add( + client().prepareIndex("empty_bucket_idx") + .setId("" + i) + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject()) + ); + } + indexRandom(true, builders); + createIndex("idx_unmapped"); + ensureSearchable(); + } + + private void getMultiSortDocs(List builders) throws IOException { + expectedMultiSortBuckets = new HashMap<>(); + Map bucketProps = new HashMap<>(); + bucketProps.put("_term", "val1"); + bucketProps.put("_count", 3L); + bucketProps.put("avg_l", 1d); + bucketProps.put("sum_d", 6d); + expectedMultiSortBuckets.put((String) bucketProps.get("_term"), bucketProps); + bucketProps = new HashMap<>(); + bucketProps.put("_term", "val2"); + bucketProps.put("_count", 3L); + bucketProps.put("avg_l", 2d); + bucketProps.put("sum_d", 6d); + expectedMultiSortBuckets.put((String) bucketProps.get("_term"), bucketProps); + bucketProps = new HashMap<>(); + bucketProps.put("_term", "val3"); + 
bucketProps.put("_count", 2L); + bucketProps.put("avg_l", 3d); + bucketProps.put("sum_d", 3d); + expectedMultiSortBuckets.put((String) bucketProps.get("_term"), bucketProps); + bucketProps = new HashMap<>(); + bucketProps.put("_term", "val4"); + bucketProps.put("_count", 2L); + bucketProps.put("avg_l", 3d); + bucketProps.put("sum_d", 4d); + expectedMultiSortBuckets.put((String) bucketProps.get("_term"), bucketProps); + bucketProps = new HashMap<>(); + bucketProps.put("_term", "val5"); + bucketProps.put("_count", 2L); + bucketProps.put("avg_l", 5d); + bucketProps.put("sum_d", 3d); + expectedMultiSortBuckets.put((String) bucketProps.get("_term"), bucketProps); + bucketProps = new HashMap<>(); + bucketProps.put("_term", "val6"); + bucketProps.put("_count", 1L); + bucketProps.put("avg_l", 5d); + bucketProps.put("sum_d", 1d); + expectedMultiSortBuckets.put((String) bucketProps.get("_term"), bucketProps); + bucketProps = new HashMap<>(); + bucketProps.put("_term", "val7"); + bucketProps.put("_count", 1L); + bucketProps.put("avg_l", 5d); + bucketProps.put("sum_d", 1d); + expectedMultiSortBuckets.put((String) bucketProps.get("_term"), bucketProps); + + assertAcked( + client().admin() + .indices() + .prepareCreate("sort_idx") + .setMapping(SINGLE_VALUED_FIELD_NAME, "type=keyword", MULTI_VALUED_FIELD_NAME, "type=keyword", "tag", "type=keyword") + .get() + ); + for (int i = 1; i <= 3; i++) { + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val1").field("l", 1).field("d", i).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val2").field("l", 2).field("d", i).endObject()) + ); + } + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val3").field("l", 3).field("d", 1).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + 
.setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val3").field("l", 3).field("d", 2).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val4").field("l", 3).field("d", 1).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val4").field("l", 3).field("d", 3).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val5").field("l", 5).field("d", 1).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val5").field("l", 5).field("d", 2).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val6").field("l", 5).field("d", 1).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val7").field("l", 5).field("d", 1).endObject()) + ); + } + + protected String key(Terms.Bucket bucket) { + return bucket.getKeyAsString(); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/StringTermsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/StringTermsIT.java index 3190bcb72fcbb..64f81cdcdec98 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/StringTermsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/StringTermsIT.java @@ -32,25 +32,19 @@ package org.opensearch.search.aggregations.bucket.terms; import org.opensearch.OpenSearchException; -import org.opensearch.action.index.IndexRequestBuilder; import 
org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; -import org.opensearch.common.Strings; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentParseException; import org.opensearch.common.xcontent.XContentParser; import org.opensearch.common.xcontent.json.JsonXContent; -import org.opensearch.index.fielddata.ScriptDocValues; import org.opensearch.index.mapper.IndexFieldMapper; import org.opensearch.index.query.QueryBuilders; -import org.opensearch.plugins.Plugin; import org.opensearch.script.Script; import org.opensearch.script.ScriptType; import org.opensearch.search.aggregations.AggregationExecutionException; -import org.opensearch.search.aggregations.AggregationTestScriptsPlugin; import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.opensearch.search.aggregations.BucketOrder; -import org.opensearch.search.aggregations.bucket.AbstractTermsTestCase; import org.opensearch.search.aggregations.bucket.filter.Filter; import org.opensearch.search.aggregations.bucket.terms.Terms.Bucket; import org.opensearch.search.aggregations.metrics.Avg; @@ -60,23 +54,13 @@ import org.opensearch.search.aggregations.support.ValueType; import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.test.OpenSearchIntegTestCase; -import org.junit.After; -import org.junit.Before; -import java.io.IOException; -import java.util.ArrayList; import java.util.Arrays; -import java.util.Collection; import java.util.Collections; -import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; -import java.util.List; -import java.util.Map; import java.util.Set; -import java.util.function.Function; -import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.termQuery; import static org.opensearch.search.aggregations.AggregationBuilders.avg; import static 
org.opensearch.search.aggregations.AggregationBuilders.extendedStats; @@ -93,228 +77,7 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class StringTermsIT extends AbstractTermsTestCase { - - private static final String SINGLE_VALUED_FIELD_NAME = "s_value"; - private static final String MULTI_VALUED_FIELD_NAME = "s_values"; - private static Map> expectedMultiSortBuckets; - - @Override - protected Collection> nodePlugins() { - return Collections.singleton(CustomScriptPlugin.class); - } - - @Before - public void randomizeOptimizations() { - TermsAggregatorFactory.COLLECT_SEGMENT_ORDS = randomBoolean(); - TermsAggregatorFactory.REMAP_GLOBAL_ORDS = randomBoolean(); - } - - @After - public void resetOptimizations() { - TermsAggregatorFactory.COLLECT_SEGMENT_ORDS = null; - TermsAggregatorFactory.REMAP_GLOBAL_ORDS = null; - } - - public static class CustomScriptPlugin extends AggregationTestScriptsPlugin { - - @Override - protected Map, Object>> pluginScripts() { - Map, Object>> scripts = super.pluginScripts(); - - scripts.put("'foo_' + _value", vars -> "foo_" + (String) vars.get("_value")); - scripts.put("_value.substring(0,3)", vars -> ((String) vars.get("_value")).substring(0, 3)); - - scripts.put("doc['" + MULTI_VALUED_FIELD_NAME + "']", vars -> { - Map doc = (Map) vars.get("doc"); - return doc.get(MULTI_VALUED_FIELD_NAME); - }); - - scripts.put("doc['" + SINGLE_VALUED_FIELD_NAME + "'].value", vars -> { - Map doc = (Map) vars.get("doc"); - ScriptDocValues.Strings value = (ScriptDocValues.Strings) doc.get(SINGLE_VALUED_FIELD_NAME); - return value.getValue(); - }); - - scripts.put("42", vars -> 42); - - return scripts; - } - - @Override - protected Map, Object>> nonDeterministicPluginScripts() { - Map, Object>> scripts = new HashMap<>(); - - scripts.put("Math.random()", vars -> StringTermsIT.randomDouble()); - - return scripts; - } - } - - @Override - public void setupSuiteScopeCluster() throws Exception { - 
assertAcked( - client().admin() - .indices() - .prepareCreate("idx") - .setMapping(SINGLE_VALUED_FIELD_NAME, "type=keyword", MULTI_VALUED_FIELD_NAME, "type=keyword", "tag", "type=keyword") - .get() - ); - List builders = new ArrayList<>(); - for (int i = 0; i < 5; i++) { - builders.add( - client().prepareIndex("idx") - .setSource( - jsonBuilder().startObject() - .field(SINGLE_VALUED_FIELD_NAME, "val" + i) - .field("i", i) - .field("constant", 1) - .field("tag", i < 5 / 2 + 1 ? "more" : "less") - .startArray(MULTI_VALUED_FIELD_NAME) - .value("val" + i) - .value("val" + (i + 1)) - .endArray() - .endObject() - ) - ); - } - - getMultiSortDocs(builders); - - assertAcked( - client().admin() - .indices() - .prepareCreate("high_card_idx") - .setMapping(SINGLE_VALUED_FIELD_NAME, "type=keyword", MULTI_VALUED_FIELD_NAME, "type=keyword", "tag", "type=keyword") - .get() - ); - for (int i = 0; i < 100; i++) { - builders.add( - client().prepareIndex("high_card_idx") - .setSource( - jsonBuilder().startObject() - .field(SINGLE_VALUED_FIELD_NAME, "val" + Strings.padStart(i + "", 3, '0')) - .startArray(MULTI_VALUED_FIELD_NAME) - .value("val" + Strings.padStart(i + "", 3, '0')) - .value("val" + Strings.padStart((i + 1) + "", 3, '0')) - .endArray() - .endObject() - ) - ); - } - prepareCreate("empty_bucket_idx").setMapping(SINGLE_VALUED_FIELD_NAME, "type=integer").get(); - - for (int i = 0; i < 2; i++) { - builders.add( - client().prepareIndex("empty_bucket_idx") - .setId("" + i) - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject()) - ); - } - indexRandom(true, builders); - createIndex("idx_unmapped"); - ensureSearchable(); - } - - private void getMultiSortDocs(List builders) throws IOException { - expectedMultiSortBuckets = new HashMap<>(); - Map bucketProps = new HashMap<>(); - bucketProps.put("_term", "val1"); - bucketProps.put("_count", 3L); - bucketProps.put("avg_l", 1d); - bucketProps.put("sum_d", 6d); - expectedMultiSortBuckets.put((String) 
bucketProps.get("_term"), bucketProps); - bucketProps = new HashMap<>(); - bucketProps.put("_term", "val2"); - bucketProps.put("_count", 3L); - bucketProps.put("avg_l", 2d); - bucketProps.put("sum_d", 6d); - expectedMultiSortBuckets.put((String) bucketProps.get("_term"), bucketProps); - bucketProps = new HashMap<>(); - bucketProps.put("_term", "val3"); - bucketProps.put("_count", 2L); - bucketProps.put("avg_l", 3d); - bucketProps.put("sum_d", 3d); - expectedMultiSortBuckets.put((String) bucketProps.get("_term"), bucketProps); - bucketProps = new HashMap<>(); - bucketProps.put("_term", "val4"); - bucketProps.put("_count", 2L); - bucketProps.put("avg_l", 3d); - bucketProps.put("sum_d", 4d); - expectedMultiSortBuckets.put((String) bucketProps.get("_term"), bucketProps); - bucketProps = new HashMap<>(); - bucketProps.put("_term", "val5"); - bucketProps.put("_count", 2L); - bucketProps.put("avg_l", 5d); - bucketProps.put("sum_d", 3d); - expectedMultiSortBuckets.put((String) bucketProps.get("_term"), bucketProps); - bucketProps = new HashMap<>(); - bucketProps.put("_term", "val6"); - bucketProps.put("_count", 1L); - bucketProps.put("avg_l", 5d); - bucketProps.put("sum_d", 1d); - expectedMultiSortBuckets.put((String) bucketProps.get("_term"), bucketProps); - bucketProps = new HashMap<>(); - bucketProps.put("_term", "val7"); - bucketProps.put("_count", 1L); - bucketProps.put("avg_l", 5d); - bucketProps.put("sum_d", 1d); - expectedMultiSortBuckets.put((String) bucketProps.get("_term"), bucketProps); - - assertAcked( - client().admin() - .indices() - .prepareCreate("sort_idx") - .setMapping(SINGLE_VALUED_FIELD_NAME, "type=keyword", MULTI_VALUED_FIELD_NAME, "type=keyword", "tag", "type=keyword") - .get() - ); - for (int i = 1; i <= 3; i++) { - builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val1").field("l", 1).field("d", i).endObject()) - ); - builders.add( - client().prepareIndex("sort_idx") - 
.setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val2").field("l", 2).field("d", i).endObject()) - ); - } - builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val3").field("l", 3).field("d", 1).endObject()) - ); - builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val3").field("l", 3).field("d", 2).endObject()) - ); - builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val4").field("l", 3).field("d", 1).endObject()) - ); - builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val4").field("l", 3).field("d", 3).endObject()) - ); - builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val5").field("l", 5).field("d", 1).endObject()) - ); - builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val5").field("l", 5).field("d", 2).endObject()) - ); - builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val6").field("l", 5).field("d", 1).endObject()) - ); - builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val7").field("l", 5).field("d", 1).endObject()) - ); - } - - private String key(Terms.Bucket bucket) { - return bucket.getKeyAsString(); - } +public class StringTermsIT extends BaseStringTermsTestCase { // the main purpose of this test is to make sure we're not allocating 2GB of memory per shard public void testSizeIsZero() { diff --git a/server/src/main/java/org/opensearch/search/SearchModule.java b/server/src/main/java/org/opensearch/search/SearchModule.java index 
dc5309b50abb8..bf0cc646d271e 100644 --- a/server/src/main/java/org/opensearch/search/SearchModule.java +++ b/server/src/main/java/org/opensearch/search/SearchModule.java @@ -159,8 +159,11 @@ import org.opensearch.search.aggregations.bucket.sampler.SamplerAggregationBuilder; import org.opensearch.search.aggregations.bucket.sampler.UnmappedSampler; import org.opensearch.search.aggregations.bucket.terms.DoubleTerms; +import org.opensearch.search.aggregations.bucket.terms.InternalMultiTerms; import org.opensearch.search.aggregations.bucket.terms.LongRareTerms; import org.opensearch.search.aggregations.bucket.terms.LongTerms; +import org.opensearch.search.aggregations.bucket.terms.MultiTermsAggregationBuilder; +import org.opensearch.search.aggregations.bucket.terms.MultiTermsAggregationFactory; import org.opensearch.search.aggregations.bucket.terms.RareTermsAggregationBuilder; import org.opensearch.search.aggregations.bucket.terms.SignificantLongTerms; import org.opensearch.search.aggregations.bucket.terms.SignificantStringTerms; @@ -687,6 +690,12 @@ private ValuesSourceRegistry registerAggregations(List plugins) { .setAggregatorRegistrar(CompositeAggregationBuilder::registerAggregators), builder ); + registerAggregation( + new AggregationSpec(MultiTermsAggregationBuilder.NAME, MultiTermsAggregationBuilder::new, MultiTermsAggregationBuilder.PARSER) + .addResultReader(InternalMultiTerms::new) + .setAggregatorRegistrar(MultiTermsAggregationFactory::registerAggregators), + builder + ); registerFromPlugin(plugins, SearchPlugin::getAggregations, (agg) -> this.registerAggregation(agg, builder)); // after aggs have been registered, see if there are any new VSTypes that need to be linked to core fields diff --git a/server/src/main/java/org/opensearch/search/aggregations/AggregationBuilders.java b/server/src/main/java/org/opensearch/search/aggregations/AggregationBuilders.java index 99a1107675edf..69a9fd92ac459 100644 --- 
a/server/src/main/java/org/opensearch/search/aggregations/AggregationBuilders.java +++ b/server/src/main/java/org/opensearch/search/aggregations/AggregationBuilders.java @@ -66,6 +66,7 @@ import org.opensearch.search.aggregations.bucket.sampler.DiversifiedAggregationBuilder; import org.opensearch.search.aggregations.bucket.sampler.Sampler; import org.opensearch.search.aggregations.bucket.sampler.SamplerAggregationBuilder; +import org.opensearch.search.aggregations.bucket.terms.MultiTermsAggregationBuilder; import org.opensearch.search.aggregations.bucket.terms.SignificantTerms; import org.opensearch.search.aggregations.bucket.terms.SignificantTermsAggregationBuilder; import org.opensearch.search.aggregations.bucket.terms.SignificantTextAggregationBuilder; @@ -388,4 +389,11 @@ public static ScriptedMetricAggregationBuilder scriptedMetric(String name) { public static CompositeAggregationBuilder composite(String name, List> sources) { return new CompositeAggregationBuilder(name, sources); } + + /** + * Create a new {@link MultiTermsAggregationBuilder} aggregation with the given name. + */ + public static MultiTermsAggregationBuilder multiTerms(String name) { + return new MultiTermsAggregationBuilder(name); + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalMultiTerms.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalMultiTerms.java new file mode 100644 index 0000000000000..fd1758d3ea8ba --- /dev/null +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalMultiTerms.java @@ -0,0 +1,440 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.aggregations.bucket.terms; + +import org.apache.lucene.util.BytesRef; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.search.DocValueFormat; +import org.opensearch.search.aggregations.AggregationExecutionException; +import org.opensearch.search.aggregations.Aggregations; +import org.opensearch.search.aggregations.BucketOrder; +import org.opensearch.search.aggregations.InternalAggregations; +import org.opensearch.search.aggregations.KeyComparable; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Comparator; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.function.Function; +import java.util.stream.Collectors; + +/** + * Result of the {@link MultiTermsAggregator}. + */ +public class InternalMultiTerms extends InternalTerms { + /** + * Internal Multi Terms Bucket. + */ + public static class Bucket extends InternalTerms.AbstractInternalBucket implements KeyComparable { + + protected long bucketOrd; + /** + * list of terms values. + */ + protected List termValues; + protected long docCount; + protected InternalAggregations aggregations; + protected boolean showDocCountError; + protected long docCountError; + /** + * A list of term's {@link DocValueFormat}. + */ + protected final List termFormats; + + private static final String PIPE = "|"; + + /** + * Create default {@link Bucket}. 
+ */ + public static Bucket EMPTY(boolean showTermDocCountError, List formats) { + return new Bucket(null, 0, null, showTermDocCountError, 0, formats); + } + + public Bucket( + List values, + long docCount, + InternalAggregations aggregations, + boolean showDocCountError, + long docCountError, + List formats + ) { + this.termValues = values; + this.docCount = docCount; + this.aggregations = aggregations; + this.showDocCountError = showDocCountError; + this.docCountError = docCountError; + this.termFormats = formats; + } + + public Bucket(StreamInput in, List formats, boolean showDocCountError) throws IOException { + this.termValues = in.readList(StreamInput::readGenericValue); + this.docCount = in.readVLong(); + this.aggregations = InternalAggregations.readFrom(in); + this.showDocCountError = showDocCountError; + this.docCountError = -1; + if (showDocCountError) { + this.docCountError = in.readLong(); + } + this.termFormats = formats; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(CommonFields.KEY.getPreferredName(), getKey()); + builder.field(CommonFields.KEY_AS_STRING.getPreferredName(), getKeyAsString()); + builder.field(CommonFields.DOC_COUNT.getPreferredName(), getDocCount()); + if (showDocCountError) { + builder.field(DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME.getPreferredName(), getDocCountError()); + } + aggregations.toXContentInternal(builder, params); + builder.endObject(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeCollection(termValues, StreamOutput::writeGenericValue); + out.writeVLong(docCount); + aggregations.writeTo(out); + if (showDocCountError) { + out.writeLong(docCountError); + } + } + + @Override + public List getKey() { + List keys = new ArrayList<>(termValues.size()); + for (int i = 0; i < termValues.size(); i++) { + keys.add(formatObject(termValues.get(i), 
termFormats.get(i))); + } + return keys; + } + + @Override + public String getKeyAsString() { + return getKey().stream().map(Object::toString).collect(Collectors.joining(PIPE)); + } + + @Override + public long getDocCount() { + return docCount; + } + + @Override + public Aggregations getAggregations() { + return aggregations; + } + + @Override + void setDocCountError(long docCountError) { + this.docCountError = docCountError; + } + + @Override + public void setDocCountError(Function updater) { + this.docCountError = updater.apply(this.docCountError); + } + + @Override + public boolean showDocCountError() { + return showDocCountError; + } + + @Override + public Number getKeyAsNumber() { + throw new IllegalArgumentException("getKeyAsNumber is not supported by [" + MultiTermsAggregationBuilder.NAME + "]"); + } + + @Override + public long getDocCountError() { + if (!showDocCountError) { + throw new IllegalStateException("show_terms_doc_count_error is false"); + } + return docCountError; + } + + @Override + public boolean equals(Object obj) { + if (obj == null || getClass() != obj.getClass()) { + return false; + } + Bucket other = (Bucket) obj; + if (showDocCountError && docCountError != other.docCountError) { + return false; + } + return termValues.equals(other.termValues) + && docCount == other.docCount + && aggregations.equals(other.aggregations) + && showDocCountError == other.showDocCountError; + } + + @Override + public int hashCode() { + return Objects.hash(termValues, docCount, aggregations, showDocCountError, showDocCountError ? docCountError : 0); + } + + @Override + public int compareKey(Bucket other) { + return new BucketComparator().compare(this.termValues, other.termValues); + } + + /** + * Visible for testing. 
+ */ + protected static class BucketComparator implements Comparator> { + @SuppressWarnings({ "unchecked" }) + @Override + public int compare(List thisObjects, List thatObjects) { + if (thisObjects.size() != thatObjects.size()) { + throw new AggregationExecutionException( + "[" + MultiTermsAggregationBuilder.NAME + "] aggregations failed due to terms" + " size is different" + ); + } + for (int i = 0; i < thisObjects.size(); i++) { + final Object thisObject = thisObjects.get(i); + final Object thatObject = thatObjects.get(i); + int ret = ((Comparable) thisObject).compareTo(thatObject); + if (ret != 0) { + return ret; + } + } + return 0; + } + } + } + + private final int shardSize; + private final boolean showTermDocCountError; + private final long otherDocCount; + private final List termFormats; + private final List buckets; + private Map bucketMap; + + private long docCountError; + + public InternalMultiTerms( + String name, + BucketOrder reduceOrder, + BucketOrder order, + int requiredSize, + long minDocCount, + Map metadata, + int shardSize, + boolean showTermDocCountError, + long otherDocCount, + long docCountError, + List formats, + List buckets + ) { + super(name, reduceOrder, order, requiredSize, minDocCount, metadata); + this.shardSize = shardSize; + this.showTermDocCountError = showTermDocCountError; + this.otherDocCount = otherDocCount; + this.termFormats = formats; + this.buckets = buckets; + this.docCountError = docCountError; + } + + public InternalMultiTerms(StreamInput in) throws IOException { + super(in); + this.docCountError = in.readZLong(); + this.termFormats = in.readList(stream -> stream.readNamedWriteable(DocValueFormat.class)); + this.shardSize = readSize(in); + this.showTermDocCountError = in.readBoolean(); + this.otherDocCount = in.readVLong(); + this.buckets = in.readList(steam -> new Bucket(steam, termFormats, showTermDocCountError)); + } + + @Override + public String getWriteableName() { + return MultiTermsAggregationBuilder.NAME; + } + + 
@Override + public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { + return doXContentCommon(builder, params, docCountError, otherDocCount, buckets); + } + + @Override + public InternalMultiTerms create(List buckets) { + return new InternalMultiTerms( + name, + reduceOrder, + order, + requiredSize, + minDocCount, + metadata, + shardSize, + showTermDocCountError, + otherDocCount, + docCountError, + termFormats, + buckets + ); + } + + @Override + public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) { + return new Bucket( + prototype.termValues, + prototype.docCount, + aggregations, + prototype.showDocCountError, + prototype.docCountError, + prototype.termFormats + ); + } + + @Override + protected void writeTermTypeInfoTo(StreamOutput out) throws IOException { + out.writeZLong(docCountError); + out.writeCollection(termFormats, StreamOutput::writeNamedWriteable); + writeSize(shardSize, out); + out.writeBoolean(showTermDocCountError); + out.writeVLong(otherDocCount); + out.writeList(buckets); + } + + @Override + public List getBuckets() { + return buckets; + } + + @Override + public Bucket getBucketByKey(String term) { + if (bucketMap == null) { + bucketMap = buckets.stream().collect(Collectors.toMap(InternalMultiTerms.Bucket::getKeyAsString, Function.identity())); + } + return bucketMap.get(term); + } + + @Override + public long getDocCountError() { + return docCountError; + } + + @Override + public long getSumOfOtherDocCounts() { + return otherDocCount; + } + + @Override + protected void setDocCountError(long docCountError) { + this.docCountError = docCountError; + } + + @Override + protected int getShardSize() { + return shardSize; + } + + @Override + protected InternalMultiTerms create( + String name, + List buckets, + BucketOrder reduceOrder, + long docCountError, + long otherDocCount + ) { + return new InternalMultiTerms( + name, + reduceOrder, + order, + requiredSize, + minDocCount, + 
metadata, + shardSize, + showTermDocCountError, + otherDocCount, + docCountError, + termFormats, + buckets + ); + } + + @Override + protected Bucket[] createBucketsArray(int size) { + return new Bucket[size]; + } + + @Override + Bucket createBucket(long docCount, InternalAggregations aggs, long docCountError, Bucket prototype) { + return new Bucket( + prototype.termValues, + docCount, + aggs, + prototype.showDocCountError, + prototype.docCountError, + prototype.termFormats + ); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null || getClass() != obj.getClass()) return false; + if (super.equals(obj) == false) return false; + InternalMultiTerms that = (InternalMultiTerms) obj; + + if (showTermDocCountError && docCountError != that.docCountError) { + return false; + } + return Objects.equals(buckets, that.buckets) + && Objects.equals(otherDocCount, that.otherDocCount) + && Objects.equals(showTermDocCountError, that.showTermDocCountError) + && Objects.equals(shardSize, that.shardSize) + && Objects.equals(docCountError, that.docCountError); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), buckets, otherDocCount, showTermDocCountError, shardSize); + } + + /** + * Copy from InternalComposite + * + * Format obj using the provided {@link DocValueFormat}. + * If the format is equals to {@link DocValueFormat#RAW}, the object is returned as is + * for numbers and a string for {@link BytesRef}s. 
+ */ + static Object formatObject(Object obj, DocValueFormat format) { + if (obj == null) { + return null; + } + if (obj.getClass() == BytesRef.class) { + BytesRef value = (BytesRef) obj; + if (format == DocValueFormat.RAW) { + return value.utf8ToString(); + } else { + return format.format(value); + } + } else if (obj.getClass() == Long.class) { + long value = (long) obj; + if (format == DocValueFormat.RAW) { + return value; + } else { + return format.format(value); + } + } else if (obj.getClass() == Double.class) { + double value = (double) obj; + if (format == DocValueFormat.RAW) { + return value; + } else { + return format.format(value); + } + } + return obj; + } +} diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalTerms.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalTerms.java index be397bcbb2f2c..8fae5720a9082 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalTerms.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalTerms.java @@ -57,11 +57,12 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.function.Function; import static org.opensearch.search.aggregations.InternalOrder.isKeyAsc; import static org.opensearch.search.aggregations.InternalOrder.isKeyOrder; -public abstract class InternalTerms, B extends InternalTerms.Bucket> extends +public abstract class InternalTerms, B extends InternalTerms.AbstractInternalBucket> extends InternalMultiBucketAggregation implements Terms { @@ -69,10 +70,15 @@ public abstract class InternalTerms, B extends Int protected static final ParseField DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME = new ParseField("doc_count_error_upper_bound"); protected static final ParseField SUM_OF_OTHER_DOC_COUNTS = new ParseField("sum_other_doc_count"); - public abstract static class Bucket> extends InternalMultiBucketAggregation.InternalBucket - implements - 
Terms.Bucket, - KeyComparable { + public abstract static class AbstractInternalBucket extends InternalMultiBucketAggregation.InternalBucket implements Terms.Bucket { + abstract void setDocCountError(long docCountError); + + abstract void setDocCountError(Function updater); + + abstract boolean showDocCountError(); + } + + public abstract static class Bucket> extends AbstractInternalBucket implements KeyComparable { /** * Reads a bucket. Should be a constructor reference. */ @@ -142,6 +148,21 @@ public long getDocCountError() { return docCountError; } + @Override + public void setDocCountError(long docCountError) { + this.docCountError = docCountError; + } + + @Override + public void setDocCountError(Function updater) { + this.docCountError = updater.apply(this.docCountError); + } + + @Override + public boolean showDocCountError() { + return showDocCountError; + } + @Override public Aggregations getAggregations() { return aggregations; @@ -274,7 +295,7 @@ private long getDocCountError(InternalTerms terms) { } else { // otherwise use the doc count of the last term in the // aggregation - return terms.getBuckets().stream().mapToLong(Bucket::getDocCount).min().getAsLong(); + return terms.getBuckets().stream().mapToLong(MultiBucketsAggregation.Bucket::getDocCount).min().getAsLong(); } } else { return -1; @@ -393,7 +414,7 @@ public InternalAggregation reduce(List aggregations, Reduce // for the existing error calculated in a previous reduce. // Note that if the error is unbounded (-1) this will be fixed // later in this method. 
- bucket.docCountError -= thisAggDocCountError; + bucket.setDocCountError(docCountError -> docCountError - thisAggDocCountError); } } @@ -419,11 +440,12 @@ public InternalAggregation reduce(List aggregations, Reduce final BucketPriorityQueue ordered = new BucketPriorityQueue<>(size, order.comparator()); for (B bucket : reducedBuckets) { if (sumDocCountError == -1) { - bucket.docCountError = -1; + bucket.setDocCountError(-1); } else { - bucket.docCountError += sumDocCountError; + final long finalSumDocCountError = sumDocCountError; + bucket.setDocCountError(docCountError -> docCountError + finalSumDocCountError); } - if (bucket.docCount >= minDocCount) { + if (bucket.getDocCount() >= minDocCount) { B removed = ordered.insertWithOverflow(bucket); if (removed != null) { otherDocCount += removed.getDocCount(); @@ -448,9 +470,10 @@ public InternalAggregation reduce(List aggregations, Reduce reduceContext.consumeBucketsAndMaybeBreak(1); list[i] = reducedBuckets.get(i); if (sumDocCountError == -1) { - list[i].docCountError = -1; + list[i].setDocCountError(-1); } else { - list[i].docCountError += sumDocCountError; + final long fSumDocCountError = sumDocCountError; + list[i].setDocCountError(docCountError -> docCountError + fSumDocCountError); } } } @@ -474,15 +497,15 @@ protected B reduceBucket(List buckets, ReduceContext context) { long docCountError = 0; List aggregationsList = new ArrayList<>(buckets.size()); for (B bucket : buckets) { - docCount += bucket.docCount; + docCount += bucket.getDocCount(); if (docCountError != -1) { - if (bucket.docCountError == -1) { + if (bucket.showDocCountError() == false || bucket.getDocCountError() == -1) { docCountError = -1; } else { - docCountError += bucket.docCountError; + docCountError += bucket.getDocCountError(); } } - aggregationsList.add(bucket.aggregations); + aggregationsList.add((InternalAggregations) bucket.getAggregations()); } InternalAggregations aggs = InternalAggregations.reduce(aggregationsList, context); return 
createBucket(docCount, aggs, docCountError, buckets.get(0)); @@ -524,12 +547,12 @@ protected static XContentBuilder doXContentCommon( Params params, long docCountError, long otherDocCount, - List buckets + List buckets ) throws IOException { builder.field(DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME.getPreferredName(), docCountError); builder.field(SUM_OF_OTHER_DOC_COUNTS.getPreferredName(), otherDocCount); builder.startArray(CommonFields.BUCKETS.getPreferredName()); - for (Bucket bucket : buckets) { + for (AbstractInternalBucket bucket : buckets) { bucket.toXContent(builder, params); } builder.endArray(); diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregationBuilder.java new file mode 100644 index 0000000000000..78be4f980bce5 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregationBuilder.java @@ -0,0 +1,443 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.aggregations.bucket.terms; + +import org.opensearch.common.ParseField; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.common.xcontent.ObjectParser; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.search.aggregations.AbstractAggregationBuilder; +import org.opensearch.search.aggregations.AggregationBuilder; +import org.opensearch.search.aggregations.Aggregator; +import org.opensearch.search.aggregations.AggregatorFactories; +import org.opensearch.search.aggregations.AggregatorFactory; +import org.opensearch.search.aggregations.BucketOrder; +import org.opensearch.search.aggregations.InternalOrder; +import org.opensearch.search.aggregations.support.MultiTermsValuesSourceConfig; +import org.opensearch.search.aggregations.support.ValuesSourceRegistry; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +import static org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder.DEFAULT_BUCKET_COUNT_THRESHOLDS; + +/** + * Multi-terms aggregation supports collecting terms from multiple fields in the same document. + * + *

+ * For example, using the multi-terms aggregation to group by two fields region, host, calculate max cpu, and sort by max cpu. + *

+ *
+ *   GET test_000001/_search
+ *   {
+ *     "size": 0,
+ *     "aggs": {
+ *       "hot": {
+ *         "multi_terms": {
+ *           "terms": [{
+ *             "field": "region"
+ *           },{
+ *             "field": "host"
+ *           }],
+ *           "order": {"max-cpu": "desc"}
+ *         },
+ *         "aggs": {
+ *           "max-cpu": { "max": { "field": "cpu" } }
+ *         }
+ *       }
+ *     }
+ *   }
+ * 
+ * + *

+ * The aggregation result contains + * - key: a list of value extract from multiple fields in the same doc. + *

+ *
+ *   {
+ *     "hot": {
+ *       "doc_count_error_upper_bound": 0,
+ *       "sum_other_doc_count": 0,
+ *       "buckets": [
+ *         {
+ *           "key": [
+ *             "dub",
+ *             "h1"
+ *           ],
+ *           "key_as_string": "dub|h1",
+ *           "doc_count": 2,
+ *           "max-cpu": {
+ *             "value": 90.0
+ *           }
+ *         },
+ *         {
+ *           "key": [
+ *             "dub",
+ *             "h2"
+ *           ],
+ *           "key_as_string": "dub|h2",
+ *           "doc_count": 2,
+ *           "max-cpu": {
+ *             "value": 70.0
+ *           }
+ *         }
+ *       ]
+ *     }
+ *   }
+ * 
+ * + *

+ * Notes: The current implementation focuses on adding new type aggregates. Performance (latency) is not good,mainly because of + * simply encoding/decoding a list of values as bucket keys. + *

+ */ +public class MultiTermsAggregationBuilder extends AbstractAggregationBuilder { + public static final String NAME = "multi_terms"; + public static final ObjectParser PARSER = ObjectParser.fromBuilder( + NAME, + MultiTermsAggregationBuilder::new + ); + + public static final ParseField TERMS_FIELD = new ParseField("terms"); + public static final ParseField SHARD_SIZE_FIELD_NAME = new ParseField("shard_size"); + public static final ParseField MIN_DOC_COUNT_FIELD_NAME = new ParseField("min_doc_count"); + public static final ParseField SHARD_MIN_DOC_COUNT_FIELD_NAME = new ParseField("shard_min_doc_count"); + public static final ParseField REQUIRED_SIZE_FIELD_NAME = new ParseField("size"); + public static final ParseField SHOW_TERM_DOC_COUNT_ERROR = new ParseField("show_term_doc_count_error"); + public static final ParseField ORDER_FIELD = new ParseField("order"); + + @Override + public String getType() { + return NAME; + } + + static { + final ObjectParser parser = MultiTermsValuesSourceConfig.PARSER.apply( + true, + true, + true, + true + ); + PARSER.declareObjectArray(MultiTermsAggregationBuilder::terms, (p, c) -> parser.parse(p, null).build(), TERMS_FIELD); + + PARSER.declareBoolean(MultiTermsAggregationBuilder::showTermDocCountError, SHOW_TERM_DOC_COUNT_ERROR); + + PARSER.declareInt(MultiTermsAggregationBuilder::shardSize, SHARD_SIZE_FIELD_NAME); + + PARSER.declareLong(MultiTermsAggregationBuilder::minDocCount, MIN_DOC_COUNT_FIELD_NAME); + + PARSER.declareLong(MultiTermsAggregationBuilder::shardMinDocCount, SHARD_MIN_DOC_COUNT_FIELD_NAME); + + PARSER.declareInt(MultiTermsAggregationBuilder::size, REQUIRED_SIZE_FIELD_NAME); + + PARSER.declareObjectArray(MultiTermsAggregationBuilder::order, (p, c) -> InternalOrder.Parser.parseOrderParam(p), ORDER_FIELD); + + PARSER.declareField( + MultiTermsAggregationBuilder::collectMode, + (p, c) -> Aggregator.SubAggCollectionMode.parse(p.text(), LoggingDeprecationHandler.INSTANCE), + Aggregator.SubAggCollectionMode.KEY, + 
ObjectParser.ValueType.STRING + ); + } + + public static final ValuesSourceRegistry.RegistryKey REGISTRY_KEY = + new ValuesSourceRegistry.RegistryKey<>( + MultiTermsAggregationBuilder.NAME, + MultiTermsAggregationFactory.InternalValuesSourceSupplier.class + ); + + private List terms; + + private BucketOrder order = BucketOrder.compound(BucketOrder.count(false)); // automatically adds tie-breaker key asc order + private Aggregator.SubAggCollectionMode collectMode = null; + private TermsAggregator.BucketCountThresholds bucketCountThresholds = new TermsAggregator.BucketCountThresholds( + DEFAULT_BUCKET_COUNT_THRESHOLDS + ); + private boolean showTermDocCountError = false; + + public MultiTermsAggregationBuilder(String name) { + super(name); + } + + protected MultiTermsAggregationBuilder( + MultiTermsAggregationBuilder clone, + AggregatorFactories.Builder factoriesBuilder, + Map metadata + ) { + super(clone, factoriesBuilder, metadata); + this.terms = new ArrayList<>(clone.terms); + this.order = clone.order; + this.collectMode = clone.collectMode; + this.bucketCountThresholds = new TermsAggregator.BucketCountThresholds(clone.bucketCountThresholds); + this.showTermDocCountError = clone.showTermDocCountError; + } + + @Override + protected AggregationBuilder shallowCopy(AggregatorFactories.Builder factoriesBuilder, Map metadata) { + return new MultiTermsAggregationBuilder(this, factoriesBuilder, metadata); + } + + /** + * Read from a stream. 
+ */ + public MultiTermsAggregationBuilder(StreamInput in) throws IOException { + super(in); + terms = in.readList(MultiTermsValuesSourceConfig::new); + bucketCountThresholds = new TermsAggregator.BucketCountThresholds(in); + collectMode = in.readOptionalWriteable(Aggregator.SubAggCollectionMode::readFromStream); + order = InternalOrder.Streams.readOrder(in); + showTermDocCountError = in.readBoolean(); + } + + @Override + protected void doWriteTo(StreamOutput out) throws IOException { + out.writeList(terms); + bucketCountThresholds.writeTo(out); + out.writeOptionalWriteable(collectMode); + order.writeTo(out); + out.writeBoolean(showTermDocCountError); + } + + @Override + protected AggregatorFactory doBuild( + QueryShardContext queryShardContext, + AggregatorFactory parent, + AggregatorFactories.Builder subfactoriesBuilder + ) throws IOException { + return new MultiTermsAggregationFactory( + name, + queryShardContext, + parent, + subfactoriesBuilder, + metadata, + terms, + order, + collectMode, + bucketCountThresholds, + showTermDocCountError + ); + } + + @Override + protected XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (terms != null) { + builder.field(TERMS_FIELD.getPreferredName(), terms); + } + bucketCountThresholds.toXContent(builder, params); + builder.field(SHOW_TERM_DOC_COUNT_ERROR.getPreferredName(), showTermDocCountError); + builder.field(ORDER_FIELD.getPreferredName()); + order.toXContent(builder, params); + if (collectMode != null) { + builder.field(Aggregator.SubAggCollectionMode.KEY.getPreferredName(), collectMode.parseField().getPreferredName()); + } + builder.endObject(); + return builder; + } + + /** + * Set the terms. + */ + public MultiTermsAggregationBuilder terms(List terms) { + if (terms == null) { + throw new IllegalArgumentException("[terms] must not be null. 
Found null terms in [" + name + "]"); + } + if (terms.size() < 2) { + throw new IllegalArgumentException( + "multi term aggregation must has at least 2 terms. Found [" + + terms.size() + + "] in" + + " [" + + name + + "]" + + (terms.size() == 1 ? " Use terms aggregation for single term aggregation" : "") + ); + } + this.terms = terms; + return this; + } + + /** + * Sets the size - indicating how many term buckets should be returned + * (defaults to 10) + */ + public MultiTermsAggregationBuilder size(int size) { + if (size <= 0) { + throw new IllegalArgumentException("[size] must be greater than 0. Found [" + size + "] in [" + name + "]"); + } + bucketCountThresholds.setRequiredSize(size); + return this; + } + + /** + * Returns the number of term buckets currently configured + */ + public int size() { + return bucketCountThresholds.getRequiredSize(); + } + + /** + * Sets the shard_size - indicating the number of term buckets each shard + * will return to the coordinating node (the node that coordinates the + * search execution). The higher the shard size is, the more accurate the + * results are. + */ + public MultiTermsAggregationBuilder shardSize(int shardSize) { + if (shardSize <= 0) { + throw new IllegalArgumentException("[shardSize] must be greater than 0. Found [" + shardSize + "] in [" + name + "]"); + } + bucketCountThresholds.setShardSize(shardSize); + return this; + } + + /** + * Returns the number of term buckets per shard that are currently configured + */ + public int shardSize() { + return bucketCountThresholds.getShardSize(); + } + + /** + * Set the minimum document count terms should have in order to appear in + * the response. + */ + public MultiTermsAggregationBuilder minDocCount(long minDocCount) { + if (minDocCount < 0) { + throw new IllegalArgumentException( + "[minDocCount] must be greater than or equal to 0. 
Found [" + minDocCount + "] in [" + name + "]" + ); + } + bucketCountThresholds.setMinDocCount(minDocCount); + return this; + } + + /** + * Returns the minimum document count required per term + */ + public long minDocCount() { + return bucketCountThresholds.getMinDocCount(); + } + + /** + * Set the minimum document count terms should have on the shard in order to + * appear in the response. + */ + public MultiTermsAggregationBuilder shardMinDocCount(long shardMinDocCount) { + if (shardMinDocCount < 0) { + throw new IllegalArgumentException( + "[shardMinDocCount] must be greater than or equal to 0. Found [" + shardMinDocCount + "] in [" + name + "]" + ); + } + bucketCountThresholds.setShardMinDocCount(shardMinDocCount); + return this; + } + + /** + * Returns the minimum document count required per term, per shard + */ + public long shardMinDocCount() { + return bucketCountThresholds.getShardMinDocCount(); + } + + /** Set a new order on this builder and return the builder so that calls + * can be chained. A tie-breaker may be added to avoid non-deterministic ordering. */ + public MultiTermsAggregationBuilder order(BucketOrder order) { + if (order == null) { + throw new IllegalArgumentException("[order] must not be null: [" + name + "]"); + } + if (order instanceof InternalOrder.CompoundOrder || InternalOrder.isKeyOrder(order)) { + this.order = order; // if order already contains a tie-breaker we are good to go + } else { // otherwise add a tie-breaker by using a compound order + this.order = BucketOrder.compound(order); + } + return this; + } + + /** + * Sets the order in which the buckets will be returned. A tie-breaker may be added to avoid non-deterministic + * ordering. + */ + public MultiTermsAggregationBuilder order(List orders) { + if (orders == null) { + throw new IllegalArgumentException("[orders] must not be null: [" + name + "]"); + } + // if the list only contains one order use that to avoid inconsistent xcontent + order(orders.size() > 1 ? 
BucketOrder.compound(orders) : orders.get(0)); + return this; + } + + /** + * Gets the order in which the buckets will be returned. + */ + public BucketOrder order() { + return order; + } + + /** + * Expert: set the collection mode. + */ + public MultiTermsAggregationBuilder collectMode(Aggregator.SubAggCollectionMode collectMode) { + if (collectMode == null) { + throw new IllegalArgumentException("[collectMode] must not be null: [" + name + "]"); + } + this.collectMode = collectMode; + return this; + } + + /** + * Expert: get the collection mode. + */ + public Aggregator.SubAggCollectionMode collectMode() { + return collectMode; + } + + /** + * Get whether doc count error will be return for individual terms + */ + public boolean showTermDocCountError() { + return showTermDocCountError; + } + + /** + * Set whether doc count error will be return for individual terms + */ + public MultiTermsAggregationBuilder showTermDocCountError(boolean showTermDocCountError) { + this.showTermDocCountError = showTermDocCountError; + return this; + } + + @Override + public BucketCardinality bucketCardinality() { + return BucketCardinality.MANY; + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), bucketCountThresholds, collectMode, order, showTermDocCountError); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null || getClass() != obj.getClass()) return false; + if (super.equals(obj) == false) return false; + MultiTermsAggregationBuilder other = (MultiTermsAggregationBuilder) obj; + return Objects.equals(terms, other.terms) + && Objects.equals(bucketCountThresholds, other.bucketCountThresholds) + && Objects.equals(collectMode, other.collectMode) + && Objects.equals(order, other.order) + && Objects.equals(showTermDocCountError, other.showTermDocCountError); + } +} diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregationFactory.java 
b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregationFactory.java new file mode 100644 index 0000000000000..d5600bc030bf2 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregationFactory.java @@ -0,0 +1,163 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.aggregations.bucket.terms; + +import org.opensearch.common.collect.Tuple; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.search.DocValueFormat; +import org.opensearch.search.aggregations.Aggregator; +import org.opensearch.search.aggregations.AggregatorFactories; +import org.opensearch.search.aggregations.AggregatorFactory; +import org.opensearch.search.aggregations.BucketOrder; +import org.opensearch.search.aggregations.CardinalityUpperBound; +import org.opensearch.search.aggregations.InternalOrder; +import org.opensearch.search.aggregations.bucket.BucketUtils; +import org.opensearch.search.aggregations.support.CoreValuesSourceType; +import org.opensearch.search.aggregations.support.MultiTermsValuesSourceConfig; +import org.opensearch.search.aggregations.support.ValuesSource; +import org.opensearch.search.aggregations.support.ValuesSourceConfig; +import org.opensearch.search.aggregations.support.ValuesSourceRegistry; +import org.opensearch.search.internal.SearchContext; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.opensearch.search.aggregations.bucket.terms.MultiTermsAggregationBuilder.REGISTRY_KEY; + +/** + * Factory of {@link MultiTermsAggregator}. 
+ */ +public class MultiTermsAggregationFactory extends AggregatorFactory { + + private final List> configs; + private final List formats; + /** + * Fields inherent from Terms Aggregation Factory. + */ + private final BucketOrder order; + private final Aggregator.SubAggCollectionMode collectMode; + private final TermsAggregator.BucketCountThresholds bucketCountThresholds; + private final boolean showTermDocCountError; + + public static void registerAggregators(ValuesSourceRegistry.Builder builder) { + builder.register( + REGISTRY_KEY, + org.opensearch.common.collect.List.of(CoreValuesSourceType.BYTES, CoreValuesSourceType.IP), + config -> { + final IncludeExclude.StringFilter filter = config.v2() == null + ? null + : config.v2().convertToStringFilter(config.v1().format()); + return MultiTermsAggregator.InternalValuesSourceFactory.bytesValuesSource(config.v1().getValuesSource(), filter); + }, + true + ); + + builder.register( + REGISTRY_KEY, + org.opensearch.common.collect.List.of(CoreValuesSourceType.NUMERIC, CoreValuesSourceType.BOOLEAN, CoreValuesSourceType.DATE), + config -> { + ValuesSourceConfig valuesSourceConfig = config.v1(); + IncludeExclude includeExclude = config.v2(); + ValuesSource.Numeric valuesSource = ((ValuesSource.Numeric) valuesSourceConfig.getValuesSource()); + IncludeExclude.LongFilter longFilter = null; + if (valuesSource.isFloatingPoint()) { + if (includeExclude != null) { + longFilter = includeExclude.convertToDoubleFilter(); + } + return MultiTermsAggregator.InternalValuesSourceFactory.doubleValueSource(valuesSource, longFilter); + } else { + if (includeExclude != null) { + longFilter = includeExclude.convertToLongFilter(valuesSourceConfig.format()); + } + return MultiTermsAggregator.InternalValuesSourceFactory.longValuesSource(valuesSource, longFilter); + } + }, + true + ); + + builder.registerUsage(MultiTermsAggregationBuilder.NAME); + } + + public MultiTermsAggregationFactory( + String name, + QueryShardContext queryShardContext, + 
AggregatorFactory parent, + AggregatorFactories.Builder subFactoriesBuilder, + Map metadata, + List multiTermConfigs, + BucketOrder order, + Aggregator.SubAggCollectionMode collectMode, + TermsAggregator.BucketCountThresholds bucketCountThresholds, + boolean showTermDocCountError + ) throws IOException { + super(name, queryShardContext, parent, subFactoriesBuilder, metadata); + this.configs = multiTermConfigs.stream() + .map( + c -> new Tuple( + ValuesSourceConfig.resolveUnregistered( + queryShardContext, + c.getUserValueTypeHint(), + c.getFieldName(), + c.getScript(), + c.getMissing(), + c.getTimeZone(), + c.getFormat(), + CoreValuesSourceType.BYTES + ), + c.getIncludeExclude() + ) + ) + .collect(Collectors.toList()); + this.formats = this.configs.stream().map(c -> c.v1().format()).collect(Collectors.toList()); + this.order = order; + this.collectMode = collectMode; + this.bucketCountThresholds = bucketCountThresholds; + this.showTermDocCountError = showTermDocCountError; + } + + @Override + protected Aggregator createInternal( + SearchContext searchContext, + Aggregator parent, + CardinalityUpperBound cardinality, + Map metadata + ) throws IOException { + TermsAggregator.BucketCountThresholds bucketCountThresholds = new TermsAggregator.BucketCountThresholds(this.bucketCountThresholds); + if (InternalOrder.isKeyOrder(order) == false + && bucketCountThresholds.getShardSize() == TermsAggregationBuilder.DEFAULT_BUCKET_COUNT_THRESHOLDS.getShardSize()) { + // The user has not made a shardSize selection. 
Use default + // heuristic to avoid any wrong-ranking caused by distributed + // counting + bucketCountThresholds.setShardSize(BucketUtils.suggestShardSideQueueSize(bucketCountThresholds.getRequiredSize())); + } + bucketCountThresholds.ensureValidity(); + return new MultiTermsAggregator( + name, + factories, + showTermDocCountError, + configs.stream() + .map(config -> queryShardContext.getValuesSourceRegistry().getAggregator(REGISTRY_KEY, config.v1()).build(config)) + .collect(Collectors.toList()), + configs.stream().map(c -> c.v1().format()).collect(Collectors.toList()), + order, + collectMode, + bucketCountThresholds, + searchContext, + parent, + cardinality, + metadata + ); + } + + public interface InternalValuesSourceSupplier { + MultiTermsAggregator.InternalValuesSource build(Tuple config); + } +} diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregator.java new file mode 100644 index 0000000000000..36bf710f74398 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregator.java @@ -0,0 +1,438 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.aggregations.bucket.terms; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefBuilder; +import org.apache.lucene.util.NumericUtils; +import org.apache.lucene.util.PriorityQueue; +import org.opensearch.ExceptionsHelper; +import org.opensearch.common.CheckedSupplier; +import org.opensearch.common.bytes.BytesArray; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.lease.Releasables; +import org.opensearch.index.fielddata.SortedBinaryDocValues; +import org.opensearch.index.fielddata.SortedNumericDoubleValues; +import org.opensearch.search.DocValueFormat; +import org.opensearch.search.aggregations.Aggregator; +import org.opensearch.search.aggregations.AggregatorFactories; +import org.opensearch.search.aggregations.BucketOrder; +import org.opensearch.search.aggregations.CardinalityUpperBound; +import org.opensearch.search.aggregations.InternalAggregation; +import org.opensearch.search.aggregations.InternalOrder; +import org.opensearch.search.aggregations.LeafBucketCollector; +import org.opensearch.search.aggregations.bucket.DeferableBucketAggregator; +import org.opensearch.search.aggregations.support.AggregationPath; +import org.opensearch.search.aggregations.support.ValuesSource; +import org.opensearch.search.internal.SearchContext; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.opensearch.search.aggregations.InternalOrder.isKeyOrder; +import static 
org.opensearch.search.aggregations.bucket.terms.TermsAggregator.descendsFromNestedAggregator; + +/** + * An aggregator that aggregate with multi_terms. + */ +public class MultiTermsAggregator extends DeferableBucketAggregator { + + private final BytesKeyedBucketOrds bucketOrds; + private final MultiTermsValuesSource multiTermsValue; + private final boolean showTermDocCountError; + private final List formats; + private final TermsAggregator.BucketCountThresholds bucketCountThresholds; + private final BucketOrder order; + private final Comparator partiallyBuiltBucketComparator; + private final SubAggCollectionMode collectMode; + private final Set aggsUsedForSorting = new HashSet<>(); + + public MultiTermsAggregator( + String name, + AggregatorFactories factories, + boolean showTermDocCountError, + List internalValuesSources, + List formats, + BucketOrder order, + SubAggCollectionMode collectMode, + TermsAggregator.BucketCountThresholds bucketCountThresholds, + SearchContext context, + Aggregator parent, + CardinalityUpperBound cardinality, + Map metadata + ) throws IOException { + super(name, factories, context, parent, metadata); + this.bucketOrds = BytesKeyedBucketOrds.build(context.bigArrays(), cardinality); + this.multiTermsValue = new MultiTermsValuesSource(internalValuesSources); + this.showTermDocCountError = showTermDocCountError; + this.formats = formats; + this.bucketCountThresholds = bucketCountThresholds; + this.order = order; + this.partiallyBuiltBucketComparator = order == null ? null : order.partiallyBuiltBucketComparator(b -> b.bucketOrd, this); + // Todo, copy from TermsAggregator. need to remove duplicate code. + if (subAggsNeedScore() && descendsFromNestedAggregator(parent)) { + /** + * Force the execution to depth_first because we need to access the score of + * nested documents in a sub-aggregation and we are not able to generate this score + * while replaying deferred documents. 
+ */ + this.collectMode = SubAggCollectionMode.DEPTH_FIRST; + } else { + this.collectMode = collectMode; + } + // Don't defer any child agg if we are dependent on it for pruning results + if (order instanceof InternalOrder.Aggregation) { + AggregationPath path = ((InternalOrder.Aggregation) order).path(); + aggsUsedForSorting.add(path.resolveTopmostAggregator(this)); + } else if (order instanceof InternalOrder.CompoundOrder) { + InternalOrder.CompoundOrder compoundOrder = (InternalOrder.CompoundOrder) order; + for (BucketOrder orderElement : compoundOrder.orderElements()) { + if (orderElement instanceof InternalOrder.Aggregation) { + AggregationPath path = ((InternalOrder.Aggregation) orderElement).path(); + aggsUsedForSorting.add(path.resolveTopmostAggregator(this)); + } + } + } + } + + @Override + public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { + InternalMultiTerms.Bucket[][] topBucketsPerOrd = new InternalMultiTerms.Bucket[owningBucketOrds.length][]; + long[] otherDocCounts = new long[owningBucketOrds.length]; + for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) { + collectZeroDocEntriesIfNeeded(owningBucketOrds[ordIdx]); + long bucketsInOrd = bucketOrds.bucketsInOrd(owningBucketOrds[ordIdx]); + + int size = (int) Math.min(bucketsInOrd, bucketCountThresholds.getShardSize()); + PriorityQueue ordered = new BucketPriorityQueue<>(size, partiallyBuiltBucketComparator); + InternalMultiTerms.Bucket spare = null; + BytesRef dest = null; + BytesKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrds[ordIdx]); + CheckedSupplier emptyBucketBuilder = () -> InternalMultiTerms.Bucket.EMPTY( + showTermDocCountError, + formats + ); + while (ordsEnum.next()) { + long docCount = bucketDocCount(ordsEnum.ord()); + otherDocCounts[ordIdx] += docCount; + if (docCount < bucketCountThresholds.getShardMinDocCount()) { + continue; + } + if (spare == null) { + spare = emptyBucketBuilder.get(); + dest = new 
BytesRef(); + } + + ordsEnum.readValue(dest); + + spare.termValues = decode(dest); + spare.docCount = docCount; + spare.bucketOrd = ordsEnum.ord(); + spare = ordered.insertWithOverflow(spare); + } + + // Get the top buckets + InternalMultiTerms.Bucket[] bucketsForOrd = new InternalMultiTerms.Bucket[ordered.size()]; + topBucketsPerOrd[ordIdx] = bucketsForOrd; + for (int b = ordered.size() - 1; b >= 0; --b) { + topBucketsPerOrd[ordIdx][b] = ordered.pop(); + otherDocCounts[ordIdx] -= topBucketsPerOrd[ordIdx][b].getDocCount(); + } + } + + buildSubAggsForAllBuckets(topBucketsPerOrd, b -> b.bucketOrd, (b, aggs) -> b.aggregations = aggs); + + InternalAggregation[] result = new InternalAggregation[owningBucketOrds.length]; + for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) { + result[ordIdx] = buildResult(owningBucketOrds[ordIdx], otherDocCounts[ordIdx], topBucketsPerOrd[ordIdx]); + } + return result; + } + + InternalMultiTerms buildResult(long owningBucketOrd, long otherDocCount, InternalMultiTerms.Bucket[] topBuckets) { + BucketOrder reduceOrder; + if (isKeyOrder(order) == false) { + reduceOrder = InternalOrder.key(true); + Arrays.sort(topBuckets, reduceOrder.comparator()); + } else { + reduceOrder = order; + } + return new InternalMultiTerms( + name, + reduceOrder, + order, + bucketCountThresholds.getRequiredSize(), + bucketCountThresholds.getMinDocCount(), + metadata(), + bucketCountThresholds.getShardSize(), + showTermDocCountError, + otherDocCount, + 0, + formats, + org.opensearch.common.collect.List.of(topBuckets) + ); + } + + @Override + public InternalAggregation buildEmptyAggregation() { + return null; + } + + @Override + protected LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException { + MultiTermsValuesSourceCollector collector = multiTermsValue.getValues(ctx); + return new LeafBucketCollector() { + @Override + public void collect(int doc, long owningBucketOrd) throws IOException { + for (List 
value : collector.apply(doc)) { + long bucketOrd = bucketOrds.add(owningBucketOrd, encode(value)); + if (bucketOrd < 0) { + bucketOrd = -1 - bucketOrd; + collectExistingBucket(sub, doc, bucketOrd); + } else { + collectBucket(sub, doc, bucketOrd); + } + } + } + }; + } + + @Override + protected void doClose() { + Releasables.close(bucketOrds); + } + + private static BytesRef encode(List values) { + try (BytesStreamOutput output = new BytesStreamOutput()) { + output.writeCollection(values, StreamOutput::writeGenericValue); + return output.bytes().toBytesRef(); + } catch (IOException e) { + throw ExceptionsHelper.convertToRuntime(e); + } + } + + private static List decode(BytesRef bytesRef) { + try (StreamInput input = new BytesArray(bytesRef).streamInput()) { + return input.readList(StreamInput::readGenericValue); + } catch (IOException e) { + throw ExceptionsHelper.convertToRuntime(e); + } + } + + private boolean subAggsNeedScore() { + for (Aggregator subAgg : subAggregators) { + if (subAgg.scoreMode().needsScores()) { + return true; + } + } + return false; + } + + @Override + protected boolean shouldDefer(Aggregator aggregator) { + return collectMode == Aggregator.SubAggCollectionMode.BREADTH_FIRST && !aggsUsedForSorting.contains(aggregator); + } + + private void collectZeroDocEntriesIfNeeded(long owningBucketOrd) throws IOException { + if (bucketCountThresholds.getMinDocCount() != 0) { + return; + } + if (InternalOrder.isCountDesc(order) && bucketOrds.bucketsInOrd(owningBucketOrd) >= bucketCountThresholds.getRequiredSize()) { + return; + } + // we need to fill-in the blanks + for (LeafReaderContext ctx : context.searcher().getTopReaderContext().leaves()) { + MultiTermsValuesSourceCollector collector = multiTermsValue.getValues(ctx); + // brute force + for (int docId = 0; docId < ctx.reader().maxDoc(); ++docId) { + for (List value : collector.apply(docId)) { + bucketOrds.add(owningBucketOrd, encode(value)); + } + } + } + } + + /** + * A multi_terms collector which 
collect values on each doc, + */ + @FunctionalInterface + interface MultiTermsValuesSourceCollector { + /** + * Collect a list values of multi_terms on each doc. + * Each terms could have multi_values, so the result is the cartesian product of each term's values. + */ + List> apply(int doc) throws IOException; + } + + @FunctionalInterface + interface InternalValuesSource { + /** + * Create {@link InternalValuesSourceCollector} from existing {@link LeafReaderContext}. + */ + InternalValuesSourceCollector apply(LeafReaderContext ctx) throws IOException; + } + + /** + * A terms collector which collect values on each doc, + */ + @FunctionalInterface + interface InternalValuesSourceCollector { + /** + * Collect a list values of a term on specific doc. + */ + List apply(int doc) throws IOException; + } + + /** + * Multi_Term ValuesSource, it is a collection of {@link InternalValuesSource} + */ + static class MultiTermsValuesSource { + private final List valuesSources; + + public MultiTermsValuesSource(List valuesSources) { + this.valuesSources = valuesSources; + } + + public MultiTermsValuesSourceCollector getValues(LeafReaderContext ctx) throws IOException { + List collectors = new ArrayList<>(); + for (InternalValuesSource valuesSource : valuesSources) { + collectors.add(valuesSource.apply(ctx)); + } + return new MultiTermsValuesSourceCollector() { + @Override + public List> apply(int doc) throws IOException { + List, IOException>> collectedValues = new ArrayList<>(); + for (InternalValuesSourceCollector collector : collectors) { + collectedValues.add(() -> collector.apply(doc)); + } + List> result = new ArrayList<>(); + apply(0, collectedValues, new ArrayList<>(), result); + return result; + } + + /** + * DFS traverse each term's values and add cartesian product to results lists. 
+ */ + private void apply( + int index, + List, IOException>> collectedValues, + List current, + List> results + ) throws IOException { + if (index == collectedValues.size()) { + results.add(org.opensearch.common.collect.List.copyOf(current)); + } else if (null != collectedValues.get(index)) { + for (Object value : collectedValues.get(index).get()) { + current.add(value); + apply(index + 1, collectedValues, current, results); + current.remove(current.size() - 1); + } + } + } + }; + } + } + + /** + * Factory for construct {@link InternalValuesSource}. + */ + static class InternalValuesSourceFactory { + static InternalValuesSource bytesValuesSource(ValuesSource valuesSource, IncludeExclude.StringFilter includeExclude) { + return ctx -> { + SortedBinaryDocValues values = valuesSource.bytesValues(ctx); + return doc -> { + BytesRefBuilder previous = new BytesRefBuilder(); + + if (false == values.advanceExact(doc)) { + return Collections.emptyList(); + } + int valuesCount = values.docValueCount(); + List termValues = new ArrayList<>(valuesCount); + + // SortedBinaryDocValues don't guarantee uniqueness so we + // need to take care of dups + previous.clear(); + for (int i = 0; i < valuesCount; ++i) { + BytesRef bytes = values.nextValue(); + if (includeExclude != null && false == includeExclude.accept(bytes)) { + continue; + } + if (i > 0 && previous.get().equals(bytes)) { + continue; + } + previous.copyBytes(bytes); + termValues.add(BytesRef.deepCopyOf(bytes)); + } + return termValues; + }; + }; + } + + static InternalValuesSource longValuesSource(ValuesSource.Numeric valuesSource, IncludeExclude.LongFilter longFilter) { + return ctx -> { + SortedNumericDocValues values = valuesSource.longValues(ctx); + return doc -> { + if (values.advanceExact(doc)) { + int valuesCount = values.docValueCount(); + + long previous = Long.MAX_VALUE; + List termValues = new ArrayList<>(valuesCount); + for (int i = 0; i < valuesCount; ++i) { + long val = values.nextValue(); + if (previous != 
val || i == 0) { + if (longFilter == null || longFilter.accept(val)) { + termValues.add(val); + } + previous = val; + } + } + return termValues; + } + return Collections.emptyList(); + }; + }; + } + + static InternalValuesSource doubleValueSource(ValuesSource.Numeric valuesSource, IncludeExclude.LongFilter longFilter) { + return ctx -> { + SortedNumericDoubleValues values = valuesSource.doubleValues(ctx); + return doc -> { + if (values.advanceExact(doc)) { + int valuesCount = values.docValueCount(); + + double previous = Double.MAX_VALUE; + List termValues = new ArrayList<>(valuesCount); + for (int i = 0; i < valuesCount; ++i) { + double val = values.nextValue(); + if (previous != val || i == 0) { + if (longFilter == null || longFilter.accept(NumericUtils.doubleToSortableLong(val))) { + termValues.add(val); + } + previous = val; + } + } + return termValues; + } + return Collections.emptyList(); + }; + }; + } + } +} diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/ParsedMultiTerms.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/ParsedMultiTerms.java new file mode 100644 index 0000000000000..8686d329fa3b2 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/ParsedMultiTerms.java @@ -0,0 +1,77 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.aggregations.bucket.terms; + +import org.opensearch.common.xcontent.ObjectParser; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.List; + +public class ParsedMultiTerms extends ParsedTerms { + @Override + public String getType() { + return MultiTermsAggregationBuilder.NAME; + } + + private static final ObjectParser PARSER = new ObjectParser<>( + ParsedMultiTerms.class.getSimpleName(), + true, + ParsedMultiTerms::new + ); + static { + declareParsedTermsFields(PARSER, ParsedBucket::fromXContent); + } + + public static ParsedMultiTerms fromXContent(XContentParser parser, String name) throws IOException { + ParsedMultiTerms aggregation = PARSER.parse(parser, null); + aggregation.setName(name); + return aggregation; + } + + public static class ParsedBucket extends ParsedTerms.ParsedBucket { + + private List key; + + @Override + public List getKey() { + return key; + } + + @Override + public String getKeyAsString() { + String keyAsString = super.getKeyAsString(); + if (keyAsString != null) { + return keyAsString; + } + if (key != null) { + return key.toString(); + } + return null; + } + + public Number getKeyAsNumber() { + throw new UnsupportedOperationException("not implemented"); + } + + @Override + protected XContentBuilder keyToXContent(XContentBuilder builder) throws IOException { + builder.field(CommonFields.KEY.getPreferredName(), key); + if (super.getKeyAsString() != null) { + builder.field(CommonFields.KEY_AS_STRING.getPreferredName(), getKeyAsString()); + } + return builder; + } + + static ParsedBucket fromXContent(XContentParser parser) throws IOException { + return parseTermsBucketXContent(parser, ParsedBucket::new, (p, bucket) -> { bucket.key = p.list(); }); + } + } +} diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/ParsedTerms.java 
b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/ParsedTerms.java index ce5f56c898fa6..054ea7d827053 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/ParsedTerms.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/ParsedTerms.java @@ -139,13 +139,16 @@ static B parseTermsBucketXContent( XContentParser.Token token; String currentFieldName = parser.currentName(); while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + // field value could be list, e.g. multi_terms aggregation. + if ((token.isValue() || token == XContentParser.Token.START_ARRAY) + && CommonFields.KEY.getPreferredName().equals(currentFieldName)) { + keyConsumer.accept(parser, bucket); + } if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token.isValue()) { if (CommonFields.KEY_AS_STRING.getPreferredName().equals(currentFieldName)) { bucket.setKeyAsString(parser.text()); - } else if (CommonFields.KEY.getPreferredName().equals(currentFieldName)) { - keyConsumer.accept(parser, bucket); } else if (CommonFields.DOC_COUNT.getPreferredName().equals(currentFieldName)) { bucket.setDocCount(parser.longValue()); } else if (DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME.getPreferredName().equals(currentFieldName)) { diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/BaseMultiValuesSourceFieldConfig.java b/server/src/main/java/org/opensearch/search/aggregations/support/BaseMultiValuesSourceFieldConfig.java new file mode 100644 index 0000000000000..c75ab861439d3 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/aggregations/support/BaseMultiValuesSourceFieldConfig.java @@ -0,0 +1,216 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.aggregations.support; + +import org.opensearch.LegacyESVersion; +import org.opensearch.common.ParseField; +import org.opensearch.common.Strings; +import org.opensearch.common.TriConsumer; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.io.stream.Writeable; +import org.opensearch.common.time.DateUtils; +import org.opensearch.common.xcontent.ObjectParser; +import org.opensearch.common.xcontent.ToXContentObject; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.common.xcontent.XContentParser; +import org.opensearch.script.Script; + +import java.io.IOException; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.util.Objects; + +/** + * A configuration that tells aggregation how to retrieve data from index + * in order to run a specific aggregation. + */ +public abstract class BaseMultiValuesSourceFieldConfig implements Writeable, ToXContentObject { + private final String fieldName; + private final Object missing; + private final Script script; + private final ZoneId timeZone; + + static TriConsumer< + ObjectParser>, Void>, + Boolean, + Boolean> PARSER = (parser, scriptable, timezoneAware) -> { + parser.declareString(Builder::setFieldName, ParseField.CommonFields.FIELD); + parser.declareField( + Builder::setMissing, + XContentParser::objectText, + ParseField.CommonFields.MISSING, + ObjectParser.ValueType.VALUE + ); + + if (scriptable) { + parser.declareField( + Builder::setScript, + (p, context) -> Script.parse(p), + Script.SCRIPT_PARSE_FIELD, + ObjectParser.ValueType.OBJECT_OR_STRING + ); + } + + if (timezoneAware) { + parser.declareField(Builder::setTimeZone, p -> { + if (p.currentToken() == XContentParser.Token.VALUE_STRING) { + return ZoneId.of(p.text()); + } else { + return ZoneOffset.ofHours(p.intValue()); + } + }, ParseField.CommonFields.TIME_ZONE, ObjectParser.ValueType.LONG); + } + }; + + 
public BaseMultiValuesSourceFieldConfig(String fieldName, Object missing, Script script, ZoneId timeZone) { + this.fieldName = fieldName; + this.missing = missing; + this.script = script; + this.timeZone = timeZone; + } + + public BaseMultiValuesSourceFieldConfig(StreamInput in) throws IOException { + if (in.getVersion().onOrAfter(LegacyESVersion.V_7_6_0)) { + this.fieldName = in.readOptionalString(); + } else { + this.fieldName = in.readString(); + } + this.missing = in.readGenericValue(); + this.script = in.readOptionalWriteable(Script::new); + if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { + this.timeZone = DateUtils.dateTimeZoneToZoneId(in.readOptionalTimeZone()); + } else { + this.timeZone = in.readOptionalZoneId(); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + if (out.getVersion().onOrAfter(LegacyESVersion.V_7_6_0)) { + out.writeOptionalString(fieldName); + } else { + out.writeString(fieldName); + } + out.writeGenericValue(missing); + out.writeOptionalWriteable(script); + if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { + out.writeOptionalTimeZone(DateUtils.zoneIdToDateTimeZone(timeZone)); + } else { + out.writeOptionalZoneId(timeZone); + } + doWriteTo(out); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (missing != null) { + builder.field(ParseField.CommonFields.MISSING.getPreferredName(), missing); + } + if (script != null) { + builder.field(Script.SCRIPT_PARSE_FIELD.getPreferredName(), script); + } + if (fieldName != null) { + builder.field(ParseField.CommonFields.FIELD.getPreferredName(), fieldName); + } + if (timeZone != null) { + builder.field(ParseField.CommonFields.TIME_ZONE.getPreferredName(), timeZone.getId()); + } + doXContentBody(builder, params); + builder.endObject(); + return builder; + } + + public Object getMissing() { + return missing; + } + + public Script getScript() { + return script; + } 
+ + public ZoneId getTimeZone() { + return timeZone; + } + + public String getFieldName() { + return fieldName; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + BaseMultiValuesSourceFieldConfig that = (BaseMultiValuesSourceFieldConfig) o; + return Objects.equals(fieldName, that.fieldName) + && Objects.equals(missing, that.missing) + && Objects.equals(script, that.script) + && Objects.equals(timeZone, that.timeZone); + } + + @Override + public int hashCode() { + return Objects.hash(fieldName, missing, script, timeZone); + } + + @Override + public String toString() { + return Strings.toString(this); + } + + abstract void doXContentBody(XContentBuilder builder, Params params) throws IOException; + + abstract void doWriteTo(StreamOutput out) throws IOException; + + public abstract static class Builder> { + String fieldName; + Object missing = null; + Script script = null; + ZoneId timeZone = null; + + public String getFieldName() { + return fieldName; + } + + public B setFieldName(String fieldName) { + this.fieldName = fieldName; + return (B) this; + } + + public Object getMissing() { + return missing; + } + + public B setMissing(Object missing) { + this.missing = missing; + return (B) this; + } + + public Script getScript() { + return script; + } + + public B setScript(Script script) { + this.script = script; + return (B) this; + } + + public ZoneId getTimeZone() { + return timeZone; + } + + public B setTimeZone(ZoneId timeZone) { + this.timeZone = timeZone; + return (B) this; + } + + abstract public C build(); + } +} diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/MultiTermsValuesSourceConfig.java b/server/src/main/java/org/opensearch/search/aggregations/support/MultiTermsValuesSourceConfig.java new file mode 100644 index 0000000000000..3bc7f444c610d --- /dev/null +++ 
b/server/src/main/java/org/opensearch/search/aggregations/support/MultiTermsValuesSourceConfig.java @@ -0,0 +1,203 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.aggregations.support; + +import org.opensearch.common.ParseField; +import org.opensearch.common.Strings; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.xcontent.ObjectParser; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.common.xcontent.XContentParser; +import org.opensearch.script.Script; +import org.opensearch.search.aggregations.AggregationBuilder; +import org.opensearch.search.aggregations.bucket.terms.IncludeExclude; + +import java.io.IOException; +import java.time.ZoneId; +import java.util.Objects; + +/** + * A configuration that used by multi_terms aggregations. 
+ */ +public class MultiTermsValuesSourceConfig extends BaseMultiValuesSourceFieldConfig { + private final ValueType userValueTypeHint; + private final String format; + private final IncludeExclude includeExclude; + + private static final String NAME = "field_config"; + public static final ParseField FILTER = new ParseField("filter"); + + public interface ParserSupplier { + ObjectParser apply( + Boolean scriptable, + Boolean timezoneAware, + Boolean valueTypeHinted, + Boolean formatted + ); + } + + public static final MultiTermsValuesSourceConfig.ParserSupplier PARSER = (scriptable, timezoneAware, valueTypeHinted, formatted) -> { + + ObjectParser parser = new ObjectParser<>( + MultiTermsValuesSourceConfig.NAME, + MultiTermsValuesSourceConfig.Builder::new + ); + + BaseMultiValuesSourceFieldConfig.PARSER.apply(parser, scriptable, timezoneAware); + + if (valueTypeHinted) { + parser.declareField( + MultiTermsValuesSourceConfig.Builder::setUserValueTypeHint, + p -> ValueType.lenientParse(p.text()), + ValueType.VALUE_TYPE, + ObjectParser.ValueType.STRING + ); + } + + if (formatted) { + parser.declareField( + MultiTermsValuesSourceConfig.Builder::setFormat, + XContentParser::text, + ParseField.CommonFields.FORMAT, + ObjectParser.ValueType.STRING + ); + } + + parser.declareField( + (b, v) -> b.setIncludeExclude(IncludeExclude.merge(b.getIncludeExclude(), v)), + IncludeExclude::parseExclude, + IncludeExclude.EXCLUDE_FIELD, + ObjectParser.ValueType.STRING_ARRAY + ); + + return parser; + }; + + protected MultiTermsValuesSourceConfig( + String fieldName, + Object missing, + Script script, + ZoneId timeZone, + ValueType userValueTypeHint, + String format, + IncludeExclude includeExclude + ) { + super(fieldName, missing, script, timeZone); + this.userValueTypeHint = userValueTypeHint; + this.format = format; + this.includeExclude = includeExclude; + } + + public MultiTermsValuesSourceConfig(StreamInput in) throws IOException { + super(in); + this.userValueTypeHint = 
in.readOptionalWriteable(ValueType::readFromStream); + this.format = in.readOptionalString(); + this.includeExclude = in.readOptionalWriteable(IncludeExclude::new); + } + + public ValueType getUserValueTypeHint() { + return userValueTypeHint; + } + + public String getFormat() { + return format; + } + + /** + * Get terms to include and exclude from the aggregation results + */ + public IncludeExclude getIncludeExclude() { + return includeExclude; + } + + @Override + public void doWriteTo(StreamOutput out) throws IOException { + out.writeOptionalWriteable(userValueTypeHint); + out.writeOptionalString(format); + out.writeOptionalWriteable(includeExclude); + } + + @Override + public void doXContentBody(XContentBuilder builder, Params params) throws IOException { + if (userValueTypeHint != null) { + builder.field(AggregationBuilder.CommonFields.VALUE_TYPE.getPreferredName(), userValueTypeHint.getPreferredName()); + } + if (format != null) { + builder.field(AggregationBuilder.CommonFields.FORMAT.getPreferredName(), format); + } + if (includeExclude != null) { + includeExclude.toXContent(builder, params); + } + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (super.equals(o) == false) return false; + + MultiTermsValuesSourceConfig that = (MultiTermsValuesSourceConfig) o; + return Objects.equals(userValueTypeHint, that.userValueTypeHint) + && Objects.equals(format, that.format) + && Objects.equals(includeExclude, that.includeExclude); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), userValueTypeHint, format, includeExclude); + } + + public static class Builder extends BaseMultiValuesSourceFieldConfig.Builder { + private ValueType userValueTypeHint = null; + private String format; + private IncludeExclude includeExclude = null; + + public IncludeExclude getIncludeExclude() { + return includeExclude; + } + + public Builder 
setIncludeExclude(IncludeExclude includeExclude) { + this.includeExclude = includeExclude; + return this; + } + + public ValueType getUserValueTypeHint() { + return userValueTypeHint; + } + + public Builder setUserValueTypeHint(ValueType userValueTypeHint) { + this.userValueTypeHint = userValueTypeHint; + return this; + } + + public String getFormat() { + return format; + } + + public Builder setFormat(String format) { + this.format = format; + return this; + } + + public MultiTermsValuesSourceConfig build() { + if (Strings.isNullOrEmpty(fieldName) && script == null) { + throw new IllegalArgumentException( + "[" + + ParseField.CommonFields.FIELD.getPreferredName() + + "] and [" + + Script.SCRIPT_PARSE_FIELD.getPreferredName() + + "] cannot both be null. " + + "Please specify one or the other." + ); + } + return new MultiTermsValuesSourceConfig(fieldName, missing, script, timeZone, userValueTypeHint, format, includeExclude); + } + } +} diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/MultiValuesSourceFieldConfig.java b/server/src/main/java/org/opensearch/search/aggregations/support/MultiValuesSourceFieldConfig.java index 54450763148c8..ea9bbe8019276 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/MultiValuesSourceFieldConfig.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/MultiValuesSourceFieldConfig.java @@ -38,26 +38,17 @@ import org.opensearch.common.TriFunction; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; -import org.opensearch.common.io.stream.Writeable; -import org.opensearch.common.time.DateUtils; import org.opensearch.common.xcontent.ObjectParser; -import org.opensearch.common.xcontent.ToXContentObject; import org.opensearch.common.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentParser; import org.opensearch.index.query.AbstractQueryBuilder; import org.opensearch.index.query.QueryBuilder; 
import org.opensearch.script.Script; import java.io.IOException; import java.time.ZoneId; -import java.time.ZoneOffset; import java.util.Objects; -public class MultiValuesSourceFieldConfig implements Writeable, ToXContentObject { - private final String fieldName; - private final Object missing; - private final Script script; - private final ZoneId timeZone; +public class MultiValuesSourceFieldConfig extends BaseMultiValuesSourceFieldConfig { private final QueryBuilder filter; private static final String NAME = "field_config"; @@ -73,32 +64,7 @@ public class MultiValuesSourceFieldConfig implements Writeable, ToXContentObject MultiValuesSourceFieldConfig.Builder::new ); - parser.declareString(MultiValuesSourceFieldConfig.Builder::setFieldName, ParseField.CommonFields.FIELD); - parser.declareField( - MultiValuesSourceFieldConfig.Builder::setMissing, - XContentParser::objectText, - ParseField.CommonFields.MISSING, - ObjectParser.ValueType.VALUE - ); - - if (scriptable) { - parser.declareField( - MultiValuesSourceFieldConfig.Builder::setScript, - (p, context) -> Script.parse(p), - Script.SCRIPT_PARSE_FIELD, - ObjectParser.ValueType.OBJECT_OR_STRING - ); - } - - if (timezoneAware) { - parser.declareField(MultiValuesSourceFieldConfig.Builder::setTimeZone, p -> { - if (p.currentToken() == XContentParser.Token.VALUE_STRING) { - return ZoneId.of(p.text()); - } else { - return ZoneOffset.ofHours(p.intValue()); - } - }, ParseField.CommonFields.TIME_ZONE, ObjectParser.ValueType.LONG); - } + BaseMultiValuesSourceFieldConfig.PARSER.apply(parser, scriptable, timezoneAware); if (filtered) { parser.declareField( @@ -112,26 +78,12 @@ public class MultiValuesSourceFieldConfig implements Writeable, ToXContentObject }; protected MultiValuesSourceFieldConfig(String fieldName, Object missing, Script script, ZoneId timeZone, QueryBuilder filter) { - this.fieldName = fieldName; - this.missing = missing; - this.script = script; - this.timeZone = timeZone; + super(fieldName, missing, script, 
timeZone); this.filter = filter; } public MultiValuesSourceFieldConfig(StreamInput in) throws IOException { - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_6_0)) { - this.fieldName = in.readOptionalString(); - } else { - this.fieldName = in.readString(); - } - this.missing = in.readGenericValue(); - this.script = in.readOptionalWriteable(Script::new); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - this.timeZone = DateUtils.dateTimeZoneToZoneId(in.readOptionalTimeZone()); - } else { - this.timeZone = in.readOptionalZoneId(); - } + super(in); if (in.getVersion().onOrAfter(LegacyESVersion.V_7_8_0)) { this.filter = in.readOptionalNamedWriteable(QueryBuilder.class); } else { @@ -139,133 +91,43 @@ public MultiValuesSourceFieldConfig(StreamInput in) throws IOException { } } - public Object getMissing() { - return missing; - } - - public Script getScript() { - return script; - } - - public ZoneId getTimeZone() { - return timeZone; - } - - public String getFieldName() { - return fieldName; - } - public QueryBuilder getFilter() { return filter; } @Override - public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_6_0)) { - out.writeOptionalString(fieldName); - } else { - out.writeString(fieldName); - } - out.writeGenericValue(missing); - out.writeOptionalWriteable(script); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeOptionalTimeZone(DateUtils.zoneIdToDateTimeZone(timeZone)); - } else { - out.writeOptionalZoneId(timeZone); - } + public void doWriteTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(LegacyESVersion.V_7_8_0)) { out.writeOptionalNamedWriteable(filter); } } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - if (missing != null) { - builder.field(ParseField.CommonFields.MISSING.getPreferredName(), missing); - } - if (script != null) { - 
builder.field(Script.SCRIPT_PARSE_FIELD.getPreferredName(), script); - } - if (fieldName != null) { - builder.field(ParseField.CommonFields.FIELD.getPreferredName(), fieldName); - } - if (timeZone != null) { - builder.field(ParseField.CommonFields.TIME_ZONE.getPreferredName(), timeZone.getId()); - } + public void doXContentBody(XContentBuilder builder, Params params) throws IOException { if (filter != null) { builder.field(FILTER.getPreferredName()); filter.toXContent(builder, params); } - builder.endObject(); - return builder; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; + if (super.equals(o) == false) return false; + MultiValuesSourceFieldConfig that = (MultiValuesSourceFieldConfig) o; - return Objects.equals(fieldName, that.fieldName) - && Objects.equals(missing, that.missing) - && Objects.equals(script, that.script) - && Objects.equals(timeZone, that.timeZone) - && Objects.equals(filter, that.filter); + return Objects.equals(filter, that.filter); } @Override public int hashCode() { - return Objects.hash(fieldName, missing, script, timeZone, filter); + return Objects.hash(super.hashCode(), filter); } - @Override - public String toString() { - return Strings.toString(this); - } - - public static class Builder { - private String fieldName; - private Object missing = null; - private Script script = null; - private ZoneId timeZone = null; + public static class Builder extends BaseMultiValuesSourceFieldConfig.Builder { private QueryBuilder filter = null; - public String getFieldName() { - return fieldName; - } - - public Builder setFieldName(String fieldName) { - this.fieldName = fieldName; - return this; - } - - public Object getMissing() { - return missing; - } - - public Builder setMissing(Object missing) { - this.missing = missing; - return this; - } - - public Script getScript() { - return script; - } - - public Builder setScript(Script script) { - this.script = script; - 
return this; - } - - public ZoneId getTimeZone() { - return timeZone; - } - - public Builder setTimeZone(ZoneId timeZone) { - this.timeZone = timeZone; - return this; - } - public Builder setFilter(QueryBuilder filter) { this.filter = filter; return this; diff --git a/server/src/test/java/org/opensearch/search/aggregations/AggregationsTests.java b/server/src/test/java/org/opensearch/search/aggregations/AggregationsTests.java index fe029d22a45b2..421865013a28c 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/AggregationsTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/AggregationsTests.java @@ -64,6 +64,7 @@ import org.opensearch.search.aggregations.bucket.range.InternalRangeTests; import org.opensearch.search.aggregations.bucket.sampler.InternalSamplerTests; import org.opensearch.search.aggregations.bucket.terms.DoubleTermsTests; +import org.opensearch.search.aggregations.bucket.terms.InternalMultiTermsTests; import org.opensearch.search.aggregations.bucket.terms.LongRareTermsTests; import org.opensearch.search.aggregations.bucket.terms.LongTermsTests; import org.opensearch.search.aggregations.bucket.terms.SignificantLongTermsTests; @@ -172,6 +173,7 @@ private static List> getAggsTests() { aggsTests.add(new InternalTopHitsTests()); aggsTests.add(new InternalCompositeTests()); aggsTests.add(new InternalMedianAbsoluteDeviationTests()); + aggsTests.add(new InternalMultiTermsTests()); return Collections.unmodifiableList(aggsTests); } diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/InternalMultiTermsTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/InternalMultiTermsTests.java new file mode 100644 index 0000000000000..2657f2bdd5138 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/InternalMultiTermsTests.java @@ -0,0 +1,116 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require 
contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.aggregations.bucket.terms; + +import org.apache.lucene.document.InetAddressPoint; +import org.apache.lucene.util.BytesRef; +import org.opensearch.search.DocValueFormat; +import org.opensearch.search.aggregations.BucketOrder; +import org.opensearch.search.aggregations.InternalAggregations; +import org.opensearch.search.aggregations.ParsedMultiBucketAggregation; +import org.opensearch.search.aggregations.support.CoreValuesSourceType; +import org.opensearch.search.aggregations.support.ValuesSourceType; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static java.util.Arrays.asList; + +public class InternalMultiTermsTests extends InternalTermsTestCase { + + /** + * terms count and type should consistent across entire test. + */ + private final List types = getSupportedValuesSourceTypes(); + + @Override + protected InternalTerms createTestInstance( + String name, + Map metadata, + InternalAggregations aggregations, + boolean showTermDocCountError, + long docCountError + ) { + BucketOrder order = BucketOrder.count(false); + long minDocCount = 1; + int requiredSize = 3; + int shardSize = requiredSize + 2; + long otherDocCount = 0; + + final int numBuckets = randomNumberOfBuckets(); + + List buckets = new ArrayList<>(); + List formats = types.stream().map(type -> type.getFormatter(null, null)).collect(Collectors.toList()); + + for (int i = 0; i < numBuckets; i++) { + buckets.add( + new InternalMultiTerms.Bucket( + types.stream().map(this::value).collect(Collectors.toList()), + minDocCount, + aggregations, + showTermDocCountError, + docCountError, + formats + ) + ); + } + BucketOrder reduceOrder = rarely() ? 
order : BucketOrder.key(true); + // mimic per-shard bucket sort operation, which is required by bucket reduce phase. + Collections.sort(buckets, reduceOrder.comparator()); + return new InternalMultiTerms( + name, + reduceOrder, + order, + requiredSize, + minDocCount, + metadata, + shardSize, + showTermDocCountError, + otherDocCount, + docCountError, + formats, + buckets + ); + } + + @Override + protected Class implementationClass() { + return ParsedMultiTerms.class; + } + + private static List getSupportedValuesSourceTypes() { + return Collections.unmodifiableList( + asList( + CoreValuesSourceType.NUMERIC, + CoreValuesSourceType.BYTES, + CoreValuesSourceType.IP, + CoreValuesSourceType.DATE, + CoreValuesSourceType.BOOLEAN + ) + ); + } + + private Object value(ValuesSourceType type) { + if (CoreValuesSourceType.NUMERIC.equals(type)) { + return randomInt(); + } else if (CoreValuesSourceType.DATE.equals(type)) { + return randomNonNegativeLong(); + } else if (CoreValuesSourceType.BOOLEAN.equals(type)) { + return randomBoolean(); + } else if (CoreValuesSourceType.BYTES.equals(type)) { + return new BytesRef(randomAlphaOfLength(10)); + } else if (CoreValuesSourceType.IP.equals(type)) { + return new BytesRef(InetAddressPoint.encode(randomIp(randomBoolean()))); + } + throw new IllegalArgumentException("unexpected type [" + type.typeName() + "]"); + } +} diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregationBuilderTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregationBuilderTests.java new file mode 100644 index 0000000000000..505fb7382ab3b --- /dev/null +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregationBuilderTests.java @@ -0,0 +1,182 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source 
license. + */ + +package org.opensearch.search.aggregations.bucket.terms; + +import org.opensearch.search.DocValueFormat; +import org.opensearch.search.aggregations.Aggregator; +import org.opensearch.search.aggregations.BaseAggregationTestCase; +import org.opensearch.search.aggregations.BucketOrder; +import org.opensearch.search.aggregations.support.MultiTermsValuesSourceConfig; +import org.opensearch.search.aggregations.support.ValueType; + +import java.time.ZoneId; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.function.Supplier; + +public class MultiTermsAggregationBuilderTests extends BaseAggregationTestCase { + + @Override + protected MultiTermsAggregationBuilder createTestAggregatorBuilder() { + String name = randomAlphaOfLengthBetween(3, 20); + MultiTermsAggregationBuilder factory = new MultiTermsAggregationBuilder(name); + + int termsCount = randomIntBetween(2, 10); + List fieldConfigs = new ArrayList<>(); + for (int i = 0; i < termsCount; i++) { + fieldConfigs.add(randomFieldConfig()); + } + factory.terms(fieldConfigs); + + if (randomBoolean()) { + factory.size(randomIntBetween(1, Integer.MAX_VALUE)); + } + if (randomBoolean()) { + factory.shardSize(randomIntBetween(1, Integer.MAX_VALUE)); + } + if (randomBoolean()) { + int minDocCount = randomInt(4); + switch (minDocCount) { + case 0: + break; + case 1: + case 2: + case 3: + case 4: + minDocCount = randomIntBetween(0, Integer.MAX_VALUE); + break; + default: + fail(); + } + factory.minDocCount(minDocCount); + } + if (randomBoolean()) { + int shardMinDocCount = randomInt(4); + switch (shardMinDocCount) { + case 0: + break; + case 1: + case 2: + case 3: + case 4: + shardMinDocCount = randomIntBetween(0, Integer.MAX_VALUE); + break; + default: + fail(); + } + factory.shardMinDocCount(shardMinDocCount); + } + if (randomBoolean()) { + factory.collectMode(randomFrom(Aggregator.SubAggCollectionMode.values())); + } + if (randomBoolean()) { + List order = 
randomOrder(); + if (order.size() == 1 && randomBoolean()) { + factory.order(order.get(0)); + } else { + factory.order(order); + } + } + if (randomBoolean()) { + factory.showTermDocCountError(randomBoolean()); + } + return factory; + } + + public void testInvalidTermsParams() { + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> { new MultiTermsAggregationBuilder("_name").terms(Collections.singletonList(randomFieldConfig())); } + ); + assertEquals( + "multi term aggregation must has at least 2 terms. Found [1] in [_name] Use terms aggregation for single term aggregation", + exception.getMessage() + ); + + exception = expectThrows( + IllegalArgumentException.class, + () -> { new MultiTermsAggregationBuilder("_name").terms(Collections.emptyList()); } + ); + assertEquals("multi term aggregation must has at least 2 terms. Found [0] in [_name]", exception.getMessage()); + + exception = expectThrows(IllegalArgumentException.class, () -> { new MultiTermsAggregationBuilder("_name").terms(null); }); + assertEquals("[terms] must not be null. Found null terms in [_name]", exception.getMessage()); + } + + private List randomOrder() { + List orders = new ArrayList<>(); + switch (randomInt(4)) { + case 0: + orders.add(BucketOrder.key(randomBoolean())); + break; + case 1: + orders.add(BucketOrder.count(randomBoolean())); + break; + case 2: + orders.add(BucketOrder.aggregation(randomAlphaOfLengthBetween(3, 20), randomBoolean())); + break; + case 3: + orders.add(BucketOrder.aggregation(randomAlphaOfLengthBetween(3, 20), randomAlphaOfLengthBetween(3, 20), randomBoolean())); + break; + case 4: + int numOrders = randomIntBetween(1, 3); + for (int i = 0; i < numOrders; i++) { + orders.addAll(randomOrder()); + } + break; + default: + fail(); + } + return orders; + } + + protected static MultiTermsValuesSourceConfig randomFieldConfig() { + String field = randomAlphaOfLength(10); + Object missing = randomBoolean() ? 
randomAlphaOfLength(10) : null; + ZoneId timeZone = randomBoolean() ? randomZone() : null; + ValueType userValueTypeHint = randomBoolean() + ? randomFrom(ValueType.STRING, ValueType.LONG, ValueType.DOUBLE, ValueType.DATE, ValueType.IP) + : null; + String format = randomBoolean() ? randomNumericDocValueFormat().toString() : null; + return randomFieldOrScript( + new MultiTermsValuesSourceConfig.Builder().setMissing(missing) + .setTimeZone(timeZone) + .setUserValueTypeHint(userValueTypeHint) + .setFormat(format), + field + ).build(); + } + + protected static MultiTermsValuesSourceConfig.Builder randomFieldOrScript(MultiTermsValuesSourceConfig.Builder builder, String field) { + int choice = randomInt(1); + switch (choice) { + case 0: + builder.setFieldName(field); + break; + case 1: + builder.setScript(mockScript("doc[" + field + "] + 1")); + break; + default: + throw new AssertionError("Unknown random operation [" + choice + "]"); + } + return builder; + } + + /** + * @return a random {@link DocValueFormat} that can be used in aggregations which + * compute numbers. + */ + protected static DocValueFormat randomNumericDocValueFormat() { + final List> formats = new ArrayList<>(3); + formats.add(() -> DocValueFormat.RAW); + formats.add(() -> new DocValueFormat.Decimal(randomFrom("###.##", "###,###.##"))); + return randomFrom(formats).get(); + } +} diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregatorTests.java new file mode 100644 index 0000000000000..f3922a65ff264 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregatorTests.java @@ -0,0 +1,909 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.aggregations.bucket.terms; + +import org.apache.lucene.document.DoubleDocValuesField; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FloatDocValuesField; +import org.apache.lucene.document.InetAddressPoint; +import org.apache.lucene.document.NumericDocValuesField; +import org.apache.lucene.document.SortedDocValuesField; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.document.SortedSetDocValuesField; +import org.apache.lucene.document.StringField; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.util.BytesRef; +import org.hamcrest.MatcherAssert; +import org.opensearch.common.CheckedConsumer; +import org.opensearch.common.network.InetAddresses; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.time.DateFormatter; +import org.opensearch.index.mapper.BooleanFieldMapper; +import org.opensearch.index.mapper.DateFieldMapper; +import org.opensearch.index.mapper.GeoPointFieldMapper; +import org.opensearch.index.mapper.IpFieldMapper; +import org.opensearch.index.mapper.KeywordFieldMapper; +import org.opensearch.index.mapper.MappedFieldType; +import org.opensearch.index.mapper.NumberFieldMapper; +import org.opensearch.script.MockScriptEngine; +import org.opensearch.script.Script; +import org.opensearch.script.ScriptEngine; +import org.opensearch.script.ScriptModule; +import org.opensearch.script.ScriptService; +import org.opensearch.script.ScriptType; +import org.opensearch.search.aggregations.AggregationBuilder; +import org.opensearch.search.aggregations.AggregatorTestCase; +import org.opensearch.search.aggregations.BucketOrder; +import org.opensearch.search.aggregations.metrics.InternalMax; +import 
org.opensearch.search.aggregations.metrics.MaxAggregationBuilder; +import org.opensearch.search.aggregations.support.CoreValuesSourceType; +import org.opensearch.search.aggregations.support.MultiTermsValuesSourceConfig; +import org.opensearch.search.aggregations.support.ValueType; +import org.opensearch.search.aggregations.support.ValuesSourceType; +import org.opensearch.search.lookup.LeafDocLookup; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.Consumer; +import java.util.function.Function; + +import static java.util.Arrays.asList; +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; +import static java.util.stream.Collectors.toList; +import static org.hamcrest.Matchers.closeTo; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; + +public class MultiTermsAggregatorTests extends AggregatorTestCase { + private static final String FIELD_NAME = "field"; + private static final String VALUE_SCRIPT_NAME = "value_script"; + private static final String FIELD_SCRIPT_NAME = "field_script"; + + private static final String AGG_NAME = "_name"; + + private static final String INT_FIELD = "int"; + private static final String LONG_FIELD = "long"; + private static final String FLOAT_FIELD = "float"; + private static final String DOUBLE_FIELD = "double"; + private static final String KEYWORD_FIELD = "keyword"; + private static final String DATE_FIELD = "date"; + private static final String IP_FIELD = "ip"; + private static final String GEO_POINT_FIELD = "geopoint"; + private static final String BOOL_FIELD = "bool"; + private static final String UNRELATED_KEYWORD_FIELD = "unrelated"; + + private static final Map mappedFieldTypeMap = new HashMap() { + { + 
put(INT_FIELD, new NumberFieldMapper.NumberFieldType(INT_FIELD, NumberFieldMapper.NumberType.INTEGER)); + put(LONG_FIELD, new NumberFieldMapper.NumberFieldType(LONG_FIELD, NumberFieldMapper.NumberType.LONG)); + put(FLOAT_FIELD, new NumberFieldMapper.NumberFieldType(FLOAT_FIELD, NumberFieldMapper.NumberType.FLOAT)); + put(DOUBLE_FIELD, new NumberFieldMapper.NumberFieldType(DOUBLE_FIELD, NumberFieldMapper.NumberType.DOUBLE)); + put(DATE_FIELD, dateFieldType(DATE_FIELD)); + put(KEYWORD_FIELD, new KeywordFieldMapper.KeywordFieldType(KEYWORD_FIELD)); + put(IP_FIELD, new IpFieldMapper.IpFieldType(IP_FIELD)); + put(FIELD_NAME, new NumberFieldMapper.NumberFieldType(FIELD_NAME, NumberFieldMapper.NumberType.INTEGER)); + put(UNRELATED_KEYWORD_FIELD, new KeywordFieldMapper.KeywordFieldType(UNRELATED_KEYWORD_FIELD)); + put(GEO_POINT_FIELD, new GeoPointFieldMapper.GeoPointFieldType(GEO_POINT_FIELD)); + put(BOOL_FIELD, new BooleanFieldMapper.BooleanFieldType(BOOL_FIELD)); + } + }; + + private static final Consumer NONE_DECORATOR = null; + + @Override + protected List getSupportedValuesSourceTypes() { + return Collections.unmodifiableList( + asList( + CoreValuesSourceType.NUMERIC, + CoreValuesSourceType.BYTES, + CoreValuesSourceType.IP, + CoreValuesSourceType.DATE, + CoreValuesSourceType.BOOLEAN + ) + ); + } + + @Override + protected AggregationBuilder createAggBuilderForTypeTest(MappedFieldType fieldType, String fieldName) { + return createTestAggregatorBuilder(asList(term(fieldName), term(fieldName))); + } + + @Override + protected ScriptService getMockScriptService() { + final Map, Object>> scripts = org.opensearch.common.collect.Map.of( + VALUE_SCRIPT_NAME, + vars -> ((Number) vars.get("_value")).doubleValue() + 1, + FIELD_SCRIPT_NAME, + vars -> { + final String fieldName = (String) vars.get(FIELD_NAME); + final LeafDocLookup lookup = (LeafDocLookup) vars.get("doc"); + return lookup.get(fieldName).stream().map(value -> ((Number) value).longValue() + 1).collect(toList()); + } + 
); + final MockScriptEngine engine = new MockScriptEngine(MockScriptEngine.NAME, scripts, emptyMap()); + final Map engines = singletonMap(engine.getType(), engine); + return new ScriptService(Settings.EMPTY, engines, ScriptModule.CORE_CONTEXTS); + } + + public void testNumbers() throws IOException { + testAggregation( + new MatchAllDocsQuery(), + fieldConfigs(asList(INT_FIELD, LONG_FIELD, FLOAT_FIELD, DOUBLE_FIELD)), + NONE_DECORATOR, + iw -> { + iw.addDocument( + asList( + new NumericDocValuesField(INT_FIELD, 1), + new SortedNumericDocValuesField(LONG_FIELD, 1L), + new FloatDocValuesField(FLOAT_FIELD, 1.0f), + new DoubleDocValuesField(DOUBLE_FIELD, 1.0d) + ) + ); + iw.addDocument( + asList( + new NumericDocValuesField(INT_FIELD, 1), + new SortedNumericDocValuesField(LONG_FIELD, 1L), + new FloatDocValuesField(FLOAT_FIELD, 1.0f), + new DoubleDocValuesField(DOUBLE_FIELD, 1.0d) + ) + ); + iw.addDocument( + asList( + new NumericDocValuesField(INT_FIELD, 2), + new SortedNumericDocValuesField(LONG_FIELD, 2L), + new FloatDocValuesField(FLOAT_FIELD, 2.0f), + new DoubleDocValuesField(DOUBLE_FIELD, 2.0d) + ) + ); + iw.addDocument( + asList( + new NumericDocValuesField(INT_FIELD, 2), + new SortedNumericDocValuesField(LONG_FIELD, 2L), + new FloatDocValuesField(FLOAT_FIELD, 3.0f), + new DoubleDocValuesField(DOUBLE_FIELD, 3.0d) + ) + ); + iw.addDocument( + asList( + new NumericDocValuesField(INT_FIELD, 2), + new SortedNumericDocValuesField(LONG_FIELD, 2L), + new FloatDocValuesField(FLOAT_FIELD, 3.0f), + new DoubleDocValuesField(DOUBLE_FIELD, 3.0d) + ) + ); + }, + h -> { + MatcherAssert.assertThat(h.getBuckets(), hasSize(3)); + MatcherAssert.assertThat(h.getBuckets().get(0).getKey(), contains(equalTo(1L), equalTo(1L), equalTo(1.0), equalTo(1.0))); + MatcherAssert.assertThat(h.getBuckets().get(0).getDocCount(), equalTo(2L)); + MatcherAssert.assertThat(h.getBuckets().get(1).getKey(), contains(equalTo(2L), equalTo(2L), equalTo(3.0), equalTo(3.0))); + 
MatcherAssert.assertThat(h.getBuckets().get(1).getDocCount(), equalTo(2L)); + MatcherAssert.assertThat(h.getBuckets().get(2).getKey(), contains(equalTo(2L), equalTo(2L), equalTo(2.0), equalTo(2.0))); + MatcherAssert.assertThat(h.getBuckets().get(2).getDocCount(), equalTo(1L)); + } + ); + } + + public void testMixNumberAndKeywordWithFilter() throws IOException { + testAggregation( + new TermQuery(new Term(KEYWORD_FIELD, "a")), + fieldConfigs(asList(KEYWORD_FIELD, FLOAT_FIELD)), + NONE_DECORATOR, + iw -> { + iw.addDocument( + asList( + new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("a")), + new StringField(KEYWORD_FIELD, "a", Field.Store.NO), + new FloatDocValuesField(FLOAT_FIELD, 2.0f) + ) + ); + iw.addDocument( + asList( + new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("a")), + new StringField(KEYWORD_FIELD, "a", Field.Store.NO), + new FloatDocValuesField(FLOAT_FIELD, 1.0f) + ) + ); + iw.addDocument( + asList( + new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("a")), + new StringField(KEYWORD_FIELD, "b", Field.Store.NO), + new FloatDocValuesField(FLOAT_FIELD, 1.0f) + ) + ); + iw.addDocument( + asList( + new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("a")), + new StringField(KEYWORD_FIELD, "a", Field.Store.NO), + new FloatDocValuesField(FLOAT_FIELD, 2.0f) + ) + ); + }, + h -> { + MatcherAssert.assertThat(h.getBuckets(), hasSize(2)); + MatcherAssert.assertThat(h.getBuckets().get(0).getKey(), contains(equalTo("a"), equalTo(2.0))); + MatcherAssert.assertThat(h.getBuckets().get(0).getDocCount(), equalTo(2L)); + MatcherAssert.assertThat(h.getBuckets().get(1).getKey(), contains(equalTo("a"), equalTo(1.0))); + MatcherAssert.assertThat(h.getBuckets().get(1).getDocCount(), equalTo(1L)); + } + ); + } + + public void testMixNumberAndKeyword() throws IOException { + testAggregation(new MatchAllDocsQuery(), fieldConfigs(asList(KEYWORD_FIELD, INT_FIELD, FLOAT_FIELD)), NONE_DECORATOR, iw -> { + iw.addDocument( + asList( + new 
SortedDocValuesField(KEYWORD_FIELD, new BytesRef("a")), + new NumericDocValuesField(INT_FIELD, 1), + new FloatDocValuesField(FLOAT_FIELD, 1.0f) + ) + ); + iw.addDocument( + asList( + new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("a")), + new NumericDocValuesField(INT_FIELD, 1), + new FloatDocValuesField(FLOAT_FIELD, 1.0f) + ) + ); + iw.addDocument( + asList( + new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("b")), + new NumericDocValuesField(INT_FIELD, 1), + new FloatDocValuesField(FLOAT_FIELD, 2.0f) + ) + ); + iw.addDocument( + asList( + new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("c")), + new NumericDocValuesField(INT_FIELD, 2), + new FloatDocValuesField(FLOAT_FIELD, 2.0f) + ) + ); + }, h -> { + MatcherAssert.assertThat(h.getBuckets(), hasSize(3)); + MatcherAssert.assertThat(h.getBuckets().get(0).getKey(), contains(equalTo("a"), equalTo(1L), equalTo(1.0))); + MatcherAssert.assertThat(h.getBuckets().get(0).getDocCount(), equalTo(2L)); + MatcherAssert.assertThat(h.getBuckets().get(1).getKey(), contains(equalTo("b"), equalTo(1L), equalTo(2.0))); + MatcherAssert.assertThat(h.getBuckets().get(1).getDocCount(), equalTo(1L)); + MatcherAssert.assertThat(h.getBuckets().get(2).getKey(), contains(equalTo("c"), equalTo(2L), equalTo(2.0))); + MatcherAssert.assertThat(h.getBuckets().get(2).getDocCount(), equalTo(1L)); + }); + } + + public void testMultiValuesField() throws IOException { + testAggregation(new MatchAllDocsQuery(), fieldConfigs(asList(KEYWORD_FIELD, INT_FIELD)), NONE_DECORATOR, iw -> { + iw.addDocument( + asList( + new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("a")), + new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("b")), + new SortedNumericDocValuesField(INT_FIELD, 1) + ) + ); + iw.addDocument( + asList( + new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("a")), + new SortedNumericDocValuesField(INT_FIELD, 1), + new SortedNumericDocValuesField(INT_FIELD, 3) + ) + ); + }, h -> { + 
MatcherAssert.assertThat(h.getBuckets(), hasSize(3)); + MatcherAssert.assertThat(h.getBuckets().get(0).getKey(), contains(equalTo("a"), equalTo(1L))); + MatcherAssert.assertThat(h.getBuckets().get(0).getDocCount(), equalTo(2L)); + MatcherAssert.assertThat(h.getBuckets().get(1).getKey(), contains(equalTo("a"), equalTo(3L))); + MatcherAssert.assertThat(h.getBuckets().get(1).getDocCount(), equalTo(1L)); + MatcherAssert.assertThat(h.getBuckets().get(2).getKey(), contains(equalTo("b"), equalTo(1L))); + }); + + testAggregation(new MatchAllDocsQuery(), fieldConfigs(asList(KEYWORD_FIELD, INT_FIELD)), NONE_DECORATOR, iw -> { + iw.addDocument( + asList( + new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("a")), + new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("b")), + new SortedNumericDocValuesField(INT_FIELD, 1), + new SortedNumericDocValuesField(INT_FIELD, 2) + ) + ); + iw.addDocument( + asList( + new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("a")), + new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef("c")), + new SortedNumericDocValuesField(INT_FIELD, 1), + new SortedNumericDocValuesField(INT_FIELD, 3) + ) + ); + }, h -> { + MatcherAssert.assertThat(h.getBuckets(), hasSize(7)); + MatcherAssert.assertThat(h.getBuckets().get(0).getKey(), contains(equalTo("a"), equalTo(1L))); + MatcherAssert.assertThat(h.getBuckets().get(0).getDocCount(), equalTo(2L)); + MatcherAssert.assertThat(h.getBuckets().get(1).getKey(), contains(equalTo("a"), equalTo(2L))); + MatcherAssert.assertThat(h.getBuckets().get(1).getDocCount(), equalTo(1L)); + MatcherAssert.assertThat(h.getBuckets().get(2).getKey(), contains(equalTo("a"), equalTo(3L))); + MatcherAssert.assertThat(h.getBuckets().get(2).getDocCount(), equalTo(1L)); + MatcherAssert.assertThat(h.getBuckets().get(3).getKey(), contains(equalTo("b"), equalTo(1L))); + MatcherAssert.assertThat(h.getBuckets().get(3).getDocCount(), equalTo(1L)); + MatcherAssert.assertThat(h.getBuckets().get(4).getKey(), 
contains(equalTo("b"), equalTo(2L))); + MatcherAssert.assertThat(h.getBuckets().get(4).getDocCount(), equalTo(1L)); + MatcherAssert.assertThat(h.getBuckets().get(5).getKey(), contains(equalTo("c"), equalTo(1L))); + MatcherAssert.assertThat(h.getBuckets().get(5).getDocCount(), equalTo(1L)); + MatcherAssert.assertThat(h.getBuckets().get(6).getKey(), contains(equalTo("c"), equalTo(3L))); + MatcherAssert.assertThat(h.getBuckets().get(6).getDocCount(), equalTo(1L)); + }); + } + + public void testScripts() throws IOException { + testAggregation( + new MatchAllDocsQuery(), + asList( + new MultiTermsValuesSourceConfig.Builder().setFieldName(KEYWORD_FIELD).build(), + new MultiTermsValuesSourceConfig.Builder().setScript( + new Script(ScriptType.INLINE, MockScriptEngine.NAME, FIELD_SCRIPT_NAME, singletonMap(FIELD_NAME, FIELD_NAME)) + ).setUserValueTypeHint(ValueType.LONG).build() + ), + null, + iw -> { + iw.addDocument( + asList(new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("a")), new NumericDocValuesField(FIELD_NAME, 1)) + ); + iw.addDocument( + asList(new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("b")), new NumericDocValuesField(FIELD_NAME, 2)) + ); + iw.addDocument( + asList(new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("b")), new NumericDocValuesField(FIELD_NAME, 2)) + ); + iw.addDocument( + asList(new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("c")), new NumericDocValuesField(FIELD_NAME, 3)) + ); + }, + h -> { + MatcherAssert.assertThat(h.getBuckets(), hasSize(3)); + MatcherAssert.assertThat(h.getBuckets().get(0).getKey(), contains(equalTo("b"), equalTo(3L))); + MatcherAssert.assertThat(h.getBuckets().get(0).getDocCount(), equalTo(2L)); + MatcherAssert.assertThat(h.getBuckets().get(1).getKey(), contains(equalTo("a"), equalTo(2L))); + MatcherAssert.assertThat(h.getBuckets().get(1).getDocCount(), equalTo(1L)); + MatcherAssert.assertThat(h.getBuckets().get(2).getKey(), contains(equalTo("c"), equalTo(4L))); + 
MatcherAssert.assertThat(h.getBuckets().get(2).getDocCount(), equalTo(1L)); + } + ); + } + + public void testScriptsWithoutValueTypeHint() throws IOException { + testAggregation( + new MatchAllDocsQuery(), + asList( + new MultiTermsValuesSourceConfig.Builder().setFieldName(KEYWORD_FIELD).build(), + new MultiTermsValuesSourceConfig.Builder().setScript( + new Script(ScriptType.INLINE, MockScriptEngine.NAME, FIELD_SCRIPT_NAME, singletonMap(FIELD_NAME, FIELD_NAME)) + ).build() + ), + null, + iw -> { + iw.addDocument( + asList(new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("a")), new NumericDocValuesField(FIELD_NAME, 1)) + ); + iw.addDocument( + asList(new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("b")), new NumericDocValuesField(FIELD_NAME, 2)) + ); + iw.addDocument( + asList(new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("b")), new NumericDocValuesField(FIELD_NAME, 2)) + ); + iw.addDocument( + asList(new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("c")), new NumericDocValuesField(FIELD_NAME, 3)) + ); + }, + h -> { + MatcherAssert.assertThat(h.getBuckets(), hasSize(3)); + MatcherAssert.assertThat(h.getBuckets().get(0).getKey(), contains(equalTo("b"), equalTo("3"))); + MatcherAssert.assertThat(h.getBuckets().get(0).getDocCount(), equalTo(2L)); + MatcherAssert.assertThat(h.getBuckets().get(1).getKey(), contains(equalTo("a"), equalTo("2"))); + MatcherAssert.assertThat(h.getBuckets().get(1).getDocCount(), equalTo(1L)); + MatcherAssert.assertThat(h.getBuckets().get(2).getKey(), contains(equalTo("c"), equalTo("4"))); + MatcherAssert.assertThat(h.getBuckets().get(2).getDocCount(), equalTo(1L)); + } + ); + } + + public void testValueScripts() throws IOException { + testAggregation( + new MatchAllDocsQuery(), + asList( + new MultiTermsValuesSourceConfig.Builder().setFieldName(KEYWORD_FIELD).build(), + new MultiTermsValuesSourceConfig.Builder().setFieldName(FIELD_NAME) + .setScript(new Script(ScriptType.INLINE, MockScriptEngine.NAME, VALUE_SCRIPT_NAME, 
emptyMap())) + .build() + ), + null, + iw -> { + iw.addDocument( + asList(new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("a")), new NumericDocValuesField(FIELD_NAME, 1)) + ); + iw.addDocument( + asList(new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("b")), new NumericDocValuesField(FIELD_NAME, 2)) + ); + iw.addDocument( + asList(new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("b")), new NumericDocValuesField(FIELD_NAME, 2)) + ); + iw.addDocument( + asList(new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("c")), new NumericDocValuesField(FIELD_NAME, 3)) + ); + }, + h -> { + MatcherAssert.assertThat(h.getBuckets(), hasSize(3)); + MatcherAssert.assertThat(h.getBuckets().get(0).getKey(), contains(equalTo("b"), equalTo(3.0))); + MatcherAssert.assertThat(h.getBuckets().get(0).getDocCount(), equalTo(2L)); + MatcherAssert.assertThat(h.getBuckets().get(1).getKey(), contains(equalTo("a"), equalTo(2.0))); + MatcherAssert.assertThat(h.getBuckets().get(1).getDocCount(), equalTo(1L)); + MatcherAssert.assertThat(h.getBuckets().get(2).getKey(), contains(equalTo("c"), equalTo(4.0))); + MatcherAssert.assertThat(h.getBuckets().get(2).getDocCount(), equalTo(1L)); + } + ); + } + + public void testOrderByMetrics() throws IOException { + testAggregation(new MatchAllDocsQuery(), fieldConfigs(asList(KEYWORD_FIELD, INT_FIELD)), b -> { + b.order(BucketOrder.aggregation("max", false)); + b.subAggregation(new MaxAggregationBuilder("max").field(FLOAT_FIELD)); + }, iw -> { + iw.addDocument( + asList( + new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("a")), + new NumericDocValuesField(INT_FIELD, 1), + new FloatDocValuesField(FLOAT_FIELD, 1.0f) + ) + ); + iw.addDocument( + asList( + new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("b")), + new NumericDocValuesField(INT_FIELD, 2), + new FloatDocValuesField(FLOAT_FIELD, 2.0f) + ) + ); + iw.addDocument( + asList( + new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("c")), + new NumericDocValuesField(INT_FIELD, 3), + new 
FloatDocValuesField(FLOAT_FIELD, 3.0f) + ) + ); + iw.addDocument( + asList( + new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("a")), + new NumericDocValuesField(INT_FIELD, 1), + new FloatDocValuesField(FLOAT_FIELD, 4.0f) + ) + ); + iw.addDocument( + asList( + new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("b")), + new NumericDocValuesField(INT_FIELD, 2), + new FloatDocValuesField(FLOAT_FIELD, 3.0f) + ) + ); + iw.addDocument( + asList( + new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("c")), + new NumericDocValuesField(INT_FIELD, 3), + new FloatDocValuesField(FLOAT_FIELD, 2.0f) + ) + ); + }, h -> { + MatcherAssert.assertThat(h.getBuckets(), hasSize(3)); + MatcherAssert.assertThat(h.getBuckets().get(0).getKey(), contains(equalTo("a"), equalTo(1L))); + MatcherAssert.assertThat(h.getBuckets().get(0).getDocCount(), equalTo(2L)); + MatcherAssert.assertThat(((InternalMax) (h.getBuckets().get(0).getAggregations().get("max"))).value(), closeTo(4.0f, 0.01)); + MatcherAssert.assertThat(h.getBuckets().get(1).getKey(), contains(equalTo("b"), equalTo(2L))); + MatcherAssert.assertThat(h.getBuckets().get(1).getDocCount(), equalTo(2L)); + MatcherAssert.assertThat(((InternalMax) (h.getBuckets().get(1).getAggregations().get("max"))).value(), closeTo(3.0f, 0.01)); + MatcherAssert.assertThat(h.getBuckets().get(2).getKey(), contains(equalTo("c"), equalTo(3L))); + MatcherAssert.assertThat(h.getBuckets().get(2).getDocCount(), equalTo(2L)); + MatcherAssert.assertThat(((InternalMax) (h.getBuckets().get(2).getAggregations().get("max"))).value(), closeTo(3.0f, 0.01)); + }); + } + + public void testNumberFieldFormat() throws IOException { + testAggregation( + new MatchAllDocsQuery(), + asList(term(KEYWORD_FIELD), new MultiTermsValuesSourceConfig.Builder().setFieldName(DOUBLE_FIELD).setFormat("00.00").build()), + null, + iw -> { + iw.addDocument( + asList(new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("a")), new DoubleDocValuesField(DOUBLE_FIELD, 1.0d)) + ); + 
iw.addDocument( + asList(new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("b")), new DoubleDocValuesField(DOUBLE_FIELD, 2.0d)) + ); + iw.addDocument( + asList(new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("a")), new DoubleDocValuesField(DOUBLE_FIELD, 2.0d)) + ); + iw.addDocument( + asList(new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("a")), new DoubleDocValuesField(DOUBLE_FIELD, 1.0d)) + ); + }, + h -> { + MatcherAssert.assertThat(h.getBuckets(), hasSize(3)); + MatcherAssert.assertThat(h.getBuckets().get(0).getKeyAsString(), equalTo("a|01.00")); + MatcherAssert.assertThat(h.getBuckets().get(0).getDocCount(), equalTo(2L)); + MatcherAssert.assertThat(h.getBuckets().get(1).getKeyAsString(), equalTo("a|02.00")); + MatcherAssert.assertThat(h.getBuckets().get(1).getDocCount(), equalTo(1L)); + MatcherAssert.assertThat(h.getBuckets().get(2).getKeyAsString(), equalTo("b|02.00")); + MatcherAssert.assertThat(h.getBuckets().get(2).getDocCount(), equalTo(1L)); + } + ); + } + + public void testDates() throws IOException { + testAggregation( + new MatchAllDocsQuery(), + asList(new MultiTermsValuesSourceConfig.Builder().setFieldName(DATE_FIELD).build(), term(KEYWORD_FIELD)), + null, + iw -> { + iw.addDocument( + asList( + new SortedNumericDocValuesField(DATE_FIELD, dateFieldType(DATE_FIELD).parse("2022-03-23")), + new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("a")) + ) + ); + iw.addDocument( + asList( + new SortedNumericDocValuesField(DATE_FIELD, dateFieldType(DATE_FIELD).parse("2022-03-23")), + new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("b")) + ) + ); + iw.addDocument( + asList( + new SortedNumericDocValuesField(DATE_FIELD, dateFieldType(DATE_FIELD).parse("2022-03-22")), + new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("a")) + ) + ); + iw.addDocument( + asList( + new SortedNumericDocValuesField(DATE_FIELD, dateFieldType(DATE_FIELD).parse("2022-03-23")), + new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("a")) + ) + ); + iw.addDocument( 
+ asList( + new SortedNumericDocValuesField(DATE_FIELD, dateFieldType(DATE_FIELD).parse("2022-03-21")), + new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("c")) + ) + ); + }, + h -> { + MatcherAssert.assertThat(h.getBuckets(), hasSize(4)); + MatcherAssert.assertThat(h.getBuckets().get(0).getKeyAsString(), equalTo("2022-03-23|a")); + MatcherAssert.assertThat(h.getBuckets().get(0).getDocCount(), equalTo(2L)); + MatcherAssert.assertThat(h.getBuckets().get(1).getKeyAsString(), equalTo("2022-03-21|c")); + MatcherAssert.assertThat(h.getBuckets().get(1).getDocCount(), equalTo(1L)); + MatcherAssert.assertThat(h.getBuckets().get(2).getKeyAsString(), equalTo("2022-03-22|a")); + MatcherAssert.assertThat(h.getBuckets().get(2).getDocCount(), equalTo(1L)); + MatcherAssert.assertThat(h.getBuckets().get(3).getKeyAsString(), equalTo("2022-03-23|b")); + MatcherAssert.assertThat(h.getBuckets().get(3).getDocCount(), equalTo(1L)); + } + ); + } + + public void testDatesFieldFormat() throws IOException { + testAggregation( + new MatchAllDocsQuery(), + asList( + new MultiTermsValuesSourceConfig.Builder().setFieldName(DATE_FIELD).setFormat("yyyy/MM/dd").build(), + term(KEYWORD_FIELD) + ), + null, + iw -> { + iw.addDocument( + asList( + new SortedNumericDocValuesField(DATE_FIELD, dateFieldType(DATE_FIELD).parse("2022-03-23")), + new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("a")) + ) + ); + iw.addDocument( + asList( + new SortedNumericDocValuesField(DATE_FIELD, dateFieldType(DATE_FIELD).parse("2022-03-23")), + new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("b")) + ) + ); + iw.addDocument( + asList( + new SortedNumericDocValuesField(DATE_FIELD, dateFieldType(DATE_FIELD).parse("2022-03-22")), + new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("a")) + ) + ); + iw.addDocument( + asList( + new SortedNumericDocValuesField(DATE_FIELD, dateFieldType(DATE_FIELD).parse("2022-03-23")), + new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("a")) + ) + ); + iw.addDocument( + 
asList( + new SortedNumericDocValuesField(DATE_FIELD, dateFieldType(DATE_FIELD).parse("2022-03-21")), + new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("c")) + ) + ); + }, + h -> { + MatcherAssert.assertThat(h.getBuckets(), hasSize(4)); + MatcherAssert.assertThat(h.getBuckets().get(0).getKeyAsString(), equalTo("2022/03/23|a")); + MatcherAssert.assertThat(h.getBuckets().get(0).getDocCount(), equalTo(2L)); + MatcherAssert.assertThat(h.getBuckets().get(1).getKeyAsString(), equalTo("2022/03/21|c")); + MatcherAssert.assertThat(h.getBuckets().get(1).getDocCount(), equalTo(1L)); + MatcherAssert.assertThat(h.getBuckets().get(2).getKeyAsString(), equalTo("2022/03/22|a")); + MatcherAssert.assertThat(h.getBuckets().get(2).getDocCount(), equalTo(1L)); + MatcherAssert.assertThat(h.getBuckets().get(3).getKeyAsString(), equalTo("2022/03/23|b")); + MatcherAssert.assertThat(h.getBuckets().get(3).getDocCount(), equalTo(1L)); + } + ); + } + + public void testIpAndKeyword() throws IOException { + testAggregation(new MatchAllDocsQuery(), fieldConfigs(asList(KEYWORD_FIELD, IP_FIELD)), NONE_DECORATOR, iw -> { + iw.addDocument( + asList( + new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("a")), + new SortedDocValuesField(IP_FIELD, new BytesRef(InetAddressPoint.encode(InetAddresses.forString("192.168.0.0")))) + ) + ); + iw.addDocument( + asList( + new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("b")), + new SortedDocValuesField(IP_FIELD, new BytesRef(InetAddressPoint.encode(InetAddresses.forString("192.168.0.1")))) + ) + ); + iw.addDocument( + asList( + new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("c")), + new SortedDocValuesField(IP_FIELD, new BytesRef(InetAddressPoint.encode(InetAddresses.forString("192.168.0.2")))) + ) + ); + iw.addDocument( + asList( + new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("a")), + new SortedDocValuesField(IP_FIELD, new BytesRef(InetAddressPoint.encode(InetAddresses.forString("192.168.0.0")))) + ) + ); + }, h -> { + 
MatcherAssert.assertThat(h.getBuckets(), hasSize(3)); + MatcherAssert.assertThat(h.getBuckets().get(0).getKey(), contains(equalTo("a"), equalTo("192.168.0.0"))); + MatcherAssert.assertThat(h.getBuckets().get(0).getKeyAsString(), equalTo("a|192.168.0.0")); + MatcherAssert.assertThat(h.getBuckets().get(0).getDocCount(), equalTo(2L)); + MatcherAssert.assertThat(h.getBuckets().get(1).getKey(), contains(equalTo("b"), equalTo("192.168.0.1"))); + MatcherAssert.assertThat(h.getBuckets().get(1).getKeyAsString(), equalTo("b|192.168.0.1")); + MatcherAssert.assertThat(h.getBuckets().get(1).getDocCount(), equalTo(1L)); + MatcherAssert.assertThat(h.getBuckets().get(2).getKey(), contains(equalTo("c"), equalTo("192.168.0.2"))); + MatcherAssert.assertThat(h.getBuckets().get(2).getKeyAsString(), equalTo("c|192.168.0.2")); + MatcherAssert.assertThat(h.getBuckets().get(2).getDocCount(), equalTo(1L)); + }); + } + + public void testEmpty() throws IOException { + testAggregation(new MatchAllDocsQuery(), fieldConfigs(asList(KEYWORD_FIELD, INT_FIELD)), NONE_DECORATOR, iw -> {}, h -> { + MatcherAssert.assertThat(h.getName(), equalTo(AGG_NAME)); + MatcherAssert.assertThat(h.getBuckets(), hasSize(0)); + }); + } + + public void testNull() throws IOException { + testAggregation(new MatchAllDocsQuery(), fieldConfigs(asList(KEYWORD_FIELD, INT_FIELD, FLOAT_FIELD)), NONE_DECORATOR, iw -> { + iw.addDocument( + asList( + new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("a")), + new NumericDocValuesField(INT_FIELD, 1), + new FloatDocValuesField(FLOAT_FIELD, 1.0f) + ) + ); + iw.addDocument( + asList( + new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("a")), + new NumericDocValuesField(INT_FIELD, 1), + new FloatDocValuesField(FLOAT_FIELD, 1.0f) + ) + ); + iw.addDocument(asList(new NumericDocValuesField(INT_FIELD, 1), new FloatDocValuesField(FLOAT_FIELD, 2.0f))); + iw.addDocument(asList(new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("c")), new FloatDocValuesField(FLOAT_FIELD, 2.0f))); + 
iw.addDocument(asList(new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("d")), new NumericDocValuesField(INT_FIELD, 3))); + + }, h -> { + MatcherAssert.assertThat(h.getBuckets(), hasSize(1)); + MatcherAssert.assertThat(h.getBuckets().get(0).getKey(), contains(equalTo("a"), equalTo(1L), equalTo(1.0))); + MatcherAssert.assertThat(h.getBuckets().get(0).getDocCount(), equalTo(2L)); + }); + + } + + public void testMissing() throws IOException { + testAggregation( + new MatchAllDocsQuery(), + asList( + new MultiTermsValuesSourceConfig.Builder().setFieldName(KEYWORD_FIELD).setMissing("a").build(), + new MultiTermsValuesSourceConfig.Builder().setFieldName(INT_FIELD).setMissing(1).build(), + new MultiTermsValuesSourceConfig.Builder().setFieldName(FLOAT_FIELD).setMissing(2.0f).build() + ), + NONE_DECORATOR, + iw -> { + iw.addDocument( + asList( + new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("a")), + new NumericDocValuesField(INT_FIELD, 1), + new FloatDocValuesField(FLOAT_FIELD, 2.0f) + ) + ); + iw.addDocument( + asList( + // missing KEYWORD_FIELD + new NumericDocValuesField(INT_FIELD, 1), + new FloatDocValuesField(FLOAT_FIELD, 1.0f) + ) + ); + iw.addDocument( + asList( + new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("b")), + // missing INT_FIELD + new FloatDocValuesField(FLOAT_FIELD, 2.0f) + ) + ); + iw.addDocument(asList(new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("c")), new NumericDocValuesField(INT_FIELD, 2) + // missing FLOAT_FIELD + )); + iw.addDocument(singletonList(new SortedDocValuesField(UNRELATED_KEYWORD_FIELD, new BytesRef("unrelated")))); + }, + h -> { + MatcherAssert.assertThat(h.getBuckets(), hasSize(4)); + MatcherAssert.assertThat(h.getBuckets().get(0).getKey(), contains(equalTo("a"), equalTo(1L), equalTo(2.0))); + MatcherAssert.assertThat(h.getBuckets().get(0).getDocCount(), equalTo(2L)); + MatcherAssert.assertThat(h.getBuckets().get(1).getKey(), contains(equalTo("a"), equalTo(1L), equalTo(1.0))); + 
MatcherAssert.assertThat(h.getBuckets().get(1).getDocCount(), equalTo(1L)); + MatcherAssert.assertThat(h.getBuckets().get(2).getKey(), contains(equalTo("b"), equalTo(1L), equalTo(2.0))); + MatcherAssert.assertThat(h.getBuckets().get(2).getDocCount(), equalTo(1L)); + MatcherAssert.assertThat(h.getBuckets().get(3).getKey(), contains(equalTo("c"), equalTo(2L), equalTo(2.0))); + MatcherAssert.assertThat(h.getBuckets().get(3).getDocCount(), equalTo(1L)); + } + ); + } + + public void testMixKeywordAndBoolean() throws IOException { + testAggregation(new MatchAllDocsQuery(), fieldConfigs(asList(KEYWORD_FIELD, BOOL_FIELD)), NONE_DECORATOR, iw -> { + iw.addDocument(asList(new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("a")), new NumericDocValuesField(BOOL_FIELD, 1))); + iw.addDocument(asList(new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("a")), new NumericDocValuesField(BOOL_FIELD, 0))); + iw.addDocument(asList(new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("b")), new NumericDocValuesField(BOOL_FIELD, 0))); + iw.addDocument(asList(new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("b")), new NumericDocValuesField(BOOL_FIELD, 1))); + }, h -> { + MatcherAssert.assertThat(h.getBuckets(), hasSize(4)); + MatcherAssert.assertThat(h.getBuckets().get(0).getKey(), contains(equalTo("a"), equalTo(false))); + MatcherAssert.assertThat(h.getBuckets().get(0).getKeyAsString(), equalTo("a|false")); + MatcherAssert.assertThat(h.getBuckets().get(0).getDocCount(), equalTo(1L)); + MatcherAssert.assertThat(h.getBuckets().get(1).getKey(), contains(equalTo("a"), equalTo(true))); + MatcherAssert.assertThat(h.getBuckets().get(1).getKeyAsString(), equalTo("a|true")); + MatcherAssert.assertThat(h.getBuckets().get(1).getDocCount(), equalTo(1L)); + MatcherAssert.assertThat(h.getBuckets().get(2).getKey(), contains(equalTo("b"), equalTo(false))); + MatcherAssert.assertThat(h.getBuckets().get(2).getKeyAsString(), equalTo("b|false")); + 
MatcherAssert.assertThat(h.getBuckets().get(2).getDocCount(), equalTo(1L)); + MatcherAssert.assertThat(h.getBuckets().get(3).getKey(), contains(equalTo("b"), equalTo(true))); + MatcherAssert.assertThat(h.getBuckets().get(3).getKeyAsString(), equalTo("b|true")); + MatcherAssert.assertThat(h.getBuckets().get(3).getDocCount(), equalTo(1L)); + }); + } + + public void testGeoPointField() { + assertThrows( + IllegalArgumentException.class, + () -> testAggregation( + new MatchAllDocsQuery(), + asList(term(KEYWORD_FIELD), term(GEO_POINT_FIELD)), + NONE_DECORATOR, + iw -> {}, + f -> fail("should throw exception") + ) + ); + } + + public void testMinDocCount() throws IOException { + testAggregation(new MatchAllDocsQuery(), fieldConfigs(asList(KEYWORD_FIELD, INT_FIELD)), b -> b.minDocCount(2), iw -> { + iw.addDocument(asList(new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("a")), new NumericDocValuesField(INT_FIELD, 1))); + iw.addDocument(asList(new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("a")), new NumericDocValuesField(INT_FIELD, 1))); + iw.addDocument(asList(new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("a")), new NumericDocValuesField(INT_FIELD, 2))); + iw.addDocument(asList(new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("b")), new NumericDocValuesField(INT_FIELD, 1))); + iw.addDocument(asList(new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("c")), new NumericDocValuesField(INT_FIELD, 2))); + }, h -> { + MatcherAssert.assertThat(h.getBuckets(), hasSize(1)); + MatcherAssert.assertThat(h.getBuckets().get(0).getKey(), contains(equalTo("a"), equalTo(1L))); + MatcherAssert.assertThat(h.getBuckets().get(0).getDocCount(), equalTo(2L)); + }); + } + + public void testIncludeExclude() throws IOException { + testAggregation( + new MatchAllDocsQuery(), + asList( + new MultiTermsValuesSourceConfig.Builder().setFieldName(KEYWORD_FIELD) + .setIncludeExclude(new IncludeExclude("a", null)) + .build(), + term(INT_FIELD) + ), + NONE_DECORATOR, + iw -> { + 
iw.addDocument(asList(new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("a")), new NumericDocValuesField(INT_FIELD, 1))); + iw.addDocument(asList(new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("a")), new NumericDocValuesField(INT_FIELD, 1))); + iw.addDocument(asList(new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("b")), new NumericDocValuesField(INT_FIELD, 1))); + iw.addDocument(asList(new SortedDocValuesField(KEYWORD_FIELD, new BytesRef("c")), new NumericDocValuesField(INT_FIELD, 2))); + }, + h -> { + MatcherAssert.assertThat(h.getBuckets(), hasSize(1)); + MatcherAssert.assertThat(h.getBuckets().get(0).getKey(), contains(equalTo("a"), equalTo(1L))); + MatcherAssert.assertThat(h.getBuckets().get(0).getDocCount(), equalTo(2L)); + } + ); + } + + private void testAggregation( + Query query, + List terms, + Consumer decorator, + CheckedConsumer indexBuilder, + Consumer verify + ) throws IOException { + MultiTermsAggregationBuilder builder = createTestAggregatorBuilder(terms); + if (decorator != NONE_DECORATOR) { + decorator.accept(builder); + } + testCase(builder, query, indexBuilder, verify, mappedFieldTypeMap.values().toArray(new MappedFieldType[] {})); + } + + private MultiTermsValuesSourceConfig term(String field) { + return new MultiTermsValuesSourceConfig.Builder().setFieldName(field).build(); + } + + private MultiTermsAggregationBuilder createTestAggregatorBuilder(List termsConfig) { + MultiTermsAggregationBuilder factory = new MultiTermsAggregationBuilder(AGG_NAME); + factory.terms(termsConfig); + + if (randomBoolean()) { + factory.size(randomIntBetween(10, Integer.MAX_VALUE)); + } + if (randomBoolean()) { + factory.shardSize(randomIntBetween(10, Integer.MAX_VALUE)); + } + if (randomBoolean()) { + factory.showTermDocCountError(randomBoolean()); + } + return factory; + } + + private List fieldConfigs(List terms) { + List termConfigs = new ArrayList<>(); + for (String term : terms) { + termConfigs.add(term(term)); + } + return termConfigs; + } + + 
private static DateFieldMapper.DateFieldType dateFieldType(String name) { + return new DateFieldMapper.DateFieldType( + name, + true, + false, + true, + DateFormatter.forPattern("date"), + DateFieldMapper.Resolution.MILLISECONDS, + null, + Collections.emptyMap() + ); + } +} diff --git a/server/src/test/java/org/opensearch/search/aggregations/support/MultiTermsValuesSourceConfigTests.java b/server/src/test/java/org/opensearch/search/aggregations/support/MultiTermsValuesSourceConfigTests.java new file mode 100644 index 0000000000000..a142faa2048ea --- /dev/null +++ b/server/src/test/java/org/opensearch/search/aggregations/support/MultiTermsValuesSourceConfigTests.java @@ -0,0 +1,65 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.aggregations.support; + +import org.opensearch.common.io.stream.NamedWriteableRegistry; +import org.opensearch.common.io.stream.Writeable; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.NamedXContentRegistry; +import org.opensearch.common.xcontent.XContentParser; +import org.opensearch.script.Script; +import org.opensearch.search.SearchModule; +import org.opensearch.test.AbstractSerializingTestCase; + +import java.io.IOException; +import java.time.ZoneId; +import java.util.Collections; + +import static org.hamcrest.Matchers.equalTo; + +public class MultiTermsValuesSourceConfigTests extends AbstractSerializingTestCase { + + @Override + protected MultiTermsValuesSourceConfig doParseInstance(XContentParser parser) throws IOException { + return MultiTermsValuesSourceConfig.PARSER.apply(true, true, true, true).apply(parser, null).build(); + } + + @Override + protected MultiTermsValuesSourceConfig createTestInstance() { + String field = randomAlphaOfLength(10); + Object missing = randomBoolean() ? 
randomAlphaOfLength(10) : null; + ZoneId timeZone = randomBoolean() ? randomZone() : null; + Script script = randomBoolean() ? new Script(randomAlphaOfLength(10)) : null; + return new MultiTermsValuesSourceConfig.Builder().setFieldName(field) + .setMissing(missing) + .setScript(script) + .setTimeZone(timeZone) + .build(); + } + + @Override + protected Writeable.Reader instanceReader() { + return MultiTermsValuesSourceConfig::new; + } + + public void testMissingFieldScript() { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new MultiTermsValuesSourceConfig.Builder().build()); + assertThat(e.getMessage(), equalTo("[field] and [script] cannot both be null. Please specify one or the other.")); + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry(new SearchModule(Settings.EMPTY, Collections.emptyList()).getNamedWriteables()); + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + return new NamedXContentRegistry(new SearchModule(Settings.EMPTY, Collections.emptyList()).getNamedXContents()); + } +} diff --git a/test/framework/src/main/java/org/opensearch/test/InternalAggregationTestCase.java b/test/framework/src/main/java/org/opensearch/test/InternalAggregationTestCase.java index 6be7abffb9ad6..f138de152a488 100644 --- a/test/framework/src/main/java/org/opensearch/test/InternalAggregationTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/InternalAggregationTestCase.java @@ -101,9 +101,11 @@ import org.opensearch.search.aggregations.bucket.terms.DoubleTerms; import org.opensearch.search.aggregations.bucket.terms.LongRareTerms; import org.opensearch.search.aggregations.bucket.terms.LongTerms; +import org.opensearch.search.aggregations.bucket.terms.MultiTermsAggregationBuilder; import org.opensearch.search.aggregations.bucket.terms.ParsedDoubleTerms; import org.opensearch.search.aggregations.bucket.terms.ParsedLongRareTerms; import 
org.opensearch.search.aggregations.bucket.terms.ParsedLongTerms; +import org.opensearch.search.aggregations.bucket.terms.ParsedMultiTerms; import org.opensearch.search.aggregations.bucket.terms.ParsedSignificantLongTerms; import org.opensearch.search.aggregations.bucket.terms.ParsedSignificantStringTerms; import org.opensearch.search.aggregations.bucket.terms.ParsedStringRareTerms; @@ -289,6 +291,7 @@ public ReduceContext forFinalReduction() { map.put(IpRangeAggregationBuilder.NAME, (p, c) -> ParsedBinaryRange.fromXContent(p, (String) c)); map.put(TopHitsAggregationBuilder.NAME, (p, c) -> ParsedTopHits.fromXContent(p, (String) c)); map.put(CompositeAggregationBuilder.NAME, (p, c) -> ParsedComposite.fromXContent(p, (String) c)); + map.put(MultiTermsAggregationBuilder.NAME, (p, c) -> ParsedMultiTerms.fromXContent(p, (String) c)); namedXContents = map.entrySet() .stream() From cf78065481ded1d0a8a7777779dced20399848cb Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Thu, 21 Apr 2022 16:16:57 -0500 Subject: [PATCH 111/653] [Remove] MainResponse version override cluster setting (#3031) OpenSearch 2.0.0 no longer needs HLRC compatibility with legacy clients. This commit removes all logic to spoof the version as a legacy cluster. 
Signed-off-by: Nicholas Walter Knize --- .../org/opensearch/client/PingAndInfoIT.java | 22 ---------- .../opensearch/action/main/MainResponse.java | 37 ++-------------- .../action/main/TransportMainAction.java | 39 +---------------- .../common/settings/ClusterSettings.java | 2 - .../action/main/MainActionTests.java | 43 ------------------- .../action/main/MainResponseTests.java | 17 -------- 6 files changed, 5 insertions(+), 155 deletions(-) diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/PingAndInfoIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/PingAndInfoIT.java index 72201084570bb..09ef90cef144d 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/PingAndInfoIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/PingAndInfoIT.java @@ -33,7 +33,6 @@ package org.opensearch.client; import org.apache.http.client.methods.HttpGet; -import org.opensearch.action.main.TransportMainAction; import org.opensearch.client.core.MainResponse; import java.io.IOException; @@ -63,25 +62,4 @@ public void testInfo() throws IOException { assertTrue(versionMap.get("number").toString().startsWith(info.getVersion().getNumber())); assertEquals(versionMap.get("lucene_version"), info.getVersion().getLuceneVersion()); } - - public void testInfo_overrideResponseVersion() throws IOException { - Request overrideResponseVersionRequest = new Request("PUT", "/_cluster/settings"); - overrideResponseVersionRequest.setOptions(expectWarnings(TransportMainAction.OVERRIDE_MAIN_RESPONSE_VERSION_DEPRECATION_MESSAGE)); - overrideResponseVersionRequest.setJsonEntity("{\"persistent\":{\"compatibility\": {\"override_main_response_version\":true}}}"); - client().performRequest(overrideResponseVersionRequest); - - MainResponse info = highLevelClient().info(RequestOptions.DEFAULT); - assertEquals("7.10.2", info.getVersion().getNumber()); - - // Set back to default version. 
- Request resetResponseVersionRequest = new Request("PUT", "/_cluster/settings"); - resetResponseVersionRequest.setJsonEntity("{\"persistent\":{\"compatibility\": {\"override_main_response_version\":null}}}"); - client().performRequest(resetResponseVersionRequest); - - Map infoAsMap = entityAsMap(adminClient().performRequest(new Request(HttpGet.METHOD_NAME, "/"))); - @SuppressWarnings("unchecked") - Map versionMap = (Map) infoAsMap.get("version"); - info = highLevelClient().info(RequestOptions.DEFAULT); - assertTrue(versionMap.get("number").toString().startsWith(info.getVersion().getNumber())); - } } diff --git a/server/src/main/java/org/opensearch/action/main/MainResponse.java b/server/src/main/java/org/opensearch/action/main/MainResponse.java index bd4be885fa210..1f460e5dfb019 100644 --- a/server/src/main/java/org/opensearch/action/main/MainResponse.java +++ b/server/src/main/java/org/opensearch/action/main/MainResponse.java @@ -55,7 +55,6 @@ public class MainResponse extends ActionResponse implements ToXContentObject { private ClusterName clusterName; private String clusterUuid; private Build build; - private String versionNumber; public static final String TAGLINE = "The OpenSearch Project: https://opensearch.org/"; MainResponse() {} @@ -70,7 +69,6 @@ public class MainResponse extends ActionResponse implements ToXContentObject { if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { in.readBoolean(); } - versionNumber = build.getQualifiedVersion(); } public MainResponse(String nodeName, Version version, ClusterName clusterName, String clusterUuid, Build build) { @@ -79,16 +77,6 @@ public MainResponse(String nodeName, Version version, ClusterName clusterName, S this.clusterName = clusterName; this.clusterUuid = clusterUuid; this.build = build; - this.versionNumber = build.getQualifiedVersion(); - } - - public MainResponse(String nodeName, Version version, ClusterName clusterName, String clusterUuid, Build build, String versionNumber) { - this.nodeName = 
nodeName; - this.version = version; - this.clusterName = clusterName; - this.clusterUuid = clusterUuid; - this.build = build; - this.versionNumber = versionNumber; } public String getNodeName() { @@ -111,18 +99,10 @@ public Build getBuild() { return build; } - public String getVersionNumber() { - return versionNumber; - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(nodeName); - if (out.getVersion().before(Version.V_1_0_0)) { - Version.writeVersion(LegacyESVersion.V_7_10_2, out); - } else { - Version.writeVersion(version, out); - } + Version.writeVersion(version, out); clusterName.writeTo(out); out.writeString(clusterUuid); Build.writeBuild(build, out); @@ -137,11 +117,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field("name", nodeName); builder.field("cluster_name", clusterName.value()); builder.field("cluster_uuid", clusterUuid); - builder.startObject("version"); - if (isCompatibilityModeDisabled()) { - builder.field("distribution", build.getDistribution()); - } - builder.field("number", versionNumber) + builder.startObject("version") + .field("distribution", build.getDistribution()) + .field("number", build.getQualifiedVersion()) .field("build_type", build.type().displayName()) .field("build_hash", build.hash()) .field("build_date", build.date()) @@ -155,12 +133,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - private boolean isCompatibilityModeDisabled() { - // if we are not in compatibility mode (spoofing versionNumber), then - // build.getQualifiedVersion is always used. 
- return build.getQualifiedVersion().equals(versionNumber); - } - private static final ObjectParser PARSER = new ObjectParser<>( MainResponse.class.getName(), true, @@ -189,7 +161,6 @@ private boolean isCompatibilityModeDisabled() { response.version = Version.fromString( ((String) value.get("number")).replace("-SNAPSHOT", "").replaceFirst("-(alpha\\d+|beta\\d+|rc\\d+)", "") ); - response.versionNumber = response.version.toString(); }, (parser, context) -> parser.map(), new ParseField("version")); } diff --git a/server/src/main/java/org/opensearch/action/main/TransportMainAction.java b/server/src/main/java/org/opensearch/action/main/TransportMainAction.java index ef6ebb27c4505..5170b23977b1e 100644 --- a/server/src/main/java/org/opensearch/action/main/TransportMainAction.java +++ b/server/src/main/java/org/opensearch/action/main/TransportMainAction.java @@ -33,7 +33,6 @@ package org.opensearch.action.main; import org.opensearch.Build; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; @@ -41,8 +40,6 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; -import org.opensearch.common.logging.DeprecationLogger; -import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.node.Node; import org.opensearch.tasks.Task; @@ -50,23 +47,8 @@ public class TransportMainAction extends HandledTransportAction { - private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(TransportMainAction.class); - - public static final String OVERRIDE_MAIN_RESPONSE_VERSION_KEY = "compatibility.override_main_response_version"; - - public static final Setting OVERRIDE_MAIN_RESPONSE_VERSION = Setting.boolSetting( - OVERRIDE_MAIN_RESPONSE_VERSION_KEY, - false, - Setting.Property.NodeScope, - 
Setting.Property.Dynamic - ); - - public static final String OVERRIDE_MAIN_RESPONSE_VERSION_DEPRECATION_MESSAGE = "overriding main response version" - + " number will be removed in a future version"; - private final String nodeName; private final ClusterService clusterService; - private volatile String responseVersion; @Inject public TransportMainAction( @@ -78,32 +60,13 @@ public TransportMainAction( super(MainAction.NAME, transportService, actionFilters, MainRequest::new); this.nodeName = Node.NODE_NAME_SETTING.get(settings); this.clusterService = clusterService; - setResponseVersion(OVERRIDE_MAIN_RESPONSE_VERSION.get(settings)); - - clusterService.getClusterSettings().addSettingsUpdateConsumer(OVERRIDE_MAIN_RESPONSE_VERSION, this::setResponseVersion); - } - - private void setResponseVersion(boolean isResponseVersionOverrideEnabled) { - if (isResponseVersionOverrideEnabled) { - DEPRECATION_LOGGER.deprecate(OVERRIDE_MAIN_RESPONSE_VERSION.getKey(), OVERRIDE_MAIN_RESPONSE_VERSION_DEPRECATION_MESSAGE); - this.responseVersion = LegacyESVersion.V_7_10_2.toString(); - } else { - this.responseVersion = Build.CURRENT.getQualifiedVersion(); - } } @Override protected void doExecute(Task task, MainRequest request, ActionListener listener) { ClusterState clusterState = clusterService.state(); listener.onResponse( - new MainResponse( - nodeName, - Version.CURRENT, - clusterState.getClusterName(), - clusterState.metadata().clusterUUID(), - Build.CURRENT, - responseVersion - ) + new MainResponse(nodeName, Version.CURRENT, clusterState.getClusterName(), clusterState.metadata().clusterUUID(), Build.CURRENT) ); } } diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 4cacc3bcf37eb..3cd9b62fc474a 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -32,7 
+32,6 @@ package org.opensearch.common.settings; import org.apache.logging.log4j.LogManager; -import org.opensearch.action.main.TransportMainAction; import org.opensearch.cluster.routing.allocation.decider.NodeLoadAwareAllocationDecider; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; @@ -553,7 +552,6 @@ public void apply(Settings value, Settings current, Settings previous) { FsHealthService.REFRESH_INTERVAL_SETTING, FsHealthService.SLOW_PATH_LOGGING_THRESHOLD_SETTING, FsHealthService.HEALTHY_TIMEOUT_SETTING, - TransportMainAction.OVERRIDE_MAIN_RESPONSE_VERSION, NodeLoadAwareAllocationDecider.CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_PROVISIONED_CAPACITY_SETTING, NodeLoadAwareAllocationDecider.CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_SKEW_FACTOR_SETTING, NodeLoadAwareAllocationDecider.CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_ALLOW_UNASSIGNED_PRIMARIES_SETTING, diff --git a/server/src/test/java/org/opensearch/action/main/MainActionTests.java b/server/src/test/java/org/opensearch/action/main/MainActionTests.java index 3cbb6b3eb29bd..479e36c2e13ce 100644 --- a/server/src/test/java/org/opensearch/action/main/MainActionTests.java +++ b/server/src/test/java/org/opensearch/action/main/MainActionTests.java @@ -32,7 +32,6 @@ package org.opensearch.action.main; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.cluster.ClusterName; @@ -56,7 +55,6 @@ import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -import static org.opensearch.action.main.TransportMainAction.OVERRIDE_MAIN_RESPONSE_VERSION_KEY; public class MainActionTests extends OpenSearchTestCase { @@ -130,45 +128,4 @@ public void onFailure(Exception e) { assertNotNull(responseRef.get()); verify(clusterService, times(1)).state(); } - - public void testMainResponseVersionOverrideEnabledByConfigSetting() { - 
final ClusterName clusterName = new ClusterName("opensearch"); - ClusterState state = ClusterState.builder(clusterName).blocks(mock(ClusterBlocks.class)).build(); - - final ClusterService clusterService = mock(ClusterService.class); - when(clusterService.state()).thenReturn(state); - when(clusterService.getClusterSettings()).thenReturn( - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) - ); - - TransportService transportService = new TransportService( - Settings.EMPTY, - mock(Transport.class), - null, - TransportService.NOOP_TRANSPORT_INTERCEPTOR, - x -> null, - null, - Collections.emptySet() - ); - - final Settings settings = Settings.builder().put("node.name", "my-node").put(OVERRIDE_MAIN_RESPONSE_VERSION_KEY, true).build(); - - TransportMainAction action = new TransportMainAction(settings, transportService, mock(ActionFilters.class), clusterService); - AtomicReference responseRef = new AtomicReference<>(); - action.doExecute(mock(Task.class), new MainRequest(), new ActionListener() { - @Override - public void onResponse(MainResponse mainResponse) { - responseRef.set(mainResponse); - } - - @Override - public void onFailure(Exception e) { - logger.error("unexpected error", e); - } - }); - - final MainResponse mainResponse = responseRef.get(); - assertEquals(LegacyESVersion.V_7_10_2.toString(), mainResponse.getVersionNumber()); - assertWarnings(TransportMainAction.OVERRIDE_MAIN_RESPONSE_VERSION_DEPRECATION_MESSAGE); - } } diff --git a/server/src/test/java/org/opensearch/action/main/MainResponseTests.java b/server/src/test/java/org/opensearch/action/main/MainResponseTests.java index 6e2dbe4399410..b08f08a6d16bf 100644 --- a/server/src/test/java/org/opensearch/action/main/MainResponseTests.java +++ b/server/src/test/java/org/opensearch/action/main/MainResponseTests.java @@ -33,7 +33,6 @@ package org.opensearch.action.main; import org.opensearch.Build; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import 
org.opensearch.cluster.ClusterName; import org.opensearch.common.Strings; @@ -138,22 +137,6 @@ public void testToXContent() throws IOException { ); } - public void toXContent_overrideMainResponseVersion() throws IOException { - String responseVersion = LegacyESVersion.V_7_10_2.toString(); - MainResponse response = new MainResponse( - "nodeName", - Version.CURRENT, - new ClusterName("clusterName"), - randomAlphaOfLengthBetween(10, 20), - Build.CURRENT, - responseVersion - ); - XContentBuilder builder = XContentFactory.jsonBuilder(); - response.toXContent(builder, ToXContent.EMPTY_PARAMS); - assertTrue(Strings.toString(builder).contains("\"number\":\"" + responseVersion + "\",")); - assertFalse(Strings.toString(builder).contains("\"distribution\":\"" + Build.CURRENT.getDistribution() + "\",")); - } - @Override protected MainResponse mutateInstance(MainResponse mutateInstance) { String clusterUuid = mutateInstance.getClusterUuid(); From 0801a9c18a53aabf14b9925e3e661589ea128c31 Mon Sep 17 00:00:00 2001 From: Poojita Raj Date: Thu, 21 Apr 2022 14:19:57 -0700 Subject: [PATCH 112/653] Add release notes for 1.3.1 (#3029) Signed-off-by: Poojita Raj --- .../opensearch.release-notes-1.3.1.md | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 release-notes/opensearch.release-notes-1.3.1.md diff --git a/release-notes/opensearch.release-notes-1.3.1.md b/release-notes/opensearch.release-notes-1.3.1.md new file mode 100644 index 0000000000000..04e2933303fc2 --- /dev/null +++ b/release-notes/opensearch.release-notes-1.3.1.md @@ -0,0 +1,21 @@ +## Version 1.3.1 Release Notes + +* __Exclude man page symlink in distribution (#2602)__ + + [Andrew Ross](mailto:andrross@amazon.com) - Fri, 25 Mar 2022 18:36:36 -0400 + + This is a short-term solution to unblock the build process for the 1.3 + + release. A tool used in that process (cpio) is failing on a symlink in + the JDK + man pages, so this is a hack to just remove that symlink. 
See + issue #2517 for + more details. + Signed-off-by: Andrew Ross <andrross@amazon.com> + +* __Bump the version to 1.3.1 (#2509)__ + + [Zelin Hao](mailto:87548827+zelinh@users.noreply.github.com) - Mon, 21 Mar 2022 10:30:00 -0400 + + + Signed-off-by: Zelin Hao <zelinhao@amazon.com> From 0cbd47c799b602c7bb8a68a1dea0d6677c564733 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Thu, 21 Apr 2022 17:21:51 -0400 Subject: [PATCH 113/653] Remove JavaVersion in favour of standard Runtime.Version (java-version-checker) (#3027) Signed-off-by: Andriy Redko --- .../java_version_checker/JavaVersion.java | 83 ------------------- .../JavaVersionChecker.java | 17 ++-- .../tools/launchers/JvmErgonomics.java | 14 ---- .../tools/launchers/JvmOptionsParser.java | 4 +- .../tools/launchers/SystemJvmOptions.java | 12 +-- .../tools/launchers/JvmErgonomicsTests.java | 6 -- 6 files changed, 9 insertions(+), 127 deletions(-) delete mode 100644 distribution/tools/java-version-checker/src/main/java/org/opensearch/tools/java_version_checker/JavaVersion.java diff --git a/distribution/tools/java-version-checker/src/main/java/org/opensearch/tools/java_version_checker/JavaVersion.java b/distribution/tools/java-version-checker/src/main/java/org/opensearch/tools/java_version_checker/JavaVersion.java deleted file mode 100644 index 7873f29fdff69..0000000000000 --- a/distribution/tools/java-version-checker/src/main/java/org/opensearch/tools/java_version_checker/JavaVersion.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.tools.java_version_checker; - -import java.util.ArrayList; -import java.util.List; -import java.util.Objects; - -public class JavaVersion { - - public static final List CURRENT = parse(System.getProperty("java.specification.version")); - public static final List JAVA_8 = parse("1.8"); - public static final List JAVA_11 = parse("11"); - - static List parse(final String value) { - if (!value.matches("^0*[0-9]+(\\.[0-9]+)*$")) { - throw new IllegalArgumentException(value); - } - - final List version = new ArrayList(); - final String[] components = value.split("\\."); - for (final String component : components) { - version.add(Integer.valueOf(component)); - } - return version; - } - - public static int majorVersion(final List javaVersion) { - Objects.requireNonNull(javaVersion); - if (javaVersion.get(0) > 1) { - return javaVersion.get(0); - } else { - return javaVersion.get(1); - } - } - - static int compare(final List left, final List right) { - // lexicographically compare two lists, treating missing entries as zeros - final int len = Math.max(left.size(), right.size()); - for (int i = 0; i < len; i++) { - final int l = (i < left.size()) ? left.get(i) : 0; - final int r = (i < right.size()) ? 
right.get(i) : 0; - if (l < r) { - return -1; - } - if (r < l) { - return 1; - } - } - return 0; - } - -} diff --git a/distribution/tools/java-version-checker/src/main/java/org/opensearch/tools/java_version_checker/JavaVersionChecker.java b/distribution/tools/java-version-checker/src/main/java/org/opensearch/tools/java_version_checker/JavaVersionChecker.java index e8ff5f3e6f3f2..eb5c7cf1592e7 100644 --- a/distribution/tools/java-version-checker/src/main/java/org/opensearch/tools/java_version_checker/JavaVersionChecker.java +++ b/distribution/tools/java-version-checker/src/main/java/org/opensearch/tools/java_version_checker/JavaVersionChecker.java @@ -32,18 +32,19 @@ package org.opensearch.tools.java_version_checker; +import java.lang.Runtime.Version; import java.util.Arrays; import java.util.Locale; /** - * Simple program that checks if the runtime Java version is at least 1.8. + * Simple program that checks if the runtime Java version is at least 11 */ final class JavaVersionChecker { private JavaVersionChecker() {} /** - * The main entry point. The exit code is 0 if the Java version is at least 1.8, otherwise the exit code is 1. + * The main entry point. The exit code is 0 if the Java version is at least 11, otherwise the exit code is 1. 
* * @param args the args to the program which are rejected if not empty */ @@ -52,23 +53,15 @@ public static void main(final String[] args) { if (args.length != 0) { throw new IllegalArgumentException("expected zero arguments but was " + Arrays.toString(args)); } - if (JavaVersion.compare(JavaVersion.CURRENT, JavaVersion.JAVA_8) < 0) { + if (Runtime.version().compareTo(Version.parse("11")) < 0) { final String message = String.format( Locale.ROOT, - "the minimum required Java version is 8; your Java version from [%s] does not meet this requirement", + "OpenSearch requires Java 11; your Java version from [%s] does not meet this requirement", System.getProperty("java.home") ); errPrintln(message); exit(1); } - if (JavaVersion.compare(JavaVersion.CURRENT, JavaVersion.JAVA_11) < 0) { - final String message = String.format( - Locale.ROOT, - "future versions of OpenSearch will require Java 11; your Java version from [%s] does not meet this requirement", - System.getProperty("java.home") - ); - errPrintln(message); - } exit(0); } diff --git a/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/JvmErgonomics.java b/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/JvmErgonomics.java index 053d892d0ec2f..cd4bea689f776 100644 --- a/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/JvmErgonomics.java +++ b/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/JvmErgonomics.java @@ -32,8 +32,6 @@ package org.opensearch.tools.launchers; -import org.opensearch.tools.java_version_checker.JavaVersion; - import java.io.BufferedReader; import java.io.IOException; import java.io.InputStream; @@ -73,19 +71,7 @@ static List choose(final List userDefinedJvmOptions) throws Inte final long heapSize = extractHeapSize(finalJvmOptions); final long maxDirectMemorySize = extractMaxDirectMemorySize(finalJvmOptions); - if (System.getProperty("os.name").startsWith("Windows") && 
JavaVersion.majorVersion(JavaVersion.CURRENT) == 8) { - Launchers.errPrintln("Warning: with JDK 8 on Windows, OpenSearch may be unable to derive correct"); - Launchers.errPrintln(" ergonomic settings due to a JDK issue (JDK-8074459). Please use a newer"); - Launchers.errPrintln(" version of Java."); - } - if (maxDirectMemorySize == 0 && userDefinedJvmOptions.stream().noneMatch(s -> s.startsWith("-XX:MaxDirectMemorySize"))) { - - if (System.getProperty("os.name").startsWith("Windows") && JavaVersion.majorVersion(JavaVersion.CURRENT) == 8) { - Launchers.errPrintln("Warning: MaxDirectMemorySize may have been miscalculated due to JDK-8074459."); - Launchers.errPrintln(" Please use a newer version of Java or set MaxDirectMemorySize explicitly."); - } - ergonomicChoices.add("-XX:MaxDirectMemorySize=" + heapSize / 2); } return ergonomicChoices; diff --git a/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/JvmOptionsParser.java b/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/JvmOptionsParser.java index 7703efdc56986..533d1f7e782ba 100644 --- a/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/JvmOptionsParser.java +++ b/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/JvmOptionsParser.java @@ -32,8 +32,6 @@ package org.opensearch.tools.launchers; -import org.opensearch.tools.java_version_checker.JavaVersion; - import java.io.BufferedReader; import java.io.IOException; import java.io.InputStream; @@ -183,7 +181,7 @@ List readJvmOptionsFiles(final Path config) throws IOException, JvmOptio Reader reader = new InputStreamReader(is, StandardCharsets.UTF_8); BufferedReader br = new BufferedReader(reader) ) { - parse(JavaVersion.majorVersion(JavaVersion.CURRENT), br, jvmOptions::add, invalidLines::put); + parse(Runtime.version().feature(), br, jvmOptions::add, invalidLines::put); } if (invalidLines.isEmpty() == false) { throw new JvmOptionsFileParserException(jvmOptionsFile, 
invalidLines); diff --git a/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java b/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java index 738d57951c4ef..fc613ccdaae68 100644 --- a/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java +++ b/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java @@ -32,8 +32,6 @@ package org.opensearch.tools.launchers; -import org.opensearch.tools.java_version_checker.JavaVersion; - import java.util.Arrays; import java.util.Collections; import java.util.List; @@ -86,7 +84,7 @@ static List systemJvmOptions() { } private static String maybeShowCodeDetailsInExceptionMessages() { - if (JavaVersion.majorVersion(JavaVersion.CURRENT) >= 14) { + if (Runtime.version().feature() >= 14) { return "-XX:+ShowCodeDetailsInExceptionMessages"; } else { return ""; @@ -101,14 +99,10 @@ private static String javaLocaleProviders() { * * Due to internationalization enhancements in JDK 9 OpenSearch need to set the provider to COMPAT otherwise time/date * parsing will break in an incompatible way for some date patterns and locales. 
- * //TODO COMPAT will be deprecated in jdk14 https://bugs.openjdk.java.net/browse/JDK-8232906 + * //TODO COMPAT will be deprecated in at some point, see please https://bugs.openjdk.java.net/browse/JDK-8232906 * See also: documentation in server/org.opensearch.common.time.IsoCalendarDataProvider */ - if (JavaVersion.majorVersion(JavaVersion.CURRENT) == 8) { - return "-Djava.locale.providers=SPI,JRE"; - } else { - return "-Djava.locale.providers=SPI,COMPAT"; - } + return "-Djava.locale.providers=SPI,COMPAT"; } } diff --git a/distribution/tools/launchers/src/test/java/org/opensearch/tools/launchers/JvmErgonomicsTests.java b/distribution/tools/launchers/src/test/java/org/opensearch/tools/launchers/JvmErgonomicsTests.java index ffdf2c2898032..5a8c9841aa0fe 100644 --- a/distribution/tools/launchers/src/test/java/org/opensearch/tools/launchers/JvmErgonomicsTests.java +++ b/distribution/tools/launchers/src/test/java/org/opensearch/tools/launchers/JvmErgonomicsTests.java @@ -32,8 +32,6 @@ package org.opensearch.tools.launchers; -import org.opensearch.tools.java_version_checker.JavaVersion; - import java.io.IOException; import java.util.Arrays; import java.util.Collections; @@ -69,8 +67,6 @@ public void testExtractValidHeapSizeUsingMaxHeapSize() throws InterruptedExcepti } public void testExtractValidHeapSizeNoOptionPresent() throws InterruptedException, IOException { - // Muted for jdk8/Windows, see: https://github.com/elastic/elasticsearch/issues/47384 - assumeFalse(System.getProperty("os.name").startsWith("Windows") && JavaVersion.majorVersion(JavaVersion.CURRENT) == 8); assertThat(JvmErgonomics.extractHeapSize(JvmErgonomics.finalJvmOptions(Collections.emptyList())), greaterThan(0L)); } @@ -141,8 +137,6 @@ public void testExtractNoSystemProperties() { } public void testMaxDirectMemorySizeChoice() throws InterruptedException, IOException { - // Muted for jdk8/Windows, see: https://github.com/elastic/elasticsearch/issues/47384 - 
assumeFalse(System.getProperty("os.name").startsWith("Windows") && JavaVersion.majorVersion(JavaVersion.CURRENT) == 8); final Map heapMaxDirectMemorySize = new HashMap<>(); heapMaxDirectMemorySize.put("64M", Long.toString((64L << 20) / 2)); heapMaxDirectMemorySize.put("512M", Long.toString((512L << 20) / 2)); From 3d49ccead2fed77a747534c5e50096b5578981b3 Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Thu, 21 Apr 2022 16:36:01 -0500 Subject: [PATCH 114/653] [Upgrade] Lucene-9.2-snapshot (#2924) --- buildSrc/version.properties | 2 +- .../lucene-expressions-9.1.0.jar.sha1 | 1 - ...xpressions-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 + .../lucene-analysis-icu-9.1.0.jar.sha1 | 1 - ...alysis-icu-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 + .../lucene-analysis-kuromoji-9.1.0.jar.sha1 | 1 - ...s-kuromoji-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 + .../lucene-analysis-nori-9.1.0.jar.sha1 | 1 - ...lysis-nori-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 + .../lucene-analysis-phonetic-9.1.0.jar.sha1 | 1 - ...s-phonetic-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 + .../lucene-analysis-smartcn-9.1.0.jar.sha1 | 1 - ...is-smartcn-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 + .../lucene-analysis-stempel-9.1.0.jar.sha1 | 1 - ...is-stempel-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 + .../lucene-analysis-morfologik-9.1.0.jar.sha1 | 1 - ...morfologik-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 + .../search/query/QueryProfilePhaseTests.java | 71 ++++++++++++++----- .../lucene-analysis-common-9.1.0.jar.sha1 | 1 - ...sis-common-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 + .../lucene-backward-codecs-9.1.0.jar.sha1 | 1 - ...ard-codecs-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 + server/licenses/lucene-core-9.1.0.jar.sha1 | 1 - ...ucene-core-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 + .../licenses/lucene-grouping-9.1.0.jar.sha1 | 1 - ...e-grouping-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 + .../lucene-highlighter-9.1.0.jar.sha1 | 1 - ...ighlighter-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 + server/licenses/lucene-join-9.1.0.jar.sha1 | 1 - 
...ucene-join-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 + server/licenses/lucene-memory-9.1.0.jar.sha1 | 1 - ...ene-memory-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 + server/licenses/lucene-misc-9.1.0.jar.sha1 | 1 - ...ucene-misc-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 + server/licenses/lucene-queries-9.1.0.jar.sha1 | 1 - ...ne-queries-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 + .../lucene-queryparser-9.1.0.jar.sha1 | 1 - ...ueryparser-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 + server/licenses/lucene-sandbox-9.1.0.jar.sha1 | 1 - ...ne-sandbox-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 + .../lucene-spatial-extras-9.1.0.jar.sha1 | 1 - ...ial-extras-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 + .../licenses/lucene-spatial3d-9.1.0.jar.sha1 | 1 - ...-spatial3d-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 + server/licenses/lucene-suggest-9.1.0.jar.sha1 | 1 - ...ne-suggest-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 + .../src/main/java/org/opensearch/Version.java | 2 +- .../common/lucene/uid/VersionsTests.java | 2 +- .../search/query/QueryProfilePhaseTests.java | 67 ++++++++++++----- 49 files changed, 125 insertions(+), 63 deletions(-) delete mode 100644 modules/lang-expression/licenses/lucene-expressions-9.1.0.jar.sha1 create mode 100644 modules/lang-expression/licenses/lucene-expressions-9.2.0-snapshot-f4f1f70.jar.sha1 delete mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.1.0.jar.sha1 create mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.2.0-snapshot-f4f1f70.jar.sha1 delete mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.1.0.jar.sha1 create mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.2.0-snapshot-f4f1f70.jar.sha1 delete mode 100644 plugins/analysis-nori/licenses/lucene-analysis-nori-9.1.0.jar.sha1 create mode 100644 plugins/analysis-nori/licenses/lucene-analysis-nori-9.2.0-snapshot-f4f1f70.jar.sha1 delete mode 100644 plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.1.0.jar.sha1 create mode 100644 
plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.2.0-snapshot-f4f1f70.jar.sha1 delete mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.1.0.jar.sha1 create mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.2.0-snapshot-f4f1f70.jar.sha1 delete mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.1.0.jar.sha1 create mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.2.0-snapshot-f4f1f70.jar.sha1 delete mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.1.0.jar.sha1 create mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.2.0-snapshot-f4f1f70.jar.sha1 delete mode 100644 server/licenses/lucene-analysis-common-9.1.0.jar.sha1 create mode 100644 server/licenses/lucene-analysis-common-9.2.0-snapshot-f4f1f70.jar.sha1 delete mode 100644 server/licenses/lucene-backward-codecs-9.1.0.jar.sha1 create mode 100644 server/licenses/lucene-backward-codecs-9.2.0-snapshot-f4f1f70.jar.sha1 delete mode 100644 server/licenses/lucene-core-9.1.0.jar.sha1 create mode 100644 server/licenses/lucene-core-9.2.0-snapshot-f4f1f70.jar.sha1 delete mode 100644 server/licenses/lucene-grouping-9.1.0.jar.sha1 create mode 100644 server/licenses/lucene-grouping-9.2.0-snapshot-f4f1f70.jar.sha1 delete mode 100644 server/licenses/lucene-highlighter-9.1.0.jar.sha1 create mode 100644 server/licenses/lucene-highlighter-9.2.0-snapshot-f4f1f70.jar.sha1 delete mode 100644 server/licenses/lucene-join-9.1.0.jar.sha1 create mode 100644 server/licenses/lucene-join-9.2.0-snapshot-f4f1f70.jar.sha1 delete mode 100644 server/licenses/lucene-memory-9.1.0.jar.sha1 create mode 100644 server/licenses/lucene-memory-9.2.0-snapshot-f4f1f70.jar.sha1 delete mode 100644 server/licenses/lucene-misc-9.1.0.jar.sha1 create mode 100644 server/licenses/lucene-misc-9.2.0-snapshot-f4f1f70.jar.sha1 delete mode 100644 server/licenses/lucene-queries-9.1.0.jar.sha1 create mode 100644 
server/licenses/lucene-queries-9.2.0-snapshot-f4f1f70.jar.sha1 delete mode 100644 server/licenses/lucene-queryparser-9.1.0.jar.sha1 create mode 100644 server/licenses/lucene-queryparser-9.2.0-snapshot-f4f1f70.jar.sha1 delete mode 100644 server/licenses/lucene-sandbox-9.1.0.jar.sha1 create mode 100644 server/licenses/lucene-sandbox-9.2.0-snapshot-f4f1f70.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-extras-9.1.0.jar.sha1 create mode 100644 server/licenses/lucene-spatial-extras-9.2.0-snapshot-f4f1f70.jar.sha1 delete mode 100644 server/licenses/lucene-spatial3d-9.1.0.jar.sha1 create mode 100644 server/licenses/lucene-spatial3d-9.2.0-snapshot-f4f1f70.jar.sha1 delete mode 100644 server/licenses/lucene-suggest-9.1.0.jar.sha1 create mode 100644 server/licenses/lucene-suggest-9.2.0-snapshot-f4f1f70.jar.sha1 diff --git a/buildSrc/version.properties b/buildSrc/version.properties index d3499b0df599b..3ade56a6cab21 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ opensearch = 3.0.0 -lucene = 9.1.0 +lucene = 9.2.0-snapshot-f4f1f70 bundled_jdk_vendor = adoptium bundled_jdk = 17.0.2+8 diff --git a/modules/lang-expression/licenses/lucene-expressions-9.1.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.1.0.jar.sha1 deleted file mode 100644 index c825e197188fc..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-9.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2711abb758d101fc738c35a6867ee7559da5308b \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-9.2.0-snapshot-f4f1f70.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.2.0-snapshot-f4f1f70.jar.sha1 new file mode 100644 index 0000000000000..c6f95e2bdecc7 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-9.2.0-snapshot-f4f1f70.jar.sha1 @@ -0,0 +1 @@ +f2a8008e74589f77f1d3da305cf58c88ee01d1c1 \ No newline at end of file diff --git 
a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.1.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.1.0.jar.sha1 deleted file mode 100644 index b7733cfa9a00a..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e9b429da553560fa0c363ffc04c774f957c56e14 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.2.0-snapshot-f4f1f70.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.2.0-snapshot-f4f1f70.jar.sha1 new file mode 100644 index 0000000000000..616925e9865ed --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.2.0-snapshot-f4f1f70.jar.sha1 @@ -0,0 +1 @@ +f9569365e80897f1a9161254d5d2f44a44f95db8 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.1.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.1.0.jar.sha1 deleted file mode 100644 index f5b818a206e7a..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b247f8a877237b4663e4ab7d86fae21c68a58ea5 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.2.0-snapshot-f4f1f70.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.2.0-snapshot-f4f1f70.jar.sha1 new file mode 100644 index 0000000000000..9ddb30158c6b2 --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.2.0-snapshot-f4f1f70.jar.sha1 @@ -0,0 +1 @@ +35c14b39ff522dd6665e74a873035827b865075e \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.1.0.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.1.0.jar.sha1 deleted file mode 100644 index 4d22255d10316..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -30e24b42fb0440911e702a531f4373bf397eb8c6 \ No 
newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.2.0-snapshot-f4f1f70.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.2.0-snapshot-f4f1f70.jar.sha1 new file mode 100644 index 0000000000000..3326a5d35baf3 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.2.0-snapshot-f4f1f70.jar.sha1 @@ -0,0 +1 @@ +a84218a1ea0d5c52d6591d417061518b8a8be4e4 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.1.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.1.0.jar.sha1 deleted file mode 100644 index a0607e6158cdd..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -18a321d93836ea2856a5302d192e9dc99c647c6e \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.2.0-snapshot-f4f1f70.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.2.0-snapshot-f4f1f70.jar.sha1 new file mode 100644 index 0000000000000..d822d33da9801 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.2.0-snapshot-f4f1f70.jar.sha1 @@ -0,0 +1 @@ +057bbd20b15899844b23d2cf034a167b4fe581f0 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.1.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.1.0.jar.sha1 deleted file mode 100644 index bff959139a86c..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -41c847f39a15bb8495be8c9d8a098974be15f74b \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.2.0-snapshot-f4f1f70.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.2.0-snapshot-f4f1f70.jar.sha1 new file mode 100644 index 0000000000000..55ef27aaff865 --- /dev/null +++ 
b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.2.0-snapshot-f4f1f70.jar.sha1 @@ -0,0 +1 @@ +6a9a8d49e87b6999560a131e16234e46f21e6b42 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.1.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.1.0.jar.sha1 deleted file mode 100644 index 39d25d7872ea9..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ee7995231b181aa0a01f5aef8775562e269f5ef7 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.2.0-snapshot-f4f1f70.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.2.0-snapshot-f4f1f70.jar.sha1 new file mode 100644 index 0000000000000..ca0f275f4772d --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.2.0-snapshot-f4f1f70.jar.sha1 @@ -0,0 +1 @@ +fcad3608779e0b3ab8703903b9d28cdc32767d60 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.1.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.1.0.jar.sha1 deleted file mode 100644 index 9f07f122205d9..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -575c458431396baa7f01a546173807f27b12a087 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.2.0-snapshot-f4f1f70.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.2.0-snapshot-f4f1f70.jar.sha1 new file mode 100644 index 0000000000000..d0f2904e43195 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.2.0-snapshot-f4f1f70.jar.sha1 @@ -0,0 +1 @@ +bd28479bdf3c076bc89a0d30864188d444410267 \ No newline at end of file diff --git 
a/sandbox/plugins/concurrent-search/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java b/sandbox/plugins/concurrent-search/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java index d2cb77f529793..36c20d029e997 100644 --- a/sandbox/plugins/concurrent-search/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java +++ b/sandbox/plugins/concurrent-search/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java @@ -153,7 +153,9 @@ public void testPostFilterDisablesCountOptimization() throws Exception { QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher); assertEquals(1, context.queryResult().topDocs().topDocs.totalHits.value); - assertProfileData(context, "MatchAllDocsQuery", query -> { + // IndexSearcher#rewrite optimizes by rewriting non-scoring queries to ConstantScoreQuery + // see: https://github.com/apache/lucene/pull/672 + assertProfileData(context, "ConstantScoreQuery", query -> { assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); @@ -183,7 +185,9 @@ public void testPostFilterDisablesCountOptimization() throws Exception { assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); }, (query) -> { - assertThat(query.getQueryName(), equalTo("MatchAllDocsQuery")); + // IndexSearcher#rewrite optimizes by rewriting non-scoring queries to ConstantScoreQuery + // see: https://github.com/apache/lucene/pull/672 + assertThat(query.getQueryName(), equalTo("ConstantScoreQuery")); assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); @@ -265,7 +269,9 @@ public void 
testMinScoreDisablesCountOptimization() throws Exception { context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher); assertEquals(1, context.queryResult().topDocs().topDocs.totalHits.value); - assertProfileData(context, "MatchAllDocsQuery", query -> { + // IndexSearcher#rewrite optimizes by rewriting non-scoring queries to ConstantScoreQuery + // see: https://github.com/apache/lucene/pull/672 + assertProfileData(context, "ConstantScoreQuery", query -> { assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); @@ -413,7 +419,9 @@ public void testTerminateAfterEarlyTermination() throws Exception { assertTrue(context.queryResult().terminatedEarly()); assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); - assertProfileData(context, "MatchAllDocsQuery", query -> { + // IndexSearcher#rewrite optimizes by rewriting non-scoring queries to ConstantScoreQuery + // see: https://github.com/apache/lucene/pull/672 + assertProfileData(context, "ConstantScoreQuery", query -> { assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); @@ -489,27 +497,40 @@ public void testTerminateAfterEarlyTermination() throws Exception { assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); - assertProfileData(context, "BooleanQuery", query -> { + // IndexSearcher#rewrite optimizes by rewriting non-scoring queries to ConstantScoreQuery + // see: https://github.com/apache/lucene/pull/672 + 
assertProfileData(context, "ConstantScoreQuery", query -> { assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); - assertThat(query.getProfiledChildren(), hasSize(2)); - assertThat(query.getProfiledChildren().get(0).getQueryName(), equalTo("TermQuery")); + // rewritten as a ConstantScoreQuery wrapping the original BooleanQuery + // see: https://github.com/apache/lucene/pull/672 + assertThat(query.getProfiledChildren(), hasSize(1)); + assertThat(query.getProfiledChildren().get(0).getQueryName(), equalTo("BooleanQuery")); assertThat(query.getProfiledChildren().get(0).getTime(), greaterThan(0L)); assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight"), greaterThan(0L)); assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight_count"), equalTo(1L)); assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score"), equalTo(0L)); assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score_count"), equalTo(0L)); - assertThat(query.getProfiledChildren().get(1).getQueryName(), equalTo("TermQuery")); - assertThat(query.getProfiledChildren().get(1).getTime(), greaterThan(0L)); - assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight"), greaterThan(0L)); - assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight_count"), equalTo(1L)); - assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("score"), equalTo(0L)); - assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("score_count"), equalTo(0L)); + List children = query.getProfiledChildren().get(0).getProfiledChildren(); + assertThat(children, hasSize(2)); + 
assertThat(children.get(0).getQueryName(), equalTo("TermQuery")); + assertThat(children.get(0).getTime(), greaterThan(0L)); + assertThat(children.get(0).getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(children.get(0).getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + assertThat(children.get(0).getTimeBreakdown().get("score"), equalTo(0L)); + assertThat(children.get(0).getTimeBreakdown().get("score_count"), equalTo(0L)); + + assertThat(children.get(1).getQueryName(), equalTo("TermQuery")); + assertThat(children.get(1).getTime(), greaterThan(0L)); + assertThat(children.get(1).getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(children.get(1).getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + assertThat(children.get(1).getTimeBreakdown().get("score"), equalTo(0L)); + assertThat(children.get(1).getTimeBreakdown().get("score_count"), equalTo(0L)); }, collector -> { assertThat(collector.getReason(), equalTo("search_terminate_after_count")); assertThat(collector.getTime(), greaterThan(0L)); @@ -597,7 +618,9 @@ public void testIndexSortingEarlyTermination() throws Exception { assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class)); FieldDoc fieldDoc = (FieldDoc) context.queryResult().topDocs().topDocs.scoreDocs[0]; assertThat(fieldDoc.fields[0], equalTo(1)); - assertProfileData(context, "MatchAllDocsQuery", query -> { + // IndexSearcher#rewrite optimizes by rewriting non-scoring queries to ConstantScoreQuery + // see: https://github.com/apache/lucene/pull/672 + assertProfileData(context, "ConstantScoreQuery", query -> { assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); @@ -631,7 +654,9 @@ public void testIndexSortingEarlyTermination() throws Exception { assertThat(query.getTimeBreakdown().get("create_weight"), 
greaterThan(0L)); assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); }, (query) -> { - assertThat(query.getQueryName(), equalTo("MatchAllDocsQuery")); + // IndexSearcher#rewrite optimizes by rewriting non-scoring queries to ConstantScoreQuery + // see: https://github.com/apache/lucene/pull/672 + assertThat(query.getQueryName(), equalTo("ConstantScoreQuery")); assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); @@ -649,7 +674,9 @@ public void testIndexSortingEarlyTermination() throws Exception { assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class)); assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2))); - assertProfileData(context, "MatchAllDocsQuery", query -> { + // IndexSearcher#rewrite optimizes by rewriting non-scoring queries to ConstantScoreQuery + // see: https://github.com/apache/lucene/pull/672 + assertProfileData(context, "ConstantScoreQuery", query -> { assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); @@ -666,7 +693,9 @@ public void testIndexSortingEarlyTermination() throws Exception { assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class)); assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2))); - assertProfileData(context, "MatchAllDocsQuery", query -> { + // IndexSearcher#rewrite optimizes by rewriting non-scoring queries to ConstantScoreQuery + // see: https://github.com/apache/lucene/pull/672 + assertProfileData(context, "ConstantScoreQuery", query -> { 
assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); @@ -721,7 +750,9 @@ public void testIndexSortScrollOptimization() throws Exception { assertNull(context.queryResult().terminatedEarly()); assertThat(context.terminateAfter(), equalTo(0)); assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs)); - assertProfileData(context, "MatchAllDocsQuery", query -> { + // IndexSearcher#rewrite optimizes by rewriting non-scoring queries to ConstantScoreQuery + // see: https://github.com/apache/lucene/pull/672 + assertProfileData(context, "ConstantScoreQuery", query -> { assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); @@ -829,7 +860,9 @@ public void testDisableTopScoreCollection() throws Exception { assertEquals(numDocs / 2, context.queryResult().topDocs().topDocs.totalHits.value); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(3)); assertEquals(context.queryResult().topDocs().topDocs.totalHits.relation, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO); - assertProfileData(context, "SpanNearQuery", query -> { + // IndexSearcher#rewrite optimizes by rewriting non-scoring queries to ConstantScoreQuery + // see: https://github.com/apache/lucene/pull/672 + assertProfileData(context, "ConstantScoreQuery", query -> { assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); diff --git a/server/licenses/lucene-analysis-common-9.1.0.jar.sha1 b/server/licenses/lucene-analysis-common-9.1.0.jar.sha1 deleted file mode 100644 index 4d2a9cf9451cc..0000000000000 --- a/server/licenses/lucene-analysis-common-9.1.0.jar.sha1 +++ 
/dev/null @@ -1 +0,0 @@ -240e3997fb139ff001e022124c89b686b5a8498d \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.2.0-snapshot-f4f1f70.jar.sha1 b/server/licenses/lucene-analysis-common-9.2.0-snapshot-f4f1f70.jar.sha1 new file mode 100644 index 0000000000000..5c667d1aec446 --- /dev/null +++ b/server/licenses/lucene-analysis-common-9.2.0-snapshot-f4f1f70.jar.sha1 @@ -0,0 +1 @@ +1f7c4b91c8ef9f65e85c5190080b3f796076f355 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.1.0.jar.sha1 b/server/licenses/lucene-backward-codecs-9.1.0.jar.sha1 deleted file mode 100644 index b6df56db28cd6..0000000000000 --- a/server/licenses/lucene-backward-codecs-9.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -de23bdacb09e8b39cbe876ff79c7a5b2ecc1faa6 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.2.0-snapshot-f4f1f70.jar.sha1 b/server/licenses/lucene-backward-codecs-9.2.0-snapshot-f4f1f70.jar.sha1 new file mode 100644 index 0000000000000..b8e8fc5a9e529 --- /dev/null +++ b/server/licenses/lucene-backward-codecs-9.2.0-snapshot-f4f1f70.jar.sha1 @@ -0,0 +1 @@ +e3ee195405dc0cb249fe2eb3f3a6a848c4686645 \ No newline at end of file diff --git a/server/licenses/lucene-core-9.1.0.jar.sha1 b/server/licenses/lucene-core-9.1.0.jar.sha1 deleted file mode 100644 index 45e7ae47dae3e..0000000000000 --- a/server/licenses/lucene-core-9.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0375603f1dacd8266526404faf0088a2ac8ec2ff \ No newline at end of file diff --git a/server/licenses/lucene-core-9.2.0-snapshot-f4f1f70.jar.sha1 b/server/licenses/lucene-core-9.2.0-snapshot-f4f1f70.jar.sha1 new file mode 100644 index 0000000000000..dd6af54584cc3 --- /dev/null +++ b/server/licenses/lucene-core-9.2.0-snapshot-f4f1f70.jar.sha1 @@ -0,0 +1 @@ +f1cb45d20f7f23c420c56a94e9153e96bfdd6e1f \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.1.0.jar.sha1 b/server/licenses/lucene-grouping-9.1.0.jar.sha1 deleted file 
mode 100644 index be423fdde04f7..0000000000000 --- a/server/licenses/lucene-grouping-9.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -703308505e62fa7dcb0bf64fdb6d95d335941bdc \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.2.0-snapshot-f4f1f70.jar.sha1 b/server/licenses/lucene-grouping-9.2.0-snapshot-f4f1f70.jar.sha1 new file mode 100644 index 0000000000000..3f8cede90a0a7 --- /dev/null +++ b/server/licenses/lucene-grouping-9.2.0-snapshot-f4f1f70.jar.sha1 @@ -0,0 +1 @@ +29052ac8f5255c8df2bb1d3d0da94e112c181679 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.1.0.jar.sha1 b/server/licenses/lucene-highlighter-9.1.0.jar.sha1 deleted file mode 100644 index c130c27ed4c37..0000000000000 --- a/server/licenses/lucene-highlighter-9.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7f1925f6ef985000399a277ca17b8f67d3056838 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.2.0-snapshot-f4f1f70.jar.sha1 b/server/licenses/lucene-highlighter-9.2.0-snapshot-f4f1f70.jar.sha1 new file mode 100644 index 0000000000000..161099cac2dda --- /dev/null +++ b/server/licenses/lucene-highlighter-9.2.0-snapshot-f4f1f70.jar.sha1 @@ -0,0 +1 @@ +d710569c00d561c70d8290de4c4c15fe9735f94f \ No newline at end of file diff --git a/server/licenses/lucene-join-9.1.0.jar.sha1 b/server/licenses/lucene-join-9.1.0.jar.sha1 deleted file mode 100644 index b678051ddaf26..0000000000000 --- a/server/licenses/lucene-join-9.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e7d39da8e623c99ee8da8bcc0185b2d908aca4b3 \ No newline at end of file diff --git a/server/licenses/lucene-join-9.2.0-snapshot-f4f1f70.jar.sha1 b/server/licenses/lucene-join-9.2.0-snapshot-f4f1f70.jar.sha1 new file mode 100644 index 0000000000000..5a256ed582f53 --- /dev/null +++ b/server/licenses/lucene-join-9.2.0-snapshot-f4f1f70.jar.sha1 @@ -0,0 +1 @@ +0f3081b32664d8ca6318e69dd054befb5f9a334b \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.1.0.jar.sha1 
b/server/licenses/lucene-memory-9.1.0.jar.sha1 deleted file mode 100644 index a07b052e9c332..0000000000000 --- a/server/licenses/lucene-memory-9.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -209166fd48dae3261ccf26990fe600332b8fb373 \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.2.0-snapshot-f4f1f70.jar.sha1 b/server/licenses/lucene-memory-9.2.0-snapshot-f4f1f70.jar.sha1 new file mode 100644 index 0000000000000..9504a0e4b2cf8 --- /dev/null +++ b/server/licenses/lucene-memory-9.2.0-snapshot-f4f1f70.jar.sha1 @@ -0,0 +1 @@ +81b63e23b87c054c140ff6a1e2e6696ca750d51c \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.1.0.jar.sha1 b/server/licenses/lucene-misc-9.1.0.jar.sha1 deleted file mode 100644 index 8627e481c6214..0000000000000 --- a/server/licenses/lucene-misc-9.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -905d93b6389060cf4b0cb464ffa8fa2db81b60e7 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.2.0-snapshot-f4f1f70.jar.sha1 b/server/licenses/lucene-misc-9.2.0-snapshot-f4f1f70.jar.sha1 new file mode 100644 index 0000000000000..7011446c479a4 --- /dev/null +++ b/server/licenses/lucene-misc-9.2.0-snapshot-f4f1f70.jar.sha1 @@ -0,0 +1 @@ +0cdea200c1890b877d26ce58b7d797f122bb8328 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.1.0.jar.sha1 b/server/licenses/lucene-queries-9.1.0.jar.sha1 deleted file mode 100644 index 9e81da7ca5c15..0000000000000 --- a/server/licenses/lucene-queries-9.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c50fc971573910ea239ee6f275e9257b6b6bdd48 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.2.0-snapshot-f4f1f70.jar.sha1 b/server/licenses/lucene-queries-9.2.0-snapshot-f4f1f70.jar.sha1 new file mode 100644 index 0000000000000..eac4f3a6abc7d --- /dev/null +++ b/server/licenses/lucene-queries-9.2.0-snapshot-f4f1f70.jar.sha1 @@ -0,0 +1 @@ +50d8395e3afc502f267cb308399ab783edfabec0 \ No newline at end of file diff --git 
a/server/licenses/lucene-queryparser-9.1.0.jar.sha1 b/server/licenses/lucene-queryparser-9.1.0.jar.sha1 deleted file mode 100644 index fb04adf2051d0..0000000000000 --- a/server/licenses/lucene-queryparser-9.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -383eb69b12f9d9c98c44237155f50c870c9a34b9 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.2.0-snapshot-f4f1f70.jar.sha1 b/server/licenses/lucene-queryparser-9.2.0-snapshot-f4f1f70.jar.sha1 new file mode 100644 index 0000000000000..b00bd083b9e4f --- /dev/null +++ b/server/licenses/lucene-queryparser-9.2.0-snapshot-f4f1f70.jar.sha1 @@ -0,0 +1 @@ +815b394c8be5cbb9673011953da38d39a843b0fa \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.1.0.jar.sha1 b/server/licenses/lucene-sandbox-9.1.0.jar.sha1 deleted file mode 100644 index 429a84de46f3c..0000000000000 --- a/server/licenses/lucene-sandbox-9.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0c728684e750a63f881998fbe27afd897f739762 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.2.0-snapshot-f4f1f70.jar.sha1 b/server/licenses/lucene-sandbox-9.2.0-snapshot-f4f1f70.jar.sha1 new file mode 100644 index 0000000000000..9f26af782d88c --- /dev/null +++ b/server/licenses/lucene-sandbox-9.2.0-snapshot-f4f1f70.jar.sha1 @@ -0,0 +1 @@ +7bb7c539172dc3513d4f34e7f29d2cd3a0352361 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.1.0.jar.sha1 b/server/licenses/lucene-spatial-extras-9.1.0.jar.sha1 deleted file mode 100644 index 7078cbc05fff7..0000000000000 --- a/server/licenses/lucene-spatial-extras-9.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -94d7d107c399cd11d407b94fa62f5677fe86f63b \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.2.0-snapshot-f4f1f70.jar.sha1 b/server/licenses/lucene-spatial-extras-9.2.0-snapshot-f4f1f70.jar.sha1 new file mode 100644 index 0000000000000..8cf21bea02089 --- /dev/null +++ 
b/server/licenses/lucene-spatial-extras-9.2.0-snapshot-f4f1f70.jar.sha1 @@ -0,0 +1 @@ +7bc2f2e37f866e3b376f083e4b7cc89a8cb45fd0 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.1.0.jar.sha1 b/server/licenses/lucene-spatial3d-9.1.0.jar.sha1 deleted file mode 100644 index 604e8ed054ac1..0000000000000 --- a/server/licenses/lucene-spatial3d-9.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7717b300bc14dfa9eb4b7d5970d8e25a60010e64 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.2.0-snapshot-f4f1f70.jar.sha1 b/server/licenses/lucene-spatial3d-9.2.0-snapshot-f4f1f70.jar.sha1 new file mode 100644 index 0000000000000..1407cc666b3c3 --- /dev/null +++ b/server/licenses/lucene-spatial3d-9.2.0-snapshot-f4f1f70.jar.sha1 @@ -0,0 +1 @@ +a5f79bb1f8337dbe6fc50fc5abd46d4eaaf4d433 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.1.0.jar.sha1 b/server/licenses/lucene-suggest-9.1.0.jar.sha1 deleted file mode 100644 index 4562a19706634..0000000000000 --- a/server/licenses/lucene-suggest-9.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -957fca507eba94dbc3ef0d02377839be49bbe619 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.2.0-snapshot-f4f1f70.jar.sha1 b/server/licenses/lucene-suggest-9.2.0-snapshot-f4f1f70.jar.sha1 new file mode 100644 index 0000000000000..30772395b4cda --- /dev/null +++ b/server/licenses/lucene-suggest-9.2.0-snapshot-f4f1f70.jar.sha1 @@ -0,0 +1 @@ +9e74f66171ea215e8c4936588381c0950c290c80 \ No newline at end of file diff --git a/server/src/main/java/org/opensearch/Version.java b/server/src/main/java/org/opensearch/Version.java index e68305df20869..ad3546b2498f2 100644 --- a/server/src/main/java/org/opensearch/Version.java +++ b/server/src/main/java/org/opensearch/Version.java @@ -83,7 +83,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_1_3_2 = new Version(1030299, org.apache.lucene.util.Version.LUCENE_8_10_1); public static 
final Version V_2_0_0 = new Version(2000099, org.apache.lucene.util.Version.LUCENE_9_1_0); public static final Version V_2_1_0 = new Version(2010099, org.apache.lucene.util.Version.LUCENE_9_1_0); - public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_1_0); + public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_2_0); public static final Version CURRENT = V_3_0_0; public static Version readVersion(StreamInput in) throws IOException { diff --git a/server/src/test/java/org/opensearch/common/lucene/uid/VersionsTests.java b/server/src/test/java/org/opensearch/common/lucene/uid/VersionsTests.java index dace484f80c2b..8b22ff56e9abc 100644 --- a/server/src/test/java/org/opensearch/common/lucene/uid/VersionsTests.java +++ b/server/src/test/java/org/opensearch/common/lucene/uid/VersionsTests.java @@ -224,7 +224,7 @@ public void testLuceneVersionOnUnknownVersions() { assertEquals(VersionUtils.getFirstVersion().luceneVersion.major - 1, version.luceneVersion.major); // future version, should be the same version as today - version = Version.fromString("2.77.1"); + version = Version.fromString(Version.CURRENT.major + ".77.1"); assertEquals(Version.CURRENT.luceneVersion, version.luceneVersion); } } diff --git a/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java b/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java index dfa41edb5cff2..1b168e7d5b16c 100644 --- a/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java +++ b/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java @@ -127,7 +127,7 @@ public void testPostFilterDisablesCountOptimization() throws Exception { QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers()); assertEquals(1, context.queryResult().topDocs().topDocs.totalHits.value); - assertProfileData(context, "MatchAllDocsQuery", query -> { + 
assertProfileData(context, "ConstantScoreQuery", query -> { assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); @@ -157,7 +157,7 @@ public void testPostFilterDisablesCountOptimization() throws Exception { assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); }, (query) -> { - assertThat(query.getQueryName(), equalTo("MatchAllDocsQuery")); + assertThat(query.getQueryName(), equalTo("ConstantScoreQuery")); assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); @@ -239,7 +239,9 @@ public void testMinScoreDisablesCountOptimization() throws Exception { context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers()); assertEquals(1, context.queryResult().topDocs().topDocs.totalHits.value); - assertProfileData(context, "MatchAllDocsQuery", query -> { + // IndexSearcher#rewrite optimizes by rewriting non-scoring queries to ConstantScoreQuery + // see: https://github.com/apache/lucene/pull/672 + assertProfileData(context, "ConstantScoreQuery", query -> { assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); @@ -387,7 +389,9 @@ public void testTerminateAfterEarlyTermination() throws Exception { assertTrue(context.queryResult().terminatedEarly()); assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); - assertProfileData(context, "MatchAllDocsQuery", query -> 
{ + // IndexSearcher#rewrite optimizes by rewriting non-scoring queries to ConstantScoreQuery + // see: https://github.com/apache/lucene/pull/672 + assertProfileData(context, "ConstantScoreQuery", query -> { assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); @@ -463,27 +467,40 @@ public void testTerminateAfterEarlyTermination() throws Exception { assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); - assertProfileData(context, "BooleanQuery", query -> { + // IndexSearcher#rewrite optimizes by rewriting non-scoring queries to ConstantScoreQuery + // see: https://github.com/apache/lucene/pull/672 + assertProfileData(context, "ConstantScoreQuery", query -> { assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); - assertThat(query.getProfiledChildren(), hasSize(2)); - assertThat(query.getProfiledChildren().get(0).getQueryName(), equalTo("TermQuery")); + // rewritten as a ConstantScoreQuery wrapping the original BooleanQuery + // see: https://github.com/apache/lucene/pull/672 + assertThat(query.getProfiledChildren(), hasSize(1)); + assertThat(query.getProfiledChildren().get(0).getQueryName(), equalTo("BooleanQuery")); assertThat(query.getProfiledChildren().get(0).getTime(), greaterThan(0L)); assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight"), greaterThan(0L)); assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight_count"), equalTo(1L)); 
assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score"), equalTo(0L)); assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score_count"), equalTo(0L)); - assertThat(query.getProfiledChildren().get(1).getQueryName(), equalTo("TermQuery")); - assertThat(query.getProfiledChildren().get(1).getTime(), greaterThan(0L)); - assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight"), greaterThan(0L)); - assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight_count"), equalTo(1L)); - assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("score"), equalTo(0L)); - assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("score_count"), equalTo(0L)); + List children = query.getProfiledChildren().get(0).getProfiledChildren(); + assertThat(children, hasSize(2)); + assertThat(children.get(0).getQueryName(), equalTo("TermQuery")); + assertThat(children.get(0).getTime(), greaterThan(0L)); + assertThat(children.get(0).getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(children.get(0).getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + assertThat(children.get(0).getTimeBreakdown().get("score"), equalTo(0L)); + assertThat(children.get(0).getTimeBreakdown().get("score_count"), equalTo(0L)); + + assertThat(children.get(1).getQueryName(), equalTo("TermQuery")); + assertThat(children.get(1).getTime(), greaterThan(0L)); + assertThat(children.get(1).getTimeBreakdown().get("create_weight"), greaterThan(0L)); + assertThat(children.get(1).getTimeBreakdown().get("create_weight_count"), equalTo(1L)); + assertThat(children.get(1).getTimeBreakdown().get("score"), equalTo(0L)); + assertThat(children.get(1).getTimeBreakdown().get("score_count"), equalTo(0L)); }, collector -> { assertThat(collector.getReason(), equalTo("search_terminate_after_count")); assertThat(collector.getTime(), greaterThan(0L)); @@ -571,7 +588,9 @@ public void 
testIndexSortingEarlyTermination() throws Exception { assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class)); FieldDoc fieldDoc = (FieldDoc) context.queryResult().topDocs().topDocs.scoreDocs[0]; assertThat(fieldDoc.fields[0], equalTo(1)); - assertProfileData(context, "MatchAllDocsQuery", query -> { + // IndexSearcher#rewrite optimizes by rewriting non-scoring queries to ConstantScoreQuery + // see: https://github.com/apache/lucene/pull/672 + assertProfileData(context, "ConstantScoreQuery", query -> { assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); @@ -605,7 +624,9 @@ public void testIndexSortingEarlyTermination() throws Exception { assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); }, (query) -> { - assertThat(query.getQueryName(), equalTo("MatchAllDocsQuery")); + // IndexSearcher#rewrite optimizes by rewriting non-scoring queries to ConstantScoreQuery + // see: https://github.com/apache/lucene/pull/672 + assertThat(query.getQueryName(), equalTo("ConstantScoreQuery")); assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); @@ -623,7 +644,9 @@ public void testIndexSortingEarlyTermination() throws Exception { assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class)); assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2))); - assertProfileData(context, "MatchAllDocsQuery", query -> { + // IndexSearcher#rewrite optimizes by rewriting non-scoring queries to ConstantScoreQuery + // see: https://github.com/apache/lucene/pull/672 + 
assertProfileData(context, "ConstantScoreQuery", query -> { assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); @@ -640,7 +663,9 @@ public void testIndexSortingEarlyTermination() throws Exception { assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class)); assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2))); - assertProfileData(context, "MatchAllDocsQuery", query -> { + // IndexSearcher#rewrite optimizes by rewriting non-scoring queries to ConstantScoreQuery + // see: https://github.com/apache/lucene/pull/672 + assertProfileData(context, "ConstantScoreQuery", query -> { assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); @@ -695,7 +720,9 @@ public void testIndexSortScrollOptimization() throws Exception { assertNull(context.queryResult().terminatedEarly()); assertThat(context.terminateAfter(), equalTo(0)); assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs)); - assertProfileData(context, "MatchAllDocsQuery", query -> { + // IndexSearcher#rewrite optimizes by rewriting non-scoring queries to ConstantScoreQuery + // see: https://github.com/apache/lucene/pull/672 + assertProfileData(context, "ConstantScoreQuery", query -> { assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); @@ -806,7 +833,9 @@ public void testDisableTopScoreCollection() throws Exception { assertEquals(numDocs / 2, context.queryResult().topDocs().topDocs.totalHits.value); 
assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(3)); assertEquals(context.queryResult().topDocs().topDocs.totalHits.relation, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO); - assertProfileData(context, "SpanNearQuery", query -> { + // IndexSearcher#rewrite optimizes by rewriting non-scoring queries to ConstantScoreQuery + // see: https://github.com/apache/lucene/pull/672 + assertProfileData(context, "ConstantScoreQuery", query -> { assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), equalTo(0L)); assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L)); From f006afa969b25b9734d1fceef7c3860ed244dff9 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Fri, 22 Apr 2022 15:23:24 -0400 Subject: [PATCH 115/653] Temporary adding Apache Lucene repositories for snapshots (#3042) Signed-off-by: Andriy Redko --- gradle/code-coverage.gradle | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/gradle/code-coverage.gradle b/gradle/code-coverage.gradle index 61719282c1ca2..7f8af147e236c 100644 --- a/gradle/code-coverage.gradle +++ b/gradle/code-coverage.gradle @@ -11,6 +11,10 @@ apply plugin: 'jacoco' repositories { mavenCentral() gradlePluginPortal() + // TODO: Find the way to use the repositories from RepositoriesSetupPlugin + maven { + url = "https://d1nvenhzbhpy0q.cloudfront.net/snapshots/lucene/" + } } allprojects { From f43856161523125a6641358619bd76429e43a6c2 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Fri, 22 Apr 2022 16:31:14 -0400 Subject: [PATCH 116/653] Added explicit 'null' check for response listener to prevent obscure NullPointerException issues (#3048) Signed-off-by: Andriy Redko --- .../org/opensearch/client/RestHighLevelClient.java | 4 ++++ .../client/RestHighLevelClientTests.java | 14 ++++++++++++++ 2 files changed, 18 insertions(+) diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java 
b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java index e69ca149d697d..16e6648e7747e 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java @@ -1917,6 +1917,10 @@ private Cancellable internalPerformRequestAsync( ActionListener listener, Set ignores ) { + if (listener == null) { + throw new IllegalArgumentException("The listener is required and cannot be null"); + } + Request req; try { req = requestConverter.apply(request); diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java index 7766fa76d5cfe..efcc13921c398 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java @@ -284,6 +284,20 @@ public ActionRequestValidationException validate() { } } + public void testNullableActionListener() { + ActionRequest request = new ActionRequest() { + @Override + public ActionRequestValidationException validate() { + return null; + } + }; + + assertThrows( + IllegalArgumentException.class, + () -> restHighLevelClient.performRequestAsync(request, null, RequestOptions.DEFAULT, null, null, null) + ); + } + public void testParseEntity() throws IOException { { IllegalStateException ise = expectThrows(IllegalStateException.class, () -> restHighLevelClient.parseEntity(null, null)); From 88bc268e29f8153ae89bea97433ee0ea9917715b Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Sat, 23 Apr 2022 16:50:55 -0500 Subject: [PATCH 117/653] [Rename] ESTestCase stragglers to OpenSearchTestCase (#3053) A few places still referenced legacy ESTestCase name. This refactors those instances to OpenSearchTestCase. 
Signed-off-by: Nicholas Walter Knize --- .../src/main/resources/forbidden/opensearch-test-signatures.txt | 2 +- .../src/main/resources/org/opensearch/bootstrap/security.policy | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/buildSrc/src/main/resources/forbidden/opensearch-test-signatures.txt b/buildSrc/src/main/resources/forbidden/opensearch-test-signatures.txt index aeb5e25decf62..03dead38bd8b4 100644 --- a/buildSrc/src/main/resources/forbidden/opensearch-test-signatures.txt +++ b/buildSrc/src/main/resources/forbidden/opensearch-test-signatures.txt @@ -26,4 +26,4 @@ com.carrotsearch.randomizedtesting.annotations.Nightly @ We don't run nightly te org.junit.Test @defaultMessage Just name your test method testFooBar -java.lang.Math#random() @ Use one of the various randomization methods from LuceneTestCase or ESTestCase for reproducibility +java.lang.Math#random() @ Use one of the various randomization methods from LuceneTestCase or OpenSearchTestCase for reproducibility diff --git a/server/src/main/resources/org/opensearch/bootstrap/security.policy b/server/src/main/resources/org/opensearch/bootstrap/security.policy index 97b73aedf24bb..05d648212bc40 100644 --- a/server/src/main/resources/org/opensearch/bootstrap/security.policy +++ b/server/src/main/resources/org/opensearch/bootstrap/security.policy @@ -116,7 +116,7 @@ grant { permission java.util.PropertyPermission "solr.data.dir", "write"; permission java.util.PropertyPermission "solr.directoryFactory", "write"; - // set by ESTestCase to improve test reproducibility + // set by OpenSearchTestCase to improve test reproducibility // TODO: set this with gradle or some other way that repros with seed? 
permission java.util.PropertyPermission "processors.override", "write"; From 1c132495b009cf7924b5b620bc204211a73f1c43 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Apr 2022 08:33:14 -0700 Subject: [PATCH 118/653] Bump json-schema-validator from 1.0.68 to 1.0.69 in /buildSrc (#3060) Bumps [json-schema-validator](https://github.com/networknt/json-schema-validator) from 1.0.68 to 1.0.69. - [Release notes](https://github.com/networknt/json-schema-validator/releases) - [Changelog](https://github.com/networknt/json-schema-validator/blob/master/CHANGELOG.md) - [Commits](https://github.com/networknt/json-schema-validator/compare/1.0.68...1.0.69) --- updated-dependencies: - dependency-name: com.networknt:json-schema-validator dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- buildSrc/build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 077064e33187c..a8b97a110d19a 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -115,7 +115,7 @@ dependencies { api 'de.thetaphi:forbiddenapis:3.3' api 'com.avast.gradle:gradle-docker-compose-plugin:0.15.2' api 'org.apache.maven:maven-model:3.6.2' - api 'com.networknt:json-schema-validator:1.0.68' + api 'com.networknt:json-schema-validator:1.0.69' api "com.fasterxml.jackson.core:jackson-databind:${props.getProperty('jackson_databind')}" testFixturesApi "junit:junit:${props.getProperty('junit')}" From a389d1e221de9799b2543bf33d56ccd196794382 Mon Sep 17 00:00:00 2001 From: "Daniel Doubrovkine (dB.)" Date: Mon, 25 Apr 2022 12:08:38 -0400 Subject: [PATCH 119/653] Revert "Support task resource tracking in OpenSearch (#2639)" (#3046) This reverts commit 6517eeca507943757475fbe4427305bfc10b3d17. 
Signed-off-by: dblock Signed-off-by: Nicholas Walter Knize --- .../admin/cluster/node/tasks/TasksIT.java | 6 - .../tasks/list/TransportListTasksAction.java | 13 +- .../action/search/SearchShardTask.java | 5 - .../opensearch/action/search/SearchTask.java | 5 - .../action/support/TransportAction.java | 78 +-- .../org/opensearch/cluster/ClusterModule.java | 2 - .../common/settings/ClusterSettings.java | 4 +- .../util/concurrent/OpenSearchExecutors.java | 52 +- .../common/util/concurrent/ThreadContext.java | 16 +- .../main/java/org/opensearch/node/Node.java | 13 +- .../main/java/org/opensearch/tasks/Task.java | 17 +- .../org/opensearch/tasks/TaskManager.java | 27 +- .../tasks/TaskResourceTrackingService.java | 255 ------- .../opensearch/tasks/ThreadResourceInfo.java | 10 +- .../AutoQueueAdjustingExecutorBuilder.java | 19 +- .../RunnableTaskExecutionListener.java | 33 - .../threadpool/TaskAwareRunnable.java | 90 --- .../org/opensearch/threadpool/ThreadPool.java | 22 +- .../transport/RequestHandlerRegistry.java | 4 - .../tasks/RecordingTaskManagerListener.java | 3 - .../node/tasks/ResourceAwareTasksTests.java | 633 ------------------ .../node/tasks/TaskManagerTestCase.java | 17 +- .../bulk/TransportBulkActionIngestTests.java | 3 +- .../util/concurrent/ThreadContextTests.java | 10 - .../snapshots/SnapshotResiliencyTests.java | 3 - .../opensearch/tasks/TaskManagerTests.java | 6 +- .../TaskResourceTrackingServiceTests.java | 97 --- .../test/tasks/MockTaskManager.java | 16 - .../test/tasks/MockTaskManagerListener.java | 3 - .../opensearch/threadpool/TestThreadPool.java | 20 +- 30 files changed, 61 insertions(+), 1421 deletions(-) delete mode 100644 server/src/main/java/org/opensearch/tasks/TaskResourceTrackingService.java delete mode 100644 server/src/main/java/org/opensearch/threadpool/RunnableTaskExecutionListener.java delete mode 100644 server/src/main/java/org/opensearch/threadpool/TaskAwareRunnable.java delete mode 100644 
server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java delete mode 100644 server/src/test/java/org/opensearch/tasks/TaskResourceTrackingServiceTests.java diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java index c74f992970545..ac0ae44eb732e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java @@ -470,9 +470,6 @@ public void onTaskUnregistered(Task task) {} @Override public void waitForTaskCompletion(Task task) {} - - @Override - public void taskExecutionStarted(Task task, Boolean closeableInvoked) {} }); } // Need to run the task in a separate thread because node client's .execute() is blocked by our task listener @@ -654,9 +651,6 @@ public void waitForTaskCompletion(Task task) { waitForWaitingToStart.countDown(); } - @Override - public void taskExecutionStarted(Task task, Boolean closeableInvoked) {} - @Override public void onTaskRegistered(Task task) {} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java index df448d2665434..b7875c5f99774 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java @@ -42,7 +42,6 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskInfo; -import org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ 
-61,15 +60,8 @@ public static long waitForCompletionTimeout(TimeValue timeout) { private static final TimeValue DEFAULT_WAIT_FOR_COMPLETION_TIMEOUT = timeValueSeconds(30); - private final TaskResourceTrackingService taskResourceTrackingService; - @Inject - public TransportListTasksAction( - ClusterService clusterService, - TransportService transportService, - ActionFilters actionFilters, - TaskResourceTrackingService taskResourceTrackingService - ) { + public TransportListTasksAction(ClusterService clusterService, TransportService transportService, ActionFilters actionFilters) { super( ListTasksAction.NAME, clusterService, @@ -80,7 +72,6 @@ public TransportListTasksAction( TaskInfo::new, ThreadPool.Names.MANAGEMENT ); - this.taskResourceTrackingService = taskResourceTrackingService; } @Override @@ -110,8 +101,6 @@ protected void processTasks(ListTasksRequest request, Consumer operation) } taskManager.waitForTaskCompletion(task, timeoutNanos); }); - } else { - operation = operation.andThen(taskResourceTrackingService::refreshResourceStats); } super.processTasks(request, operation); } diff --git a/server/src/main/java/org/opensearch/action/search/SearchShardTask.java b/server/src/main/java/org/opensearch/action/search/SearchShardTask.java index f09701c7769eb..2e506c6fe181b 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchShardTask.java +++ b/server/src/main/java/org/opensearch/action/search/SearchShardTask.java @@ -49,11 +49,6 @@ public SearchShardTask(long id, String type, String action, String description, super(id, type, action, description, parentTaskId, headers); } - @Override - public boolean supportsResourceTracking() { - return true; - } - @Override public boolean shouldCancelChildrenOnCancellation() { return false; diff --git a/server/src/main/java/org/opensearch/action/search/SearchTask.java b/server/src/main/java/org/opensearch/action/search/SearchTask.java index bf6f141a3e829..7f80f7836be6c 100644 --- 
a/server/src/main/java/org/opensearch/action/search/SearchTask.java +++ b/server/src/main/java/org/opensearch/action/search/SearchTask.java @@ -78,11 +78,6 @@ public final String getDescription() { return descriptionSupplier.get(); } - @Override - public boolean supportsResourceTracking() { - return true; - } - /** * Attach a {@link SearchProgressListener} to this task. */ diff --git a/server/src/main/java/org/opensearch/action/support/TransportAction.java b/server/src/main/java/org/opensearch/action/support/TransportAction.java index 83fca715c7e28..84ece8cfec530 100644 --- a/server/src/main/java/org/opensearch/action/support/TransportAction.java +++ b/server/src/main/java/org/opensearch/action/support/TransportAction.java @@ -40,7 +40,6 @@ import org.opensearch.action.ActionResponse; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; -import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskCancelledException; import org.opensearch.tasks.TaskId; @@ -89,39 +88,31 @@ public final Task execute(Request request, ActionListener listener) { */ final Releasable unregisterChildNode = registerChildNode(request.getParentTask()); final Task task; - try { task = taskManager.register("transport", actionName, request); } catch (TaskCancelledException e) { unregisterChildNode.close(); throw e; } - - ThreadContext.StoredContext storedContext = taskManager.taskExecutionStarted(task); - try { - execute(task, request, new ActionListener() { - @Override - public void onResponse(Response response) { - try { - Releasables.close(unregisterChildNode, () -> taskManager.unregister(task)); - } finally { - listener.onResponse(response); - } + execute(task, request, new ActionListener() { + @Override + public void onResponse(Response response) { + try { + Releasables.close(unregisterChildNode, () -> taskManager.unregister(task)); + } finally { + listener.onResponse(response); } + } 
- @Override - public void onFailure(Exception e) { - try { - Releasables.close(unregisterChildNode, () -> taskManager.unregister(task)); - } finally { - listener.onFailure(e); - } + @Override + public void onFailure(Exception e) { + try { + Releasables.close(unregisterChildNode, () -> taskManager.unregister(task)); + } finally { + listener.onFailure(e); } - }); - } finally { - storedContext.close(); - } - + } + }); return task; } @@ -138,30 +129,25 @@ public final Task execute(Request request, TaskListener listener) { unregisterChildNode.close(); throw e; } - ThreadContext.StoredContext storedContext = taskManager.taskExecutionStarted(task); - try { - execute(task, request, new ActionListener() { - @Override - public void onResponse(Response response) { - try { - Releasables.close(unregisterChildNode, () -> taskManager.unregister(task)); - } finally { - listener.onResponse(task, response); - } + execute(task, request, new ActionListener() { + @Override + public void onResponse(Response response) { + try { + Releasables.close(unregisterChildNode, () -> taskManager.unregister(task)); + } finally { + listener.onResponse(task, response); } + } - @Override - public void onFailure(Exception e) { - try { - Releasables.close(unregisterChildNode, () -> taskManager.unregister(task)); - } finally { - listener.onFailure(task, e); - } + @Override + public void onFailure(Exception e) { + try { + Releasables.close(unregisterChildNode, () -> taskManager.unregister(task)); + } finally { + listener.onFailure(task, e); } - }); - } finally { - storedContext.close(); - } + } + }); return task; } diff --git a/server/src/main/java/org/opensearch/cluster/ClusterModule.java b/server/src/main/java/org/opensearch/cluster/ClusterModule.java index b9f3a2a99f0b7..c85691b80d7c3 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterModule.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterModule.java @@ -94,7 +94,6 @@ import org.opensearch.script.ScriptMetadata; import 
org.opensearch.snapshots.SnapshotsInfoService; import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.tasks.TaskResultsService; import java.util.ArrayList; @@ -395,7 +394,6 @@ protected void configure() { bind(NodeMappingRefreshAction.class).asEagerSingleton(); bind(MappingUpdatedAction.class).asEagerSingleton(); bind(TaskResultsService.class).asEagerSingleton(); - bind(TaskResourceTrackingService.class).asEagerSingleton(); bind(AllocationDeciders.class).toInstance(allocationDeciders); bind(ShardsAllocator.class).toInstance(shardsAllocator); } diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 3cd9b62fc474a..c3f0212b99812 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -39,7 +39,6 @@ import org.opensearch.index.ShardIndexingPressureMemoryManager; import org.opensearch.index.ShardIndexingPressureSettings; import org.opensearch.index.ShardIndexingPressureStore; -import org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.watcher.ResourceWatcherService; import org.opensearch.action.admin.cluster.configuration.TransportAddVotingConfigExclusionsAction; import org.opensearch.action.admin.indices.close.TransportCloseIndexAction; @@ -567,8 +566,7 @@ public void apply(Settings value, Settings current, Settings previous) { ShardIndexingPressureMemoryManager.THROUGHPUT_DEGRADATION_LIMITS, ShardIndexingPressureMemoryManager.SUCCESSFUL_REQUEST_ELAPSED_TIMEOUT, ShardIndexingPressureMemoryManager.MAX_OUTSTANDING_REQUESTS, - IndexingPressure.MAX_INDEXING_BYTES, - TaskResourceTrackingService.TASK_RESOURCE_TRACKING_ENABLED + IndexingPressure.MAX_INDEXING_BYTES ) ) ); diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/OpenSearchExecutors.java 
b/server/src/main/java/org/opensearch/common/util/concurrent/OpenSearchExecutors.java index 9e28bb2b795c3..5a967528a6ae2 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/OpenSearchExecutors.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/OpenSearchExecutors.java @@ -40,8 +40,6 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.node.Node; -import org.opensearch.threadpool.RunnableTaskExecutionListener; -import org.opensearch.threadpool.TaskAwareRunnable; import java.util.List; import java.util.Optional; @@ -57,7 +55,6 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; public class OpenSearchExecutors { @@ -175,39 +172,14 @@ public static OpenSearchThreadPoolExecutor newFixed( ); } - public static OpenSearchThreadPoolExecutor newAutoQueueFixed( - String name, - int size, - int initialQueueCapacity, - int minQueueSize, - int maxQueueSize, - int frameSize, - TimeValue targetedResponseTime, - ThreadFactory threadFactory, - ThreadContext contextHolder - ) { - return newAutoQueueFixed( - name, - size, - initialQueueCapacity, - minQueueSize, - maxQueueSize, - frameSize, - targetedResponseTime, - threadFactory, - contextHolder, - null - ); - } - /** * Return a new executor that will automatically adjust the queue size based on queue throughput. 
* - * @param size number of fixed threads to use for executing tasks + * @param size number of fixed threads to use for executing tasks * @param initialQueueCapacity initial size of the executor queue - * @param minQueueSize minimum queue size that the queue can be adjusted to - * @param maxQueueSize maximum queue size that the queue can be adjusted to - * @param frameSize number of tasks during which stats are collected before adjusting queue size + * @param minQueueSize minimum queue size that the queue can be adjusted to + * @param maxQueueSize maximum queue size that the queue can be adjusted to + * @param frameSize number of tasks during which stats are collected before adjusting queue size */ public static OpenSearchThreadPoolExecutor newAutoQueueFixed( String name, @@ -218,8 +190,7 @@ public static OpenSearchThreadPoolExecutor newAutoQueueFixed( int frameSize, TimeValue targetedResponseTime, ThreadFactory threadFactory, - ThreadContext contextHolder, - AtomicReference runnableTaskListener + ThreadContext contextHolder ) { if (initialQueueCapacity <= 0) { throw new IllegalArgumentException( @@ -230,17 +201,6 @@ public static OpenSearchThreadPoolExecutor newAutoQueueFixed( ConcurrentCollections.newBlockingQueue(), initialQueueCapacity ); - - Function runnableWrapper; - if (runnableTaskListener != null) { - runnableWrapper = (runnable) -> { - TaskAwareRunnable taskAwareRunnable = new TaskAwareRunnable(contextHolder, runnable, runnableTaskListener); - return new TimedRunnable(taskAwareRunnable); - }; - } else { - runnableWrapper = TimedRunnable::new; - } - return new QueueResizingOpenSearchThreadPoolExecutor( name, size, @@ -250,7 +210,7 @@ public static OpenSearchThreadPoolExecutor newAutoQueueFixed( queue, minQueueSize, maxQueueSize, - runnableWrapper, + TimedRunnable::new, frameSize, targetedResponseTime, threadFactory, diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java 
b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java index 35d7d925ce106..d844a8f158ea4 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java @@ -66,7 +66,6 @@ import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_COUNT; import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_SIZE; -import static org.opensearch.tasks.TaskResourceTrackingService.TASK_ID; /** * A ThreadContext is a map of string headers and a transient map of keyed objects that are associated with @@ -135,23 +134,16 @@ public StoredContext stashContext() { * This is needed so the DeprecationLogger in another thread can see the value of X-Opaque-ID provided by a user. * Otherwise when context is stash, it should be empty. */ - - ThreadContextStruct threadContextStruct = DEFAULT_CONTEXT; - if (context.requestHeaders.containsKey(Task.X_OPAQUE_ID)) { - threadContextStruct = threadContextStruct.putHeaders( + ThreadContextStruct threadContextStruct = DEFAULT_CONTEXT.putHeaders( MapBuilder.newMapBuilder() .put(Task.X_OPAQUE_ID, context.requestHeaders.get(Task.X_OPAQUE_ID)) .immutableMap() ); + threadLocal.set(threadContextStruct); + } else { + threadLocal.set(DEFAULT_CONTEXT); } - - if (context.transientHeaders.containsKey(TASK_ID)) { - threadContextStruct = threadContextStruct.putTransient(TASK_ID, context.transientHeaders.get(TASK_ID)); - } - - threadLocal.set(threadContextStruct); - return () -> { // If the node and thus the threadLocal get closed while this task // is still executing, we don't want this runnable to fail with an diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index c929c7c013b13..46400e5c8d269 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ 
b/server/src/main/java/org/opensearch/node/Node.java @@ -37,8 +37,6 @@ import org.apache.lucene.util.Constants; import org.apache.lucene.util.SetOnce; import org.opensearch.index.IndexingPressureService; -import org.opensearch.tasks.TaskResourceTrackingService; -import org.opensearch.threadpool.RunnableTaskExecutionListener; import org.opensearch.watcher.ResourceWatcherService; import org.opensearch.Assertions; import org.opensearch.Build; @@ -215,7 +213,6 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; import java.util.function.UnaryOperator; import java.util.stream.Collectors; @@ -327,7 +324,6 @@ public static class DiscoverySettings { private final LocalNodeFactory localNodeFactory; private final NodeService nodeService; final NamedWriteableRegistry namedWriteableRegistry; - private final AtomicReference runnableTaskListener; public Node(Environment environment) { this(environment, Collections.emptyList(), true); @@ -437,8 +433,7 @@ protected Node( final List> executorBuilders = pluginsService.getExecutorBuilders(settings); - runnableTaskListener = new AtomicReference<>(); - final ThreadPool threadPool = new ThreadPool(settings, runnableTaskListener, executorBuilders.toArray(new ExecutorBuilder[0])); + final ThreadPool threadPool = new ThreadPool(settings, executorBuilders.toArray(new ExecutorBuilder[0])); resourcesToClose.add(() -> ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS)); final ResourceWatcherService resourceWatcherService = new ResourceWatcherService(settings, threadPool); resourcesToClose.add(resourceWatcherService); @@ -1062,11 +1057,6 @@ public Node start() throws NodeValidationException { TransportService transportService = injector.getInstance(TransportService.class); transportService.getTaskManager().setTaskResultsService(injector.getInstance(TaskResultsService.class)); 
transportService.getTaskManager().setTaskCancellationService(new TaskCancellationService(transportService)); - - TaskResourceTrackingService taskResourceTrackingService = injector.getInstance(TaskResourceTrackingService.class); - transportService.getTaskManager().setTaskResourceTrackingService(taskResourceTrackingService); - runnableTaskListener.set(taskResourceTrackingService); - transportService.start(); assert localNodeFactory.getNode() != null; assert transportService.getLocalNode().equals(localNodeFactory.getNode()) @@ -1500,5 +1490,4 @@ DiscoveryNode getNode() { return localNode.get(); } } - } diff --git a/server/src/main/java/org/opensearch/tasks/Task.java b/server/src/main/java/org/opensearch/tasks/Task.java index a51af17ae8ea2..62453d08724ce 100644 --- a/server/src/main/java/org/opensearch/tasks/Task.java +++ b/server/src/main/java/org/opensearch/tasks/Task.java @@ -32,6 +32,8 @@ package org.opensearch.tasks; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.opensearch.action.ActionResponse; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.io.stream.NamedWriteable; @@ -51,6 +53,8 @@ */ public class Task { + private static final Logger logger = LogManager.getLogger(Task.class); + /** * The request header to mark tasks with specific ids */ @@ -285,7 +289,7 @@ public void startThreadResourceTracking(long threadId, ResourceStatsType statsTy ); } } - threadResourceInfoList.add(new ThreadResourceInfo(threadId, statsType, resourceUsageMetrics)); + threadResourceInfoList.add(new ThreadResourceInfo(statsType, resourceUsageMetrics)); } /** @@ -332,17 +336,6 @@ public void stopThreadResourceTracking(long threadId, ResourceStatsType statsTyp throw new IllegalStateException("cannot update final values if active thread resource entry is not present"); } - /** - * Individual tasks can override this if they want to support task resource tracking. 
We just need to make sure that - * the ThreadPool on which the task runs on have runnable wrapper similar to - * {@link org.opensearch.common.util.concurrent.OpenSearchExecutors#newAutoQueueFixed} - * - * @return true if resource tracking is supported by the task - */ - public boolean supportsResourceTracking() { - return false; - } - /** * Report of the internal status of a task. These can vary wildly from task * to task because each task is implemented differently but we should try diff --git a/server/src/main/java/org/opensearch/tasks/TaskManager.java b/server/src/main/java/org/opensearch/tasks/TaskManager.java index 37c10dfc0e6ab..1f6169768f245 100644 --- a/server/src/main/java/org/opensearch/tasks/TaskManager.java +++ b/server/src/main/java/org/opensearch/tasks/TaskManager.java @@ -89,9 +89,7 @@ public class TaskManager implements ClusterStateApplier { private static final TimeValue WAIT_FOR_COMPLETION_POLL = timeValueMillis(100); - /** - * Rest headers that are copied to the task - */ + /** Rest headers that are copied to the task */ private final List taskHeaders; private final ThreadPool threadPool; @@ -105,7 +103,6 @@ public class TaskManager implements ClusterStateApplier { private final Map banedParents = new ConcurrentHashMap<>(); private TaskResultsService taskResultsService; - private final SetOnce taskResourceTrackingService = new SetOnce<>(); private volatile DiscoveryNodes lastDiscoveryNodes = DiscoveryNodes.EMPTY_NODES; @@ -128,10 +125,6 @@ public void setTaskCancellationService(TaskCancellationService taskCancellationS this.cancellationService.set(taskCancellationService); } - public void setTaskResourceTrackingService(TaskResourceTrackingService taskResourceTrackingService) { - this.taskResourceTrackingService.set(taskResourceTrackingService); - } - /** * Registers a task without parent task */ @@ -209,11 +202,6 @@ public void cancel(CancellableTask task, String reason, Runnable listener) { */ public Task unregister(Task task) { 
logger.trace("unregister task for id: {}", task.getId()); - - if (taskResourceTrackingService.get() != null && task.supportsResourceTracking()) { - taskResourceTrackingService.get().stopTracking(task); - } - if (task instanceof CancellableTask) { CancellableTaskHolder holder = cancellableTasks.remove(task.getId()); if (holder != null) { @@ -373,7 +361,6 @@ public int getBanCount() { * Bans all tasks with the specified parent task from execution, cancels all tasks that are currently executing. *

* This method is called when a parent task that has children is cancelled. - * * @return a list of pending cancellable child tasks */ public List setBan(TaskId parentTaskId, String reason) { @@ -461,18 +448,6 @@ public void waitForTaskCompletion(Task task, long untilInNanos) { throw new OpenSearchTimeoutException("Timed out waiting for completion of [{}]", task); } - /** - * Takes actions when a task is registered and its execution starts - * - * @param task getting executed. - * @return AutoCloseable to free up resources (clean up thread context) when task execution block returns - */ - public ThreadContext.StoredContext taskExecutionStarted(Task task) { - if (taskResourceTrackingService.get() == null) return () -> {}; - - return taskResourceTrackingService.get().startTracking(task); - } - private static class CancellableTaskHolder { private final CancellableTask task; private boolean finished = false; diff --git a/server/src/main/java/org/opensearch/tasks/TaskResourceTrackingService.java b/server/src/main/java/org/opensearch/tasks/TaskResourceTrackingService.java deleted file mode 100644 index 71b829e023385..0000000000000 --- a/server/src/main/java/org/opensearch/tasks/TaskResourceTrackingService.java +++ /dev/null @@ -1,255 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.tasks; - -import com.sun.management.ThreadMXBean; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.common.SuppressForbidden; -import org.opensearch.common.inject.Inject; -import org.opensearch.common.settings.ClusterSettings; -import org.opensearch.common.settings.Setting; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.concurrent.ConcurrentCollections; -import org.opensearch.common.util.concurrent.ConcurrentMapLong; -import org.opensearch.common.util.concurrent.ThreadContext; -import org.opensearch.threadpool.RunnableTaskExecutionListener; -import org.opensearch.threadpool.ThreadPool; - -import java.lang.management.ManagementFactory; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Map; - -import static org.opensearch.tasks.ResourceStatsType.WORKER_STATS; - -/** - * Service that helps track resource usage of tasks running on a node. 
- */ -@SuppressForbidden(reason = "ThreadMXBean#getThreadAllocatedBytes") -public class TaskResourceTrackingService implements RunnableTaskExecutionListener { - - private static final Logger logger = LogManager.getLogger(TaskManager.class); - - public static final Setting TASK_RESOURCE_TRACKING_ENABLED = Setting.boolSetting( - "task_resource_tracking.enabled", - true, - Setting.Property.Dynamic, - Setting.Property.NodeScope - ); - public static final String TASK_ID = "TASK_ID"; - - private static final ThreadMXBean threadMXBean = (ThreadMXBean) ManagementFactory.getThreadMXBean(); - - private final ConcurrentMapLong resourceAwareTasks = ConcurrentCollections.newConcurrentMapLongWithAggressiveConcurrency(); - private final ThreadPool threadPool; - private volatile boolean taskResourceTrackingEnabled; - - @Inject - public TaskResourceTrackingService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) { - this.taskResourceTrackingEnabled = TASK_RESOURCE_TRACKING_ENABLED.get(settings); - this.threadPool = threadPool; - clusterSettings.addSettingsUpdateConsumer(TASK_RESOURCE_TRACKING_ENABLED, this::setTaskResourceTrackingEnabled); - } - - public void setTaskResourceTrackingEnabled(boolean taskResourceTrackingEnabled) { - this.taskResourceTrackingEnabled = taskResourceTrackingEnabled; - } - - public boolean isTaskResourceTrackingEnabled() { - return taskResourceTrackingEnabled; - } - - public boolean isTaskResourceTrackingSupported() { - return threadMXBean.isThreadAllocatedMemorySupported() && threadMXBean.isThreadAllocatedMemoryEnabled(); - } - - /** - * Executes logic only if task supports resource tracking and resource tracking setting is enabled. - *

- * 1. Starts tracking the task in map of resourceAwareTasks. - * 2. Adds Task Id in thread context to make sure it's available while task is processed across multiple threads. - * - * @param task for which resources needs to be tracked - * @return Autocloseable stored context to restore ThreadContext to the state before this method changed it. - */ - public ThreadContext.StoredContext startTracking(Task task) { - if (task.supportsResourceTracking() == false - || isTaskResourceTrackingEnabled() == false - || isTaskResourceTrackingSupported() == false) { - return () -> {}; - } - - logger.debug("Starting resource tracking for task: {}", task.getId()); - resourceAwareTasks.put(task.getId(), task); - return addTaskIdToThreadContext(task); - } - - /** - * Stops tracking task registered earlier for tracking. - *

- * It doesn't have feature enabled check to avoid any issues if setting was disable while the task was in progress. - *

- * It's also responsible to stop tracking the current thread's resources against this task if not already done. - * This happens when the thread executing the request logic itself calls the unregister method. So in this case unregister - * happens before runnable finishes. - * - * @param task task which has finished and doesn't need resource tracking. - */ - public void stopTracking(Task task) { - logger.debug("Stopping resource tracking for task: {}", task.getId()); - try { - if (isCurrentThreadWorkingOnTask(task)) { - taskExecutionFinishedOnThread(task.getId(), Thread.currentThread().getId()); - } - - List threadsWorkingOnTask = getThreadsWorkingOnTask(task); - if (threadsWorkingOnTask.size() > 0) { - logger.warn("No thread should be active when task finishes. Active threads: {}", threadsWorkingOnTask); - assert false : "No thread should be marked active when task finishes"; - } - } catch (Exception e) { - logger.warn("Failed while trying to mark the task execution on current thread completed.", e); - assert false; - } finally { - resourceAwareTasks.remove(task.getId()); - } - } - - /** - * Refreshes the resource stats for the tasks provided by looking into which threads are actively working on these - * and how much resources these have consumed till now. - * - * @param tasks for which resource stats needs to be refreshed. - */ - public void refreshResourceStats(Task... 
tasks) { - if (isTaskResourceTrackingEnabled() == false || isTaskResourceTrackingSupported() == false) { - return; - } - - for (Task task : tasks) { - if (task.supportsResourceTracking() && resourceAwareTasks.containsKey(task.getId())) { - refreshResourceStats(task); - } - } - } - - private void refreshResourceStats(Task resourceAwareTask) { - try { - logger.debug("Refreshing resource stats for Task: {}", resourceAwareTask.getId()); - List threadsWorkingOnTask = getThreadsWorkingOnTask(resourceAwareTask); - threadsWorkingOnTask.forEach( - threadId -> resourceAwareTask.updateThreadResourceStats(threadId, WORKER_STATS, getResourceUsageMetricsForThread(threadId)) - ); - } catch (IllegalStateException e) { - logger.debug("Resource stats already updated."); - } - - } - - /** - * Called when a thread starts working on a task's runnable. - * - * @param taskId of the task for which runnable is starting - * @param threadId of the thread which will be executing the runnable and we need to check resource usage for this - * thread - */ - @Override - public void taskExecutionStartedOnThread(long taskId, long threadId) { - try { - if (resourceAwareTasks.containsKey(taskId)) { - logger.debug("Task execution started on thread. Task: {}, Thread: {}", taskId, threadId); - - resourceAwareTasks.get(taskId) - .startThreadResourceTracking(threadId, WORKER_STATS, getResourceUsageMetricsForThread(threadId)); - } - } catch (Exception e) { - logger.warn(new ParameterizedMessage("Failed to mark thread execution started for task: [{}]", taskId), e); - assert false; - } - - } - - /** - * Called when a thread finishes working on a task's runnable. 
- * - * @param taskId of the task for which runnable is complete - * @param threadId of the thread which executed the runnable and we need to check resource usage for this thread - */ - @Override - public void taskExecutionFinishedOnThread(long taskId, long threadId) { - try { - if (resourceAwareTasks.containsKey(taskId)) { - logger.debug("Task execution finished on thread. Task: {}, Thread: {}", taskId, threadId); - resourceAwareTasks.get(taskId) - .stopThreadResourceTracking(threadId, WORKER_STATS, getResourceUsageMetricsForThread(threadId)); - } - } catch (Exception e) { - logger.warn(new ParameterizedMessage("Failed to mark thread execution finished for task: [{}]", taskId), e); - assert false; - } - } - - public Map getResourceAwareTasks() { - return Collections.unmodifiableMap(resourceAwareTasks); - } - - private ResourceUsageMetric[] getResourceUsageMetricsForThread(long threadId) { - ResourceUsageMetric currentMemoryUsage = new ResourceUsageMetric( - ResourceStats.MEMORY, - threadMXBean.getThreadAllocatedBytes(threadId) - ); - ResourceUsageMetric currentCPUUsage = new ResourceUsageMetric(ResourceStats.CPU, threadMXBean.getThreadCpuTime(threadId)); - return new ResourceUsageMetric[] { currentMemoryUsage, currentCPUUsage }; - } - - private boolean isCurrentThreadWorkingOnTask(Task task) { - long threadId = Thread.currentThread().getId(); - List threadResourceInfos = task.getResourceStats().getOrDefault(threadId, Collections.emptyList()); - - for (ThreadResourceInfo threadResourceInfo : threadResourceInfos) { - if (threadResourceInfo.isActive()) { - return true; - } - } - return false; - } - - private List getThreadsWorkingOnTask(Task task) { - List activeThreads = new ArrayList<>(); - for (List threadResourceInfos : task.getResourceStats().values()) { - for (ThreadResourceInfo threadResourceInfo : threadResourceInfos) { - if (threadResourceInfo.isActive()) { - activeThreads.add(threadResourceInfo.getThreadId()); - } - } - } - return activeThreads; - } - - /** 
- * Adds Task Id in the ThreadContext. - *

- * Stashes the existing ThreadContext and preserves all the existing ThreadContext's data in the new ThreadContext - * as well. - * - * @param task for which Task Id needs to be added in ThreadContext. - * @return StoredContext reference to restore the ThreadContext from which we created a new one. - * Caller can call context.restore() to get the existing ThreadContext back. - */ - private ThreadContext.StoredContext addTaskIdToThreadContext(Task task) { - ThreadContext threadContext = threadPool.getThreadContext(); - ThreadContext.StoredContext storedContext = threadContext.newStoredContext(true, Collections.singletonList(TASK_ID)); - threadContext.putTransient(TASK_ID, task.getId()); - return storedContext; - } - -} diff --git a/server/src/main/java/org/opensearch/tasks/ThreadResourceInfo.java b/server/src/main/java/org/opensearch/tasks/ThreadResourceInfo.java index 9ee683e3928f6..8b45c38c8fb63 100644 --- a/server/src/main/java/org/opensearch/tasks/ThreadResourceInfo.java +++ b/server/src/main/java/org/opensearch/tasks/ThreadResourceInfo.java @@ -15,13 +15,11 @@ * for a specific stats type like worker_stats or response_stats etc., */ public class ThreadResourceInfo { - private final long threadId; private volatile boolean isActive = true; private final ResourceStatsType statsType; private final ResourceUsageInfo resourceUsageInfo; - public ThreadResourceInfo(long threadId, ResourceStatsType statsType, ResourceUsageMetric... resourceUsageMetrics) { - this.threadId = threadId; + public ThreadResourceInfo(ResourceStatsType statsType, ResourceUsageMetric... 
resourceUsageMetrics) { this.statsType = statsType; this.resourceUsageInfo = new ResourceUsageInfo(resourceUsageMetrics); } @@ -45,16 +43,12 @@ public ResourceStatsType getStatsType() { return statsType; } - public long getThreadId() { - return threadId; - } - public ResourceUsageInfo getResourceUsageInfo() { return resourceUsageInfo; } @Override public String toString() { - return resourceUsageInfo + ", stats_type=" + statsType + ", is_active=" + isActive + ", threadId=" + threadId; + return resourceUsageInfo + ", stats_type=" + statsType + ", is_active=" + isActive; } } diff --git a/server/src/main/java/org/opensearch/threadpool/AutoQueueAdjustingExecutorBuilder.java b/server/src/main/java/org/opensearch/threadpool/AutoQueueAdjustingExecutorBuilder.java index 55b92c5d8bfcb..2bac5eba9fc28 100644 --- a/server/src/main/java/org/opensearch/threadpool/AutoQueueAdjustingExecutorBuilder.java +++ b/server/src/main/java/org/opensearch/threadpool/AutoQueueAdjustingExecutorBuilder.java @@ -48,7 +48,6 @@ import java.util.Map; import java.util.concurrent.ExecutorService; import java.util.concurrent.ThreadFactory; -import java.util.concurrent.atomic.AtomicReference; /** * A builder for executors that automatically adjust the queue length as needed, depending on @@ -62,7 +61,6 @@ public final class AutoQueueAdjustingExecutorBuilder extends ExecutorBuilder maxQueueSizeSetting; private final Setting targetedResponseTimeSetting; private final Setting frameSizeSetting; - private final AtomicReference runnableTaskListener; AutoQueueAdjustingExecutorBuilder( final Settings settings, @@ -72,19 +70,6 @@ public final class AutoQueueAdjustingExecutorBuilder extends ExecutorBuilder runnableTaskListener ) { super(name); final String prefix = "thread_pool." 
+ name; @@ -199,7 +184,6 @@ public Iterator> settings() { Setting.Property.Deprecated, Setting.Property.Deprecated ); - this.runnableTaskListener = runnableTaskListener; } @Override @@ -246,8 +230,7 @@ ThreadPool.ExecutorHolder build(final AutoExecutorSettings settings, final Threa frameSize, targetedResponseTime, threadFactory, - threadContext, - runnableTaskListener + threadContext ); // TODO: in a subsequent change we hope to extend ThreadPool.Info to be more specific for the thread pool type final ThreadPool.Info info = new ThreadPool.Info( diff --git a/server/src/main/java/org/opensearch/threadpool/RunnableTaskExecutionListener.java b/server/src/main/java/org/opensearch/threadpool/RunnableTaskExecutionListener.java deleted file mode 100644 index 03cd66f80d044..0000000000000 --- a/server/src/main/java/org/opensearch/threadpool/RunnableTaskExecutionListener.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.threadpool; - -/** - * Listener for events when a runnable execution starts or finishes on a thread and is aware of the task for which the - * runnable is associated to. 
- */ -public interface RunnableTaskExecutionListener { - - /** - * Sends an update when ever a task's execution start on a thread - * - * @param taskId of task which has started - * @param threadId of thread which is executing the task - */ - void taskExecutionStartedOnThread(long taskId, long threadId); - - /** - * - * Sends an update when task execution finishes on a thread - * - * @param taskId of task which has finished - * @param threadId of thread which executed the task - */ - void taskExecutionFinishedOnThread(long taskId, long threadId); -} diff --git a/server/src/main/java/org/opensearch/threadpool/TaskAwareRunnable.java b/server/src/main/java/org/opensearch/threadpool/TaskAwareRunnable.java deleted file mode 100644 index 183b9b2f4cf9a..0000000000000 --- a/server/src/main/java/org/opensearch/threadpool/TaskAwareRunnable.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.threadpool; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.opensearch.ExceptionsHelper; -import org.opensearch.common.util.concurrent.AbstractRunnable; -import org.opensearch.common.util.concurrent.ThreadContext; -import org.opensearch.common.util.concurrent.WrappedRunnable; -import org.opensearch.tasks.TaskManager; - -import java.util.Objects; -import java.util.concurrent.atomic.AtomicReference; - -import static java.lang.Thread.currentThread; -import static org.opensearch.tasks.TaskResourceTrackingService.TASK_ID; - -/** - * Responsible for wrapping the original task's runnable and sending updates on when it starts and finishes to - * entities listening to the events. - * - * It's able to associate runnable with a task with the help of task Id available in thread context. 
- */ -public class TaskAwareRunnable extends AbstractRunnable implements WrappedRunnable { - - private static final Logger logger = LogManager.getLogger(TaskManager.class); - - private final Runnable original; - private final ThreadContext threadContext; - private final AtomicReference runnableTaskListener; - - public TaskAwareRunnable( - final ThreadContext threadContext, - final Runnable original, - final AtomicReference runnableTaskListener - ) { - this.original = original; - this.threadContext = threadContext; - this.runnableTaskListener = runnableTaskListener; - } - - @Override - public void onFailure(Exception e) { - ExceptionsHelper.reThrowIfNotNull(e); - } - - @Override - public boolean isForceExecution() { - return original instanceof AbstractRunnable && ((AbstractRunnable) original).isForceExecution(); - } - - @Override - public void onRejection(final Exception e) { - if (original instanceof AbstractRunnable) { - ((AbstractRunnable) original).onRejection(e); - } else { - ExceptionsHelper.reThrowIfNotNull(e); - } - } - - @Override - protected void doRun() throws Exception { - assert runnableTaskListener.get() != null : "Listener should be attached"; - Long taskId = threadContext.getTransient(TASK_ID); - if (Objects.nonNull(taskId)) { - runnableTaskListener.get().taskExecutionStartedOnThread(taskId, currentThread().getId()); - } else { - logger.debug("Task Id not available in thread context. Skipping update. 
Thread Info: {}", Thread.currentThread()); - } - try { - original.run(); - } finally { - if (Objects.nonNull(taskId)) { - runnableTaskListener.get().taskExecutionFinishedOnThread(taskId, currentThread().getId()); - } - } - } - - @Override - public Runnable unwrap() { - return original; - } -} diff --git a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java index 5e8f515f6c577..c2530ccee5588 100644 --- a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java +++ b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java @@ -68,7 +68,6 @@ import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; import static java.util.Collections.unmodifiableMap; @@ -185,14 +184,6 @@ public Collection builders() { ); public ThreadPool(final Settings settings, final ExecutorBuilder... customBuilders) { - this(settings, null, customBuilders); - } - - public ThreadPool( - final Settings settings, - final AtomicReference runnableTaskListener, - final ExecutorBuilder... 
customBuilders - ) { assert Node.NODE_NAME_SETTING.exists(settings); final Map builders = new HashMap<>(); @@ -206,20 +197,11 @@ public ThreadPool( builders.put(Names.ANALYZE, new FixedExecutorBuilder(settings, Names.ANALYZE, 1, 16)); builders.put( Names.SEARCH, - new AutoQueueAdjustingExecutorBuilder( - settings, - Names.SEARCH, - searchThreadPoolSize(allocatedProcessors), - 1000, - 1000, - 1000, - 2000, - runnableTaskListener - ) + new AutoQueueAdjustingExecutorBuilder(settings, Names.SEARCH, searchThreadPoolSize(allocatedProcessors), 1000, 1000, 1000, 2000) ); builders.put( Names.SEARCH_THROTTLED, - new AutoQueueAdjustingExecutorBuilder(settings, Names.SEARCH_THROTTLED, 1, 100, 100, 100, 200, runnableTaskListener) + new AutoQueueAdjustingExecutorBuilder(settings, Names.SEARCH_THROTTLED, 1, 100, 100, 100, 200) ); builders.put(Names.MANAGEMENT, new ScalingExecutorBuilder(Names.MANAGEMENT, 1, 5, TimeValue.timeValueMinutes(5))); // no queue as this means clients will need to handle rejections on listener queue even if the operation succeeded diff --git a/server/src/main/java/org/opensearch/transport/RequestHandlerRegistry.java b/server/src/main/java/org/opensearch/transport/RequestHandlerRegistry.java index 73be6e5b601e9..dcb021531f0ac 100644 --- a/server/src/main/java/org/opensearch/transport/RequestHandlerRegistry.java +++ b/server/src/main/java/org/opensearch/transport/RequestHandlerRegistry.java @@ -37,7 +37,6 @@ import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; import org.opensearch.search.internal.ShardSearchRequest; -import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.tasks.CancellableTask; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskManager; @@ -82,8 +81,6 @@ public Request newRequest(StreamInput in) throws IOException { public void processMessageReceived(Request request, TransportChannel channel) throws Exception { final Task task = 
taskManager.register(channel.getChannelType(), action, request); - ThreadContext.StoredContext contextToRestore = taskManager.taskExecutionStarted(task); - Releasable unregisterTask = () -> taskManager.unregister(task); try { if (channel instanceof TcpTransportChannel && task instanceof CancellableTask) { @@ -102,7 +99,6 @@ public void processMessageReceived(Request request, TransportChannel channel) th unregisterTask = null; } finally { Releasables.close(unregisterTask); - contextToRestore.restore(); } } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/RecordingTaskManagerListener.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/RecordingTaskManagerListener.java index 9bd44185baf24..7756eb12bb3f4 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/RecordingTaskManagerListener.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/RecordingTaskManagerListener.java @@ -75,9 +75,6 @@ public synchronized void onTaskUnregistered(Task task) { @Override public void waitForTaskCompletion(Task task) {} - @Override - public void taskExecutionStarted(Task task, Boolean closeableInvoked) {} - public synchronized List> getEvents() { return Collections.unmodifiableList(new ArrayList<>(events)); } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java deleted file mode 100644 index 23877ac0b7395..0000000000000 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java +++ /dev/null @@ -1,633 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.action.admin.cluster.node.tasks; - -import com.sun.management.ThreadMXBean; -import org.opensearch.ExceptionsHelper; -import org.opensearch.action.ActionListener; -import org.opensearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; -import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksRequest; -import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse; -import org.opensearch.action.support.ActionTestUtils; -import org.opensearch.action.support.nodes.BaseNodeRequest; -import org.opensearch.action.support.nodes.BaseNodesRequest; -import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.SuppressForbidden; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.concurrent.AbstractRunnable; -import org.opensearch.tasks.CancellableTask; -import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskCancelledException; -import org.opensearch.tasks.TaskId; -import org.opensearch.tasks.TaskInfo; -import org.opensearch.test.tasks.MockTaskManager; -import org.opensearch.test.tasks.MockTaskManagerListener; -import org.opensearch.threadpool.ThreadPool; -import org.opensearch.transport.TransportService; - -import java.io.IOException; -import java.lang.management.ManagementFactory; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Future; -import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.atomic.AtomicReference; -import java.util.function.Consumer; - -import static org.hamcrest.Matchers.containsInAnyOrder; -import static org.opensearch.tasks.TaskResourceTrackingService.TASK_ID; - -@SuppressForbidden(reason = "ThreadMXBean#getThreadAllocatedBytes") -public class 
ResourceAwareTasksTests extends TaskManagerTestCase { - - private static final ThreadMXBean threadMXBean = (ThreadMXBean) ManagementFactory.getThreadMXBean(); - - public static class ResourceAwareNodeRequest extends BaseNodeRequest { - protected String requestName; - - public ResourceAwareNodeRequest() { - super(); - } - - public ResourceAwareNodeRequest(StreamInput in) throws IOException { - super(in); - requestName = in.readString(); - } - - public ResourceAwareNodeRequest(NodesRequest request) { - requestName = request.requestName; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeString(requestName); - } - - @Override - public String getDescription() { - return "ResourceAwareNodeRequest[" + requestName + "]"; - } - - @Override - public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { - return new CancellableTask(id, type, action, getDescription(), parentTaskId, headers) { - @Override - public boolean shouldCancelChildrenOnCancellation() { - return false; - } - - @Override - public boolean supportsResourceTracking() { - return true; - } - }; - } - } - - public static class NodesRequest extends BaseNodesRequest { - private final String requestName; - - private NodesRequest(StreamInput in) throws IOException { - super(in); - requestName = in.readString(); - } - - public NodesRequest(String requestName, String... 
nodesIds) { - super(nodesIds); - this.requestName = requestName; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeString(requestName); - } - - @Override - public String getDescription() { - return "NodesRequest[" + requestName + "]"; - } - - @Override - public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { - return new CancellableTask(id, type, action, getDescription(), parentTaskId, headers) { - @Override - public boolean shouldCancelChildrenOnCancellation() { - return true; - } - }; - } - } - - /** - * Simulates a task which executes work on search executor. - */ - class ResourceAwareNodesAction extends AbstractTestNodesAction { - private final TaskTestContext taskTestContext; - private final boolean blockForCancellation; - - ResourceAwareNodesAction( - String actionName, - ThreadPool threadPool, - ClusterService clusterService, - TransportService transportService, - boolean shouldBlock, - TaskTestContext taskTestContext - ) { - super(actionName, threadPool, clusterService, transportService, NodesRequest::new, ResourceAwareNodeRequest::new); - this.taskTestContext = taskTestContext; - this.blockForCancellation = shouldBlock; - } - - @Override - protected ResourceAwareNodeRequest newNodeRequest(NodesRequest request) { - return new ResourceAwareNodeRequest(request); - } - - @Override - protected NodeResponse nodeOperation(ResourceAwareNodeRequest request, Task task) { - assert task.supportsResourceTracking(); - - AtomicLong threadId = new AtomicLong(); - Future result = threadPool.executor(ThreadPool.Names.SEARCH).submit(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - ExceptionsHelper.reThrowIfNotNull(e); - } - - @Override - @SuppressForbidden(reason = "ThreadMXBean#getThreadAllocatedBytes") - protected void doRun() { - taskTestContext.memoryConsumptionWhenExecutionStarts = threadMXBean.getThreadAllocatedBytes( - 
Thread.currentThread().getId() - ); - threadId.set(Thread.currentThread().getId()); - - if (taskTestContext.operationStartValidator != null) { - try { - taskTestContext.operationStartValidator.accept(threadId.get()); - } catch (AssertionError error) { - throw new RuntimeException(error); - } - } - - Object[] allocation1 = new Object[1000000]; // 4MB - - if (blockForCancellation) { - // Simulate a job that takes forever to finish - // Using periodic checks method to identify that the task was cancelled - try { - boolean taskCancelled = waitUntil(((CancellableTask) task)::isCancelled); - if (taskCancelled) { - throw new TaskCancelledException("Task Cancelled"); - } else { - fail("It should have thrown an exception"); - } - } catch (InterruptedException ex) { - Thread.currentThread().interrupt(); - } - - } - - Object[] allocation2 = new Object[1000000]; // 4MB - } - }); - - try { - result.get(); - } catch (InterruptedException | ExecutionException e) { - throw new RuntimeException(e.getCause()); - } finally { - if (taskTestContext.operationFinishedValidator != null) { - taskTestContext.operationFinishedValidator.accept(threadId.get()); - } - } - - return new NodeResponse(clusterService.localNode()); - } - - @Override - protected NodeResponse nodeOperation(ResourceAwareNodeRequest request) { - throw new UnsupportedOperationException("the task parameter is required"); - } - } - - private TaskTestContext startResourceAwareNodesAction( - TestNode node, - boolean blockForCancellation, - TaskTestContext taskTestContext, - ActionListener listener - ) { - NodesRequest request = new NodesRequest("Test Request", node.getNodeId()); - - taskTestContext.requestCompleteLatch = new CountDownLatch(1); - - ResourceAwareNodesAction action = new ResourceAwareNodesAction( - "internal:resourceAction", - threadPool, - node.clusterService, - node.transportService, - blockForCancellation, - taskTestContext - ); - taskTestContext.mainTask = action.execute(request, listener); - return 
taskTestContext; - } - - private static class TaskTestContext { - private Task mainTask; - private CountDownLatch requestCompleteLatch; - private Consumer operationStartValidator; - private Consumer operationFinishedValidator; - private long memoryConsumptionWhenExecutionStarts; - } - - public void testBasicTaskResourceTracking() throws Exception { - setup(true, false); - - final AtomicReference throwableReference = new AtomicReference<>(); - final AtomicReference responseReference = new AtomicReference<>(); - TaskTestContext taskTestContext = new TaskTestContext(); - - Map resourceTasks = testNodes[0].taskResourceTrackingService.getResourceAwareTasks(); - - taskTestContext.operationStartValidator = threadId -> { - Task task = resourceTasks.values().stream().findAny().get(); - - // One thread is currently working on task but not finished - assertEquals(1, resourceTasks.size()); - assertEquals(1, task.getResourceStats().size()); - assertEquals(1, task.getResourceStats().get(threadId).size()); - assertTrue(task.getResourceStats().get(threadId).get(0).isActive()); - assertEquals(0, task.getTotalResourceStats().getCpuTimeInNanos()); - assertEquals(0, task.getTotalResourceStats().getMemoryInBytes()); - }; - - taskTestContext.operationFinishedValidator = threadId -> { - Task task = resourceTasks.values().stream().findAny().get(); - - // Thread has finished working on the task's runnable - assertEquals(1, resourceTasks.size()); - assertEquals(1, task.getResourceStats().size()); - assertEquals(1, task.getResourceStats().get(threadId).size()); - assertFalse(task.getResourceStats().get(threadId).get(0).isActive()); - - long expectedArrayAllocationOverhead = 2 * 4012688; // Task's memory overhead due to array allocations - long actualTaskMemoryOverhead = task.getTotalResourceStats().getMemoryInBytes(); - - assertTrue(actualTaskMemoryOverhead - expectedArrayAllocationOverhead < taskTestContext.memoryConsumptionWhenExecutionStarts); - 
assertTrue(task.getTotalResourceStats().getCpuTimeInNanos() > 0); - }; - - startResourceAwareNodesAction(testNodes[0], false, taskTestContext, new ActionListener() { - @Override - public void onResponse(NodesResponse listTasksResponse) { - responseReference.set(listTasksResponse); - taskTestContext.requestCompleteLatch.countDown(); - } - - @Override - public void onFailure(Exception e) { - throwableReference.set(e); - taskTestContext.requestCompleteLatch.countDown(); - } - }); - - // Waiting for whole request to complete and return successfully till client - taskTestContext.requestCompleteLatch.await(); - - assertTasksRequestFinishedSuccessfully(resourceTasks.size(), responseReference.get(), throwableReference.get()); - } - - public void testTaskResourceTrackingDuringTaskCancellation() throws Exception { - setup(true, false); - - final AtomicReference throwableReference = new AtomicReference<>(); - final AtomicReference responseReference = new AtomicReference<>(); - TaskTestContext taskTestContext = new TaskTestContext(); - - Map resourceTasks = testNodes[0].taskResourceTrackingService.getResourceAwareTasks(); - - taskTestContext.operationStartValidator = threadId -> { - Task task = resourceTasks.values().stream().findAny().get(); - - // One thread is currently working on task but not finished - assertEquals(1, resourceTasks.size()); - assertEquals(1, task.getResourceStats().size()); - assertEquals(1, task.getResourceStats().get(threadId).size()); - assertTrue(task.getResourceStats().get(threadId).get(0).isActive()); - assertEquals(0, task.getTotalResourceStats().getCpuTimeInNanos()); - assertEquals(0, task.getTotalResourceStats().getMemoryInBytes()); - }; - - taskTestContext.operationFinishedValidator = threadId -> { - Task task = resourceTasks.values().stream().findAny().get(); - - // Thread has finished working on the task's runnable - assertEquals(1, resourceTasks.size()); - assertEquals(1, task.getResourceStats().size()); - assertEquals(1, 
task.getResourceStats().get(threadId).size()); - assertFalse(task.getResourceStats().get(threadId).get(0).isActive()); - - // allocations are completed before the task is cancelled - long expectedArrayAllocationOverhead = 4012688; // Task's memory overhead due to array allocations - long taskCancellationOverhead = 30000; // Task cancellation overhead ~ 30Kb - long actualTaskMemoryOverhead = task.getTotalResourceStats().getMemoryInBytes(); - - long expectedOverhead = expectedArrayAllocationOverhead + taskCancellationOverhead; - assertTrue(actualTaskMemoryOverhead - expectedOverhead < taskTestContext.memoryConsumptionWhenExecutionStarts); - assertTrue(task.getTotalResourceStats().getCpuTimeInNanos() > 0); - }; - - startResourceAwareNodesAction(testNodes[0], true, taskTestContext, new ActionListener() { - @Override - public void onResponse(NodesResponse listTasksResponse) { - responseReference.set(listTasksResponse); - taskTestContext.requestCompleteLatch.countDown(); - } - - @Override - public void onFailure(Exception e) { - throwableReference.set(e); - taskTestContext.requestCompleteLatch.countDown(); - } - }); - - // Cancel main task - CancelTasksRequest request = new CancelTasksRequest(); - request.setReason("Cancelling request to verify Task resource tracking behaviour"); - request.setTaskId(new TaskId(testNodes[0].getNodeId(), taskTestContext.mainTask.getId())); - ActionTestUtils.executeBlocking(testNodes[0].transportCancelTasksAction, request); - - // Waiting for whole request to complete and return successfully till client - taskTestContext.requestCompleteLatch.await(); - - assertEquals(0, resourceTasks.size()); - assertNull(throwableReference.get()); - assertNotNull(responseReference.get()); - assertEquals(1, responseReference.get().failureCount()); - assertEquals(TaskCancelledException.class, findActualException(responseReference.get().failures().get(0)).getClass()); - } - - public void testTaskResourceTrackingDisabled() throws Exception { - setup(false, 
false); - - final AtomicReference throwableReference = new AtomicReference<>(); - final AtomicReference responseReference = new AtomicReference<>(); - TaskTestContext taskTestContext = new TaskTestContext(); - - Map resourceTasks = testNodes[0].taskResourceTrackingService.getResourceAwareTasks(); - - taskTestContext.operationStartValidator = threadId -> { assertEquals(0, resourceTasks.size()); }; - - taskTestContext.operationFinishedValidator = threadId -> { assertEquals(0, resourceTasks.size()); }; - - startResourceAwareNodesAction(testNodes[0], false, taskTestContext, new ActionListener() { - @Override - public void onResponse(NodesResponse listTasksResponse) { - responseReference.set(listTasksResponse); - taskTestContext.requestCompleteLatch.countDown(); - } - - @Override - public void onFailure(Exception e) { - throwableReference.set(e); - taskTestContext.requestCompleteLatch.countDown(); - } - }); - - // Waiting for whole request to complete and return successfully till client - taskTestContext.requestCompleteLatch.await(); - - assertTasksRequestFinishedSuccessfully(resourceTasks.size(), responseReference.get(), throwableReference.get()); - } - - public void testTaskResourceTrackingDisabledWhileTaskInProgress() throws Exception { - setup(true, false); - - final AtomicReference throwableReference = new AtomicReference<>(); - final AtomicReference responseReference = new AtomicReference<>(); - TaskTestContext taskTestContext = new TaskTestContext(); - - Map resourceTasks = testNodes[0].taskResourceTrackingService.getResourceAwareTasks(); - - taskTestContext.operationStartValidator = threadId -> { - Task task = resourceTasks.values().stream().findAny().get(); - // One thread is currently working on task but not finished - assertEquals(1, resourceTasks.size()); - assertEquals(1, task.getResourceStats().size()); - assertEquals(1, task.getResourceStats().get(threadId).size()); - assertTrue(task.getResourceStats().get(threadId).get(0).isActive()); - assertEquals(0, 
task.getTotalResourceStats().getCpuTimeInNanos()); - assertEquals(0, task.getTotalResourceStats().getMemoryInBytes()); - - testNodes[0].taskResourceTrackingService.setTaskResourceTrackingEnabled(false); - }; - - taskTestContext.operationFinishedValidator = threadId -> { - Task task = resourceTasks.values().stream().findAny().get(); - // Thread has finished working on the task's runnable - assertEquals(1, resourceTasks.size()); - assertEquals(1, task.getResourceStats().size()); - assertEquals(1, task.getResourceStats().get(threadId).size()); - assertFalse(task.getResourceStats().get(threadId).get(0).isActive()); - - long expectedArrayAllocationOverhead = 2 * 4012688; // Task's memory overhead due to array allocations - long actualTaskMemoryOverhead = task.getTotalResourceStats().getMemoryInBytes(); - - assertTrue(actualTaskMemoryOverhead - expectedArrayAllocationOverhead < taskTestContext.memoryConsumptionWhenExecutionStarts); - assertTrue(task.getTotalResourceStats().getCpuTimeInNanos() > 0); - }; - - startResourceAwareNodesAction(testNodes[0], false, taskTestContext, new ActionListener() { - @Override - public void onResponse(NodesResponse listTasksResponse) { - responseReference.set(listTasksResponse); - taskTestContext.requestCompleteLatch.countDown(); - } - - @Override - public void onFailure(Exception e) { - throwableReference.set(e); - taskTestContext.requestCompleteLatch.countDown(); - } - }); - - // Waiting for whole request to complete and return successfully till client - taskTestContext.requestCompleteLatch.await(); - - assertTasksRequestFinishedSuccessfully(resourceTasks.size(), responseReference.get(), throwableReference.get()); - } - - public void testTaskResourceTrackingEnabledWhileTaskInProgress() throws Exception { - setup(false, false); - - final AtomicReference throwableReference = new AtomicReference<>(); - final AtomicReference responseReference = new AtomicReference<>(); - TaskTestContext taskTestContext = new TaskTestContext(); - - Map 
resourceTasks = testNodes[0].taskResourceTrackingService.getResourceAwareTasks(); - - taskTestContext.operationStartValidator = threadId -> { - assertEquals(0, resourceTasks.size()); - - testNodes[0].taskResourceTrackingService.setTaskResourceTrackingEnabled(true); - }; - - taskTestContext.operationFinishedValidator = threadId -> { assertEquals(0, resourceTasks.size()); }; - - startResourceAwareNodesAction(testNodes[0], false, taskTestContext, new ActionListener() { - @Override - public void onResponse(NodesResponse listTasksResponse) { - responseReference.set(listTasksResponse); - taskTestContext.requestCompleteLatch.countDown(); - } - - @Override - public void onFailure(Exception e) { - throwableReference.set(e); - taskTestContext.requestCompleteLatch.countDown(); - } - }); - - // Waiting for whole request to complete and return successfully till client - taskTestContext.requestCompleteLatch.await(); - - assertTasksRequestFinishedSuccessfully(resourceTasks.size(), responseReference.get(), throwableReference.get()); - } - - public void testOnDemandRefreshWhileFetchingTasks() throws InterruptedException { - setup(true, false); - - final AtomicReference throwableReference = new AtomicReference<>(); - final AtomicReference responseReference = new AtomicReference<>(); - - TaskTestContext taskTestContext = new TaskTestContext(); - - Map resourceTasks = testNodes[0].taskResourceTrackingService.getResourceAwareTasks(); - - taskTestContext.operationStartValidator = threadId -> { - ListTasksResponse listTasksResponse = ActionTestUtils.executeBlocking( - testNodes[0].transportListTasksAction, - new ListTasksRequest().setActions("internal:resourceAction*").setDetailed(true) - ); - - TaskInfo taskInfo = listTasksResponse.getTasks().get(1); - - assertNotNull(taskInfo.getResourceStats()); - assertNotNull(taskInfo.getResourceStats().getResourceUsageInfo()); - assertTrue(taskInfo.getResourceStats().getResourceUsageInfo().get("total").getCpuTimeInNanos() > 0); - 
assertTrue(taskInfo.getResourceStats().getResourceUsageInfo().get("total").getMemoryInBytes() > 0); - }; - - startResourceAwareNodesAction(testNodes[0], false, taskTestContext, new ActionListener() { - @Override - public void onResponse(NodesResponse listTasksResponse) { - responseReference.set(listTasksResponse); - taskTestContext.requestCompleteLatch.countDown(); - } - - @Override - public void onFailure(Exception e) { - throwableReference.set(e); - taskTestContext.requestCompleteLatch.countDown(); - } - }); - - // Waiting for whole request to complete and return successfully till client - taskTestContext.requestCompleteLatch.await(); - - assertTasksRequestFinishedSuccessfully(resourceTasks.size(), responseReference.get(), throwableReference.get()); - } - - public void testTaskIdPersistsInThreadContext() throws InterruptedException { - setup(true, true); - - final List taskIdsAddedToThreadContext = new ArrayList<>(); - final List taskIdsRemovedFromThreadContext = new ArrayList<>(); - AtomicLong actualTaskIdInThreadContext = new AtomicLong(-1); - AtomicLong expectedTaskIdInThreadContext = new AtomicLong(-2); - - ((MockTaskManager) testNodes[0].transportService.getTaskManager()).addListener(new MockTaskManagerListener() { - @Override - public void waitForTaskCompletion(Task task) {} - - @Override - public void taskExecutionStarted(Task task, Boolean closeableInvoked) { - if (closeableInvoked) { - taskIdsRemovedFromThreadContext.add(task.getId()); - } else { - taskIdsAddedToThreadContext.add(task.getId()); - } - } - - @Override - public void onTaskRegistered(Task task) {} - - @Override - public void onTaskUnregistered(Task task) { - if (task.getAction().equals("internal:resourceAction[n]")) { - expectedTaskIdInThreadContext.set(task.getId()); - actualTaskIdInThreadContext.set(threadPool.getThreadContext().getTransient(TASK_ID)); - } - } - }); - - TaskTestContext taskTestContext = new TaskTestContext(); - startResourceAwareNodesAction(testNodes[0], false, 
taskTestContext, new ActionListener() { - @Override - public void onResponse(NodesResponse listTasksResponse) { - taskTestContext.requestCompleteLatch.countDown(); - } - - @Override - public void onFailure(Exception e) { - taskTestContext.requestCompleteLatch.countDown(); - } - }); - - taskTestContext.requestCompleteLatch.await(); - - assertEquals(expectedTaskIdInThreadContext.get(), actualTaskIdInThreadContext.get()); - assertThat(taskIdsAddedToThreadContext, containsInAnyOrder(taskIdsRemovedFromThreadContext.toArray())); - } - - private void setup(boolean resourceTrackingEnabled, boolean useMockTaskManager) { - Settings settings = Settings.builder() - .put("task_resource_tracking.enabled", resourceTrackingEnabled) - .put(MockTaskManager.USE_MOCK_TASK_MANAGER_SETTING.getKey(), useMockTaskManager) - .build(); - setupTestNodes(settings); - connectNodes(testNodes[0]); - - runnableTaskListener.set(testNodes[0].taskResourceTrackingService); - } - - private Throwable findActualException(Exception e) { - Throwable throwable = e.getCause(); - while (throwable.getCause() != null) { - throwable = throwable.getCause(); - } - return throwable; - } - - private void assertTasksRequestFinishedSuccessfully(int activeResourceTasks, NodesResponse nodesResponse, Throwable throwable) { - assertEquals(0, activeResourceTasks); - assertNull(throwable); - assertNotNull(nodesResponse); - assertEquals(0, nodesResponse.failureCount()); - } - -} diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java index 51fc5d80f2de3..c8411b31e0709 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java @@ -59,10 +59,8 @@ import org.opensearch.indices.breaker.NoneCircuitBreakerService; import 
org.opensearch.tasks.TaskCancellationService; import org.opensearch.tasks.TaskManager; -import org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.tasks.MockTaskManager; -import org.opensearch.threadpool.RunnableTaskExecutionListener; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -76,7 +74,6 @@ import java.util.List; import java.util.Set; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; import static java.util.Collections.emptyMap; @@ -92,12 +89,10 @@ public abstract class TaskManagerTestCase extends OpenSearchTestCase { protected ThreadPool threadPool; protected TestNode[] testNodes; protected int nodesCount; - protected AtomicReference runnableTaskListener; @Before public void setupThreadPool() { - runnableTaskListener = new AtomicReference<>(); - threadPool = new TestThreadPool(TransportTasksActionTests.class.getSimpleName(), runnableTaskListener); + threadPool = new TestThreadPool(TransportTasksActionTests.class.getSimpleName()); } public void setupTestNodes(Settings settings) { @@ -230,22 +225,14 @@ protected TaskManager createTaskManager(Settings settings, ThreadPool threadPool transportService.start(); clusterService = createClusterService(threadPool, discoveryNode.get()); clusterService.addStateApplier(transportService.getTaskManager()); - taskResourceTrackingService = new TaskResourceTrackingService(settings, clusterService.getClusterSettings(), threadPool); - transportService.getTaskManager().setTaskResourceTrackingService(taskResourceTrackingService); ActionFilters actionFilters = new ActionFilters(emptySet()); - transportListTasksAction = new TransportListTasksAction( - clusterService, - transportService, - actionFilters, - taskResourceTrackingService - ); + transportListTasksAction = new 
TransportListTasksAction(clusterService, transportService, actionFilters); transportCancelTasksAction = new TransportCancelTasksAction(clusterService, transportService, actionFilters); transportService.acceptIncomingRequests(); } public final ClusterService clusterService; public final TransportService transportService; - public final TaskResourceTrackingService taskResourceTrackingService; private final SetOnce discoveryNode = new SetOnce<>(); public final TransportListTasksAction transportListTasksAction; public final TransportCancelTasksAction transportCancelTasksAction; diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java index 202f1b7dcb5b4..4b98870422ce8 100644 --- a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java @@ -91,7 +91,6 @@ import static java.util.Collections.emptyMap; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.sameInstance; -import static org.mockito.Answers.RETURNS_MOCKS; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyInt; import static org.mockito.Mockito.anyString; @@ -225,7 +224,7 @@ public void setupAction() { remoteResponseHandler = ArgumentCaptor.forClass(TransportResponseHandler.class); // setup services that will be called by action - transportService = mock(TransportService.class, RETURNS_MOCKS); + transportService = mock(TransportService.class); clusterService = mock(ClusterService.class); localIngest = true; // setup nodes for local and remote diff --git a/server/src/test/java/org/opensearch/common/util/concurrent/ThreadContextTests.java b/server/src/test/java/org/opensearch/common/util/concurrent/ThreadContextTests.java index 64286e47b4966..9c70accaca3e4 100644 --- 
a/server/src/test/java/org/opensearch/common/util/concurrent/ThreadContextTests.java +++ b/server/src/test/java/org/opensearch/common/util/concurrent/ThreadContextTests.java @@ -48,7 +48,6 @@ import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.sameInstance; -import static org.opensearch.tasks.TaskResourceTrackingService.TASK_ID; public class ThreadContextTests extends OpenSearchTestCase { @@ -155,15 +154,6 @@ public void testNewContextWithClearedTransients() { assertEquals(1, threadContext.getResponseHeaders().get("baz").size()); } - public void testStashContextWithPreservedTransients() { - ThreadContext threadContext = new ThreadContext(Settings.EMPTY); - threadContext.putTransient("foo", "bar"); - threadContext.putTransient(TASK_ID, 1); - threadContext.stashContext(); - assertNull(threadContext.getTransient("foo")); - assertEquals(1, (int) threadContext.getTransient(TASK_ID)); - } - public void testStashWithOrigin() { final String origin = randomAlphaOfLengthBetween(4, 16); final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 5f303bc774930..a896aab0f70c9 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -198,7 +198,6 @@ import org.opensearch.search.fetch.FetchPhase; import org.opensearch.search.query.QueryPhase; import org.opensearch.snapshots.mockstore.MockEventuallyConsistentRepository; -import org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.disruption.DisruptableMockTransport; import org.opensearch.threadpool.ThreadPool; @@ -1739,8 +1738,6 @@ public void onFailure(final Exception e) { final 
IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver( new ThreadContext(Settings.EMPTY) ); - transportService.getTaskManager() - .setTaskResourceTrackingService(new TaskResourceTrackingService(settings, clusterSettings, threadPool)); repositoriesService = new RepositoriesService( settings, clusterService, diff --git a/server/src/test/java/org/opensearch/tasks/TaskManagerTests.java b/server/src/test/java/org/opensearch/tasks/TaskManagerTests.java index ab49109eb8247..0f09b0de34206 100644 --- a/server/src/test/java/org/opensearch/tasks/TaskManagerTests.java +++ b/server/src/test/java/org/opensearch/tasks/TaskManagerTests.java @@ -40,7 +40,6 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.test.OpenSearchTestCase; -import org.opensearch.threadpool.RunnableTaskExecutionListener; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.FakeTcpChannel; @@ -60,7 +59,6 @@ import java.util.Set; import java.util.concurrent.Phaser; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicReference; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.everyItem; @@ -69,12 +67,10 @@ public class TaskManagerTests extends OpenSearchTestCase { private ThreadPool threadPool; - private AtomicReference runnableTaskListener; @Before public void setupThreadPool() { - runnableTaskListener = new AtomicReference<>(); - threadPool = new TestThreadPool(TransportTasksActionTests.class.getSimpleName(), runnableTaskListener); + threadPool = new TestThreadPool(TransportTasksActionTests.class.getSimpleName()); } @After diff --git a/server/src/test/java/org/opensearch/tasks/TaskResourceTrackingServiceTests.java b/server/src/test/java/org/opensearch/tasks/TaskResourceTrackingServiceTests.java deleted file mode 100644 index 8ba23c5d3219c..0000000000000 --- 
a/server/src/test/java/org/opensearch/tasks/TaskResourceTrackingServiceTests.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.tasks; - -import org.junit.After; -import org.junit.Before; -import org.opensearch.action.admin.cluster.node.tasks.TransportTasksActionTests; -import org.opensearch.action.search.SearchTask; -import org.opensearch.common.settings.ClusterSettings; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.concurrent.ThreadContext; -import org.opensearch.test.OpenSearchTestCase; -import org.opensearch.threadpool.TestThreadPool; -import org.opensearch.threadpool.ThreadPool; - -import java.util.HashMap; -import java.util.concurrent.atomic.AtomicReference; - -import static org.opensearch.tasks.ResourceStats.MEMORY; -import static org.opensearch.tasks.TaskResourceTrackingService.TASK_ID; - -public class TaskResourceTrackingServiceTests extends OpenSearchTestCase { - - private ThreadPool threadPool; - private TaskResourceTrackingService taskResourceTrackingService; - - @Before - public void setup() { - threadPool = new TestThreadPool(TransportTasksActionTests.class.getSimpleName(), new AtomicReference<>()); - taskResourceTrackingService = new TaskResourceTrackingService( - Settings.EMPTY, - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - threadPool - ); - } - - @After - public void terminateThreadPool() { - terminate(threadPool); - } - - public void testThreadContextUpdateOnTrackingStart() { - taskResourceTrackingService.setTaskResourceTrackingEnabled(true); - - Task task = new SearchTask(1, "test", "test", () -> "Test", TaskId.EMPTY_TASK_ID, new HashMap<>()); - - String key = "KEY"; - String value = "VALUE"; - - // Prepare thread context - 
threadPool.getThreadContext().putHeader(key, value); - threadPool.getThreadContext().putTransient(key, value); - threadPool.getThreadContext().addResponseHeader(key, value); - - ThreadContext.StoredContext storedContext = taskResourceTrackingService.startTracking(task); - - // All headers should be preserved and Task Id should also be included in thread context - verifyThreadContextFixedHeaders(key, value); - assertEquals((long) threadPool.getThreadContext().getTransient(TASK_ID), task.getId()); - - storedContext.restore(); - - // Post restore only task id should be removed from the thread context - verifyThreadContextFixedHeaders(key, value); - assertNull(threadPool.getThreadContext().getTransient(TASK_ID)); - } - - public void testStopTrackingHandlesCurrentActiveThread() { - taskResourceTrackingService.setTaskResourceTrackingEnabled(true); - Task task = new SearchTask(1, "test", "test", () -> "Test", TaskId.EMPTY_TASK_ID, new HashMap<>()); - ThreadContext.StoredContext storedContext = taskResourceTrackingService.startTracking(task); - long threadId = Thread.currentThread().getId(); - taskResourceTrackingService.taskExecutionStartedOnThread(task.getId(), threadId); - - assertTrue(task.getResourceStats().get(threadId).get(0).isActive()); - assertEquals(0, task.getResourceStats().get(threadId).get(0).getResourceUsageInfo().getStatsInfo().get(MEMORY).getTotalValue()); - - taskResourceTrackingService.stopTracking(task); - - // Makes sure stop tracking marks the current active thread inactive and refreshes the resource stats before returning. 
- assertFalse(task.getResourceStats().get(threadId).get(0).isActive()); - assertTrue(task.getResourceStats().get(threadId).get(0).getResourceUsageInfo().getStatsInfo().get(MEMORY).getTotalValue() > 0); - } - - private void verifyThreadContextFixedHeaders(String key, String value) { - assertEquals(threadPool.getThreadContext().getHeader(key), value); - assertEquals(threadPool.getThreadContext().getTransient(key), value); - assertEquals(threadPool.getThreadContext().getResponseHeaders().get(key).get(0), value); - } - -} diff --git a/test/framework/src/main/java/org/opensearch/test/tasks/MockTaskManager.java b/test/framework/src/main/java/org/opensearch/test/tasks/MockTaskManager.java index 677ec7a0a6600..e60871f67ea54 100644 --- a/test/framework/src/main/java/org/opensearch/test/tasks/MockTaskManager.java +++ b/test/framework/src/main/java/org/opensearch/test/tasks/MockTaskManager.java @@ -39,7 +39,6 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskAwareRequest; import org.opensearch.tasks.TaskManager; @@ -128,21 +127,6 @@ public void waitForTaskCompletion(Task task, long untilInNanos) { super.waitForTaskCompletion(task, untilInNanos); } - @Override - public ThreadContext.StoredContext taskExecutionStarted(Task task) { - for (MockTaskManagerListener listener : listeners) { - listener.taskExecutionStarted(task, false); - } - - ThreadContext.StoredContext storedContext = super.taskExecutionStarted(task); - return () -> { - for (MockTaskManagerListener listener : listeners) { - listener.taskExecutionStarted(task, true); - } - storedContext.restore(); - }; - } - public void addListener(MockTaskManagerListener listener) { listeners.add(listener); } diff --git a/test/framework/src/main/java/org/opensearch/test/tasks/MockTaskManagerListener.java 
b/test/framework/src/main/java/org/opensearch/test/tasks/MockTaskManagerListener.java index f15f878995aa2..eb8361ac552fc 100644 --- a/test/framework/src/main/java/org/opensearch/test/tasks/MockTaskManagerListener.java +++ b/test/framework/src/main/java/org/opensearch/test/tasks/MockTaskManagerListener.java @@ -43,7 +43,4 @@ public interface MockTaskManagerListener { void onTaskUnregistered(Task task); void waitForTaskCompletion(Task task); - - void taskExecutionStarted(Task task, Boolean closeableInvoked); - } diff --git a/test/framework/src/main/java/org/opensearch/threadpool/TestThreadPool.java b/test/framework/src/main/java/org/opensearch/threadpool/TestThreadPool.java index 2d97d5bffee01..5f8611d99f0a0 100644 --- a/test/framework/src/main/java/org/opensearch/threadpool/TestThreadPool.java +++ b/test/framework/src/main/java/org/opensearch/threadpool/TestThreadPool.java @@ -40,7 +40,6 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.ThreadFactory; import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.atomic.AtomicReference; public class TestThreadPool extends ThreadPool { @@ -48,29 +47,12 @@ public class TestThreadPool extends ThreadPool { private volatile boolean returnRejectingExecutor = false; private volatile ThreadPoolExecutor rejectingExecutor; - public TestThreadPool( - String name, - AtomicReference runnableTaskListener, - ExecutorBuilder... customBuilders - ) { - this(name, Settings.EMPTY, runnableTaskListener, customBuilders); - } - public TestThreadPool(String name, ExecutorBuilder... customBuilders) { this(name, Settings.EMPTY, customBuilders); } public TestThreadPool(String name, Settings settings, ExecutorBuilder... customBuilders) { - this(name, settings, null, customBuilders); - } - - public TestThreadPool( - String name, - Settings settings, - AtomicReference runnableTaskListener, - ExecutorBuilder... 
customBuilders - ) { - super(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), name).put(settings).build(), runnableTaskListener, customBuilders); + super(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), name).put(settings).build(), customBuilders); } @Override From f3404fdeecf056a27201c98a1fb52a26b01ae161 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Apr 2022 11:45:43 -0500 Subject: [PATCH 120/653] Bump protobuf-java from 3.20.0 to 3.20.1 in /plugins/repository-hdfs (#3062) * Bump protobuf-java from 3.20.0 to 3.20.1 in /plugins/repository-hdfs Bumps [protobuf-java](https://github.com/protocolbuffers/protobuf) from 3.20.0 to 3.20.1. - [Release notes](https://github.com/protocolbuffers/protobuf/releases) - [Changelog](https://github.com/protocolbuffers/protobuf/blob/main/generate_changelog.py) - [Commits](https://github.com/protocolbuffers/protobuf/compare/v3.20.0...v3.20.1) --- updated-dependencies: - dependency-name: com.google.protobuf:protobuf-java dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] --- plugins/repository-hdfs/build.gradle | 2 +- plugins/repository-hdfs/licenses/protobuf-java-3.20.0.jar.sha1 | 1 - plugins/repository-hdfs/licenses/protobuf-java-3.20.1.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-hdfs/licenses/protobuf-java-3.20.0.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/protobuf-java-3.20.1.jar.sha1 diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 02ac822f94995..0a1e0bde3af2f 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -67,7 +67,7 @@ dependencies { api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" api 'com.google.code.gson:gson:2.9.0' runtimeOnly 'com.google.guava:guava:31.1-jre' - api 'com.google.protobuf:protobuf-java:3.20.0' + api 'com.google.protobuf:protobuf-java:3.20.1' api "commons-logging:commons-logging:${versions.commonslogging}" api 'commons-cli:commons-cli:1.2' api "commons-codec:commons-codec:${versions.commonscodec}" diff --git a/plugins/repository-hdfs/licenses/protobuf-java-3.20.0.jar.sha1 b/plugins/repository-hdfs/licenses/protobuf-java-3.20.0.jar.sha1 deleted file mode 100644 index c5b0169ce0dba..0000000000000 --- a/plugins/repository-hdfs/licenses/protobuf-java-3.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3c72ddaaab7ffafe789e4f732c1fd614eb798bf4 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/protobuf-java-3.20.1.jar.sha1 b/plugins/repository-hdfs/licenses/protobuf-java-3.20.1.jar.sha1 new file mode 100644 index 0000000000000..1ebc9838b7bea --- /dev/null +++ b/plugins/repository-hdfs/licenses/protobuf-java-3.20.1.jar.sha1 @@ -0,0 +1 @@ +5472700cd39a46060efbd35e29cb36b3fb89517b \ No newline at end of file From abad0c5e6e3359e35b4230d5b07527c6e47504a6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Apr 2022 11:48:13 -0500 Subject: [PATCH 121/653] Bump com.gradle.enterprise from 3.9 to 3.10 (#3055) Bumps com.gradle.enterprise from 3.9 to 3.10. --- updated-dependencies: - dependency-name: com.gradle.enterprise dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- settings.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/settings.gradle b/settings.gradle index 183a5ec8d1ae1..52e1e16fc1c01 100644 --- a/settings.gradle +++ b/settings.gradle @@ -10,7 +10,7 @@ */ plugins { - id "com.gradle.enterprise" version "3.9" + id "com.gradle.enterprise" version "3.10" } rootProject.name = "OpenSearch" From c71dba0dfe4af550abeef8fb7b4146e7c3f9ba66 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Apr 2022 11:50:44 -0500 Subject: [PATCH 122/653] Bump reactor-netty-http from 1.0.16 to 1.0.18 in /plugins/repository-azure (#3057) * Bump reactor-netty-http in /plugins/repository-azure Bumps [reactor-netty-http](https://github.com/reactor/reactor-netty) from 1.0.16 to 1.0.18. - [Release notes](https://github.com/reactor/reactor-netty/releases) - [Commits](https://github.com/reactor/reactor-netty/compare/v1.0.16...v1.0.18) --- updated-dependencies: - dependency-name: io.projectreactor.netty:reactor-netty-http dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] --- plugins/repository-azure/build.gradle | 2 +- .../licenses/reactor-netty-http-1.0.16.jar.sha1 | 1 - .../licenses/reactor-netty-http-1.0.18.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-azure/licenses/reactor-netty-http-1.0.16.jar.sha1 create mode 100644 plugins/repository-azure/licenses/reactor-netty-http-1.0.18.jar.sha1 diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index a18f18cea185e..d5bbd23325cd0 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -59,7 +59,7 @@ dependencies { api 'io.projectreactor:reactor-core:3.4.17' api 'io.projectreactor.netty:reactor-netty:1.0.17' api 'io.projectreactor.netty:reactor-netty-core:1.0.16' - api 'io.projectreactor.netty:reactor-netty-http:1.0.16' + api 'io.projectreactor.netty:reactor-netty-http:1.0.18' api "org.slf4j:slf4j-api:${versions.slf4j}" api "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.0.16.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.0.16.jar.sha1 deleted file mode 100644 index d737315b06b62..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-http-1.0.16.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -93edb9a1dc774d843551a616e0f316e11ffa81ed \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.0.18.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.0.18.jar.sha1 new file mode 100644 index 0000000000000..43599c0b6c691 --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-http-1.0.18.jar.sha1 @@ -0,0 +1 @@ +a34930cbd46b53ffdb19d2089605f39589eb2b99 \ No newline at end of file From ee7b731975d9044d3239ee3a4fe97f50d45af3e5 Mon Sep 
17 00:00:00 2001 From: Kartik Ganesh Date: Mon, 25 Apr 2022 10:51:02 -0700 Subject: [PATCH 123/653] Adds the replication type index setting, alongside a formal notion of feature flags (#3037) * This change formalizes the notion of feature flags, and adds a "replication type" setting that will differentiate between document and segment replication, gated by a feature flag. Since seg-rep is currently an incomplete implementation, the feature flag ensures that the setting is not visible to users without explicitly setting a system property. We can then continue to merge seg-rep related changes from the feature branch to `main` safely hidden behind the feature flag gate. Signed-off-by: Kartik Ganesh * Update security policy for testing feature flags Signed-off-by: Nicholas Walter Knize Co-authored-by: Nicholas Walter Knize --- .../cluster/metadata/IndexMetadata.java | 13 ++++++ .../common/settings/IndexScopedSettings.java | 11 +++++ .../common/settings/SettingsModule.java | 7 +++ .../opensearch/common/util/FeatureFlags.java | 32 ++++++++++++++ .../org/opensearch/index/IndexSettings.java | 10 +++++ .../replication/common/ReplicationType.java | 30 +++++++++++++ .../org/opensearch/bootstrap/security.policy | 3 ++ .../common/util/FeatureFlagTests.java | 43 +++++++++++++++++++ 8 files changed, 149 insertions(+) create mode 100644 server/src/main/java/org/opensearch/common/util/FeatureFlags.java create mode 100644 server/src/main/java/org/opensearch/indices/replication/common/ReplicationType.java create mode 100644 server/src/test/java/org/opensearch/common/util/FeatureFlagTests.java diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index 6510c57060fe0..9139cbac2b0be 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -70,6 +70,7 @@ import 
org.opensearch.index.mapper.MapperService; import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.shard.ShardId; +import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.rest.RestStatus; import java.io.IOException; @@ -260,6 +261,18 @@ public Iterator> settings() { Property.IndexScope ); + /** + * Used to specify the replication type for the index. By default, document replication is used. + */ + public static final String SETTING_REPLICATION_TYPE = "index.replication.type"; + public static final Setting INDEX_REPLICATION_TYPE_SETTING = new Setting<>( + SETTING_REPLICATION_TYPE, + ReplicationType.DOCUMENT.toString(), + ReplicationType::parseString, + Property.IndexScope, + Property.Final + ); + public static final String SETTING_AUTO_EXPAND_REPLICAS = "index.auto_expand_replicas"; public static final Setting INDEX_AUTO_EXPAND_REPLICAS_SETTING = AutoExpandReplicas.SETTING; diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index 528d6cc9f5e23..68e1b5b598d40 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -40,6 +40,7 @@ import org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; import org.opensearch.common.logging.Loggers; import org.opensearch.common.settings.Setting.Property; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; import org.opensearch.index.IndexSortConfig; @@ -207,6 +208,16 @@ public final class IndexScopedSettings extends AbstractScopedSettings { ) ); + /** + * Map of feature flag name to feature-flagged index setting. 
Once each feature + * is ready for production release, the feature flag can be removed, and the + * setting should be moved to {@link #BUILT_IN_INDEX_SETTINGS}. + */ + public static final Map FEATURE_FLAGGED_INDEX_SETTINGS = Map.of( + FeatureFlags.REPLICATION_TYPE, + IndexMetadata.INDEX_REPLICATION_TYPE_SETTING + ); + public static final IndexScopedSettings DEFAULT_SCOPED_SETTINGS = new IndexScopedSettings(Settings.EMPTY, BUILT_IN_INDEX_SETTINGS); public IndexScopedSettings(Settings settings, Set> settingsSet) { diff --git a/server/src/main/java/org/opensearch/common/settings/SettingsModule.java b/server/src/main/java/org/opensearch/common/settings/SettingsModule.java index 79ee0bf9f975a..0874814f940d4 100644 --- a/server/src/main/java/org/opensearch/common/settings/SettingsModule.java +++ b/server/src/main/java/org/opensearch/common/settings/SettingsModule.java @@ -37,6 +37,7 @@ import org.opensearch.common.Strings; import org.opensearch.common.inject.Binder; import org.opensearch.common.inject.Module; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.ToXContent; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentType; @@ -85,6 +86,12 @@ public SettingsModule( registerSetting(setting); } + for (Map.Entry featureFlaggedSetting : IndexScopedSettings.FEATURE_FLAGGED_INDEX_SETTINGS.entrySet()) { + if (FeatureFlags.isEnabled(featureFlaggedSetting.getKey())) { + registerSetting(featureFlaggedSetting.getValue()); + } + } + for (Setting setting : additionalSettings) { registerSetting(setting); } diff --git a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java new file mode 100644 index 0000000000000..34c613f5423d0 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java @@ -0,0 +1,32 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require 
contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.util; + +/** + * Utility class to manage feature flags. Feature flags are system properties that must be set on the JVM. + * These are used to gate the visibility/availability of incomplete features. Fore more information, see + * https://featureflags.io/feature-flag-introduction/ + */ +public class FeatureFlags { + + /** + * Gates the visibility of the index setting that allows changing of replication type. + * Once the feature is ready for production release, this feature flag can be removed. + */ + public static final String REPLICATION_TYPE = "opensearch.experimental.feature.replication_type.enabled"; + + /** + * Used to test feature flags whose values are expected to be booleans. + * This method returns true if the value is "true" (case-insensitive), + * and false otherwise. + */ + public static boolean isEnabled(String featureFlagName) { + return "true".equalsIgnoreCase(System.getProperty(featureFlagName)); + } +} diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index aa69417af1897..8ba9c47902115 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -46,6 +46,7 @@ import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; import org.opensearch.index.translog.Translog; +import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.ingest.IngestService; import org.opensearch.node.Node; @@ -530,6 +531,7 @@ public final class IndexSettings { private final String nodeName; private final Settings nodeSettings; private final int numberOfShards; + private final ReplicationType replicationType; // volatile fields are updated via #updateIndexMetadata(IndexMetadata) under lock 
private volatile Settings settings; private volatile IndexMetadata indexMetadata; @@ -681,6 +683,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti nodeName = Node.NODE_NAME_SETTING.get(settings); this.indexMetadata = indexMetadata; numberOfShards = settings.getAsInt(IndexMetadata.SETTING_NUMBER_OF_SHARDS, null); + replicationType = ReplicationType.parseString(settings.get(IndexMetadata.SETTING_REPLICATION_TYPE)); this.searchThrottled = INDEX_SEARCH_THROTTLED.get(settings); this.queryStringLenient = QUERY_STRING_LENIENT_SETTING.get(settings); @@ -915,6 +918,13 @@ public int getNumberOfReplicas() { return settings.getAsInt(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, null); } + /** + * Returns true if segment replication is enabled on the index. + */ + public boolean isSegRepEnabled() { + return ReplicationType.SEGMENT.equals(replicationType); + } + /** * Returns the node settings. The settings returned from {@link #getSettings()} are a merged version of the * index settings and the node settings where node settings are overwritten by index settings. diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationType.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationType.java new file mode 100644 index 0000000000000..98d68d67ba5e3 --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationType.java @@ -0,0 +1,30 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.replication.common; + +/** + * Enumerates the types of replication strategies supported by OpenSearch. 
+ * For more information, see https://github.com/opensearch-project/OpenSearch/issues/1694 + */ +public enum ReplicationType { + + DOCUMENT, + SEGMENT; + + public static ReplicationType parseString(String replicationType) { + try { + return ReplicationType.valueOf(replicationType); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException("Could not parse ReplicationStrategy for [" + replicationType + "]"); + } catch (NullPointerException npe) { + // return a default value for null input + return DOCUMENT; + } + } +} diff --git a/server/src/main/resources/org/opensearch/bootstrap/security.policy b/server/src/main/resources/org/opensearch/bootstrap/security.policy index 05d648212bc40..3671782b9d12f 100644 --- a/server/src/main/resources/org/opensearch/bootstrap/security.policy +++ b/server/src/main/resources/org/opensearch/bootstrap/security.policy @@ -120,6 +120,9 @@ grant { // TODO: set this with gradle or some other way that repros with seed? permission java.util.PropertyPermission "processors.override", "write"; + // needed for feature flags + permission java.util.PropertyPermission "opensearch.experimental.feature.*", "write"; + // TODO: these simply trigger a noisy warning if its unable to clear the properties // fix that in randomizedtesting permission java.util.PropertyPermission "junit4.childvm.count", "write"; diff --git a/server/src/test/java/org/opensearch/common/util/FeatureFlagTests.java b/server/src/test/java/org/opensearch/common/util/FeatureFlagTests.java new file mode 100644 index 0000000000000..1084f9c658db4 --- /dev/null +++ b/server/src/test/java/org/opensearch/common/util/FeatureFlagTests.java @@ -0,0 +1,43 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.util; + +import org.junit.BeforeClass; +import org.opensearch.common.SuppressForbidden; +import org.opensearch.test.OpenSearchTestCase; + +import java.security.AccessController; +import java.security.PrivilegedAction; + +public class FeatureFlagTests extends OpenSearchTestCase { + + @SuppressForbidden(reason = "sets the feature flag") + @BeforeClass + public static void enableFeature() { + AccessController.doPrivileged((PrivilegedAction) () -> System.setProperty(FeatureFlags.REPLICATION_TYPE, "true")); + } + + public void testReplicationTypeFeatureFlag() { + String replicationTypeFlag = FeatureFlags.REPLICATION_TYPE; + assertNotNull(System.getProperty(replicationTypeFlag)); + assertTrue(FeatureFlags.isEnabled(replicationTypeFlag)); + } + + public void testMissingFeatureFlag() { + String testFlag = "missingFeatureFlag"; + assertNull(System.getProperty(testFlag)); + assertFalse(FeatureFlags.isEnabled(testFlag)); + } + + public void testNonBooleanFeatureFlag() { + String javaVersionProperty = "java.version"; + assertNotNull(System.getProperty(javaVersionProperty)); + assertFalse(FeatureFlags.isEnabled(javaVersionProperty)); + } +} From 0bab4730b602408901843df02c8d10a0f4d26744 Mon Sep 17 00:00:00 2001 From: Peng Huo Date: Mon, 25 Apr 2022 14:07:14 -0700 Subject: [PATCH 124/653] Correct the skip version, multi_terms aggregation is supported on 2.1 (#3072) Signed-off-by: Peng Huo --- .../search.aggregation/370_multi_terms.yml | 48 +++++++++---------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/370_multi_terms.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/370_multi_terms.yml index a0e4762ea9b53..4da0f8eeed39b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/370_multi_terms.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/370_multi_terms.yml @@ -46,8 
+46,8 @@ setup: --- "Basic test": - skip: - version: "- 2.9.99" - reason: multi_terms aggregation is introduced in 3.0.0 + version: "- 2.0.99" + reason: multi_terms aggregation is introduced in 2.1.0 - do: bulk: @@ -96,8 +96,8 @@ setup: --- "IP test": - skip: - version: "- 2.9.99" - reason: multi_terms aggregation is introduced in 3.0.0 + version: "- 2.0.99" + reason: multi_terms aggregation is introduced in 2.1.0 - do: bulk: @@ -146,8 +146,8 @@ setup: --- "Boolean test": - skip: - version: "- 2.9.99" - reason: multi_terms aggregation is introduced in 3.0.0 + version: "- 2.0.99" + reason: multi_terms aggregation is introduced in 2.1.0 - do: bulk: @@ -196,8 +196,8 @@ setup: --- "Double test": - skip: - version: "- 2.9.99" - reason: multi_terms aggregation is introduced in 3.0.0 + version: "- 2.0.99" + reason: multi_terms aggregation is introduced in 2.1.0 - do: bulk: @@ -239,8 +239,8 @@ setup: --- "Date test": - skip: - version: "- 2.9.99" - reason: multi_terms aggregation is introduced in 3.0.0 + version: "- 2.0.99" + reason: multi_terms aggregation is introduced in 2.1.0 - do: bulk: @@ -282,8 +282,8 @@ setup: --- "Unmapped keywords": - skip: - version: "- 2.9.99" - reason: multi_terms aggregation is introduced in 3.0.0 + version: "- 2.0.99" + reason: multi_terms aggregation is introduced in 2.1.0 - do: bulk: @@ -322,8 +322,8 @@ setup: --- "Null value": - skip: - version: "- 2.9.99" - reason: multi_terms aggregation is introduced in 3.0.0 + version: "- 2.0.99" + reason: multi_terms aggregation is introduced in 2.1.0 - do: bulk: @@ -357,8 +357,8 @@ setup: --- "multiple multi_terms bucket": - skip: - version: "- 2.9.99" - reason: multi_terms aggregation is introduced in 3.0.0 + version: "- 2.0.99" + reason: multi_terms aggregation is introduced in 2.1.0 - do: bulk: @@ -409,8 +409,8 @@ setup: --- "ordered by metrics": - skip: - version: "- 3.0.0" - reason: multi_terms aggregation is introduced in 3.0.0 + version: "- 2.0.99" + reason: multi_terms aggregation is 
introduced in 2.1.0 - do: bulk: @@ -457,8 +457,8 @@ setup: --- "top 1 ordered by metrics ": - skip: - version: "- 2.9.99" - reason: multi_terms aggregation is introduced in 3.0.0 + version: "- 2.0.99" + reason: multi_terms aggregation is introduced in 2.1.0 - do: bulk: @@ -502,8 +502,8 @@ setup: --- "min_doc_count": - skip: - version: "- 2.9.99" - reason: multi_terms aggregation is introduced in 3.0.0 + version: "- 2.0.99" + reason: multi_terms aggregation is introduced in 2.1.0 - do: bulk: @@ -574,8 +574,8 @@ setup: --- "sum_other_doc_count": - skip: - version: "- 2.9.99" - reason: multi_terms aggregation is introduced in 3.0.0 + version: "- 2.0.99" + reason: multi_terms aggregation is introduced in 2.1.0 - do: bulk: From c5ff8d62bf93c78493a36a3a5ef22fad8d0a4bce Mon Sep 17 00:00:00 2001 From: Suraj Singh Date: Tue, 26 Apr 2022 10:21:31 -0700 Subject: [PATCH 125/653] [Type removal] Remove _type support in NOOP bulk indexing from client benchmark (#3076) * [Type removal] Remove _type support in bulk indexing from client benchmark Signed-off-by: Suraj Singh * Update README Signed-off-by: Suraj Singh --- client/benchmark/README.md | 3 +-- .../client/benchmark/AbstractBenchmark.java | 13 ++++++------- .../client/benchmark/rest/RestClientBenchmark.java | 10 +++++----- 3 files changed, 12 insertions(+), 14 deletions(-) diff --git a/client/benchmark/README.md b/client/benchmark/README.md index ee99a1384d27e..2732586b9e575 100644 --- a/client/benchmark/README.md +++ b/client/benchmark/README.md @@ -29,7 +29,7 @@ Example invocation: wget http://benchmarks.elasticsearch.org.s3.amazonaws.com/corpora/geonames/documents-2.json.bz2 bzip2 -d documents-2.json.bz2 mv documents-2.json client/benchmark/build -gradlew -p client/benchmark run --args ' rest bulk localhost build/documents-2.json geonames type 8647880 5000' +gradlew -p client/benchmark run --args ' rest bulk localhost build/documents-2.json geonames 8647880 5000' ``` The parameters are all in the `'`s and are in order: @@ 
-39,7 +39,6 @@ The parameters are all in the `'`s and are in order: * Benchmark target host IP (the host where OpenSearch is running) * full path to the file that should be bulk indexed * name of the index -* name of the (sole) type in the index * number of documents in the file * bulk size diff --git a/client/benchmark/src/main/java/org/opensearch/client/benchmark/AbstractBenchmark.java b/client/benchmark/src/main/java/org/opensearch/client/benchmark/AbstractBenchmark.java index de9d075cb9a16..ab0a0d6b8a19c 100644 --- a/client/benchmark/src/main/java/org/opensearch/client/benchmark/AbstractBenchmark.java +++ b/client/benchmark/src/main/java/org/opensearch/client/benchmark/AbstractBenchmark.java @@ -49,7 +49,7 @@ public abstract class AbstractBenchmark { protected abstract T client(String benchmarkTargetHost) throws Exception; - protected abstract BulkRequestExecutor bulkRequestExecutor(T client, String indexName, String typeName); + protected abstract BulkRequestExecutor bulkRequestExecutor(T client, String indexName); protected abstract SearchRequestExecutor searchRequestExecutor(T client, String indexName); @@ -76,16 +76,15 @@ public final void run(String[] args) throws Exception { @SuppressForbidden(reason = "system out is ok for a command line tool") private void runBulkIndexBenchmark(String[] args) throws Exception { - if (args.length != 7) { - System.err.println("usage: 'bulk' benchmarkTargetHostIp indexFilePath indexName typeName numberOfDocuments bulkSize"); + if (args.length != 6) { + System.err.println("usage: 'bulk' benchmarkTargetHostIp indexFilePath indexName numberOfDocuments bulkSize"); System.exit(1); } String benchmarkTargetHost = args[1]; String indexFilePath = args[2]; String indexName = args[3]; - String typeName = args[4]; - int totalDocs = Integer.valueOf(args[5]); - int bulkSize = Integer.valueOf(args[6]); + int totalDocs = Integer.valueOf(args[4]); + int bulkSize = Integer.valueOf(args[5]); int totalIterationCount = (int) 
Math.floor(totalDocs / bulkSize); // consider 40% of all iterations as warmup iterations @@ -97,7 +96,7 @@ private void runBulkIndexBenchmark(String[] args) throws Exception { BenchmarkRunner benchmark = new BenchmarkRunner( warmupIterations, iterations, - new BulkBenchmarkTask(bulkRequestExecutor(client, indexName, typeName), indexFilePath, warmupIterations, iterations, bulkSize) + new BulkBenchmarkTask(bulkRequestExecutor(client, indexName), indexFilePath, warmupIterations, iterations, bulkSize) ); try { diff --git a/client/benchmark/src/main/java/org/opensearch/client/benchmark/rest/RestClientBenchmark.java b/client/benchmark/src/main/java/org/opensearch/client/benchmark/rest/RestClientBenchmark.java index 073fd5eab5c46..d2d7163b8dee2 100644 --- a/client/benchmark/src/main/java/org/opensearch/client/benchmark/rest/RestClientBenchmark.java +++ b/client/benchmark/src/main/java/org/opensearch/client/benchmark/rest/RestClientBenchmark.java @@ -65,8 +65,8 @@ protected RestClient client(String benchmarkTargetHost) { } @Override - protected BulkRequestExecutor bulkRequestExecutor(RestClient client, String indexName, String typeName) { - return new RestBulkRequestExecutor(client, indexName, typeName); + protected BulkRequestExecutor bulkRequestExecutor(RestClient client, String indexName) { + return new RestBulkRequestExecutor(client, indexName); } @Override @@ -78,9 +78,9 @@ private static final class RestBulkRequestExecutor implements BulkRequestExecuto private final RestClient client; private final String actionMetadata; - RestBulkRequestExecutor(RestClient client, String index, String type) { + RestBulkRequestExecutor(RestClient client, String index) { this.client = client; - this.actionMetadata = String.format(Locale.ROOT, "{ \"index\" : { \"_index\" : \"%s\", \"_type\" : \"%s\" } }%n", index, type); + this.actionMetadata = String.format(Locale.ROOT, "{ \"index\" : { \"_index\" : \"%s\" } }%n", index); } @Override @@ -91,7 +91,7 @@ public boolean bulkIndex(List 
bulkData) { bulkRequestBody.append(bulkItem); bulkRequestBody.append("\n"); } - Request request = new Request("POST", "/geonames/type/_noop_bulk"); + Request request = new Request("POST", "/geonames/_noop_bulk"); request.setJsonEntity(bulkRequestBody.toString()); try { Response response = client.performRequest(request); From 6b641d2fd29b9542b94f419fc083847fa5bdf55b Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Tue, 26 Apr 2022 15:20:10 -0500 Subject: [PATCH 126/653] [Remove] Type from nested fields using new metadata field mapper (#3004) * [Remove] Type from nested fields using new metadata field mapper types support is removed yet nested documents use the _type field to store the path for nested documents. A new _nested_path metadata field mapper is added to take the place of the _type field in order to remove the type dependency in nested documents. BWC is handled in the new field mapper to ensure compatibility with older versions. Signed-off-by: Nicholas Walter Knize * pr fixes Signed-off-by: Nicholas Walter Knize * add test to merge same mapping with empty index settings Signed-off-by: Nicholas Walter Knize --- .../index/mapper/DocumentParser.java | 12 ++- .../index/mapper/NestedPathFieldMapper.java | 96 +++++++++++++++++++ .../opensearch/index/mapper/ObjectMapper.java | 21 ++-- .../org/opensearch/indices/IndicesModule.java | 2 + .../indices/mapper/MapperRegistry.java | 7 +- .../index/mapper/DocumentParserTests.java | 6 +- .../FieldAliasMapperValidationTests.java | 4 +- .../index/mapper/NestedObjectMapperTests.java | 6 +- .../mapper/NestedPathFieldMapperTests.java | 47 +++++++++ .../index/search/NestedHelperTests.java | 7 +- .../search/nested/NestedSortingTests.java | 66 ++++++------- .../index/shard/ShardSplittingQueryTests.java | 10 +- .../indices/IndicesModuleTests.java | 16 +++- .../indices/IndicesServiceTests.java | 9 +- .../bucket/nested/NestedAggregatorTests.java | 30 +++--- .../nested/ReverseNestedAggregatorTests.java | 6 +- 
.../terms/RareTermsAggregatorTests.java | 4 +- .../bucket/terms/TermsAggregatorTests.java | 4 +- .../search/sort/FieldSortBuilderTests.java | 4 +- .../sort/GeoDistanceSortBuilderTests.java | 4 +- .../search/sort/ScriptSortBuilderTests.java | 4 +- 21 files changed, 264 insertions(+), 101 deletions(-) create mode 100644 server/src/main/java/org/opensearch/index/mapper/NestedPathFieldMapper.java create mode 100644 server/src/test/java/org/opensearch/index/mapper/NestedPathFieldMapperTests.java diff --git a/server/src/main/java/org/opensearch/index/mapper/DocumentParser.java b/server/src/main/java/org/opensearch/index/mapper/DocumentParser.java index bcafddd6d5816..f9d6187d60eb8 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/opensearch/index/mapper/DocumentParser.java @@ -455,21 +455,23 @@ private static void innerParseObject( private static void nested(ParseContext context, ObjectMapper.Nested nested) { ParseContext.Document nestedDoc = context.doc(); ParseContext.Document parentDoc = nestedDoc.getParent(); + Version indexVersion = context.indexSettings().getIndexVersionCreated(); if (nested.isIncludeInParent()) { - addFields(nestedDoc, parentDoc); + addFields(indexVersion, nestedDoc, parentDoc); } if (nested.isIncludeInRoot()) { ParseContext.Document rootDoc = context.rootDoc(); // don't add it twice, if its included in parent, and we are handling the master doc... 
if (!nested.isIncludeInParent() || parentDoc != rootDoc) { - addFields(nestedDoc, rootDoc); + addFields(indexVersion, nestedDoc, rootDoc); } } } - private static void addFields(ParseContext.Document nestedDoc, ParseContext.Document rootDoc) { + private static void addFields(Version indexVersion, ParseContext.Document nestedDoc, ParseContext.Document rootDoc) { + String nestedPathFieldName = NestedPathFieldMapper.name(indexVersion); for (IndexableField field : nestedDoc.getFields()) { - if (!field.name().equals(TypeFieldMapper.NAME)) { + if (field.name().equals(nestedPathFieldName) == false) { rootDoc.add(field); } } @@ -498,7 +500,7 @@ private static ParseContext nestedContext(ParseContext context, ObjectMapper map // the type of the nested doc starts with __, so we can identify that its a nested one in filters // note, we don't prefix it with the type of the doc since it allows us to execute a nested query // across types (for example, with similar nested objects) - nestedDoc.add(new Field(TypeFieldMapper.NAME, mapper.nestedTypePathAsString(), TypeFieldMapper.Defaults.NESTED_FIELD_TYPE)); + nestedDoc.add(NestedPathFieldMapper.field(context.indexSettings().getIndexVersionCreated(), mapper.nestedTypePath())); return context; } diff --git a/server/src/main/java/org/opensearch/index/mapper/NestedPathFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/NestedPathFieldMapper.java new file mode 100644 index 0000000000000..f420897ca187f --- /dev/null +++ b/server/src/main/java/org/opensearch/index/mapper/NestedPathFieldMapper.java @@ -0,0 +1,96 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.mapper; + +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.util.BytesRef; +import org.opensearch.Version; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.search.lookup.SearchLookup; + +import java.util.Collections; + +public class NestedPathFieldMapper extends MetadataFieldMapper { + // OpenSearch version 2.0 removed types; this name is used for bwc + public static final String LEGACY_NAME = "_type"; + public static final String NAME = "_nested_path"; + + public static class Defaults { + public static final FieldType FIELD_TYPE = new FieldType(); + static { + FIELD_TYPE.setIndexOptions(IndexOptions.DOCS); + FIELD_TYPE.setTokenized(false); + FIELD_TYPE.setStored(false); + FIELD_TYPE.setOmitNorms(true); + FIELD_TYPE.freeze(); + } + } + + /** private ctor; using SINGLETON to control BWC */ + private NestedPathFieldMapper(String name) { + super(new NestedPathFieldType(name)); + } + + /** returns the field name */ + public static String name(Version version) { + if (version.before(Version.V_2_0_0)) { + return LEGACY_NAME; + } + return NAME; + } + + @Override + protected String contentType() { + return NAME; + } + + private static final NestedPathFieldMapper LEGACY_INSTANCE = new NestedPathFieldMapper(LEGACY_NAME); + private static final NestedPathFieldMapper INSTANCE = new NestedPathFieldMapper(NAME); + + public static final TypeParser PARSER = new FixedTypeParser( + c -> c.indexVersionCreated().before(Version.V_2_0_0) ? 
LEGACY_INSTANCE : INSTANCE + ); + + /** helper method to create a lucene field based on the opensearch version */ + public static Field field(Version version, String path) { + return new Field(name(version), path, Defaults.FIELD_TYPE); + } + + /** helper method to create a query based on the opensearch version */ + public static Query filter(Version version, String path) { + return new TermQuery(new Term(name(version), new BytesRef(path))); + } + + /** field type for the NestPath field */ + public static final class NestedPathFieldType extends StringFieldType { + private NestedPathFieldType(String name) { + super(name, true, false, false, TextSearchInfo.SIMPLE_MATCH_ONLY, Collections.emptyMap()); + } + + @Override + public String typeName() { + return NAME; + } + + @Override + public Query existsQuery(QueryShardContext context) { + throw new UnsupportedOperationException("Cannot run exists() query against the nested field path"); + } + + @Override + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { + throw new UnsupportedOperationException("Cannot fetch values for internal field [" + name() + "]."); + } + } +} diff --git a/server/src/main/java/org/opensearch/index/mapper/ObjectMapper.java b/server/src/main/java/org/opensearch/index/mapper/ObjectMapper.java index a9923d7c6d756..d3c2e7f1e5372 100644 --- a/server/src/main/java/org/opensearch/index/mapper/ObjectMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/ObjectMapper.java @@ -32,11 +32,9 @@ package org.opensearch.index.mapper; -import org.apache.lucene.index.Term; import org.apache.lucene.search.Query; -import org.apache.lucene.search.TermQuery; -import org.apache.lucene.util.BytesRef; import org.opensearch.OpenSearchParseException; +import org.opensearch.Version; import org.opensearch.common.Explicit; import org.opensearch.common.Nullable; import org.opensearch.common.collect.CopyOnWriteHashMap; @@ -388,8 +386,7 @@ protected static void 
parseProperties(ObjectMapper.Builder objBuilder, Map initBuiltInMetadataMa builtInMetadataMappers.put(IndexFieldMapper.NAME, IndexFieldMapper.PARSER); builtInMetadataMappers.put(DataStreamFieldMapper.NAME, DataStreamFieldMapper.PARSER); builtInMetadataMappers.put(SourceFieldMapper.NAME, SourceFieldMapper.PARSER); + builtInMetadataMappers.put(NestedPathFieldMapper.NAME, NestedPathFieldMapper.PARSER); builtInMetadataMappers.put(VersionFieldMapper.NAME, VersionFieldMapper.PARSER); builtInMetadataMappers.put(SeqNoFieldMapper.NAME, SeqNoFieldMapper.PARSER); // _field_names must be added last so that it has a chance to see all the other mappers diff --git a/server/src/main/java/org/opensearch/indices/mapper/MapperRegistry.java b/server/src/main/java/org/opensearch/indices/mapper/MapperRegistry.java index f56b2f98f0f6e..23ce1b277aeeb 100644 --- a/server/src/main/java/org/opensearch/indices/mapper/MapperRegistry.java +++ b/server/src/main/java/org/opensearch/indices/mapper/MapperRegistry.java @@ -35,6 +35,7 @@ import org.opensearch.Version; import org.opensearch.index.mapper.Mapper; import org.opensearch.index.mapper.MetadataFieldMapper; +import org.opensearch.index.mapper.NestedPathFieldMapper; import org.opensearch.plugins.MapperPlugin; import java.util.Collections; @@ -50,6 +51,7 @@ public final class MapperRegistry { private final Map mapperParsers; private final Map metadataMapperParsers; + private final Map metadataMapperParsersPre20; private final Function> fieldFilter; public MapperRegistry( @@ -59,6 +61,9 @@ public MapperRegistry( ) { this.mapperParsers = Collections.unmodifiableMap(new LinkedHashMap<>(mapperParsers)); this.metadataMapperParsers = Collections.unmodifiableMap(new LinkedHashMap<>(metadataMapperParsers)); + Map tempPre20 = new LinkedHashMap<>(metadataMapperParsers); + tempPre20.remove(NestedPathFieldMapper.NAME); + this.metadataMapperParsersPre20 = Collections.unmodifiableMap(tempPre20); this.fieldFilter = fieldFilter; } @@ -75,7 +80,7 @@ public Map 
getMapperParsers() { * returned map uses the name of the field as a key. */ public Map getMetadataMapperParsers(Version indexCreatedVersion) { - return metadataMapperParsers; + return indexCreatedVersion.onOrAfter(Version.V_2_0_0) ? metadataMapperParsers : metadataMapperParsersPre20; } /** diff --git a/server/src/test/java/org/opensearch/index/mapper/DocumentParserTests.java b/server/src/test/java/org/opensearch/index/mapper/DocumentParserTests.java index 0ad8dc3f138e0..659042c37d650 100644 --- a/server/src/test/java/org/opensearch/index/mapper/DocumentParserTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/DocumentParserTests.java @@ -248,14 +248,14 @@ public void testNestedHaveIdAndTypeFields() throws Exception { assertNotNull(result.docs().get(0).getField(IdFieldMapper.NAME)); assertEquals(Uid.encodeId("1"), result.docs().get(0).getField(IdFieldMapper.NAME).binaryValue()); assertEquals(IdFieldMapper.Defaults.NESTED_FIELD_TYPE, result.docs().get(0).getField(IdFieldMapper.NAME).fieldType()); - assertNotNull(result.docs().get(0).getField(TypeFieldMapper.NAME)); - assertEquals("__foo", result.docs().get(0).getField(TypeFieldMapper.NAME).stringValue()); + assertNotNull(result.docs().get(0).getField(NestedPathFieldMapper.NAME)); + assertEquals("foo", result.docs().get(0).getField(NestedPathFieldMapper.NAME).stringValue()); assertEquals("value1", result.docs().get(0).getField("foo.bar").binaryValue().utf8ToString()); // Root document: assertNotNull(result.docs().get(1).getField(IdFieldMapper.NAME)); assertEquals(Uid.encodeId("1"), result.docs().get(1).getField(IdFieldMapper.NAME).binaryValue()); assertEquals(IdFieldMapper.Defaults.FIELD_TYPE, result.docs().get(1).getField(IdFieldMapper.NAME).fieldType()); - assertNull(result.docs().get(1).getField(TypeFieldMapper.NAME)); + assertNull(result.docs().get(1).getField(NestedPathFieldMapper.NAME)); assertEquals("value2", result.docs().get(1).getField("baz").binaryValue().utf8ToString()); } diff --git 
a/server/src/test/java/org/opensearch/index/mapper/FieldAliasMapperValidationTests.java b/server/src/test/java/org/opensearch/index/mapper/FieldAliasMapperValidationTests.java index 7ffc22f92d839..92de2707078f3 100644 --- a/server/src/test/java/org/opensearch/index/mapper/FieldAliasMapperValidationTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/FieldAliasMapperValidationTests.java @@ -220,7 +220,7 @@ private static ObjectMapper createObjectMapper(String name) { ObjectMapper.Nested.NO, ObjectMapper.Dynamic.FALSE, emptyMap(), - Settings.EMPTY + SETTINGS ); } @@ -232,7 +232,7 @@ private static ObjectMapper createNestedObjectMapper(String name) { ObjectMapper.Nested.newNested(), ObjectMapper.Dynamic.FALSE, emptyMap(), - Settings.EMPTY + SETTINGS ); } } diff --git a/server/src/test/java/org/opensearch/index/mapper/NestedObjectMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/NestedObjectMapperTests.java index fe3ce5da6c90a..245ba1404cb5c 100644 --- a/server/src/test/java/org/opensearch/index/mapper/NestedObjectMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/NestedObjectMapperTests.java @@ -149,7 +149,7 @@ public void testSingleNested() throws Exception { ); assertThat(doc.docs().size(), equalTo(2)); - assertThat(doc.docs().get(0).get(TypeFieldMapper.NAME), equalTo(nested1Mapper.nestedTypePathAsString())); + assertThat(doc.docs().get(0).get(NestedPathFieldMapper.NAME), equalTo(nested1Mapper.nestedTypePath())); assertThat(doc.docs().get(0).get("nested1.field1"), equalTo("1")); assertThat(doc.docs().get(0).get("nested1.field2"), equalTo("2")); @@ -180,10 +180,10 @@ public void testSingleNested() throws Exception { ); assertThat(doc.docs().size(), equalTo(3)); - assertThat(doc.docs().get(0).get(TypeFieldMapper.NAME), equalTo(nested1Mapper.nestedTypePathAsString())); + assertThat(doc.docs().get(0).get(NestedPathFieldMapper.NAME), equalTo(nested1Mapper.nestedTypePath())); 
assertThat(doc.docs().get(0).get("nested1.field1"), equalTo("1")); assertThat(doc.docs().get(0).get("nested1.field2"), equalTo("2")); - assertThat(doc.docs().get(1).get(TypeFieldMapper.NAME), equalTo(nested1Mapper.nestedTypePathAsString())); + assertThat(doc.docs().get(1).get(NestedPathFieldMapper.NAME), equalTo(nested1Mapper.nestedTypePath())); assertThat(doc.docs().get(1).get("nested1.field1"), equalTo("3")); assertThat(doc.docs().get(1).get("nested1.field2"), equalTo("4")); diff --git a/server/src/test/java/org/opensearch/index/mapper/NestedPathFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/NestedPathFieldMapperTests.java new file mode 100644 index 0000000000000..6ad1d0f7f09b9 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/mapper/NestedPathFieldMapperTests.java @@ -0,0 +1,47 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.mapper; + +import org.apache.lucene.index.IndexableField; +import org.opensearch.common.bytes.BytesArray; +import org.opensearch.common.compress.CompressedXContent; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.test.OpenSearchSingleNodeTestCase; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; + +/** tests for {@link org.opensearch.index.mapper.NestedPathFieldMapper} */ +public class NestedPathFieldMapperTests extends OpenSearchSingleNodeTestCase { + + public void testDefaultConfig() throws IOException { + Settings indexSettings = Settings.EMPTY; + MapperService mapperService = createIndex("test", indexSettings).mapperService(); + DocumentMapper mapper = mapperService.merge( + MapperService.SINGLE_MAPPING_NAME, + new CompressedXContent("{\"" + MapperService.SINGLE_MAPPING_NAME + "\":{}}"), + MapperService.MergeReason.MAPPING_UPDATE + ); + ParsedDocument document = mapper.parse(new SourceToParse("index", "id", new BytesArray("{}"), XContentType.JSON)); + assertEquals(Collections.emptyList(), Arrays.asList(document.rootDoc().getFields(NestedPathFieldMapper.NAME))); + } + + public void testUpdatesWithSameMappings() throws IOException { + Settings indexSettings = Settings.EMPTY; + MapperService mapperService = createIndex("test", indexSettings).mapperService(); + DocumentMapper mapper = mapperService.merge( + MapperService.SINGLE_MAPPING_NAME, + new CompressedXContent("{\"" + MapperService.SINGLE_MAPPING_NAME + "\":{}}"), + MapperService.MergeReason.MAPPING_UPDATE + ); + mapper.merge(mapper.mapping(), MapperService.MergeReason.MAPPING_UPDATE); + } +} diff --git a/server/src/test/java/org/opensearch/index/search/NestedHelperTests.java b/server/src/test/java/org/opensearch/index/search/NestedHelperTests.java index c02df8168afee..7c9895a9e0642 100644 --- 
a/server/src/test/java/org/opensearch/index/search/NestedHelperTests.java +++ b/server/src/test/java/org/opensearch/index/search/NestedHelperTests.java @@ -47,6 +47,7 @@ import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.index.IndexService; import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.mapper.NestedPathFieldMapper; import org.opensearch.index.query.MatchAllQueryBuilder; import org.opensearch.index.query.NestedQueryBuilder; import org.opensearch.index.query.QueryShardContext; @@ -324,7 +325,7 @@ public void testNested() throws IOException { Query expectedChildQuery = new BooleanQuery.Builder().add(new MatchAllDocsQuery(), Occur.MUST) // we automatically add a filter since the inner query might match non-nested docs - .add(new TermQuery(new Term("_type", "__nested1")), Occur.FILTER) + .add(new TermQuery(new Term(NestedPathFieldMapper.NAME, "nested1")), Occur.FILTER) .build(); assertEquals(expectedChildQuery, query.getChildQuery()); @@ -352,7 +353,7 @@ public void testNested() throws IOException { // we need to add the filter again because of include_in_parent expectedChildQuery = new BooleanQuery.Builder().add(new TermQuery(new Term("nested2.foo", "bar")), Occur.MUST) - .add(new TermQuery(new Term("_type", "__nested2")), Occur.FILTER) + .add(new TermQuery(new Term(NestedPathFieldMapper.NAME, "nested2")), Occur.FILTER) .build(); assertEquals(expectedChildQuery, query.getChildQuery()); @@ -367,7 +368,7 @@ public void testNested() throws IOException { // we need to add the filter again because of include_in_root expectedChildQuery = new BooleanQuery.Builder().add(new TermQuery(new Term("nested3.foo", "bar")), Occur.MUST) - .add(new TermQuery(new Term("_type", "__nested3")), Occur.FILTER) + .add(new TermQuery(new Term(NestedPathFieldMapper.NAME, "nested3")), Occur.FILTER) .build(); assertEquals(expectedChildQuery, query.getChildQuery()); diff --git 
a/server/src/test/java/org/opensearch/index/search/nested/NestedSortingTests.java b/server/src/test/java/org/opensearch/index/search/nested/NestedSortingTests.java index a8cd6c5411875..726e9f56f98c1 100644 --- a/server/src/test/java/org/opensearch/index/search/nested/NestedSortingTests.java +++ b/server/src/test/java/org/opensearch/index/search/nested/NestedSortingTests.java @@ -68,6 +68,7 @@ import org.opensearch.index.fielddata.NoOrdinalsStringFieldDataTests; import org.opensearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource; import org.opensearch.index.fielddata.plain.PagedBytesIndexFieldData; +import org.opensearch.index.mapper.NestedPathFieldMapper; import org.opensearch.index.query.MatchAllQueryBuilder; import org.opensearch.index.query.NestedQueryBuilder; import org.opensearch.index.query.QueryBuilder; @@ -103,14 +104,14 @@ public void testDuel() throws Exception { for (int j = 0; j < numChildren; ++j) { Document doc = new Document(); doc.add(new StringField("f", TestUtil.randomSimpleString(random(), 2), Field.Store.NO)); - doc.add(new StringField("__type", "child", Field.Store.NO)); + doc.add(new StringField(NestedPathFieldMapper.NAME, "child", Field.Store.NO)); docs.add(doc); } if (randomBoolean()) { docs.add(new Document()); } Document parent = new Document(); - parent.add(new StringField("__type", "parent", Field.Store.NO)); + parent.add(new StringField(NestedPathFieldMapper.NAME, "parent", Field.Store.NO)); docs.add(parent); writer.addDocuments(docs); if (rarely()) { // we need to have a bit more segments than what RandomIndexWriter would do by default @@ -149,8 +150,8 @@ private TopDocs getTopDocs( int n, boolean reverse ) throws IOException { - Query parentFilter = new TermQuery(new Term("__type", "parent")); - Query childFilter = new TermQuery(new Term("__type", "child")); + Query parentFilter = new TermQuery(new Term(NestedPathFieldMapper.NAME, "parent")); + Query childFilter = new TermQuery(new Term(NestedPathFieldMapper.NAME, 
"child")); SortField sortField = indexFieldData.sortField(missingValue, sortMode, createNested(searcher, parentFilter, childFilter), reverse); Query query = new ConstantScoreQuery(parentFilter); Sort sort = new Sort(sortField); @@ -172,7 +173,7 @@ public void testNestedSorting() throws Exception { document.add(new StringField("filter_1", "T", Field.Store.NO)); docs.add(document); document = new Document(); - document.add(new StringField("__type", "parent", Field.Store.NO)); + document.add(new StringField(NestedPathFieldMapper.NAME, "parent", Field.Store.NO)); document.add(new StringField("field1", "a", Field.Store.NO)); docs.add(document); writer.addDocuments(docs); @@ -192,7 +193,7 @@ public void testNestedSorting() throws Exception { document.add(new StringField("filter_1", "T", Field.Store.NO)); docs.add(document); document = new Document(); - document.add(new StringField("__type", "parent", Field.Store.NO)); + document.add(new StringField(NestedPathFieldMapper.NAME, "parent", Field.Store.NO)); document.add(new StringField("field1", "b", Field.Store.NO)); docs.add(document); writer.addDocuments(docs); @@ -211,7 +212,7 @@ public void testNestedSorting() throws Exception { document.add(new StringField("filter_1", "T", Field.Store.NO)); docs.add(document); document = new Document(); - document.add(new StringField("__type", "parent", Field.Store.NO)); + document.add(new StringField(NestedPathFieldMapper.NAME, "parent", Field.Store.NO)); document.add(new StringField("field1", "c", Field.Store.NO)); docs.add(document); writer.addDocuments(docs); @@ -230,7 +231,7 @@ public void testNestedSorting() throws Exception { document.add(new StringField("filter_1", "F", Field.Store.NO)); docs.add(document); document = new Document(); - document.add(new StringField("__type", "parent", Field.Store.NO)); + document.add(new StringField(NestedPathFieldMapper.NAME, "parent", Field.Store.NO)); document.add(new StringField("field1", "d", Field.Store.NO)); docs.add(document); 
writer.addDocuments(docs); @@ -250,7 +251,7 @@ public void testNestedSorting() throws Exception { document.add(new StringField("filter_1", "F", Field.Store.NO)); docs.add(document); document = new Document(); - document.add(new StringField("__type", "parent", Field.Store.NO)); + document.add(new StringField(NestedPathFieldMapper.NAME, "parent", Field.Store.NO)); document.add(new StringField("field1", "f", Field.Store.NO)); docs.add(document); writer.addDocuments(docs); @@ -269,14 +270,14 @@ public void testNestedSorting() throws Exception { document.add(new StringField("filter_1", "T", Field.Store.NO)); docs.add(document); document = new Document(); - document.add(new StringField("__type", "parent", Field.Store.NO)); + document.add(new StringField(NestedPathFieldMapper.NAME, "parent", Field.Store.NO)); document.add(new StringField("field1", "g", Field.Store.NO)); docs.add(document); writer.addDocuments(docs); // This doc will not be included, because it doesn't have nested docs document = new Document(); - document.add(new StringField("__type", "parent", Field.Store.NO)); + document.add(new StringField(NestedPathFieldMapper.NAME, "parent", Field.Store.NO)); document.add(new StringField("field1", "h", Field.Store.NO)); writer.addDocument(document); @@ -294,7 +295,7 @@ public void testNestedSorting() throws Exception { document.add(new StringField("filter_1", "F", Field.Store.NO)); docs.add(document); document = new Document(); - document.add(new StringField("__type", "parent", Field.Store.NO)); + document.add(new StringField(NestedPathFieldMapper.NAME, "parent", Field.Store.NO)); document.add(new StringField("field1", "i", Field.Store.NO)); docs.add(document); writer.addDocuments(docs); @@ -316,7 +317,7 @@ public void testNestedSorting() throws Exception { reader = OpenSearchDirectoryReader.wrap(reader, new ShardId(indexService.index(), 0)); IndexSearcher searcher = new IndexSearcher(reader); PagedBytesIndexFieldData indexFieldData = getForField("field2"); - Query 
parentFilter = new TermQuery(new Term("__type", "parent")); + Query parentFilter = new TermQuery(new Term(NestedPathFieldMapper.NAME, "parent")); Query childFilter = Queries.not(parentFilter); BytesRefFieldComparatorSource nestedComparatorSource = new BytesRefFieldComparatorSource( indexFieldData, @@ -472,53 +473,52 @@ public void testMultiLevelNestedSorting() throws IOException { List book = new ArrayList<>(); Document document = new Document(); document.add(new TextField("chapters.paragraphs.header", "Paragraph 1", Field.Store.NO)); - document.add(new StringField("_type", "__chapters.paragraphs", Field.Store.NO)); + document.add(new StringField(NestedPathFieldMapper.NAME, "chapters.paragraphs", Field.Store.NO)); document.add(new TextField("chapters.paragraphs.text", "some text...", Field.Store.NO)); document.add(new SortedNumericDocValuesField("chapters.paragraphs.word_count", 743)); document.add(new IntPoint("chapters.paragraphs.word_count", 743)); book.add(document); document = new Document(); document.add(new TextField("chapters.title", "chapter 3", Field.Store.NO)); - document.add(new StringField("_type", "__chapters", Field.Store.NO)); + document.add(new StringField(NestedPathFieldMapper.NAME, "chapters", Field.Store.NO)); document.add(new IntPoint("chapters.read_time_seconds", 400)); document.add(new NumericDocValuesField("chapters.read_time_seconds", 400)); book.add(document); document = new Document(); document.add(new TextField("chapters.paragraphs.header", "Paragraph 1", Field.Store.NO)); - document.add(new StringField("_type", "__chapters.paragraphs", Field.Store.NO)); + document.add(new StringField(NestedPathFieldMapper.NAME, "chapters.paragraphs", Field.Store.NO)); document.add(new TextField("chapters.paragraphs.text", "some text...", Field.Store.NO)); document.add(new SortedNumericDocValuesField("chapters.paragraphs.word_count", 234)); document.add(new IntPoint("chapters.paragraphs.word_count", 234)); book.add(document); document = new Document(); 
document.add(new TextField("chapters.title", "chapter 2", Field.Store.NO)); - document.add(new StringField("_type", "__chapters", Field.Store.NO)); + document.add(new StringField(NestedPathFieldMapper.NAME, "chapters", Field.Store.NO)); document.add(new IntPoint("chapters.read_time_seconds", 200)); document.add(new NumericDocValuesField("chapters.read_time_seconds", 200)); book.add(document); document = new Document(); document.add(new TextField("chapters.paragraphs.header", "Paragraph 2", Field.Store.NO)); - document.add(new StringField("_type", "__chapters.paragraphs", Field.Store.NO)); + document.add(new StringField(NestedPathFieldMapper.NAME, "chapters.paragraphs", Field.Store.NO)); document.add(new TextField("chapters.paragraphs.text", "some text...", Field.Store.NO)); document.add(new SortedNumericDocValuesField("chapters.paragraphs.word_count", 478)); document.add(new IntPoint("chapters.paragraphs.word_count", 478)); book.add(document); document = new Document(); document.add(new TextField("chapters.paragraphs.header", "Paragraph 1", Field.Store.NO)); - document.add(new StringField("_type", "__chapters.paragraphs", Field.Store.NO)); + document.add(new StringField(NestedPathFieldMapper.NAME, "chapters.paragraphs", Field.Store.NO)); document.add(new TextField("chapters.paragraphs.text", "some text...", Field.Store.NO)); document.add(new SortedNumericDocValuesField("chapters.paragraphs.word_count", 849)); document.add(new IntPoint("chapters.paragraphs.word_count", 849)); book.add(document); document = new Document(); document.add(new TextField("chapters.title", "chapter 1", Field.Store.NO)); - document.add(new StringField("_type", "__chapters", Field.Store.NO)); + document.add(new StringField(NestedPathFieldMapper.NAME, "chapters", Field.Store.NO)); document.add(new IntPoint("chapters.read_time_seconds", 1400)); document.add(new NumericDocValuesField("chapters.read_time_seconds", 1400)); book.add(document); document = new Document(); document.add(new 
StringField("genre", "science fiction", Field.Store.NO)); - document.add(new StringField("_type", "_doc", Field.Store.NO)); document.add(new StringField("_id", "1", Field.Store.YES)); document.add(new NumericDocValuesField(PRIMARY_TERM_NAME, 0)); book.add(document); @@ -528,20 +528,19 @@ public void testMultiLevelNestedSorting() throws IOException { List book = new ArrayList<>(); Document document = new Document(); document.add(new TextField("chapters.paragraphs.header", "Introduction", Field.Store.NO)); - document.add(new StringField("_type", "__chapters.paragraphs", Field.Store.NO)); + document.add(new StringField(NestedPathFieldMapper.NAME, "chapters.paragraphs", Field.Store.NO)); document.add(new TextField("chapters.paragraphs.text", "some text...", Field.Store.NO)); document.add(new SortedNumericDocValuesField("chapters.paragraphs.word_count", 76)); document.add(new IntPoint("chapters.paragraphs.word_count", 76)); book.add(document); document = new Document(); document.add(new TextField("chapters.title", "chapter 1", Field.Store.NO)); - document.add(new StringField("_type", "__chapters", Field.Store.NO)); + document.add(new StringField(NestedPathFieldMapper.NAME, "chapters", Field.Store.NO)); document.add(new IntPoint("chapters.read_time_seconds", 20)); document.add(new NumericDocValuesField("chapters.read_time_seconds", 20)); book.add(document); document = new Document(); document.add(new StringField("genre", "romance", Field.Store.NO)); - document.add(new StringField("_type", "_doc", Field.Store.NO)); document.add(new StringField("_id", "2", Field.Store.YES)); document.add(new NumericDocValuesField(PRIMARY_TERM_NAME, 0)); book.add(document); @@ -551,20 +550,19 @@ public void testMultiLevelNestedSorting() throws IOException { List book = new ArrayList<>(); Document document = new Document(); document.add(new TextField("chapters.paragraphs.header", "A bad dream", Field.Store.NO)); - document.add(new StringField("_type", "__chapters.paragraphs", 
Field.Store.NO)); + document.add(new StringField(NestedPathFieldMapper.NAME, "chapters.paragraphs", Field.Store.NO)); document.add(new TextField("chapters.paragraphs.text", "some text...", Field.Store.NO)); document.add(new SortedNumericDocValuesField("chapters.paragraphs.word_count", 976)); document.add(new IntPoint("chapters.paragraphs.word_count", 976)); book.add(document); document = new Document(); document.add(new TextField("chapters.title", "The beginning of the end", Field.Store.NO)); - document.add(new StringField("_type", "__chapters", Field.Store.NO)); + document.add(new StringField(NestedPathFieldMapper.NAME, "chapters", Field.Store.NO)); document.add(new IntPoint("chapters.read_time_seconds", 1200)); document.add(new NumericDocValuesField("chapters.read_time_seconds", 1200)); book.add(document); document = new Document(); document.add(new StringField("genre", "horror", Field.Store.NO)); - document.add(new StringField("_type", "_doc", Field.Store.NO)); document.add(new StringField("_id", "3", Field.Store.YES)); document.add(new NumericDocValuesField(PRIMARY_TERM_NAME, 0)); book.add(document); @@ -574,47 +572,46 @@ public void testMultiLevelNestedSorting() throws IOException { List book = new ArrayList<>(); Document document = new Document(); document.add(new TextField("chapters.paragraphs.header", "macaroni", Field.Store.NO)); - document.add(new StringField("_type", "__chapters.paragraphs", Field.Store.NO)); + document.add(new StringField(NestedPathFieldMapper.NAME, "chapters.paragraphs", Field.Store.NO)); document.add(new TextField("chapters.paragraphs.text", "some text...", Field.Store.NO)); document.add(new SortedNumericDocValuesField("chapters.paragraphs.word_count", 180)); document.add(new IntPoint("chapters.paragraphs.word_count", 180)); book.add(document); document = new Document(); document.add(new TextField("chapters.paragraphs.header", "hamburger", Field.Store.NO)); - document.add(new StringField("_type", "__chapters.paragraphs", 
Field.Store.NO)); + document.add(new StringField(NestedPathFieldMapper.NAME, "chapters.paragraphs", Field.Store.NO)); document.add(new TextField("chapters.paragraphs.text", "some text...", Field.Store.NO)); document.add(new SortedNumericDocValuesField("chapters.paragraphs.word_count", 150)); document.add(new IntPoint("chapters.paragraphs.word_count", 150)); book.add(document); document = new Document(); document.add(new TextField("chapters.paragraphs.header", "tosti", Field.Store.NO)); - document.add(new StringField("_type", "__chapters.paragraphs", Field.Store.NO)); + document.add(new StringField(NestedPathFieldMapper.NAME, "chapters.paragraphs", Field.Store.NO)); document.add(new TextField("chapters.paragraphs.text", "some text...", Field.Store.NO)); document.add(new SortedNumericDocValuesField("chapters.paragraphs.word_count", 120)); document.add(new IntPoint("chapters.paragraphs.word_count", 120)); book.add(document); document = new Document(); document.add(new TextField("chapters.title", "easy meals", Field.Store.NO)); - document.add(new StringField("_type", "__chapters", Field.Store.NO)); + document.add(new StringField(NestedPathFieldMapper.NAME, "chapters", Field.Store.NO)); document.add(new IntPoint("chapters.read_time_seconds", 800)); document.add(new NumericDocValuesField("chapters.read_time_seconds", 800)); book.add(document); document = new Document(); document.add(new TextField("chapters.paragraphs.header", "introduction", Field.Store.NO)); - document.add(new StringField("_type", "__chapters.paragraphs", Field.Store.NO)); + document.add(new StringField(NestedPathFieldMapper.NAME, "chapters.paragraphs", Field.Store.NO)); document.add(new TextField("chapters.paragraphs.text", "some text...", Field.Store.NO)); document.add(new SortedNumericDocValuesField("chapters.paragraphs.word_count", 87)); document.add(new IntPoint("chapters.paragraphs.word_count", 87)); book.add(document); document = new Document(); document.add(new TextField("chapters.title", 
"introduction", Field.Store.NO)); - document.add(new StringField("_type", "__chapters", Field.Store.NO)); + document.add(new StringField(NestedPathFieldMapper.NAME, "chapters", Field.Store.NO)); document.add(new IntPoint("chapters.read_time_seconds", 10)); document.add(new NumericDocValuesField("chapters.read_time_seconds", 10)); book.add(document); document = new Document(); document.add(new StringField("genre", "cooking", Field.Store.NO)); - document.add(new StringField("_type", "_doc", Field.Store.NO)); document.add(new StringField("_id", "4", Field.Store.YES)); document.add(new NumericDocValuesField(PRIMARY_TERM_NAME, 0)); book.add(document); @@ -624,7 +621,6 @@ public void testMultiLevelNestedSorting() throws IOException { List book = new ArrayList<>(); Document document = new Document(); document.add(new StringField("genre", "unknown", Field.Store.NO)); - document.add(new StringField("_type", "_doc", Field.Store.NO)); document.add(new StringField("_id", "5", Field.Store.YES)); document.add(new NumericDocValuesField(PRIMARY_TERM_NAME, 0)); book.add(document); diff --git a/server/src/test/java/org/opensearch/index/shard/ShardSplittingQueryTests.java b/server/src/test/java/org/opensearch/index/shard/ShardSplittingQueryTests.java index b39ff0c9b97b3..04dcea210640c 100644 --- a/server/src/test/java/org/opensearch/index/shard/ShardSplittingQueryTests.java +++ b/server/src/test/java/org/opensearch/index/shard/ShardSplittingQueryTests.java @@ -52,9 +52,9 @@ import org.opensearch.cluster.routing.OperationRouting; import org.opensearch.common.settings.Settings; import org.opensearch.index.mapper.IdFieldMapper; +import org.opensearch.index.mapper.NestedPathFieldMapper; import org.opensearch.index.mapper.RoutingFieldMapper; import org.opensearch.index.mapper.SeqNoFieldMapper; -import org.opensearch.index.mapper.TypeFieldMapper; import org.opensearch.index.mapper.Uid; import org.opensearch.test.OpenSearchTestCase; @@ -88,7 +88,7 @@ public void testSplitOnID() throws 
IOException { docs.add( Arrays.asList( new StringField(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(j)), Field.Store.YES), - new StringField(TypeFieldMapper.NAME, "__nested", Field.Store.YES), + new StringField(NestedPathFieldMapper.NAME, "__nested", Field.Store.YES), new SortedNumericDocValuesField("shard_id", shardId) ) ); @@ -142,7 +142,7 @@ public void testSplitOnRouting() throws IOException { docs.add( Arrays.asList( new StringField(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(j)), Field.Store.YES), - new StringField(TypeFieldMapper.NAME, "__nested", Field.Store.YES), + new StringField(NestedPathFieldMapper.NAME, "__nested", Field.Store.YES), new SortedNumericDocValuesField("shard_id", shardId) ) ); @@ -215,7 +215,7 @@ public void testSplitOnIdOrRouting() throws IOException { docs.add( Arrays.asList( new StringField(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(j)), Field.Store.YES), - new StringField(TypeFieldMapper.NAME, "__nested", Field.Store.YES), + new StringField(NestedPathFieldMapper.NAME, "__nested", Field.Store.YES), new SortedNumericDocValuesField("shard_id", shardId) ) ); @@ -258,7 +258,7 @@ public void testSplitOnRoutingPartitioned() throws IOException { docs.add( Arrays.asList( new StringField(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(j)), Field.Store.YES), - new StringField(TypeFieldMapper.NAME, "__nested", Field.Store.YES), + new StringField(NestedPathFieldMapper.NAME, "__nested", Field.Store.YES), new SortedNumericDocValuesField("shard_id", shardId) ) ); diff --git a/server/src/test/java/org/opensearch/indices/IndicesModuleTests.java b/server/src/test/java/org/opensearch/indices/IndicesModuleTests.java index 8123f044798bd..afcc6aa006500 100644 --- a/server/src/test/java/org/opensearch/indices/IndicesModuleTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesModuleTests.java @@ -41,6 +41,7 @@ import org.opensearch.index.mapper.Mapper; import org.opensearch.index.mapper.MapperParsingException; import 
org.opensearch.index.mapper.MetadataFieldMapper; +import org.opensearch.index.mapper.NestedPathFieldMapper; import org.opensearch.index.mapper.RoutingFieldMapper; import org.opensearch.index.mapper.SeqNoFieldMapper; import org.opensearch.index.mapper.SourceFieldMapper; @@ -94,6 +95,7 @@ public Map getMetadataMappers() { IndexFieldMapper.NAME, DataStreamFieldMapper.NAME, SourceFieldMapper.NAME, + NestedPathFieldMapper.NAME, VersionFieldMapper.NAME, SeqNoFieldMapper.NAME, FieldNamesFieldMapper.NAME }; @@ -101,11 +103,7 @@ public Map getMetadataMappers() { public void testBuiltinMappers() { IndicesModule module = new IndicesModule(Collections.emptyList()); { - Version version = VersionUtils.randomVersionBetween( - random(), - Version.CURRENT.minimumIndexCompatibilityVersion(), - Version.CURRENT - ); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); assertFalse(module.getMapperRegistry().getMapperParsers().isEmpty()); assertFalse(module.getMapperRegistry().getMetadataMapperParsers(version).isEmpty()); Map metadataMapperParsers = module.getMapperRegistry() @@ -116,6 +114,14 @@ public void testBuiltinMappers() { assertEquals(EXPECTED_METADATA_FIELDS[i++], field); } } + { + Version version = VersionUtils.randomVersionBetween( + random(), + Version.V_1_0_0, + VersionUtils.getPreviousVersion(Version.V_2_0_0) + ); + assertEquals(EXPECTED_METADATA_FIELDS.length - 1, module.getMapperRegistry().getMetadataMapperParsers(version).size()); + } } public void testBuiltinWithPlugins() { diff --git a/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java b/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java index 8dd156dfcd0d2..da984084321e1 100644 --- a/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java @@ -66,6 +66,7 @@ import org.opensearch.index.mapper.KeywordFieldMapper; import 
org.opensearch.index.mapper.Mapper; import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.mapper.NestedPathFieldMapper; import org.opensearch.index.shard.IllegalIndexShardStateException; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardState; @@ -567,7 +568,13 @@ public void testIsMetadataField() { final Version randVersion = VersionUtils.randomIndexCompatibleVersion(random()); assertFalse(indicesService.isMetadataField(randVersion, randomAlphaOfLengthBetween(10, 15))); for (String builtIn : IndicesModule.getBuiltInMetadataFields()) { - assertTrue(indicesService.isMetadataField(randVersion, builtIn)); + if (NestedPathFieldMapper.NAME.equals(builtIn) && randVersion.before(Version.V_2_0_0)) { + continue; // nested field mapper does not exist prior to 2.0 + } + assertTrue( + "Expected " + builtIn + " to be a metadata field for version " + randVersion, + indicesService.isMetadataField(randVersion, builtIn) + ); } } diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/nested/NestedAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/nested/NestedAggregatorTests.java index 8ab0cc0023346..65ce02333bae0 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/nested/NestedAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/nested/NestedAggregatorTests.java @@ -57,9 +57,9 @@ import org.opensearch.index.mapper.IdFieldMapper; import org.opensearch.index.mapper.KeywordFieldMapper; import org.opensearch.index.mapper.MappedFieldType; +import org.opensearch.index.mapper.NestedPathFieldMapper; import org.opensearch.index.mapper.NumberFieldMapper; import org.opensearch.index.mapper.SeqNoFieldMapper; -import org.opensearch.index.mapper.TypeFieldMapper; import org.opensearch.index.mapper.Uid; import org.opensearch.index.query.MatchAllQueryBuilder; import org.opensearch.script.MockScriptEngine; @@ -343,15 
+343,15 @@ public void testResetRootDocId() throws Exception { // 1 segment with, 1 root document, with 3 nested sub docs Document document = new Document(); document.add(new Field(IdFieldMapper.NAME, Uid.encodeId("1"), IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); - document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(NestedPathFieldMapper.NAME, "nested_field", NestedPathFieldMapper.Defaults.FIELD_TYPE)); documents.add(document); document = new Document(); document.add(new Field(IdFieldMapper.NAME, Uid.encodeId("1"), IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); - document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(NestedPathFieldMapper.NAME, "nested_field", NestedPathFieldMapper.Defaults.FIELD_TYPE)); documents.add(document); document = new Document(); document.add(new Field(IdFieldMapper.NAME, Uid.encodeId("1"), IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); - document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(NestedPathFieldMapper.NAME, "nested_field", NestedPathFieldMapper.Defaults.FIELD_TYPE)); documents.add(document); document = new Document(); document.add(new Field(IdFieldMapper.NAME, Uid.encodeId("1"), IdFieldMapper.Defaults.FIELD_TYPE)); @@ -365,7 +365,7 @@ public void testResetRootDocId() throws Exception { // 1 document, with 1 nested subdoc document = new Document(); document.add(new Field(IdFieldMapper.NAME, Uid.encodeId("2"), IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); - document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(NestedPathFieldMapper.NAME, "nested_field", NestedPathFieldMapper.Defaults.FIELD_TYPE)); documents.add(document); document = new Document(); document.add(new Field(IdFieldMapper.NAME, Uid.encodeId("2"), 
IdFieldMapper.Defaults.FIELD_TYPE)); @@ -376,7 +376,7 @@ public void testResetRootDocId() throws Exception { // and 1 document, with 1 nested subdoc document = new Document(); document.add(new Field(IdFieldMapper.NAME, Uid.encodeId("3"), IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); - document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(NestedPathFieldMapper.NAME, "nested_field", NestedPathFieldMapper.Defaults.FIELD_TYPE)); documents.add(document); document = new Document(); document.add(new Field(IdFieldMapper.NAME, Uid.encodeId("3"), IdFieldMapper.Defaults.FIELD_TYPE)); @@ -613,13 +613,13 @@ public void testPreGetChildLeafCollectors() throws IOException { List documents = new ArrayList<>(); Document document = new Document(); document.add(new Field(IdFieldMapper.NAME, Uid.encodeId("1"), IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); - document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(NestedPathFieldMapper.NAME, "nested_field", NestedPathFieldMapper.Defaults.FIELD_TYPE)); document.add(new SortedDocValuesField("key", new BytesRef("key1"))); document.add(new SortedDocValuesField("value", new BytesRef("a1"))); documents.add(document); document = new Document(); document.add(new Field(IdFieldMapper.NAME, Uid.encodeId("1"), IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); - document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(NestedPathFieldMapper.NAME, "nested_field", NestedPathFieldMapper.Defaults.FIELD_TYPE)); document.add(new SortedDocValuesField("key", new BytesRef("key2"))); document.add(new SortedDocValuesField("value", new BytesRef("b1"))); documents.add(document); @@ -633,13 +633,13 @@ public void testPreGetChildLeafCollectors() throws IOException { document = new Document(); document.add(new Field(IdFieldMapper.NAME, 
Uid.encodeId("2"), IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); - document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(NestedPathFieldMapper.NAME, "nested_field", NestedPathFieldMapper.Defaults.FIELD_TYPE)); document.add(new SortedDocValuesField("key", new BytesRef("key1"))); document.add(new SortedDocValuesField("value", new BytesRef("a2"))); documents.add(document); document = new Document(); document.add(new Field(IdFieldMapper.NAME, Uid.encodeId("2"), IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); - document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(NestedPathFieldMapper.NAME, "nested_field", NestedPathFieldMapper.Defaults.FIELD_TYPE)); document.add(new SortedDocValuesField("key", new BytesRef("key2"))); document.add(new SortedDocValuesField("value", new BytesRef("b2"))); documents.add(document); @@ -653,13 +653,13 @@ public void testPreGetChildLeafCollectors() throws IOException { document = new Document(); document.add(new Field(IdFieldMapper.NAME, Uid.encodeId("3"), IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); - document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(NestedPathFieldMapper.NAME, "nested_field", NestedPathFieldMapper.Defaults.FIELD_TYPE)); document.add(new SortedDocValuesField("key", new BytesRef("key1"))); document.add(new SortedDocValuesField("value", new BytesRef("a3"))); documents.add(document); document = new Document(); document.add(new Field(IdFieldMapper.NAME, Uid.encodeId("3"), IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); - document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(NestedPathFieldMapper.NAME, "nested_field", NestedPathFieldMapper.Defaults.FIELD_TYPE)); document.add(new SortedDocValuesField("key", new 
BytesRef("key2"))); document.add(new SortedDocValuesField("value", new BytesRef("b3"))); documents.add(document); @@ -863,7 +863,7 @@ public static CheckedConsumer buildResellerData( } Document document = new Document(); document.add(new Field(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(p)), IdFieldMapper.Defaults.FIELD_TYPE)); - document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(NestedPathFieldMapper.NAME, "nested_field", NestedPathFieldMapper.Defaults.FIELD_TYPE)); document.add(sequenceIDFields.primaryTerm); document.add(new SortedNumericDocValuesField("product_id", p)); documents.add(document); @@ -891,7 +891,7 @@ private static double[] generateDocuments(List documents, int numNeste for (int nested = 0; nested < numNestedDocs; nested++) { Document document = new Document(); document.add(new Field(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(id)), IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); - document.add(new Field(TypeFieldMapper.NAME, "__" + path, TypeFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(NestedPathFieldMapper.NAME, path, NestedPathFieldMapper.Defaults.FIELD_TYPE)); long value = randomNonNegativeLong() % 10000; document.add(new SortedNumericDocValuesField(fieldName, value)); documents.add(document); @@ -906,7 +906,7 @@ private List generateBook(String id, String[] authors, int[] numPages) for (int numPage : numPages) { Document document = new Document(); document.add(new Field(IdFieldMapper.NAME, Uid.encodeId(id), IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); - document.add(new Field(TypeFieldMapper.NAME, "__nested_chapters", TypeFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(NestedPathFieldMapper.NAME, "nested_chapters", NestedPathFieldMapper.Defaults.FIELD_TYPE)); document.add(new SortedNumericDocValuesField("num_pages", numPage)); documents.add(document); } diff --git 
a/server/src/test/java/org/opensearch/search/aggregations/bucket/nested/ReverseNestedAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/nested/ReverseNestedAggregatorTests.java index cf0e31bc63467..61df6d01aef64 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/nested/ReverseNestedAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/nested/ReverseNestedAggregatorTests.java @@ -42,9 +42,9 @@ import org.apache.lucene.store.Directory; import org.opensearch.index.mapper.IdFieldMapper; import org.opensearch.index.mapper.MappedFieldType; +import org.opensearch.index.mapper.NestedPathFieldMapper; import org.opensearch.index.mapper.NumberFieldMapper; import org.opensearch.index.mapper.SeqNoFieldMapper; -import org.opensearch.index.mapper.TypeFieldMapper; import org.opensearch.index.mapper.Uid; import org.opensearch.search.aggregations.AggregationBuilder; import org.opensearch.search.aggregations.AggregatorTestCase; @@ -133,7 +133,7 @@ public void testMaxFromParentDocs() throws IOException { document.add( new Field(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(i)), IdFieldMapper.Defaults.NESTED_FIELD_TYPE) ); - document.add(new Field(TypeFieldMapper.NAME, "__" + NESTED_OBJECT, TypeFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(NestedPathFieldMapper.NAME, NESTED_OBJECT, NestedPathFieldMapper.Defaults.FIELD_TYPE)); documents.add(document); expectedNestedDocs++; } @@ -193,7 +193,7 @@ public void testFieldAlias() throws IOException { document.add( new Field(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(i)), IdFieldMapper.Defaults.NESTED_FIELD_TYPE) ); - document.add(new Field(TypeFieldMapper.NAME, "__" + NESTED_OBJECT, TypeFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(NestedPathFieldMapper.NAME, NESTED_OBJECT, NestedPathFieldMapper.Defaults.FIELD_TYPE)); documents.add(document); } Document document = new Document(); diff --git 
a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregatorTests.java index 9a9a03e715644..678bc2fc6f536 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregatorTests.java @@ -60,11 +60,11 @@ import org.opensearch.index.mapper.KeywordFieldMapper; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.mapper.NestedPathFieldMapper; import org.opensearch.index.mapper.NumberFieldMapper; import org.opensearch.index.mapper.RangeFieldMapper; import org.opensearch.index.mapper.RangeType; import org.opensearch.index.mapper.SeqNoFieldMapper; -import org.opensearch.index.mapper.TypeFieldMapper; import org.opensearch.index.mapper.Uid; import org.opensearch.search.SearchHit; import org.opensearch.search.aggregations.Aggregation; @@ -551,7 +551,7 @@ private List generateDocsWithNested(String id, int value, int[] nested for (int nestedValue : nestedValues) { Document document = new Document(); document.add(new Field(IdFieldMapper.NAME, Uid.encodeId(id), IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); - document.add(new Field(TypeFieldMapper.NAME, "__nested_object", TypeFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(NestedPathFieldMapper.NAME, "nested_object", NestedPathFieldMapper.Defaults.FIELD_TYPE)); document.add(new SortedNumericDocValuesField("nested_value", nestedValue)); documents.add(document); } diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorTests.java index a9e819e7cbaf2..cb47bf6cba6a9 100644 --- 
a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorTests.java @@ -66,11 +66,11 @@ import org.opensearch.index.mapper.KeywordFieldMapper; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.mapper.NestedPathFieldMapper; import org.opensearch.index.mapper.NumberFieldMapper; import org.opensearch.index.mapper.RangeFieldMapper; import org.opensearch.index.mapper.RangeType; import org.opensearch.index.mapper.SeqNoFieldMapper; -import org.opensearch.index.mapper.TypeFieldMapper; import org.opensearch.index.mapper.Uid; import org.opensearch.index.query.MatchAllQueryBuilder; import org.opensearch.index.query.QueryBuilders; @@ -1464,7 +1464,7 @@ private List generateDocsWithNested(String id, int value, int[] nested for (int nestedValue : nestedValues) { Document document = new Document(); document.add(new Field(IdFieldMapper.NAME, Uid.encodeId(id), IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); - document.add(new Field(TypeFieldMapper.NAME, "__nested_object", TypeFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(NestedPathFieldMapper.NAME, "nested_object", NestedPathFieldMapper.Defaults.FIELD_TYPE)); document.add(new SortedNumericDocValuesField("nested_value", nestedValue)); documents.add(document); } diff --git a/server/src/test/java/org/opensearch/search/sort/FieldSortBuilderTests.java b/server/src/test/java/org/opensearch/search/sort/FieldSortBuilderTests.java index 44d48e9073e23..bcf458c5028cd 100644 --- a/server/src/test/java/org/opensearch/search/sort/FieldSortBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/sort/FieldSortBuilderTests.java @@ -63,8 +63,8 @@ import org.opensearch.index.mapper.DateFieldMapper; import org.opensearch.index.mapper.KeywordFieldMapper; import org.opensearch.index.mapper.MappedFieldType; +import 
org.opensearch.index.mapper.NestedPathFieldMapper; import org.opensearch.index.mapper.NumberFieldMapper; -import org.opensearch.index.mapper.TypeFieldMapper; import org.opensearch.index.query.MatchNoneQueryBuilder; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryBuilders; @@ -325,7 +325,7 @@ public void testBuildNested() throws IOException { comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource(); nested = comparatorSource.nested(); assertNotNull(nested); - assertEquals(new TermQuery(new Term(TypeFieldMapper.NAME, "__path")), nested.getInnerQuery()); + assertEquals(new TermQuery(new Term(NestedPathFieldMapper.NAME, "path")), nested.getInnerQuery()); sortBuilder = new FieldSortBuilder("fieldName").setNestedPath("path") .setNestedFilter(QueryBuilders.termQuery(MAPPED_STRING_FIELDNAME, "value")); diff --git a/server/src/test/java/org/opensearch/search/sort/GeoDistanceSortBuilderTests.java b/server/src/test/java/org/opensearch/search/sort/GeoDistanceSortBuilderTests.java index c14deb6add083..87adbd9532665 100644 --- a/server/src/test/java/org/opensearch/search/sort/GeoDistanceSortBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/sort/GeoDistanceSortBuilderTests.java @@ -48,7 +48,7 @@ import org.opensearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; import org.opensearch.index.mapper.GeoPointFieldMapper; import org.opensearch.index.mapper.MappedFieldType; -import org.opensearch.index.mapper.TypeFieldMapper; +import org.opensearch.index.mapper.NestedPathFieldMapper; import org.opensearch.index.query.GeoValidationMethod; import org.opensearch.index.query.MatchAllQueryBuilder; import org.opensearch.index.query.MatchNoneQueryBuilder; @@ -552,7 +552,7 @@ public void testBuildNested() throws IOException { comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource(); nested = comparatorSource.nested(); assertNotNull(nested); - assertEquals(new TermQuery(new 
Term(TypeFieldMapper.NAME, "__path")), nested.getInnerQuery()); + assertEquals(new TermQuery(new Term(NestedPathFieldMapper.NAME, "path")), nested.getInnerQuery()); sortBuilder = new GeoDistanceSortBuilder("fieldName", 1.0, 1.0).setNestedPath("path") .setNestedFilter(QueryBuilders.matchAllQuery()); diff --git a/server/src/test/java/org/opensearch/search/sort/ScriptSortBuilderTests.java b/server/src/test/java/org/opensearch/search/sort/ScriptSortBuilderTests.java index c1e430abbe3d2..53e15c1c094ab 100644 --- a/server/src/test/java/org/opensearch/search/sort/ScriptSortBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/sort/ScriptSortBuilderTests.java @@ -43,7 +43,7 @@ import org.opensearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; import org.opensearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource; import org.opensearch.index.fielddata.fieldcomparator.DoubleValuesComparatorSource; -import org.opensearch.index.mapper.TypeFieldMapper; +import org.opensearch.index.mapper.NestedPathFieldMapper; import org.opensearch.index.query.MatchNoneQueryBuilder; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryBuilders; @@ -344,7 +344,7 @@ public void testBuildNested() throws IOException { comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource(); nested = comparatorSource.nested(); assertNotNull(nested); - assertEquals(new TermQuery(new Term(TypeFieldMapper.NAME, "__path")), nested.getInnerQuery()); + assertEquals(new TermQuery(new Term(NestedPathFieldMapper.NAME, "path")), nested.getInnerQuery()); sortBuilder = new ScriptSortBuilder(mockScript(MOCK_SCRIPT_NAME), ScriptSortType.NUMBER).setNestedPath("path") .setNestedFilter(QueryBuilders.matchAllQuery()); From 9da0a867b8a370d3340d4f276eb079fafedcb14f Mon Sep 17 00:00:00 2001 From: Poojita Raj Date: Tue, 26 Apr 2022 18:13:15 -0700 Subject: [PATCH 127/653] updated release note (#3044) Signed-off-by: Poojita Raj --- 
.../opensearch.release-notes-2.0.0-rc1.md | 628 ++++++++++++++++++ 1 file changed, 628 insertions(+) create mode 100644 release-notes/opensearch.release-notes-2.0.0-rc1.md diff --git a/release-notes/opensearch.release-notes-2.0.0-rc1.md b/release-notes/opensearch.release-notes-2.0.0-rc1.md new file mode 100644 index 0000000000000..5171424203c62 --- /dev/null +++ b/release-notes/opensearch.release-notes-2.0.0-rc1.md @@ -0,0 +1,628 @@ +## 2022-04-26 Version 2.0.0-rc1 Release Notes + + +### Breaking Changes in 2.0 + +#### Remove Mapping types + +* Remove type mapping from document index API ([#2026](https://github.com/opensearch-project/OpenSearch/pull/2026)) +* [Remove] Type mapping parameter from document update API ([#2204](https://github.com/opensearch-project/OpenSearch/pull/2204)) +* [Remove] Types from DocWrite Request and Response ([#2239](https://github.com/opensearch-project/OpenSearch/pull/2239)) +* [Remove] Types from GET/MGET ([#2168](https://github.com/opensearch-project/OpenSearch/pull/2168)) +* [Remove] types from SearchHit and Explain API ([#2205](https://github.com/opensearch-project/OpenSearch/pull/2205)) +* [Remove] type support from Bulk API ([#2215](https://github.com/opensearch-project/OpenSearch/pull/2215)) +* Remove type end-points from no-op bulk and search action ([#2261](https://github.com/opensearch-project/OpenSearch/pull/2261)) +* Remove type end-points from search and related APIs ([#2263](https://github.com/opensearch-project/OpenSearch/pull/2263)) +* [Remove] Type mapping end-points from RestMultiSearchTemplateAction ([#2433](https://github.com/opensearch-project/OpenSearch/pull/2433)) +* Removes type mappings from mapping APIs ([#2238](https://github.com/opensearch-project/OpenSearch/pull/2238)) +* Remove type end-points from count action ([#2379](https://github.com/opensearch-project/OpenSearch/pull/2379)) +* Remove type from validate query API ([#2255](https://github.com/opensearch-project/OpenSearch/pull/2255)) +* [Remove] Type 
parameter from TermVectors API ([#2104](https://github.com/opensearch-project/OpenSearch/pull/2104)) +* [Remove] types from rest-api-spec endpoints (#2689) ([#2698](https://github.com/opensearch-project/OpenSearch/pull/2698)) +* [Type removal] Remove deprecation warning on use of _type in doc scripts (#2564) ([#2568](https://github.com/opensearch-project/OpenSearch/pull/2568)) +* [Remove] Types from PutIndexTemplateRequest and builder to reduce mapping to a string ([#2510](https://github.com/opensearch-project/OpenSearch/pull/2510)) +* [Remove] Type from Percolate query API ([#2490](https://github.com/opensearch-project/OpenSearch/pull/2490)) +* [Remove] types from CreateIndexRequest and companion Builder's mapping method ([#2498](https://github.com/opensearch-project/OpenSearch/pull/2498)) +* [Remove] Type from PutIndexTemplateRequest and PITRB ([#2497](https://github.com/opensearch-project/OpenSearch/pull/2497)) +* [Remove] Type metadata from ingest documents ([#2491](https://github.com/opensearch-project/OpenSearch/pull/2491)) +* [Remove] type from CIR.mapping and CIRB.mapping ([#2478](https://github.com/opensearch-project/OpenSearch/pull/2478)) +* [Remove] types based addMapping method from CreateIndexRequest and Builder ([#2460](https://github.com/opensearch-project/OpenSearch/pull/2460)) +* [Remove] type from TaskResults index and IndexMetadata.getMappings ([#2469](https://github.com/opensearch-project/OpenSearch/pull/2469)) +* [Remove] Type query ([#2448](https://github.com/opensearch-project/OpenSearch/pull/2448)) +* [Remove] Type from TermsLookUp ([#2459](https://github.com/opensearch-project/OpenSearch/pull/2459)) +* [Remove] types from Uid and remaining types/Uid from translog ([#2450](https://github.com/opensearch-project/OpenSearch/pull/2450)) +* [Remove] types from translog ([#2439](https://github.com/opensearch-project/OpenSearch/pull/2439)) +* [Remove] Multiple Types from IndexTemplateMetadata 
([#2400](https://github.com/opensearch-project/OpenSearch/pull/2400)) +* Remove inclue_type_name parameter from rest api spec ([#2410](https://github.com/opensearch-project/OpenSearch/pull/2410)) +* [Remove] include_type_name from HLRC ([#2397](https://github.com/opensearch-project/OpenSearch/pull/2397)) +* [Remove] Type mappings from GeoShapeQueryBuilder ([#2322](https://github.com/opensearch-project/OpenSearch/pull/2322)) +* [Remove] types from PutMappingRequest ([#2335](https://github.com/opensearch-project/OpenSearch/pull/2335)) +* [Remove] deprecated getMapping API from IndicesClient ([#2262](https://github.com/opensearch-project/OpenSearch/pull/2262)) +* [Remove] remaining type usage in Client and AbstractClient ([#2258](https://github.com/opensearch-project/OpenSearch/pull/2258)) +* [Remove] Type from Client.prepare(Index,Delete,Update) ([#2253](https://github.com/opensearch-project/OpenSearch/pull/2253)) +* [Remove] Type Specific Index Stats ([#2198](https://github.com/opensearch-project/OpenSearch/pull/2198)) +* [Remove] Type from Search Internals ([#2109](https://github.com/opensearch-project/OpenSearch/pull/2109)) + + +#### Upgrades + +* [Upgrade] 1.2 BWC to Lucene 8.10.1 ([#1460](https://github.com/opensearch-project/OpenSearch/pull/1460)) +* [Upgrade] Lucene 9.1 release (#2560) ([#2565](https://github.com/opensearch-project/OpenSearch/pull/2565)) +* [Upgrade] Lucene 9.1.0-snapshot-ea989fe8f30 ([#2487](https://github.com/opensearch-project/OpenSearch/pull/2487)) +* [Upgrade] Lucene 9.0.0 release ([#1109](https://github.com/opensearch-project/OpenSearch/pull/1109)) +* Set target and source compatibility to 11, required by Lucene 9. 
([#2407](https://github.com/opensearch-project/OpenSearch/pull/2407)) +* Upgrade to Lucene 8.10.1 ([#1440](https://github.com/opensearch-project/OpenSearch/pull/1440)) +* Upgrade to Lucene 8.9 ([#1080](https://github.com/opensearch-project/OpenSearch/pull/1080)) +* Update lucene version to 8.8.2 ([#557](https://github.com/opensearch-project/OpenSearch/pull/557)) +* Support Gradle 7. Fixing 'eclipse' plugin dependencies ([#1648](https://github.com/opensearch-project/OpenSearch/pull/1648)) +* Update to Gradle 7.3.3 ([#1803](https://github.com/opensearch-project/OpenSearch/pull/1803)) +* Support Gradle 7. More reliable tasks dependencies for Maven plugins publishing ([#1630](https://github.com/opensearch-project/OpenSearch/pull/1630)) +* Support Gradle 7. Fixing publishing to Maven Local for plugins ([#1624](https://github.com/opensearch-project/OpenSearch/pull/1624)) +* Support Gradle 7 ([#1609](https://github.com/opensearch-project/OpenSearch/pull/1609)) + +#### Deprecations + +* [Remove] Deprecated Synced Flush API ([#1761](https://github.com/opensearch-project/OpenSearch/pull/1761)) +* Remove deprecated search.remote settings ([#1870](https://github.com/opensearch-project/OpenSearch/pull/1870)) +* [Remove] Default Mapping ([#2151](https://github.com/opensearch-project/OpenSearch/pull/2151)) +* Remove Deprecated SimpleFS ([#1639](https://github.com/opensearch-project/OpenSearch/pull/1639)) +* [Remove] Deprecated Zen1 Discovery ([#1216](https://github.com/opensearch-project/OpenSearch/pull/1216)) +* Remove LegacyESVersion.V_6_8_x constants ([#1869](https://github.com/opensearch-project/OpenSearch/pull/1869)) +* Remove LegacyESVersion.V_6_7_x constants ([#1807](https://github.com/opensearch-project/OpenSearch/pull/1807)) +* Remove LegacyESVersion.V_6_6_x constants ([#1804](https://github.com/opensearch-project/OpenSearch/pull/1804)) +* Remove LegacyESVersion.V_6_5_x constants ([#1794](https://github.com/opensearch-project/OpenSearch/pull/1794)) +* Remove deprecated 
transport client ([#1781](https://github.com/opensearch-project/OpenSearch/pull/1781)) +* Remove LegacyVersion.v6.4.x constants ([#1787](https://github.com/opensearch-project/OpenSearch/pull/1787)) +* Remove LegacyESVersion.V_6_3_x constants ([#1691](https://github.com/opensearch-project/OpenSearch/pull/1691)) +* Remove LegacyESVersion.V_6_2_x constants ([#1686](https://github.com/opensearch-project/OpenSearch/pull/1686)) +* Remove LegacyESVersion.V_6_1_x constants ([#1681](https://github.com/opensearch-project/OpenSearch/pull/1681)) +* Remove 6.0.* version constants ([#1658](https://github.com/opensearch-project/OpenSearch/pull/1658)) +* [Remove] 6x skip from yml ([#2153](https://github.com/opensearch-project/OpenSearch/pull/2153)) + +### Security Fixes + +* [CVE] Upgrade dependencies for Azure related plugins to mitigate CVEs ([#688](https://github.com/opensearch-project/OpenSearch/pull/688)) +* [CVE] Upgrade dependencies to mitigate CVEs ([#657](https://github.com/opensearch-project/OpenSearch/pull/657)) +* [CVE-2018-11765] Upgrade hadoop dependencies for hdfs plugin ([#654](https://github.com/opensearch-project/OpenSearch/pull/654)) +* [CVE-2020-7692] Upgrade google-oauth clients for goolge cloud plugins ([#662](https://github.com/opensearch-project/OpenSearch/pull/662)) +* [CVE-2020-36518] Update jackson-databind to 2.13.2.2 (#2599) ([#2647](https://github.com/opensearch-project/OpenSearch/pull/2647)) +* Remove old ES libraries used in reindex due to CVEs ([#1359](https://github.com/opensearch-project/OpenSearch/pull/1359)) + +### Features/Enhancements + +* Allowing custom folder name for plugin installation ([#848](https://github.com/opensearch-project/OpenSearch/pull/848)) +* A CLI tool to assist during an upgrade to OpenSearch. 
([#846](https://github.com/opensearch-project/OpenSearch/pull/846)) +* Enable adding experimental features through sandbox modules ([#691](https://github.com/opensearch-project/OpenSearch/pull/691)) +* Rank feature - unknown field linear ([#983](https://github.com/opensearch-project/OpenSearch/pull/983)) +* [FEATURE] Add OPENSEARCH_JAVA_HOME env to override JAVA_HOME ([#2001](https://github.com/opensearch-project/OpenSearch/pull/2001)) +* Add request parameter 'cluster_manager_timeout' and deprecate 'master_timeout' - in Ingest APIs and Script APIs (#2682) ([#2891](https://github.com/opensearch-project/OpenSearch/pull/2891)) +* Change deprecation message for API parameter value 'master_node' of parameter 'metric' (#2880) ([#2882](https://github.com/opensearch-project/OpenSearch/pull/2882)) +* Add request parameter 'cluster_manager_timeout' and deprecate 'master_timeout' - in Snapshot APIs (#2680) ([#2871](https://github.com/opensearch-project/OpenSearch/pull/2871)) +* Add request parameter 'cluster_manager_timeout' and deprecate 'master_timeout' - in Index Template APIs (#2678) ([#2867](https://github.com/opensearch-project/OpenSearch/pull/2867)) +* Add request parameter 'cluster_manager_timeout' and deprecate 'master_timeout' - in Index APIs except index template APIs (#2660) ([#2771](https://github.com/opensearch-project/OpenSearch/pull/2771)) +* Add request parameter 'cluster_manager_timeout' and deprecate 'master_timeout' - in Cluster APIs (#2658) ([#2755](https://github.com/opensearch-project/OpenSearch/pull/2755)) +* [Backport 2.0] Add request parameter 'cluster_manager_timeout' as the alternative for 'master_timeout', and deprecate 'master_timeout' - in CAT APIs ([#2717](https://github.com/opensearch-project/OpenSearch/pull/2717)) +* Add 'cluster_manager_node' into ClusterState Metric as an alternative to 'master_node' ([#2415](https://github.com/opensearch-project/OpenSearch/pull/2415)) +* Add a new node role 'cluster_manager' as the alternative for 
'master' role and deprecate 'master' role ([#2424](https://github.com/opensearch-project/OpenSearch/pull/2424)) +* Replace 'master' with 'cluster_manager' in 'GET Cat Nodes' API ([#2441](https://github.com/opensearch-project/OpenSearch/pull/2441)) +* Replace 'discovered_master' with 'discovered_cluster_manager' in 'GET Cat Health' API ([#2438](https://github.com/opensearch-project/OpenSearch/pull/2438)) +* Add a field discovered_cluster_manager in get cluster health api ([#2437](https://github.com/opensearch-project/OpenSearch/pull/2437)) +* Add request parameter 'cluster_manager_timeout' as the alternative for 'master_timeout', and deprecate 'master_timeout' - in CAT Nodes API ([#2435](https://github.com/opensearch-project/OpenSearch/pull/2435)) +* Add a new REST API endpoint 'GET _cat/cluster_manager' as the replacement of 'GET _cat/master' ([#2404](https://github.com/opensearch-project/OpenSearch/pull/2404)) +* Deprecate setting 'cluster.no_master_block' and introduce the alternative setting 'cluster.no_cluster_manager_block' ([#2453](https://github.com/opensearch-project/OpenSearch/pull/2453)) +* Deprecate setting 'cluster.service.slow_master_task_logging_threshold' and introduce the alternative setting 'cluster.service.slow_cluster_manager_task_logging_threshold' ([#2451](https://github.com/opensearch-project/OpenSearch/pull/2451)) +* Deprecate setting 'cluster.initial_master_nodes' and introduce the alternative setting 'cluster.initial_cluster_manager_nodes' ([#2463](https://github.com/opensearch-project/OpenSearch/pull/2463)) +* Replace remaining 'blacklist' with 'denylist' in internal class and method names (#2784) ([#2813](https://github.com/opensearch-project/OpenSearch/pull/2813)) +* Centralize codes related to 'master_timeout' deprecation for easier removal - in CAT Nodes API (#2670) ([#2696](https://github.com/opensearch-project/OpenSearch/pull/2696)) +* Make Rest-High-Rest-Level tests allow deprecation warning temporarily, during deprecation of 
request parameter 'master_timeout' (#2702) ([#2741](https://github.com/opensearch-project/OpenSearch/pull/2741)) +* Replaced "master" terminology in Log message (#2575) ([#2594](https://github.com/opensearch-project/OpenSearch/pull/2594)) +* Deprecate setting 'reindex.remote.whitelist' and introduce the alternative setting 'reindex.remote.allowlist' ([#2221](https://github.com/opensearch-project/OpenSearch/pull/2221)) +* Replace exclusionary words whitelist and blacklist in the places that won't impact backwards compatibility ([#2178](https://github.com/opensearch-project/OpenSearch/pull/2178)) +* Support for geo_bounding_box queries on geo_shape fields ([#2506](https://github.com/opensearch-project/OpenSearch/pull/2506)) +* Support for geo_distance queries on geo_shape fields ([#2516](https://github.com/opensearch-project/OpenSearch/pull/2516)) +* Add '_name' field support to score functions and provide it back in explanation response ([#2244](https://github.com/opensearch-project/OpenSearch/pull/2244)) +* Add support of SOCKS proxies for S3 repository ([#2160](https://github.com/opensearch-project/OpenSearch/pull/2160)) +* Case Insensitive Support in Regexp Interval ([#2237](https://github.com/opensearch-project/OpenSearch/pull/2237)) +* Support unordered non-overlapping intervals ([#2103](https://github.com/opensearch-project/OpenSearch/pull/2103)) +* Support _first and _last parameter for missing bucket ordering in composite aggregation ([#1942](https://github.com/opensearch-project/OpenSearch/pull/1942)) +* Concurrent Searching (Experimental): modify profiling implementation to support concurrent data collection ([#1673](https://github.com/opensearch-project/OpenSearch/pull/1673)) +* Changes to support retrieval of operations from translog based on specified range ([#1210](https://github.com/opensearch-project/OpenSearch/pull/1210)) +* Support for translog pruning based on retention leases ([#1038](https://github.com/opensearch-project/OpenSearch/pull/1038)) 
+* Support for bwc tests for plugins ([#1051](https://github.com/opensearch-project/OpenSearch/pull/1051)) +* Part 1: Support for cancel_after_timeinterval parameter in search and msearch request ([#986](https://github.com/opensearch-project/OpenSearch/pull/986)) +* alt bash path support ([#1047](https://github.com/opensearch-project/OpenSearch/pull/1047)) +* Support Data Streams in OpenSearch ([#690](https://github.com/opensearch-project/OpenSearch/pull/690)) +* Support for Heap after GC stats (correction after backport to 1.2.0) ([#1315](https://github.com/opensearch-project/OpenSearch/pull/1315)) +* Support for Heap after GC stats ([#1265](https://github.com/opensearch-project/OpenSearch/pull/1265)) +* Add deprecated API for creating History Ops Snapshot from translog (#2886) ([#2917](https://github.com/opensearch-project/OpenSearch/pull/2917)) +* Introduce QueryPhaseSearcher extension point (SearchPlugin) ([#1931](https://github.com/opensearch-project/OpenSearch/pull/1931)) +* Add default for EnginePlugin.getEngineFactory ([#2419](https://github.com/opensearch-project/OpenSearch/pull/2419)) +* Add valuesField in PercentilesAggregationBuilder streamInput constructor ([#2308](https://github.com/opensearch-project/OpenSearch/pull/2308)) +* Reintroduce negative epoch_millis #1991 ([#2232](https://github.com/opensearch-project/OpenSearch/pull/2232)) +* Install plugin command help ([#2193](https://github.com/opensearch-project/OpenSearch/pull/2193)) +* Always use Lucene index in peer recovery ([#2077](https://github.com/opensearch-project/OpenSearch/pull/2077)) +* Add Factory to enable Lucene ConcatenateGraphFilter (#1278) ([#2152](https://github.com/opensearch-project/OpenSearch/pull/2152)) +* Add proxy settings for GCS repository ([#2096](https://github.com/opensearch-project/OpenSearch/pull/2096)) +* Add proxy username and password settings for Azure repository ([#2098](https://github.com/opensearch-project/OpenSearch/pull/2098)) +* Add regexp interval source 
([#1917](https://github.com/opensearch-project/OpenSearch/pull/1917)) +* Delay the request size calculation until required by the indexing pressure framework ([#1592](https://github.com/opensearch-project/OpenSearch/pull/1592)) +* Enabling Sort Optimization to make use of Lucene ([#1974](https://github.com/opensearch-project/OpenSearch/pull/1974)) +* Add max_expansions option to wildcard interval ([#1916](https://github.com/opensearch-project/OpenSearch/pull/1916)) +* Prefer adaptive replica selection over awareness attribute based routing ([#1107](https://github.com/opensearch-project/OpenSearch/pull/1107)) +* Prioritize primary shard movement during shard allocation ([#1445](https://github.com/opensearch-project/OpenSearch/pull/1445)) +* Enforce soft deletes ([#1903](https://github.com/opensearch-project/OpenSearch/pull/1903)) +* Make SortBuilders pluggable ([#1856](https://github.com/opensearch-project/OpenSearch/pull/1856)) +* Use try-with-resources with MockLogAppender ([#1595](https://github.com/opensearch-project/OpenSearch/pull/1595)) +* Bridging the gap in network overhead measurement in the profiler ([#1360](https://github.com/opensearch-project/OpenSearch/pull/1360)) +* Adding a cancelled field to tell if a cancellable task is cancelled ([#1732](https://github.com/opensearch-project/OpenSearch/pull/1732)) +* Avoid logging duplicate deprecation warnings multiple times ([#1660](https://github.com/opensearch-project/OpenSearch/pull/1660)) +* Added more detailed logging for SSLHandshakeException ([#1602](https://github.com/opensearch-project/OpenSearch/pull/1602)) +* Rename field_masking_span to span_field_masking ([#1606](https://github.com/opensearch-project/OpenSearch/pull/1606)) +* Giving informative error messages for double slashes in API call URLs ([#1568](https://github.com/opensearch-project/OpenSearch/pull/1568)) +* Renaming slave to replica in filebeat-6.0.template.json file. 
([#1569](https://github.com/opensearch-project/OpenSearch/pull/1569)) +* Enable RestHighLevel-Client to set parameter require_alias for bulk index and reindex requests ([#1533](https://github.com/opensearch-project/OpenSearch/pull/1533)) +* Improve leader node-left logging to indicate timeout/coordination state rejection ([#1584](https://github.com/opensearch-project/OpenSearch/pull/1584)) +* Added logic to allow {dot} files on startup ([#1437](https://github.com/opensearch-project/OpenSearch/pull/1437)) +* remove codeQL warning about implicit narrowing conversion in compound assignment ([#1403](https://github.com/opensearch-project/OpenSearch/pull/1403)) +* Make TranslogDeletionPolicy abstract for extension ([#1456](https://github.com/opensearch-project/OpenSearch/pull/1456)) +* Remove deprecated settings and logic for translog pruning by retention lease. ([#1416](https://github.com/opensearch-project/OpenSearch/pull/1416)) +* Adjust CodeCache size to eliminate JVM warnings (and crashes) ([#1426](https://github.com/opensearch-project/OpenSearch/pull/1426)) +* Add extension point for custom TranslogDeletionPolicy in EnginePlugin. ([#1404](https://github.com/opensearch-project/OpenSearch/pull/1404)) +* Update node attribute check to version update (1.2) check for shard indexing pressure serialization. 
([#1395](https://github.com/opensearch-project/OpenSearch/pull/1395)) +* Add EngineConfig extensions to EnginePlugin ([#1387](https://github.com/opensearch-project/OpenSearch/pull/1387)) +* Add Shard Level Indexing Pressure ([#1336](https://github.com/opensearch-project/OpenSearch/pull/1336)) +* Making GeneralScriptException an Implementation of OpensearchWrapperException ([#1066](https://github.com/opensearch-project/OpenSearch/pull/1066)) +* Handle shard over allocation during partial zone/rack or independent node failures ([#1149](https://github.com/opensearch-project/OpenSearch/pull/1149)) +* Introduce FS Health HEALTHY threshold to fail stuck node ([#1167](https://github.com/opensearch-project/OpenSearch/pull/1167)) +* Drop mocksocket in favour of custom security manager checks (tests only) ([#1205](https://github.com/opensearch-project/OpenSearch/pull/1205)) +* Improving the Grok circular reference check to prevent stack overflow ([#1079](https://github.com/opensearch-project/OpenSearch/pull/1079)) +* Introduce replaceRoutes() method and 2 new constructors to RestHandler.java ([#947](https://github.com/opensearch-project/OpenSearch/pull/947)) +* Fail fast when BytesRestResponse ctor throws exception ([#923](https://github.com/opensearch-project/OpenSearch/pull/923)) +* Restricting logs permissions ([#966](https://github.com/opensearch-project/OpenSearch/pull/966)) +* Avoid override of routes() in BaseRestHandler to respect the default behavior defined in RestHandler ([#889](https://github.com/opensearch-project/OpenSearch/pull/889)) +* Replacing docs-beta links with /docs ([#957](https://github.com/opensearch-project/OpenSearch/pull/957)) +* Adding broken links checker ([#877](https://github.com/opensearch-project/OpenSearch/pull/877)) +* Pass interceptor to super constructor ([#876](https://github.com/opensearch-project/OpenSearch/pull/876)) +* Add 'tagline' back to MainResponse in server that was removed in PR #427 
([#913](https://github.com/opensearch-project/OpenSearch/pull/913)) +* Remove distribution from main response in compatibility mode ([#898](https://github.com/opensearch-project/OpenSearch/pull/898)) +* Replace metadata keys in OpenSearchException during serialization and deserialization ([#905](https://github.com/opensearch-project/OpenSearch/pull/905)) +* Add cluster setting to spoof version number returned from MainResponse ([#847](https://github.com/opensearch-project/OpenSearch/pull/847)) +* Add URL for lucene snapshots ([#858](https://github.com/opensearch-project/OpenSearch/pull/858)) +* Decouple throttling limits for new and old indices. ([#778](https://github.com/opensearch-project/OpenSearch/pull/778)) +* Verbose plugin not found exception ([#849](https://github.com/opensearch-project/OpenSearch/pull/849)) +* Enable BWC checks ([#796](https://github.com/opensearch-project/OpenSearch/pull/796)) +* Add a method to use fallback setting to set the memory size ([#755](https://github.com/opensearch-project/OpenSearch/pull/755)) +* An allocation constraint mechanism, that de-prioritizes nodes from getting picked for allocation if they breach certain constraints ([#680](https://github.com/opensearch-project/OpenSearch/pull/680)) +* Create group settings with fallback. ([#743](https://github.com/opensearch-project/OpenSearch/pull/743)) +* Add timeout on cat/stats API ([#552](https://github.com/opensearch-project/OpenSearch/pull/552)) +* Make allocation decisions at node level first for pending task optimi… ([#534](https://github.com/opensearch-project/OpenSearch/pull/534)) +* Decouples primaries_recoveries limit from concurrent recoveries limit. 
([#546](https://github.com/opensearch-project/OpenSearch/pull/546)) +* Merging javadoc feature branch changes to main ([#715](https://github.com/opensearch-project/OpenSearch/pull/715)) +* Add read_only block argument to opensearch-node unsafe-bootstrap command ([#599](https://github.com/opensearch-project/OpenSearch/pull/599)) +* Catch runtime exceptions to make class loader race conditions easier to debug. ([#608](https://github.com/opensearch-project/OpenSearch/pull/608)) +* Remove URL content from Reindex error response ([#630](https://github.com/opensearch-project/OpenSearch/pull/630)) +* Standardize int, long, double and float Setting constructors. ([#665](https://github.com/opensearch-project/OpenSearch/pull/665)) +* Add Remote Reindex SPI extension ([#547](https://github.com/opensearch-project/OpenSearch/pull/547)) +* Make default number of shards configurable ([#625](https://github.com/opensearch-project/OpenSearch/pull/625)) +* Converted all .asciidoc to .md. ([#658](https://github.com/opensearch-project/OpenSearch/pull/658)) +* Make -Dtests.output=always actually work. ([#648](https://github.com/opensearch-project/OpenSearch/pull/648)) +* Handle inefficiencies while fetching the delayed unassigned shards during cluster health ([#588](https://github.com/opensearch-project/OpenSearch/pull/588)) +* Replace elastic.co with opensearch.org ([#611](https://github.com/opensearch-project/OpenSearch/pull/611)) +* Speedup lang-painless tests ([#605](https://github.com/opensearch-project/OpenSearch/pull/605)) +* Speedup snapshot stale indices delete ([#613](https://github.com/opensearch-project/OpenSearch/pull/613)) +* Speed ups to test suite and precommit tasks. 
([#580](https://github.com/opensearch-project/OpenSearch/pull/580)) +* [Versioning] Rebase to OpenSearch version 1.0.0 ([#555](https://github.com/opensearch-project/OpenSearch/pull/555)) +* Prevent setting maxParallelForks=0 on single-cpu machines ([#558](https://github.com/opensearch-project/OpenSearch/pull/558)) +* Use alternate example data in OpenSearch test cases. ([#454](https://github.com/opensearch-project/OpenSearch/pull/454)) + +### Bug Fixes + +* Adding a null pointer check to fix index_prefix query (#2879) ([#2903](https://github.com/opensearch-project/OpenSearch/pull/2903)) +* Fix issue that deprecated setting 'cluster.initial_master_nodes' is not identified in node bootstrap check (#2779) ([#2794](https://github.com/opensearch-project/OpenSearch/pull/2794)) +* [Bug] Fix InboundDecoder version compat check (#2570) ([#2573](https://github.com/opensearch-project/OpenSearch/pull/2573)) +* Fixing PluginsServiceTests (post Lucene 9 update) ([#2484](https://github.com/opensearch-project/OpenSearch/pull/2484)) +* Fixing the --release flag usage for javac (#2343) ([#2352](https://github.com/opensearch-project/OpenSearch/pull/2352)) +* Fix flaky test case - string profiler via global ordinals ([#2226](https://github.com/opensearch-project/OpenSearch/pull/2226)) +* Fixing the indentation in version.yml ([#2163](https://github.com/opensearch-project/OpenSearch/pull/2163)) +* Fixing org.opensearch.monitor.os.OsProbeTests::testLogWarnCpuMessageOnlyOnes when CGroups are not available ([#2101](https://github.com/opensearch-project/OpenSearch/pull/2101)) +* Fix integration tests failure ([#2067](https://github.com/opensearch-project/OpenSearch/pull/2067)) +* Another attempt to fix o.o.transport.netty4.OpenSearchLoggingHandlerIT fails w/ stack overflow ([#2051](https://github.com/opensearch-project/OpenSearch/pull/2051)) +* Fix AssertionError message ([#2044](https://github.com/opensearch-project/OpenSearch/pull/2044)) +* Fix composite aggregation failed test cases 
introduce by missing_order parameter (#1942) ([#2005](https://github.com/opensearch-project/OpenSearch/pull/2005)) +* Fixing allocation filters to persist existing state on settings update ([#1718](https://github.com/opensearch-project/OpenSearch/pull/1718)) +* Fix more failing tests as a result of renaming ([#457](https://github.com/opensearch-project/OpenSearch/pull/457)) +* Fix failing rest-api-spec tests as part of renaming. ([#451](https://github.com/opensearch-project/OpenSearch/pull/451)) +* Fix multiple failing server tests. ([#453](https://github.com/opensearch-project/OpenSearch/pull/453)) +* [TEST] Fix FsHealthServiceTest by increasing the timeout period before checking the FS health after restoring the FS status ([#1813](https://github.com/opensearch-project/OpenSearch/pull/1813)) +* [BUG] Wait for outstanding requests to complete in LastSuccessfulSett… ([#1939](https://github.com/opensearch-project/OpenSearch/pull/1939)) +* [Bug] Wait for outstanding requests to complete ([#1925](https://github.com/opensearch-project/OpenSearch/pull/1925)) +* [BUG] Serialization bugs can cause node drops ([#1885](https://github.com/opensearch-project/OpenSearch/pull/1885)) +* [BUG] Docker distribution builds are failing. 
Switching to http://vault.centos.org ([#2024](https://github.com/opensearch-project/OpenSearch/pull/2024)) +* [BUG] SymbolicLinkPreservingUntarTransform fails on Windows ([#1433](https://github.com/opensearch-project/OpenSearch/pull/1433)) +* [BUG] ConcurrentSnapshotsIT#testAssertMultipleSnapshotsAndPrimaryFailOver fails intermittently ([#1311](https://github.com/opensearch-project/OpenSearch/pull/1311)) +* [Bug] Fix InstallPluginCommand to use proper key signatures ([#1233](https://github.com/opensearch-project/OpenSearch/pull/1233)) +* [Bug] Fix mixed cluster support for OpenSearch 2+ ([#1191](https://github.com/opensearch-project/OpenSearch/pull/1191)) +* [BUG] Fix cat.health test failures in pre 1.0.0 mixed cluster test ([#928](https://github.com/opensearch-project/OpenSearch/pull/928)) +* [BUG] Fix versioning issues discovered through version bump ([#884](https://github.com/opensearch-project/OpenSearch/pull/884)) +* [BUG] fix MainResponse to spoof version number for legacy clients ([#708](https://github.com/opensearch-project/OpenSearch/pull/708)) +* [Bug] Fix gradle build on Windows failing from a recent change ([#758](https://github.com/opensearch-project/OpenSearch/pull/758)) +* Apply fix for health API response to distinguish no master ([#656](https://github.com/opensearch-project/OpenSearch/pull/656)) +* Rename translog pruning setting to CCR specific setting and addressed Bug in the test case ([#1243](https://github.com/opensearch-project/OpenSearch/pull/1243)) +* fix gradle check fail due to renaming -min in #1094 ([#1289](https://github.com/opensearch-project/OpenSearch/pull/1289)) +* Added explicit 'null' check for response listener to prevent obscure NullPointerException issues (#3048) ([#3050](https://github.com/opensearch-project/OpenSearch/pull/3050)) +* [Backport] [2.0] Bugfix to guard against stack overflow errors caused by very large reg-ex input ([#2816](https://github.com/opensearch-project/OpenSearch/pull/2816)) +* [Bug] Change 1.0.0 
version check in PluginInfo +* TEST BUG: MergeSchedulerSettingsTests fails always on small machines ([#559](https://github.com/opensearch-project/OpenSearch/pull/559)) +* Fix bwcVersions after bumping version 1.3.1 ([#2532](https://github.com/opensearch-project/OpenSearch/pull/2532)) +* Fixing bwcVersions and bwc builds (#2430) - adding 1.4.0 into main bwcVersions +* Fixing invalid Java code example in JavaDoc ([#2008](https://github.com/opensearch-project/OpenSearch/pull/2008)) +* Fixing org.opensearch.common.network.InetAddressesTests.testForStringIPv6WithScopeIdInput ([#1913](https://github.com/opensearch-project/OpenSearch/pull/1913)) +* Fix o.o.transport.netty4.OpenSearchLoggingHandlerIT stack overflow test failure ([#1900](https://github.com/opensearch-project/OpenSearch/pull/1900)) +* Fix verifyVersions gradle task and cleanup bwcVersions ([#1878](https://github.com/opensearch-project/OpenSearch/pull/1878)) +* Attempt to fix :test:fixtures:s3-fixture:composeUp fails due to HTTP connection issue ([#1866](https://github.com/opensearch-project/OpenSearch/pull/1866)) +* Fixing build failures after Flavor Serialization backport ([#1867](https://github.com/opensearch-project/OpenSearch/pull/1867)) +* Fixing auto backport workflow ([#1845](https://github.com/opensearch-project/OpenSearch/pull/1845)) +* Upgrade and fix link checker to 1.2. 
([#1811](https://github.com/opensearch-project/OpenSearch/pull/1811)) +* link checker fix - only run on opensearch-project/OpenSearch ([#1719](https://github.com/opensearch-project/OpenSearch/pull/1719)) +* Fixing .gitattributes for binary content, removing *.class files ([#1717](https://github.com/opensearch-project/OpenSearch/pull/1717)) +* Fix unit test testFailsHealthOnHungIOBeyondHealthyTimeout() by increasing the max waiting time before assertion ([#1692](https://github.com/opensearch-project/OpenSearch/pull/1692)) +* Fixing bwc test for repository-multi-version ([#1441](https://github.com/opensearch-project/OpenSearch/pull/1441)) +* Fixing support for a multi-node cluster via "gradle run" ([#1455](https://github.com/opensearch-project/OpenSearch/pull/1455)) +* Fix windows build (mostly) ([#1412](https://github.com/opensearch-project/OpenSearch/pull/1412)) +* Fixing post merge 3rd party audit issues ([#1384](https://github.com/opensearch-project/OpenSearch/pull/1384)) +* Minor fix for the flaky test to reduce concurrency (#1361) ([#1364](https://github.com/opensearch-project/OpenSearch/pull/1364)) +* Fixing org.opensearch.repositories.azure.AzureBlobContainerRetriesTests and org.opensearch.action.admin.cluster.node.stats.NodeStatsTests ([#1390](https://github.com/opensearch-project/OpenSearch/pull/1390)) +* Fix failure in SearchCancellationIT.testMSearchChildReqCancellationWithHybridTimeout ([#1103](https://github.com/opensearch-project/OpenSearch/pull/1103)) +* Fix failing test caused by versioning change. ([#598](https://github.com/opensearch-project/OpenSearch/pull/598)) +* fixed broken anchor link. ([#436](https://github.com/opensearch-project/OpenSearch/pull/436)) +* [Rename] fix painless test ([#446](https://github.com/opensearch-project/OpenSearch/pull/446)) +* Fix name of the log appender. 
([#445](https://github.com/opensearch-project/OpenSearch/pull/445)) +* [Rename] Fixing lingering rename and ./gradlew run will start ([#443](https://github.com/opensearch-project/OpenSearch/pull/443)) +* Fixed copyright to OpenSearch ([#1175](https://github.com/opensearch-project/OpenSearch/pull/1175)) +* Fix defects in code-coverage.gradle to generate code coverage report properly ([#1214](https://github.com/opensearch-project/OpenSearch/pull/1214)) +* Fix failure in SearchCancellationIT.testMSearchChildReqCancellationWithHybridTimeout ([#1103](https://github.com/opensearch-project/OpenSearch/pull/1103)) +* Fix Snapshot pattern in DistributionDownloader. ([#916](https://github.com/opensearch-project/OpenSearch/pull/916)) +* Fix stragglers from renaming to OpenSearch work. ([#483](https://github.com/opensearch-project/OpenSearch/pull/483)) +* Fix rename issues and failing repository-hdfs tests. ([#518](https://github.com/opensearch-project/OpenSearch/pull/518)) +* Fix build-tools integ test failures. ([#465](https://github.com/opensearch-project/OpenSearch/pull/465)) +* Fix a few more renaming issues. 
([#464](https://github.com/opensearch-project/OpenSearch/pull/464)) +* Fix org.opensearch.index.reindex.ReindexRestClientSslTests#testClientSucceedsWithCertificateAuthorities - javax.net.ssl.SSLPeerUnverifiedException ([#1212](https://github.com/opensearch-project/OpenSearch/pull/1212)) +* Fix opensearch-env always sources the environment from hardcoded file ([#875](https://github.com/opensearch-project/OpenSearch/pull/875)) +* Fix resource leak issues suggested by Amazon CodeGuru ([#816](https://github.com/opensearch-project/OpenSearch/pull/816)) +* Fix arm architecture translation issue ([#809](https://github.com/opensearch-project/OpenSearch/pull/809)) +* Fix Javadoc errors in `client/sniffer` ([#802](https://github.com/opensearch-project/OpenSearch/pull/802)) +* [BWC] fix mixedCluster and rolling upgrades ([#775](https://github.com/opensearch-project/OpenSearch/pull/775)) +* Fix #649: Properly escape @ in JavaDoc. ([#651](https://github.com/opensearch-project/OpenSearch/pull/651)) +* Fix snapshot deletion task getting stuck in the event of exceptions ([#629](https://github.com/opensearch-project/OpenSearch/pull/629)) +* Fix failing test caused by versioning change. ([#598](https://github.com/opensearch-project/OpenSearch/pull/598)) +* Use the correct domain to fix failing integration tests. 
([#519](https://github.com/opensearch-project/OpenSearch/pull/519)) +* Change OpenSearch Version to OpenSearch version to fix failed test case org.opensearch.plugins.ListPluginsCommandTests.testPluginWithNativeController ([#460](https://github.com/opensearch-project/OpenSearch/pull/460)) +* [Rename] Fix env variables and old es maven repo ([#439](https://github.com/opensearch-project/OpenSearch/pull/439)) +* ignore_malformed parameter on ip_range data_type throws mapper_parsing_exception ([#2429](https://github.com/opensearch-project/OpenSearch/pull/2429)) +* Discrepancy in result from _validate/query API and actual query validity ([#2416](https://github.com/opensearch-project/OpenSearch/pull/2416)) +* MapperService has to be passed in as null for EnginePlugins CodecService constructor ([#2177](https://github.com/opensearch-project/OpenSearch/pull/2177)) +* Adding shards per node constraint for predictability to testClusterGr… ([#2110](https://github.com/opensearch-project/OpenSearch/pull/2110)) +* Mapping update for “date_range” field type is not idempotent ([#2094](https://github.com/opensearch-project/OpenSearch/pull/2094)) +* Use Version.compareMajor instead of using equals operator ([#1876](https://github.com/opensearch-project/OpenSearch/pull/1876)) +* Execution failed for task ':test:fixtures:azure/s3/hdfs/gcs-fixture:composeDown' ([#1824](https://github.com/opensearch-project/OpenSearch/pull/1824)) +* RestIntegTestTask fails because of missed log4j-core dependency ([#1815](https://github.com/opensearch-project/OpenSearch/pull/1815)) +* Start MockLogAppender before adding to static context ([#1587](https://github.com/opensearch-project/OpenSearch/pull/1587)) +* Use a non-default port for upgrade-cli unit tests ([#1512](https://github.com/opensearch-project/OpenSearch/pull/1512)) +* Close first engine instance before creating second ([#1457](https://github.com/opensearch-project/OpenSearch/pull/1457)) +* Avoid crashing on using the index.lifecycle.name in the 
API body ([#1060](https://github.com/opensearch-project/OpenSearch/pull/1060)) +* Max scroll limit breach to throw a OpenSearchRejectedExecutionException ([#1054](https://github.com/opensearch-project/OpenSearch/pull/1054)) +* Extract excludes into a file, fix the link checker by adding http://site.icu-project.org/. ([#1189](https://github.com/opensearch-project/OpenSearch/pull/1189)) +* Prevent /_cat/master from getting tripped by the CB ([#1036](https://github.com/opensearch-project/OpenSearch/pull/1036)) +* Excluding missed broken links from link checker ([#1010](https://github.com/opensearch-project/OpenSearch/pull/1010)) +* Excluding links from link checker ([#995](https://github.com/opensearch-project/OpenSearch/pull/995)) +* Version checks are incorrectly returning versions < 1.0.0. ([#797](https://github.com/opensearch-project/OpenSearch/pull/797)) +* Make `:server:check` pass successfully ([#471](https://github.com/opensearch-project/OpenSearch/pull/471)) +* Correct the regex pattern for class path in testDieWithDignity() ([#466](https://github.com/opensearch-project/OpenSearch/pull/466)) +* Change ESLoggingHandler to OpenSearchLoggingHandler to pass failing test case org.opensearch.transport.netty4.OpenSearchLoggingHandlerIT.testLoggingHandler due to renaming ([#461](https://github.com/opensearch-project/OpenSearch/pull/461)) + + +### Infrastructure + +* Using Github App token to trigger CI for version increment PRs ([#2157](https://github.com/opensearch-project/OpenSearch/pull/2157)) +* Using Github App to trigger CI for auto-backport ([#2071](https://github.com/opensearch-project/OpenSearch/pull/2071)) +* Remove precommit and wrapper validation workflows for gradle as we migrate it to internal CI tools ([#452](https://github.com/opensearch-project/OpenSearch/pull/452)) +* Updated the url for docker distribution ([#2325](https://github.com/opensearch-project/OpenSearch/pull/2325)) +* Recommend Docker 3.6.0. 
([#1427](https://github.com/opensearch-project/OpenSearch/pull/1427)) +* docker build: use OSS `log4j2.properties` ([#878](https://github.com/opensearch-project/OpenSearch/pull/878)) +* [DOCKER] add apt update to test fixture krb5kdc ([#565](https://github.com/opensearch-project/OpenSearch/pull/565)) +* Cleanup `default` flavor stragglers from docker distributions. ([#481](https://github.com/opensearch-project/OpenSearch/pull/481)) +* Replace blacklist in Gradle build environment configuration (#2752) ([#2781](https://github.com/opensearch-project/OpenSearch/pull/2781)) +* Add 1.3.2 to main causing gradle check failures (#2679) ([#2684](https://github.com/opensearch-project/OpenSearch/pull/2684)) +* Added jenkinsfile to run gradle check in OpenSearch (#2166) ([#2629](https://github.com/opensearch-project/OpenSearch/pull/2629)) +* Gradle check retry (#2638) ([#2661](https://github.com/opensearch-project/OpenSearch/pull/2661)) +* Move Gradle wrapper and precommit checks into OpenSearch repo. ([#1664](https://github.com/opensearch-project/OpenSearch/pull/1664)) +* Enabling missingJavadoc validation in gradle check ([#721](https://github.com/opensearch-project/OpenSearch/pull/721)) +* Removing Jenkinsfile (not used), replaced by opensearch-build/jenkins/opensearch/Jenkinsfile ([#1408](https://github.com/opensearch-project/OpenSearch/pull/1408)) +* Changed JAVA_HOME to jdk-17 (#2656) ([#2671](https://github.com/opensearch-project/OpenSearch/pull/2671)) +* Adding support for JDK17 and removing JDK8 ([#2025](https://github.com/opensearch-project/OpenSearch/pull/2025)) +* Add darwin-arm64-tar and no-jdk-darwin-arm64-tar archive distributions. 
([#1668](https://github.com/opensearch-project/OpenSearch/pull/1668)) +* Better JDK-18 EA (and beyond) support of SecurityManager ([#1750](https://github.com/opensearch-project/OpenSearch/pull/1750)) +* Support JDK 18 EA builds ([#1710](https://github.com/opensearch-project/OpenSearch/pull/1710)) +* Adding 1.2.2 ([#1731](https://github.com/opensearch-project/OpenSearch/pull/1731)) +* Add version 1.2.1. ([#1701](https://github.com/opensearch-project/OpenSearch/pull/1701)) +* Add version 1.2.3. ([#1760](https://github.com/opensearch-project/OpenSearch/pull/1760)) +* Modernize and consolidate JDKs usage across all stages of the build. Use JDK-17 as bundled JDK distribution to run tests ([#1358](https://github.com/opensearch-project/OpenSearch/pull/1358)) +* Fix build-tools/reaper source/target compatibility to be JDK-11 (#2596) ([#2606](https://github.com/opensearch-project/OpenSearch/pull/2606)) +* Add darwin-arm64-tar and no-jdk-darwin-arm64-tar archive distributions. ([#1668](https://github.com/opensearch-project/OpenSearch/pull/1668)) +* Remove Github DCO action since DCO runs via Github App now ([#2317](https://github.com/opensearch-project/OpenSearch/pull/2317)) +* Adding Github action for auto backport PR creation ([#1600](https://github.com/opensearch-project/OpenSearch/pull/1600)) +* Add a whitesource unified agent file and update the config ([#1540](https://github.com/opensearch-project/OpenSearch/pull/1540)) +* Run link checker GitHub action on schedule. ([#1221](https://github.com/opensearch-project/OpenSearch/pull/1221)) +* Clarify opensearch.version to not include -SNAPSHOT. ([#1186](https://github.com/opensearch-project/OpenSearch/pull/1186)) +* Move pr template to .github as default since folder design required manually added to url ([#458](https://github.com/opensearch-project/OpenSearch/pull/458)) +* changed label from low hanging fruit to help wanted. added link to filter for that label. 
Added link to forum ([#435](https://github.com/opensearch-project/OpenSearch/pull/435)) +* adding in untriaged label to features ([#1419](https://github.com/opensearch-project/OpenSearch/pull/1419)) +* Run spotless and exclude checkstyle on plugins module ([#1417](https://github.com/opensearch-project/OpenSearch/pull/1417)) +* Adding spotless support for subprojects under :test ([#1464](https://github.com/opensearch-project/OpenSearch/pull/1464)) +* Run spotless and exclude checkstyle on rest-api-spec module ([#1462](https://github.com/opensearch-project/OpenSearch/pull/1462)) +* Run spotless and exclude checkstyle on modules module ([#1442](https://github.com/opensearch-project/OpenSearch/pull/1442)) +* Enabling spotless, disabling checkstyle check on plugins ([#1488](https://github.com/opensearch-project/OpenSearch/pull/1488)) +* Cleanup for Checkstyle ([#1370](https://github.com/opensearch-project/OpenSearch/pull/1370)) +* Run spotless and exclude checkstyle on libs module ([#1428](https://github.com/opensearch-project/OpenSearch/pull/1428)) +* Run spotless and exclude checkstyle on client module ([#1392](https://github.com/opensearch-project/OpenSearch/pull/1392)) +* Run spotless and exclude checkstyle on server module ([#1380](https://github.com/opensearch-project/OpenSearch/pull/1380)) +* Change whitesource integration to scan on 1.x branch ([#1786](https://github.com/opensearch-project/OpenSearch/pull/1786)) +* Add .whitesource configuration file ([#1525](https://github.com/opensearch-project/OpenSearch/pull/1525)) +* add codeowners file ([#1530](https://github.com/opensearch-project/OpenSearch/pull/1530)) +* Updated links for linkchecker ([#1539](https://github.com/opensearch-project/OpenSearch/pull/1539)) +* Updating dependabot open pr limits ([#1875](https://github.com/opensearch-project/OpenSearch/pull/1875)) +* Updating .gitattributes for additional file types ([#1727](https://github.com/opensearch-project/OpenSearch/pull/1727)) +* Updating the Ivy 
repository to point to real url for Releases ([#602](https://github.com/opensearch-project/OpenSearch/pull/602)) +* build: introduce support for reproducible builds ([#1995](https://github.com/opensearch-project/OpenSearch/pull/1995)) +* Add support to generate code coverage report with JaCoCo ([#971](https://github.com/opensearch-project/OpenSearch/pull/971)) +* Support running elasticsearch-oss distribution in test cluster for BWC ([#764](https://github.com/opensearch-project/OpenSearch/pull/764)) +* FreeBSD Java support ([#1014](https://github.com/opensearch-project/OpenSearch/pull/1014)) +* Override Default Distribution Download Url with Custom Distribution Url when it is passed from Plugin ([#2420](https://github.com/opensearch-project/OpenSearch/pull/2420)) +* Restore Java 8 compatibility for build tools. (#2300) ([#2321](https://github.com/opensearch-project/OpenSearch/pull/2321)) +* Revert "Override Default Distribution Download Url with Custom Distribution Url When User Passes a Url" ([#2256](https://github.com/opensearch-project/OpenSearch/pull/2256)) +* Override Default Distribution Download Url with Custom Distribution Url When User Passes a Url ([#2086](https://github.com/opensearch-project/OpenSearch/pull/2086)) +* added config file to git issue template directory to disable blank issue creation ([#2158](https://github.com/opensearch-project/OpenSearch/pull/2158)) +* Add JetBrains Gateway setup details ([#1944](https://github.com/opensearch-project/OpenSearch/pull/1944)) +* Adding workflow to auto delete backport merged branches from backport workflow ([#2050](https://github.com/opensearch-project/OpenSearch/pull/2050)) +* Add IssueNavigationLink ([#1964](https://github.com/opensearch-project/OpenSearch/pull/1964)) +* Using pull_request_target in place of pull_request ([#1952](https://github.com/opensearch-project/OpenSearch/pull/1952)) +* Using custom branch name for auto backporting PRs 
([#1862](https://github.com/opensearch-project/OpenSearch/pull/1862)) +* Added help to build distributions in docs ([#1898](https://github.com/opensearch-project/OpenSearch/pull/1898)) +* Auto-increment next development iteration. ([#1816](https://github.com/opensearch-project/OpenSearch/pull/1816)) +* Catching Maintainers up for Q4 2021 new additions/removals ([#1841](https://github.com/opensearch-project/OpenSearch/pull/1841)) +* Added .gitattributes to manage end-of-line checks for Windows/*nix systems ([#1638](https://github.com/opensearch-project/OpenSearch/pull/1638)) +* Add staged version 1.1.1 ([#1506](https://github.com/opensearch-project/OpenSearch/pull/1506)) +* [BWC] Disable BWC tests until branch versions are synced ([#1508](https://github.com/opensearch-project/OpenSearch/pull/1508)) +* Moving DCO to workflows ([#1458](https://github.com/opensearch-project/OpenSearch/pull/1458)) +* changed work-in-progress language ([#1275](https://github.com/opensearch-project/OpenSearch/pull/1275)) +* Removed beta from new issues. ([#1071](https://github.com/opensearch-project/OpenSearch/pull/1071)) +* Include sources and javadoc artifacts while publishing to a Maven repository ([#1049](https://github.com/opensearch-project/OpenSearch/pull/1049)) +* Replaced custom built JNA by official JNA distribution. 
([#1003](https://github.com/opensearch-project/OpenSearch/pull/1003)) +* [Version] Don't spoof major for 3.0+ clusters (#2722) ([#2749](https://github.com/opensearch-project/OpenSearch/pull/2749)) +* adds ToC ([#2546](https://github.com/opensearch-project/OpenSearch/pull/2546)) +* Add Version.V_1_2_5 constant +* add 1.2.5 to bwcVersions +* [Deprecate] Setting explicit version on analysis component ([#1978](https://github.com/opensearch-project/OpenSearch/pull/1978)) +* [Deprecate] index.merge.policy.max_merge_at_once_explicit ([#1981](https://github.com/opensearch-project/OpenSearch/pull/1981)) +* [plugin] repository-azure: add configuration settings for connect/write/response/read timeouts ([#1789](https://github.com/opensearch-project/OpenSearch/pull/1789)) +* [plugin] repository-azure is not working properly hangs on basic operations (#1740) ([#1749](https://github.com/opensearch-project/OpenSearch/pull/1749)) +* [main] Add staged version 1.3.0 for bwc ([#1510](https://github.com/opensearch-project/OpenSearch/pull/1510)) +* [repository-azure] plugin should use Azure Storage SDK v12 for Java ([#1302](https://github.com/opensearch-project/OpenSearch/pull/1302)) +* Allow building on FreeBSD ([#1091](https://github.com/opensearch-project/OpenSearch/pull/1091)) +* initial commit to add in a dependabot.yml file ([#1353](https://github.com/opensearch-project/OpenSearch/pull/1353)) +* Rename artifact produced by the build to include -min ([#1251](https://github.com/opensearch-project/OpenSearch/pull/1251)) +* [Version] Add 1.2 for BWC testing ([#1241](https://github.com/opensearch-project/OpenSearch/pull/1241)) +* Exclude failing links from plugins/modules ([#1223](https://github.com/opensearch-project/OpenSearch/pull/1223)) +* Kept the original constructor for PluginInfo to maintain bwc ([#1206](https://github.com/opensearch-project/OpenSearch/pull/1206)) +* [Version] Increment main to 2.0 ([#1192](https://github.com/opensearch-project/OpenSearch/pull/1192)) +* Added 
all icu-project.org websites to the link checker exclusions. ([#1201](https://github.com/opensearch-project/OpenSearch/pull/1201)) +* Add 1.0.1 revision ([#1152](https://github.com/opensearch-project/OpenSearch/pull/1152)) +* distribution/packages: Fix filename format for deb archives ([#621](https://github.com/opensearch-project/OpenSearch/pull/621)) +* [Versioning] Fix Version.fromString logic for legacy version ([#604](https://github.com/opensearch-project/OpenSearch/pull/604)) +* Rename the distribution used in test clusters. ([#603](https://github.com/opensearch-project/OpenSearch/pull/603)) +* clean up rpm artifact naming ([#590](https://github.com/opensearch-project/OpenSearch/pull/590)) +* changed to point to open issues rather than the project board +* Update Plugin Signing Key ([#512](https://github.com/opensearch-project/OpenSearch/pull/512)) +* Use OpenSearch artifacts URL for official plugin installation. ([#490](https://github.com/opensearch-project/OpenSearch/pull/490)) +* Perform more renaming to OpenSearch. 
([#470](https://github.com/opensearch-project/OpenSearch/pull/470)) +* Adding instructions on License and DCO practices to PR template ([#462](https://github.com/opensearch-project/OpenSearch/pull/462)) +* Remove lingering instances of Default distribution in favour of Oss ([#440](https://github.com/opensearch-project/OpenSearch/pull/440)) +* Validation for official plugins for upgrade tool ([#973](https://github.com/opensearch-project/OpenSearch/pull/973)) +* Lower build requirement from Java 14+ to Java 11+ ([#940](https://github.com/opensearch-project/OpenSearch/pull/940)) +* Add Snapshot maven repository ([#829](https://github.com/opensearch-project/OpenSearch/pull/829)) +* distribution/packages: Fix RPM architecture name for 64-bit x86 ([#620](https://github.com/opensearch-project/OpenSearch/pull/620)) +* Update issue template with multiple labels ([#668](https://github.com/opensearch-project/OpenSearch/pull/668)) +* Renaming CPU architecture to have consistent naming ([#612](https://github.com/opensearch-project/OpenSearch/pull/612)) + +### Documentation + +* Adding workflow to create documentation related issues in documentation-website repo (#2929) ([#2976](https://github.com/opensearch-project/OpenSearch/pull/2976)) +* Updating auto backport documentation ([#1620](https://github.com/opensearch-project/OpenSearch/pull/1620)) +* Updating README and CONTRIBUTING guide to get ready for beta1 release. ([#672](https://github.com/opensearch-project/OpenSearch/pull/672)) +* Update instructions on debugging OpenSearch. 
([#689](https://github.com/opensearch-project/OpenSearch/pull/689)) +* Fixing typo in TESTING.md ([#1849](https://github.com/opensearch-project/OpenSearch/pull/1849)) +* Fix JavaDoc typo in XContentBuilder ([#1739](https://github.com/opensearch-project/OpenSearch/pull/1739)) +* Update Readme ([#433](https://github.com/opensearch-project/OpenSearch/pull/433)) +* Fix DCO CLI example in CONTRIBUTING.md ([#576](https://github.com/opensearch-project/OpenSearch/pull/576)) +* Change comment to point to DEVELOPER_GUIDE.md ([#1415](https://github.com/opensearch-project/OpenSearch/pull/1415)) +* [typos] typos in DEVELOPER_GUIDE.md ([#1381](https://github.com/opensearch-project/OpenSearch/pull/1381)) +* Adding Security Reporting Instructions in README.md file Signed-off-by: Rishikesh Reddy Pasham rishireddy1159@gmail.com ([#1326](https://github.com/opensearch-project/OpenSearch/pull/1326)) +* Add guide for generating code coverage report in TESTING.md ([#1264](https://github.com/opensearch-project/OpenSearch/pull/1264)) +* Added Eclipse import instructions to DEVELOPER_GUIDE.md ([#1215](https://github.com/opensearch-project/OpenSearch/pull/1215)) +* Update/maintainers.md ([#723](https://github.com/opensearch-project/OpenSearch/pull/723)) +* Added a link to the maintainer file in contribution guides ([#589](https://github.com/opensearch-project/OpenSearch/pull/589)) +* Updated READMEs on releasing, maintaining, admins and security. ([#853](https://github.com/opensearch-project/OpenSearch/pull/853)) +* adding components to DEVELOPER_GUIDE ([#1200](https://github.com/opensearch-project/OpenSearch/pull/1200)) +* Update developer guide reference to download JDK 14 ([#1452](https://github.com/opensearch-project/OpenSearch/pull/1452)) +* [WIP] Developer guide updates ([#595](https://github.com/opensearch-project/OpenSearch/pull/595)) +* Update README with getting started ([#549](https://github.com/opensearch-project/OpenSearch/pull/549)) +* Update Developers Guide. 
([#522](https://github.com/opensearch-project/OpenSearch/pull/522)) +* Update LICENSE.txt +* [License] Add SPDX and OpenSearch Modification license header ([#509](https://github.com/opensearch-project/OpenSearch/pull/509)) +* [License] Update SPDX License Header ([#510](https://github.com/opensearch-project/OpenSearch/pull/510)) +* Cleanup TESTING and DEVELOPER_GUIDE markdowns ([#946](https://github.com/opensearch-project/OpenSearch/pull/946)) +* Add 1.3.0 release notes in main ([#2489](https://github.com/opensearch-project/OpenSearch/pull/2489)) +* Add release notes for 1.2.4 ([#1934](https://github.com/opensearch-project/OpenSearch/pull/1934)) +* Added release notes for 1.2.3. ([#1791](https://github.com/opensearch-project/OpenSearch/pull/1791)) +* Adding release notes for 1.2.2 ([#1730](https://github.com/opensearch-project/OpenSearch/pull/1730)) +* Adding release notes for 1.2.1 ([#1725](https://github.com/opensearch-project/OpenSearch/pull/1725)) +* Add 1.2 release notes and correct 1.1 release notes. ([#1581](https://github.com/opensearch-project/OpenSearch/pull/1581)) +* Generate release notes for 1.1 ([#1230](https://github.com/opensearch-project/OpenSearch/pull/1230)) +* Update release note for GA 1.0 with new commits and removes #547 ([#953](https://github.com/opensearch-project/OpenSearch/pull/953)) +* Adding release notes for 1.0.0 ([#885](https://github.com/opensearch-project/OpenSearch/pull/885)) +* Adding release notes for 1.0.0-rc1 ([#794](https://github.com/opensearch-project/OpenSearch/pull/794)) +* Modified TESTING instructions to clarify use of testing classes ([#1930](https://github.com/opensearch-project/OpenSearch/pull/1930)) +* Clarify JDK requirement in the developer guide ([#1153](https://github.com/opensearch-project/OpenSearch/pull/1153)) +* Add trademark notice ([#2473](https://github.com/opensearch-project/OpenSearch/pull/2473)) +* Expand SearchPlugin javadocs. 
([#1909](https://github.com/opensearch-project/OpenSearch/pull/1909)) +* Linked the formatting setting file ([#1860](https://github.com/opensearch-project/OpenSearch/pull/1860)) +* Add more instructions how to install/configure git secrets ([#1202](https://github.com/opensearch-project/OpenSearch/pull/1202)) +* Add themed logo to README ([#988](https://github.com/opensearch-project/OpenSearch/pull/988)) +* Replace Elasticsearch docs links in scripts ([#994](https://github.com/opensearch-project/OpenSearch/pull/994)) +* Cleaned up developer guide, added TOC. ([#572](https://github.com/opensearch-project/OpenSearch/pull/572)) +* Document running individual tests. ([#741](https://github.com/opensearch-project/OpenSearch/pull/741)) +* [License] Add SPDX License Header to security policies ([#531](https://github.com/opensearch-project/OpenSearch/pull/531)) +* Added a maintainers file ([#523](https://github.com/opensearch-project/OpenSearch/pull/523)) +* Remove extra greater-thans from README ([#527](https://github.com/opensearch-project/OpenSearch/pull/527)) +* [Rename] Update Vagrantfile ([#515](https://github.com/opensearch-project/OpenSearch/pull/515)) +* [README] Remove stale information ([#513](https://github.com/opensearch-project/OpenSearch/pull/513)) +* [Rename] Change license header and copyright notice to SPDX ([#437](https://github.com/opensearch-project/OpenSearch/pull/437)) + + +### Maintenance + +* Make discovered_master field optional on the client to support compatibility for opensearch client with odfe (#2641) ([#2653](https://github.com/opensearch-project/OpenSearch/pull/2653)) +* Update azure-storage-blob to 12.15.0: fix test flakiness (#2795) ([#2799](https://github.com/opensearch-project/OpenSearch/pull/2799)) +* Update azure-storage-blob to 12.15.0 (#2774) ([#2778](https://github.com/opensearch-project/OpenSearch/pull/2778)) +* Update the BWC versions (post 1.x backport) ([#2390](https://github.com/opensearch-project/OpenSearch/pull/2390)) +* 
Update bwc versions for (#2237) ([#2248](https://github.com/opensearch-project/OpenSearch/pull/2248)) +* Update #2103 BWC Versions ([#2173](https://github.com/opensearch-project/OpenSearch/pull/2173)) +* Update bundled JDK distribution to 17.0.2+8 ([#2007](https://github.com/opensearch-project/OpenSearch/pull/2007)) +* Update Mockito to 4.3.1 ([#1973](https://github.com/opensearch-project/OpenSearch/pull/1973)) +* Update protobuf-java to 3.19.3 ([#1945](https://github.com/opensearch-project/OpenSearch/pull/1945)) +* Update Netty to 4.1.73.Final ([#1936](https://github.com/opensearch-project/OpenSearch/pull/1936)) +* Update FIPS API libraries of Bouncy Castle ([#1853](https://github.com/opensearch-project/OpenSearch/pull/1853)) +* Update junit to 4.13.1 ([#1837](https://github.com/opensearch-project/OpenSearch/pull/1837)) +* Update Mockito to 4.2.x ([#1830](https://github.com/opensearch-project/OpenSearch/pull/1830)) +* Upgrading bouncycastle to 1.70 ([#1832](https://github.com/opensearch-project/OpenSearch/pull/1832)) +* Updating Netty to 4.1.72.Final ([#1831](https://github.com/opensearch-project/OpenSearch/pull/1831)) +* Update to log4j 2.17.1 ([#1820](https://github.com/opensearch-project/OpenSearch/pull/1820)) +* Update to log4j 2.17.0 ([#1771](https://github.com/opensearch-project/OpenSearch/pull/1771)) +* [repository-azure] Update to the latest Azure Storage SDK v12, remove privileged runnable wrapper in favor of access helper ([#1521](https://github.com/opensearch-project/OpenSearch/pull/1521)) +* Update bundled JDK distribution to 17.0.1+12 ([#1476](https://github.com/opensearch-project/OpenSearch/pull/1476)) +* Upgrading netty version to 4.1.69.Final ([#1363](https://github.com/opensearch-project/OpenSearch/pull/1363)) +* Modernize and consolidate JDKs usage across all stages of the build. 
Update JDK-14 requirement, switch to JDK-17 instead ([#1368](https://github.com/opensearch-project/OpenSearch/pull/1368)) +* Upgrade hadoop dependencies for hdfs plugin ([#1335](https://github.com/opensearch-project/OpenSearch/pull/1335)) +* Replace securemock with mock-maker (test support), update Mockito to 3.12.4 ([#1332](https://github.com/opensearch-project/OpenSearch/pull/1332)) +* Update Jackson to 2.12.5 ([#1247](https://github.com/opensearch-project/OpenSearch/pull/1247)) +* Update DistributionDownloader to support fetching arm64 bundles. ([#929](https://github.com/opensearch-project/OpenSearch/pull/929)) +* Update favicon for OpenSearch ([#932](https://github.com/opensearch-project/OpenSearch/pull/932)) +* Update DistributionDownloader to fetch snapshots and staging bundles. ([#904](https://github.com/opensearch-project/OpenSearch/pull/904)) +* Version bump for 1.1 release ([#772](https://github.com/opensearch-project/OpenSearch/pull/772)) +* update external library 'pdfbox' version to 2.0.24 to reduce vulnerability ([#883](https://github.com/opensearch-project/OpenSearch/pull/883)) +* Update dependencies for ingest-attachment plugin. ([#666](https://github.com/opensearch-project/OpenSearch/pull/666)) +* Update hadoop-minicluster version for test fixture. ([#645](https://github.com/opensearch-project/OpenSearch/pull/645)) +* Update remote repo for BWC checks. ([#482](https://github.com/opensearch-project/OpenSearch/pull/482)) +* Update year and developer info in generated POMs. 
([#444](https://github.com/opensearch-project/OpenSearch/pull/444)) +* Refresh OpenSearch nodes version in cluster state after upgrade ([#865](https://github.com/opensearch-project/OpenSearch/pull/865)) +* [Upgrade] ICU4j from 68.2 to 70.1 ([#2504](https://github.com/opensearch-project/OpenSearch/pull/2504)) +* Upgrade to log4j 2.16.0 ([#1721](https://github.com/opensearch-project/OpenSearch/pull/1721)) +* Upgrade to log4j 2.15.0 ([#1698](https://github.com/opensearch-project/OpenSearch/pull/1698)) +* Updating Log4j to 2.11.2 ([#1696](https://github.com/opensearch-project/OpenSearch/pull/1696)) +* Upgrade dependency ([#1571](https://github.com/opensearch-project/OpenSearch/pull/1571)) +* Upgrade apache commons-compress to 1.21 ([#1197](https://github.com/opensearch-project/OpenSearch/pull/1197)) +* Removed java11 source folders since JDK-11 is the baseline now (#2898) ([#2953](https://github.com/opensearch-project/OpenSearch/pull/2953)) +* [Remove] MainResponse version override cluster setting (#3031) ([#3033](https://github.com/opensearch-project/OpenSearch/pull/3033)) +* [Remove] remaining AllFieldMapper references (#3007) ([#3010](https://github.com/opensearch-project/OpenSearch/pull/3010)) +* [2.x] Remove deprecation warning of using REST API request parameter 'master_timeout' (#2920) ([#2931](https://github.com/opensearch-project/OpenSearch/pull/2931)) +* [Rename] ESTestCase stragglers to OpenSearchTestCase (#3053) ([#3064](https://github.com/opensearch-project/OpenSearch/pull/3064)) +* Use G1GC on JDK11+ (#2964) ([#2970](https://github.com/opensearch-project/OpenSearch/pull/2970)) +* Remove endpoint_suffix dependency on account key (#2485) ([#2808](https://github.com/opensearch-project/OpenSearch/pull/2808)) +* Updating repository commons logging version ([#2541](https://github.com/opensearch-project/OpenSearch/pull/2541)) +* Upgrading Shadow plugin to 7.1.2 ([#2033](https://github.com/opensearch-project/OpenSearch/pull/2033)) +* Upgrading Jackson-Databind 
version ([#1982](https://github.com/opensearch-project/OpenSearch/pull/1982)) +* Upgrading commons-codec in hdfs-fixture and cleaning up dependencies in repository-hdfs ([#1603](https://github.com/opensearch-project/OpenSearch/pull/1603)) +* Upgrading gson to 2.8.9 ([#1541](https://github.com/opensearch-project/OpenSearch/pull/1541)) +* Upgrading dependencies ([#1491](https://github.com/opensearch-project/OpenSearch/pull/1491)) +* Upgrading dependencies in hdfs plugin ([#1466](https://github.com/opensearch-project/OpenSearch/pull/1466)) +* Upgrading mockito version to make it consistent across the repo ([#1410](https://github.com/opensearch-project/OpenSearch/pull/1410)) +* Change deprecation message for REST API parameter 'master_timeout' to specify the version of removal (#2863) ([#2865](https://github.com/opensearch-project/OpenSearch/pull/2865)) +* Update ThirdPartyAuditTask to check for and list pointless exclusions. (#2760) ([#2765](https://github.com/opensearch-project/OpenSearch/pull/2765)) +* Add Shadow jar publication to lang-painless module. 
(#2681) ([#2712](https://github.com/opensearch-project/OpenSearch/pull/2712)) +* Add mapping method back referenced in other repos (#2636) ([#2649](https://github.com/opensearch-project/OpenSearch/pull/2649)) +* Move Jackson-databind to 2.13.2 ([#2548](https://github.com/opensearch-project/OpenSearch/pull/2548)) +* [Unmute] NumberFieldTypeTests ([#2531](https://github.com/opensearch-project/OpenSearch/pull/2531)) +* [Unmute] IndexPrimaryRelocationIT ([#2488](https://github.com/opensearch-project/OpenSearch/pull/2488)) +* [Remove] TrimUnsafeCommit logic for legacy 6.x indexes ([#2225](https://github.com/opensearch-project/OpenSearch/pull/2225)) +* Adjust main version after backport to 1.x ([#2147](https://github.com/opensearch-project/OpenSearch/pull/2147)) +* [Remove] CircuitBreaker Accounting ([#2056](https://github.com/opensearch-project/OpenSearch/pull/2056)) +* [Remove] Segment memory estimation and tracking ([#2029](https://github.com/opensearch-project/OpenSearch/pull/2029)) +* [Remove] index.merge.policy.max_merge_at_once_explicit ([#1988](https://github.com/opensearch-project/OpenSearch/pull/1988)) +* [Remove] Setting explicit version on analysis component ([#1986](https://github.com/opensearch-project/OpenSearch/pull/1986)) +* Wildcard max_expansion version check update ([#1980](https://github.com/opensearch-project/OpenSearch/pull/1980)) +* Removing lingering transportclient ([#1955](https://github.com/opensearch-project/OpenSearch/pull/1955)) +* [BWC] Ensure 2.x compatibility with Legacy 7.10.x ([#1902](https://github.com/opensearch-project/OpenSearch/pull/1902)) +* File name correction to follow existing convention ([#1874](https://github.com/opensearch-project/OpenSearch/pull/1874)) +* [Remove] Old Translog Checkpoint Format ([#1884](https://github.com/opensearch-project/OpenSearch/pull/1884)) +* Remove unwanted unreleased versions ([#1877](https://github.com/opensearch-project/OpenSearch/pull/1877)) +* replace with opensearch-http-channel and 
opensearch-http-server-channel ([#1799](https://github.com/opensearch-project/OpenSearch/pull/1799)) +* Add bwc version 1.2.4 ([#1796](https://github.com/opensearch-project/OpenSearch/pull/1796)) +* [Remove] various builder and mapping deprecations ([#1752](https://github.com/opensearch-project/OpenSearch/pull/1752)) +* [Remove] Remaining Flavor Serialization ([#1751](https://github.com/opensearch-project/OpenSearch/pull/1751)) +* [Remove] DynamicTemplate deprecations ([#1742](https://github.com/opensearch-project/OpenSearch/pull/1742)) +* [Remove] Analyzer Deprecations ([#1741](https://github.com/opensearch-project/OpenSearch/pull/1741)) +* Drop mocksocket & securemock dependencies from sniffer and rest client (not needed) ([#1174](https://github.com/opensearch-project/OpenSearch/pull/1174)) +* [BWC] Temporarily disable bwc testing while bumping 1.0.1 +* [DEPRECATE] SimpleFS in favor of NIOFS ([#1073](https://github.com/opensearch-project/OpenSearch/pull/1073)) +* Replace JCenter with Maven Central. ([#1057](https://github.com/opensearch-project/OpenSearch/pull/1057)) +* Restoring alpha/beta/rc version semantics ([#1112](https://github.com/opensearch-project/OpenSearch/pull/1112)) +* Remove `client/sniffer` from Javadoc exemption list ([#818](https://github.com/opensearch-project/OpenSearch/pull/818)) +* Removed pre-alpha notes. ([#815](https://github.com/opensearch-project/OpenSearch/pull/815)) +* Remove checks for legacy .yaml and .json config files. ([#792](https://github.com/opensearch-project/OpenSearch/pull/792)) +* Remove reference to an EC2 instance type. 
([#812](https://github.com/opensearch-project/OpenSearch/pull/812)) +* Remove all elastic.co references from javadocs ([#586](https://github.com/opensearch-project/OpenSearch/pull/586)) +* Remove the oss string from OpenSearch distributions ([#575](https://github.com/opensearch-project/OpenSearch/pull/575)) +* [Rename] Remove final references to legacy keystore ([#514](https://github.com/opensearch-project/OpenSearch/pull/514)) +* changed Apache to Apache 2.0. Numbered principles +* fixed apache to apache 2.0 +* Replace nio and nitty test endpoint ([#475](https://github.com/opensearch-project/OpenSearch/pull/475)) +* [Rename] org.elasticsearch.client.documentation.SearchDocumentationIT.testSearchRequestSuggestions ([#467](https://github.com/opensearch-project/OpenSearch/pull/467)) + +### Refactoring + +* [Rename] Refactoring Elastic references in docker and kerberos builds (#428) ([#438](https://github.com/opensearch-project/OpenSearch/pull/438)) +* [Refactor] LuceneChangesSnapshot to use accurate ops history ([#2452](https://github.com/opensearch-project/OpenSearch/pull/2452)) +* Refactoring gated and ref-counted interfaces and their implementations ([#2396](https://github.com/opensearch-project/OpenSearch/pull/2396)) +* [Refactor] MapperService to QueryShardContext in valueFetcher ([#2027](https://github.com/opensearch-project/OpenSearch/pull/2027)) +* [Refactor] Lucene DataInput and DataOutput to StreamInput and StreamOutput ([#2035](https://github.com/opensearch-project/OpenSearch/pull/2035)) +* [Refactor] InternalEngine to always use soft deletes ([#1933](https://github.com/opensearch-project/OpenSearch/pull/1933)) +* Refactor LegacyESVersion tests from Version tests ([#1662](https://github.com/opensearch-project/OpenSearch/pull/1662)) +* Remove the IndexCommitRef class ([#2421](https://github.com/opensearch-project/OpenSearch/pull/2421)) +* Decouple IndexSettings from IncludeExclude ([#2860](https://github.com/opensearch-project/OpenSearch/pull/2860)) +* 
Clear up some confusing code in IndexShardHotSpotTests ([#1534](https://github.com/opensearch-project/OpenSearch/pull/1534)) +* Rename reference to project OpenSearch was forked from ([#2483](https://github.com/opensearch-project/OpenSearch/pull/2483)) +* Introduce RestHandler.Wrapper to help with delegate implementations ([#1004](https://github.com/opensearch-project/OpenSearch/pull/1004)) + +### Tests + +* Add type mapping removal bwc tests for indexing, searching, snapshots ([#2901](https://github.com/opensearch-project/OpenSearch/pull/2901)) +* Removing SLM check in tests for OpenSearch versions (#2604) ([#2620](https://github.com/opensearch-project/OpenSearch/pull/2620)) +* Use Hamcrest matchers and assertThat() in ReindexRenamedSettingTests ([#2503](https://github.com/opensearch-project/OpenSearch/pull/2503)) +* [Test-Failure] Mute TranslogPolicyIT ([#2342](https://github.com/opensearch-project/OpenSearch/pull/2342)) +* Added timeout to ensureGreen() for testClusterGreenAfterPartialRelocation ([#2074](https://github.com/opensearch-project/OpenSearch/pull/2074)) +* Stabilizing org.opensearch.cluster.routing.MovePrimaryFirstTests.test… ([#2048](https://github.com/opensearch-project/OpenSearch/pull/2048)) +* Added timeout to ensureGreen() for testClusterGreenAfterPartialRelocation ([#1983](https://github.com/opensearch-project/OpenSearch/pull/1983)) +* Add hook to execute logic before Integ test task starts ([#1969](https://github.com/opensearch-project/OpenSearch/pull/1969)) +* Remove transport client from tests. 
([#1809](https://github.com/opensearch-project/OpenSearch/pull/1809)) +* [Tests] ClusterHealthIT:testHealthOnMasterFailover - Increase master node timeout ([#1812](https://github.com/opensearch-project/OpenSearch/pull/1812)) +* Ignore file order in test assertion ([#1755](https://github.com/opensearch-project/OpenSearch/pull/1755)) +* Integration test that checks for settings upgrade ([#1482](https://github.com/opensearch-project/OpenSearch/pull/1482)) +* [bwc] reenable bwc testing after syncing staged branches ([#1511](https://github.com/opensearch-project/OpenSearch/pull/1511)) +* [Tests] Translog Pruning tests to MetadataCreateIndexServiceTests ([#1295](https://github.com/opensearch-project/OpenSearch/pull/1295)) +* Reduce iterations to improve test run time ([#1168](https://github.com/opensearch-project/OpenSearch/pull/1168)) +* Tune datanode count and shards count to improve test run time ([#1170](https://github.com/opensearch-project/OpenSearch/pull/1170)) +* [BWC] Re-enable bwc testing after 1.0.1 version bump +* Add unit test for RestActionListener. Validate that onFailure() sends response even when BytesRestResponse can not be constructed using passed exception. Follow up on #923. ([#1024](https://github.com/opensearch-project/OpenSearch/pull/1024)) +* [TEST] Fix failing distro tests for linux packages ([#569](https://github.com/opensearch-project/OpenSearch/pull/569)) +* [TEST] Fix failing packaging tests for OpenSearch distributions. ([#541](https://github.com/opensearch-project/OpenSearch/pull/541)) +* Remove the references to xpack and elastic in tests. 
([#516](https://github.com/opensearch-project/OpenSearch/pull/516)) From 79eb3b04922f4d4688372b93550ded9feffd7338 Mon Sep 17 00:00:00 2001 From: Tianli Feng Date: Wed, 27 Apr 2022 06:07:35 -0700 Subject: [PATCH 128/653] Replace internal usages of 'master' term in 'server/src/main' directory (#2519) * Replace internal usages of 'master' terminology in server/src/main directory Signed-off-by: Tianli Feng * Restore rename DISCOVERED_MASTER in ClusterHealthResponse Signed-off-by: Tianli Feng * Rename two methods in unit tests Signed-off-by: Tianli Feng * Replace master word in ClusterState Signed-off-by: Tianli Feng * Replace master word in LeaderChecker JoinHelper JoinTaskExecutor Signed-off-by: Tianli Feng * Replace master word in more classes Signed-off-by: Tianli Feng * Replace master word in more classes Signed-off-by: Tianli Feng * Replace master word in more classes Signed-off-by: Tianli Feng * Replace master word in more classes Signed-off-by: Tianli Feng * Replace master word in more classes Signed-off-by: Tianli Feng * Replace master word in more classes Signed-off-by: Tianli Feng * Replace master word in DiscoveryNodes classes Signed-off-by: Tianli Feng * Replace master word in more classes Signed-off-by: Tianli Feng * Correct mistakes Signed-off-by: Tianli Feng * Adjust format by spotlessApply task Signed-off-by: Tianli Feng * Change MASTER__NODE_BOOTSTRAPPED_MSG in test Signed-off-by: Tianli Feng * Fix SnapshotDisruptionIT by renaming to cluster-manager Signed-off-by: Tianli Feng --- .../UnsafeBootstrapAndDetachCommandIT.java | 4 +- .../discovery/SnapshotDisruptionIT.java | 4 +- .../env/NodeRepurposeCommandIT.java | 2 +- .../org/opensearch/OpenSearchException.java | 4 +- ...ansportClusterAllocationExplainAction.java | 2 +- .../cluster/health/ClusterHealthResponse.java | 4 +- .../health/TransportClusterHealthAction.java | 4 +- .../TransportCleanupRepositoryAction.java | 8 +- .../TransportClusterUpdateSettingsAction.java | 8 +- 
.../restore/RestoreClusterStateListener.java | 4 +- .../TransportSnapshotsStatusAction.java | 6 +- .../cluster/state/ClusterStateResponse.java | 10 +- .../state/TransportClusterStateAction.java | 2 +- .../stats/ClusterStatsNodeResponse.java | 2 +- .../cluster/stats/ClusterStatsResponse.java | 4 +- .../admin/indices/dangling/package-info.java | 4 +- .../bulk/BulkPrimaryExecutionContext.java | 6 +- .../action/bulk/MappingUpdatePerformer.java | 2 +- .../action/bulk/TransportShardBulkAction.java | 2 +- .../node/TransportBroadcastByNodeAction.java | 6 +- .../master/AcknowledgedRequestBuilder.java | 2 +- .../MasterNodeOperationRequestBuilder.java | 6 +- ...MasterNodeReadOperationRequestBuilder.java | 2 +- .../support/master/MasterNodeReadRequest.java | 6 +- .../support/master/MasterNodeRequest.java | 6 +- .../master/TransportMasterNodeAction.java | 14 +- .../master/TransportMasterNodeReadAction.java | 2 +- .../action/support/nodes/BaseNodeRequest.java | 2 +- .../replication/ReplicationOperation.java | 3 +- .../action/update/TransportUpdateAction.java | 2 +- .../cluster/ClusterChangedEvent.java | 8 +- .../org/opensearch/cluster/ClusterState.java | 39 ++--- .../cluster/ClusterStateObserver.java | 17 +-- .../cluster/ClusterStateTaskExecutor.java | 2 +- .../cluster/ClusterStateTaskListener.java | 4 +- .../cluster/ClusterStateUpdateTask.java | 2 +- .../cluster/LocalClusterUpdateTask.java | 2 +- .../cluster/LocalNodeMasterListener.java | 24 ++-- .../cluster/MasterNodeChangePredicate.java | 10 +- .../cluster/NodeConnectionsService.java | 2 +- .../cluster/NotMasterException.java | 6 +- .../cluster/SnapshotsInProgress.java | 6 +- .../opensearch/cluster/ack/AckedRequest.java | 2 +- .../ack/ClusterStateUpdateRequest.java | 4 +- .../action/index/MappingUpdatedAction.java | 6 +- .../index/NodeMappingRefreshAction.java | 8 +- .../action/shard/ShardStateAction.java | 43 +++--- .../coordination/ApplyCommitRequest.java | 2 +- .../coordination/ClusterBootstrapService.java | 8 +- 
.../coordination/ClusterStatePublisher.java | 4 +- .../coordination/CoordinationState.java | 6 +- .../cluster/coordination/Coordinator.java | 136 +++++++++--------- .../cluster/coordination/JoinHelper.java | 19 ++- .../cluster/coordination/JoinRequest.java | 8 +- .../coordination/JoinTaskExecutor.java | 27 ++-- .../cluster/coordination/LagDetector.java | 2 +- .../cluster/coordination/LeaderChecker.java | 6 +- .../coordination/NoMasterBlockService.java | 20 +-- .../NodeRemovalClusterStateTaskExecutor.java | 2 +- .../cluster/coordination/PeersResponse.java | 24 ++-- .../PublicationTransportHandler.java | 7 +- .../PublishClusterStateStats.java | 4 +- .../cluster/coordination/PublishRequest.java | 2 +- .../cluster/coordination/Reconfigurator.java | 39 ++--- .../UnsafeBootstrapMasterCommand.java | 14 +- .../metadata/MetadataIndexUpgradeService.java | 2 +- .../SystemIndexMetadataUpgradeService.java | 8 +- .../cluster/node/DiscoveryNode.java | 4 +- .../cluster/node/DiscoveryNodes.java | 104 +++++++------- .../routing/BatchedRerouteService.java | 4 +- .../cluster/routing/RecoverySource.java | 2 +- .../cluster/routing/UnassignedInfo.java | 2 +- .../allocation/ExistingShardsAllocator.java | 2 +- .../service/ClusterApplierService.java | 2 +- .../cluster/service/ClusterService.java | 2 +- .../cluster/service/MasterService.java | 33 +++-- .../settings/ConsistentSettingsService.java | 8 +- ...AckClusterStatePublishResponseHandler.java | 6 +- ...ingClusterStatePublishResponseHandler.java | 10 +- .../org/opensearch/discovery/Discovery.java | 2 +- .../opensearch/discovery/DiscoveryModule.java | 4 +- .../org/opensearch/discovery/PeerFinder.java | 4 +- .../opensearch/env/NodeRepurposeCommand.java | 12 +- .../gateway/DanglingIndicesState.java | 2 +- .../opensearch/gateway/GatewayMetaState.java | 8 +- .../opensearch/gateway/GatewayService.java | 32 +++-- .../gateway/LocalAllocateDangledIndices.java | 8 +- .../index/mapper/DocumentParser.java | 2 +- .../index/mapper/MapperService.java | 
4 +- .../index/seqno/ReplicationTracker.java | 18 +-- .../index/shard/IndexEventListener.java | 2 +- .../opensearch/index/shard/IndexShard.java | 14 +- .../opensearch/index/shard/StoreRecovery.java | 2 +- .../cluster/IndicesClusterStateService.java | 28 ++-- .../indices/recovery/RecoverySettings.java | 2 +- .../indices/recovery/RecoveryTarget.java | 2 +- .../TransportNodesListShardStoreMetadata.java | 4 +- .../main/java/org/opensearch/node/Node.java | 4 +- .../PersistentTasksClusterService.java | 4 +- .../PersistentTasksNodeService.java | 16 +-- .../persistent/PersistentTasksService.java | 10 +- .../opensearch/persistent/package-info.java | 4 +- .../repositories/RepositoriesService.java | 12 +- .../opensearch/repositories/Repository.java | 8 +- .../blobstore/BlobStoreRepository.java | 24 ++-- .../repositories/blobstore/package-info.java | 18 +-- .../repositories/fs/FsRepository.java | 2 +- .../admin/indices/RestGetAliasesAction.java | 2 +- .../rest/action/cat/RestIndicesAction.java | 12 +- .../rest/action/cat/RestMasterAction.java | 12 +- .../rest/action/cat/RestNodesAction.java | 4 +- .../InternalSnapshotsInfoService.java | 4 +- .../opensearch/snapshots/RestoreService.java | 2 +- .../snapshots/SnapshotShardsService.java | 28 ++-- .../snapshots/SnapshotsService.java | 81 ++++++----- .../opensearch/snapshots/package-info.java | 22 +-- .../transport/ConnectionProfile.java | 2 +- .../TransportRequestDeduplicator.java | 2 +- .../action/shard/ShardStateActionTests.java | 4 +- .../coordination/LeaderCheckerTests.java | 2 +- .../cluster/coordination/NodeJoinTests.java | 6 +- .../cluster/node/DiscoveryNodesTests.java | 4 +- .../service/ClusterApplierServiceTests.java | 4 +- .../ConsistentSettingsServiceTests.java | 16 +-- .../opensearch/discovery/PeerFinderTests.java | 2 +- .../env/NodeRepurposeCommandTests.java | 2 +- .../CoordinationStateTestCluster.java | 4 +- 127 files changed, 679 insertions(+), 625 deletions(-) diff --git 
a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java index 292469c6e7b79..f976ffdbe8ad5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java @@ -111,7 +111,7 @@ private MockTerminal executeCommand( private MockTerminal unsafeBootstrap(Environment environment, boolean abort, Boolean applyClusterReadOnlyBlock) throws Exception { final MockTerminal terminal = executeCommand(new UnsafeBootstrapMasterCommand(), environment, 0, abort, applyClusterReadOnlyBlock); assertThat(terminal.getOutput(), containsString(UnsafeBootstrapMasterCommand.CONFIRMATION_MSG)); - assertThat(terminal.getOutput(), containsString(UnsafeBootstrapMasterCommand.MASTER_NODE_BOOTSTRAPPED_MSG)); + assertThat(terminal.getOutput(), containsString(UnsafeBootstrapMasterCommand.CLUSTER_MANAGER_NODE_BOOTSTRAPPED_MSG)); return terminal; } @@ -171,7 +171,7 @@ public void testBootstrapNotMasterEligible() { final Environment environment = TestEnvironment.newEnvironment( Settings.builder().put(nonMasterNode(internalCluster().getDefaultSettings())).build() ); - expectThrows(() -> unsafeBootstrap(environment), UnsafeBootstrapMasterCommand.NOT_MASTER_NODE_MSG); + expectThrows(() -> unsafeBootstrap(environment), UnsafeBootstrapMasterCommand.NOT_CLUSTER_MANAGER_NODE_MSG); } public void testBootstrapNoDataFolder() { diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/SnapshotDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/SnapshotDisruptionIT.java index 086aeb695c411..e6ddfd94871ce 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/SnapshotDisruptionIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/discovery/SnapshotDisruptionIT.java @@ -248,7 +248,7 @@ public void testMasterFailOverDuringShardSnapshots() throws Exception { blockDataNode(repoName, dataNode); - logger.info("--> create snapshot via master node client"); + logger.info("--> create snapshot via cluster-manager node client"); final ActionFuture snapshotResponse = internalCluster().masterClient() .admin() .cluster() @@ -272,7 +272,7 @@ public void testMasterFailOverDuringShardSnapshots() throws Exception { SnapshotException.class, () -> snapshotResponse.actionGet(TimeValue.timeValueSeconds(30L)) ); - assertThat(sne.getMessage(), endsWith("no longer master")); + assertThat(sne.getMessage(), endsWith("no longer cluster-manager")); } private void assertSnapshotExists(String repository, String snapshot) { diff --git a/server/src/internalClusterTest/java/org/opensearch/env/NodeRepurposeCommandIT.java b/server/src/internalClusterTest/java/org/opensearch/env/NodeRepurposeCommandIT.java index 2547333490f23..e2bbd0ee13db3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/env/NodeRepurposeCommandIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/env/NodeRepurposeCommandIT.java @@ -136,7 +136,7 @@ private void executeRepurposeCommand(Settings settings, int expectedIndexCount, boolean verbose = randomBoolean(); Settings settingsWithPath = Settings.builder().put(internalCluster().getDefaultSettings()).put(settings).build(); Matcher matcher = allOf( - containsString(NodeRepurposeCommand.noMasterMessage(expectedIndexCount, expectedShardCount, 0)), + containsString(NodeRepurposeCommand.noClusterManagerMessage(expectedIndexCount, expectedShardCount, 0)), NodeRepurposeCommandTests.conditionalNot(containsString("test-repurpose"), verbose == false) ); NodeRepurposeCommandTests.verifySuccess(settingsWithPath, matcher, verbose); diff --git a/server/src/main/java/org/opensearch/OpenSearchException.java 
b/server/src/main/java/org/opensearch/OpenSearchException.java index 8d3e2569957f1..5a9e5b91982a2 100644 --- a/server/src/main/java/org/opensearch/OpenSearchException.java +++ b/server/src/main/java/org/opensearch/OpenSearchException.java @@ -785,7 +785,7 @@ private enum OpenSearchExceptionHandle { 2, UNKNOWN_VERSION_ADDED ), - MASTER_NOT_DISCOVERED_EXCEPTION( + CLUSTER_MANAGER_NOT_DISCOVERED_EXCEPTION( org.opensearch.discovery.MasterNotDiscoveredException.class, org.opensearch.discovery.MasterNotDiscoveredException::new, 3, @@ -1496,7 +1496,7 @@ private enum OpenSearchExceptionHandle { 143, UNKNOWN_VERSION_ADDED ), - NOT_MASTER_EXCEPTION( + NOT_CLUSTER_MANAGER_EXCEPTION( org.opensearch.cluster.NotMasterException.class, org.opensearch.cluster.NotMasterException::new, 144, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java index 95fbe42384238..baa2ce0847501 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java @@ -64,7 +64,7 @@ /** * The {@code TransportClusterAllocationExplainAction} is responsible for actually executing the explanation of a shard's allocation on the - * master node in the cluster. + * cluster-manager node in the cluster. 
*/ public class TransportClusterAllocationExplainAction extends TransportMasterNodeAction< ClusterAllocationExplainRequest, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java index 841231c971eaa..ce731fd1c8aca 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java @@ -90,7 +90,7 @@ public class ClusterHealthResponse extends ActionResponse implements StatusToXCo // ClusterStateHealth fields int numberOfNodes = (int) parsedObjects[i++]; int numberOfDataNodes = (int) parsedObjects[i++]; - boolean hasDiscoveredMaster = Boolean.TRUE.equals(parsedObjects[i++]); + boolean hasDiscoveredClusterManager = Boolean.TRUE.equals(parsedObjects[i++]); int activeShards = (int) parsedObjects[i++]; int relocatingShards = (int) parsedObjects[i++]; int activePrimaryShards = (int) parsedObjects[i++]; @@ -118,7 +118,7 @@ public class ClusterHealthResponse extends ActionResponse implements StatusToXCo unassignedShards, numberOfNodes, numberOfDataNodes, - hasDiscoveredMaster, + hasDiscoveredClusterManager, activeShardsPercent, status, indices diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java index 6855803ba6c45..98c264e54a1d0 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java @@ -218,11 +218,11 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS @Override public void onNoLongerMaster(String source) { logger.trace( - "stopped being master while waiting 
for events with priority [{}]. retrying.", + "stopped being cluster-manager while waiting for events with priority [{}]. retrying.", request.waitForEvents() ); // TransportMasterNodeAction implements the retry logic, which is triggered by passing a NotMasterException - listener.onFailure(new NotMasterException("no longer master. source: [" + source + "]")); + listener.onFailure(new NotMasterException("no longer cluster-manager. source: [" + source + "]")); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java index b7b9da675a385..c56b2fd2b2205 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java @@ -73,12 +73,12 @@ *

    *
  1. Check that there are no running repository cleanup, snapshot create, or snapshot delete actions * and add an entry for the repository that is to be cleaned up to {@link RepositoryCleanupInProgress}
  2. - *
  3. Run cleanup actions on the repository. Note, these are executed exclusively on the master node. + *
  4. Run cleanup actions on the repository. Note, these are executed exclusively on the cluster-manager node. * For the precise operations execute see {@link BlobStoreRepository#cleanup}
  5. *
  6. Remove the entry in {@link RepositoryCleanupInProgress} in the first step.
  7. *
* - * On master failover during the cleanup operation it is simply removed from the cluster state. This is safe because the logic in + * On cluster-manager failover during the cleanup operation it is simply removed from the cluster state. This is safe because the logic in * {@link BlobStoreRepository#cleanup} ensures that the repository state id has not changed between creation of the cluster state entry * and any delete/write operations. TODO: This will not work if we also want to clean up at the shard level as those will involve writes * as well as deletes. @@ -119,7 +119,7 @@ public TransportCleanupRepositoryAction( ); this.repositoriesService = repositoriesService; this.snapshotsService = snapshotsService; - // We add a state applier that will remove any dangling repository cleanup actions on master failover. + // We add a state applier that will remove any dangling repository cleanup actions on cluster-manager failover. // This is safe to do since cleanups will increment the repository state id before executing any operations to prevent concurrent // operations from corrupting the repository. This is the same safety mechanism used by snapshot deletes. 
if (DiscoveryNode.isMasterNode(clusterService.getSettings())) { @@ -136,7 +136,7 @@ private static void addClusterStateApplier(ClusterService clusterService) { return; } clusterService.submitStateUpdateTask( - "clean up repository cleanup task after master failover", + "clean up repository cleanup task after cluster-manager failover", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java index 6d479431e1a94..3bfdf2a0cbd5a 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java @@ -155,11 +155,11 @@ public void onAckTimeout() { } private void reroute(final boolean updateSettingsAcked) { - // We're about to send a second update task, so we need to check if we're still the elected master - // For example the minimum_master_node could have been breached and we're no longer elected master, + // We're about to send a second update task, so we need to check if we're still the elected cluster-manager + // For example the minimum_master_node could have been breached and we're no longer elected cluster-manager, // so we should *not* execute the reroute. 
if (!clusterService.state().nodes().isLocalNodeElectedMaster()) { - logger.debug("Skipping reroute after cluster update settings, because node is no longer master"); + logger.debug("Skipping reroute after cluster update settings, because node is no longer cluster-manager"); listener.onResponse( new ClusterUpdateSettingsResponse( updateSettingsAcked, @@ -198,7 +198,7 @@ protected ClusterUpdateSettingsResponse newResponse(boolean acknowledged) { @Override public void onNoLongerMaster(String source) { logger.debug( - "failed to preform reroute after cluster settings were updated - current node is no longer a master" + "failed to preform reroute after cluster settings were updated - current node is no longer a cluster-manager" ); listener.onResponse( new ClusterUpdateSettingsResponse( diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreClusterStateListener.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreClusterStateListener.java index e596348127faf..cb6f8493551f6 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreClusterStateListener.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreClusterStateListener.java @@ -69,8 +69,8 @@ public void clusterChanged(ClusterChangedEvent changedEvent) { final RestoreInProgress.Entry prevEntry = restoreInProgress(changedEvent.previousState(), uuid); final RestoreInProgress.Entry newEntry = restoreInProgress(changedEvent.state(), uuid); if (prevEntry == null) { - // When there is a master failure after a restore has been started, this listener might not be registered - // on the current master and as such it might miss some intermediary cluster states due to batching. 
+ // When there is a cluster-manager failure after a restore has been started, this listener might not be registered + // on the current cluster-manager and as such it might miss some intermediary cluster states due to batching. // Clean up listener in that case and acknowledge completion of restore operation to client. clusterService.removeListener(this); listener.onResponse(new RestoreSnapshotResponse((RestoreInfo) null)); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java index 33d4ac5d50347..1e29a70e1f41f 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java @@ -221,8 +221,8 @@ private void buildResponse( // Unlikely edge case: // Data node has finished snapshotting the shard but the cluster state has not yet been updated // to reflect this. We adjust the status to show up as snapshot metadata being written because - // technically if the data node failed before successfully reporting DONE state to master, then - // this shards state would jump to a failed state. + // technically if the data node failed before successfully reporting DONE state to cluster-manager, + // then this shards state would jump to a failed state. shardStatus = new SnapshotIndexShardStatus( shardEntry.key, SnapshotIndexShardStage.FINALIZE, @@ -406,7 +406,7 @@ private SnapshotInfo snapshot(SnapshotsInProgress snapshotsInProgress, String re /** * Returns status of shards currently finished snapshots *

- * This method is executed on master node and it's complimentary to the + * This method is executed on cluster-manager node and it's complimentary to the * {@link SnapshotShardsService#currentSnapshotShards(Snapshot)} because it * returns similar information but for already finished snapshots. *

diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java index d2f053137e446..80d1f7022967d 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java @@ -108,20 +108,20 @@ public boolean equals(Object o) { if (o == null || getClass() != o.getClass()) return false; ClusterStateResponse response = (ClusterStateResponse) o; return waitForTimedOut == response.waitForTimedOut && Objects.equals(clusterName, response.clusterName) && - // Best effort. Only compare cluster state version and master node id, + // Best effort. Only compare cluster state version and cluster-manager node id, // because cluster state doesn't implement equals() Objects.equals(getVersion(clusterState), getVersion(response.clusterState)) - && Objects.equals(getMasterNodeId(clusterState), getMasterNodeId(response.clusterState)); + && Objects.equals(getClusterManagerNodeId(clusterState), getClusterManagerNodeId(response.clusterState)); } @Override public int hashCode() { - // Best effort. Only use cluster state version and master node id, + // Best effort. 
Only use cluster state version and cluster-manager node id, // because cluster state doesn't implement hashcode() - return Objects.hash(clusterName, getVersion(clusterState), getMasterNodeId(clusterState), waitForTimedOut); + return Objects.hash(clusterName, getVersion(clusterState), getClusterManagerNodeId(clusterState), waitForTimedOut); } - private static String getMasterNodeId(ClusterState clusterState) { + private static String getClusterManagerNodeId(ClusterState clusterState) { if (clusterState == null) { return null; } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java index 42497c5244167..595127d83d4bf 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java @@ -138,7 +138,7 @@ public void onNewClusterState(ClusterState newState) { } else { listener.onFailure( new NotMasterException( - "master stepped down waiting for metadata version " + request.waitForMetadataVersion() + "cluster-manager stepped down waiting for metadata version " + request.waitForMetadataVersion() ) ); } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java index 7607a2ef70980..01d4d5ac0fb53 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java @@ -85,7 +85,7 @@ public NodeStats nodeStats() { } /** - * Cluster Health Status, only populated on master nodes. + * Cluster Health Status, only populated on cluster-manager nodes. 
*/ @Nullable public ClusterHealthStatus clusterStatus() { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsResponse.java index 1470f252756a5..172159a1efe5b 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsResponse.java @@ -60,7 +60,7 @@ public class ClusterStatsResponse extends BaseNodesResponse *
  • A user overflows the index graveyard by deleting more than 500 indices while a node is offline and then the node rejoins the * cluster
  • - *
  • A node (unsafely) moves from one cluster to another, perhaps because the original cluster lost all its master nodes
  • + *
  • A node (unsafely) moves from one cluster to another, perhaps because the original cluster lost all its cluster-manager nodes
  • *
  • A user (unsafely) meddles with the contents of the data path, maybe restoring an old index folder from a backup
  • *
  • A disk partially fails and the user has no replicas and no snapshots and wants to (unsafely) recover whatever they can
  • - *
  • A cluster loses all master nodes and those are (unsafely) restored from backup, but the backup does not contain the index
  • + *
  • A cluster loses all cluster-manager nodes and those are (unsafely) restored from backup, but the backup does not contain the index
  • * * *

    The classes in this package form an API for managing dangling indices, allowing them to be listed, imported or deleted. diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkPrimaryExecutionContext.java b/server/src/main/java/org/opensearch/action/bulk/BulkPrimaryExecutionContext.java index da8833fe49a29..17bfa082295af 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkPrimaryExecutionContext.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkPrimaryExecutionContext.java @@ -61,7 +61,7 @@ enum ItemProcessingState { TRANSLATED, /** * the request can not execute with the current mapping and should wait for a new mapping - * to arrive from the master. A mapping request for the needed changes has already been + * to arrive from the cluster-manager. A mapping request for the needed changes has already been * submitted */ WAIT_FOR_MAPPING_UPDATE, @@ -144,7 +144,7 @@ public boolean isOperationExecuted() { return currentItemState == ItemProcessingState.EXECUTED; } - /** returns true if the request needs to wait for a mapping update to arrive from the master */ + /** returns true if the request needs to wait for a mapping update to arrive from the cluster-manager */ public boolean requiresWaitingForMappingUpdate() { return currentItemState == ItemProcessingState.WAIT_FOR_MAPPING_UPDATE; } @@ -216,7 +216,7 @@ public > T getRequestToExecute() { return (T) requestToExecute; } - /** indicates that the current operation can not be completed and needs to wait for a new mapping from the master */ + /** indicates that the current operation can not be completed and needs to wait for a new mapping from the cluster-manager */ public void markAsRequiringMappingUpdate() { assert assertInvariants(ItemProcessingState.TRANSLATED); currentItemState = ItemProcessingState.WAIT_FOR_MAPPING_UPDATE; diff --git a/server/src/main/java/org/opensearch/action/bulk/MappingUpdatePerformer.java 
b/server/src/main/java/org/opensearch/action/bulk/MappingUpdatePerformer.java index c0eb29e4c112f..ebfe82eb6ed38 100644 --- a/server/src/main/java/org/opensearch/action/bulk/MappingUpdatePerformer.java +++ b/server/src/main/java/org/opensearch/action/bulk/MappingUpdatePerformer.java @@ -39,7 +39,7 @@ public interface MappingUpdatePerformer { /** - * Update the mappings on the master. + * Update the mappings on the cluster-manager. */ void updateMappings(Mapping update, ShardId shardId, ActionListener listener); diff --git a/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java index cc9f20b7aa256..5311186fee0dc 100644 --- a/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java @@ -621,7 +621,7 @@ private static Engine.Result performOpOnReplica( throw new IllegalStateException("Unexpected request operation type on replica: " + docWriteRequest.opType().getLowercase()); } if (result.getResultType() == Engine.Result.Type.MAPPING_UPDATE_REQUIRED) { - // Even though the primary waits on all nodes to ack the mapping changes to the master + // Even though the primary waits on all nodes to ack the mapping changes to the cluster-manager // (see MappingUpdatedAction.updateMappingOnMaster) we still need to protect against missing mappings // and wait for them. The reason is concurrent requests. Request r1 which has new field f triggers a // mapping update. 
Assume that that update is first applied on the primary, and only later on the replica diff --git a/server/src/main/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java b/server/src/main/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java index 3950f3a9fef77..6eb3c7a0cfe89 100644 --- a/server/src/main/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java +++ b/server/src/main/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java @@ -310,9 +310,9 @@ protected AsyncAction(Task task, Request request, ActionListener liste for (ShardRouting shard : shardIt) { // send a request to the shard only if it is assigned to a node that is in the local node's cluster state // a scenario in which a shard can be assigned but to a node that is not in the local node's cluster state - // is when the shard is assigned to the master node, the local node has detected the master as failed - // and a new master has not yet been elected; in this situation the local node will have removed the - // master node from the local cluster state, but the shards assigned to the master will still be in the + // is when the shard is assigned to the cluster-manager node, the local node has detected the cluster-manager as failed + // and a new cluster-manager has not yet been elected; in this situation the local node will have removed the + // cluster-manager node from the local cluster state, but the shards assigned to the cluster-manager will still be in the // routing table as such if (shard.assignedToNode() && nodes.get(shard.currentNodeId()) != null) { String nodeId = shard.currentNodeId(); diff --git a/server/src/main/java/org/opensearch/action/support/master/AcknowledgedRequestBuilder.java b/server/src/main/java/org/opensearch/action/support/master/AcknowledgedRequestBuilder.java index 9337e646cebea..daed5a09bb0f3 100644 --- 
a/server/src/main/java/org/opensearch/action/support/master/AcknowledgedRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/support/master/AcknowledgedRequestBuilder.java @@ -36,7 +36,7 @@ import org.opensearch.common.unit.TimeValue; /** - * Base request builder for master node operations that support acknowledgements + * Base request builder for cluster-manager node operations that support acknowledgements */ public abstract class AcknowledgedRequestBuilder< Request extends AcknowledgedRequest, diff --git a/server/src/main/java/org/opensearch/action/support/master/MasterNodeOperationRequestBuilder.java b/server/src/main/java/org/opensearch/action/support/master/MasterNodeOperationRequestBuilder.java index b39ec5fe4cc6b..98996e222b30a 100644 --- a/server/src/main/java/org/opensearch/action/support/master/MasterNodeOperationRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/support/master/MasterNodeOperationRequestBuilder.java @@ -39,7 +39,7 @@ import org.opensearch.common.unit.TimeValue; /** - * Base request builder for master node operations + * Base request builder for cluster-manager node operations */ public abstract class MasterNodeOperationRequestBuilder< Request extends MasterNodeRequest, @@ -53,7 +53,7 @@ protected MasterNodeOperationRequestBuilder(OpenSearchClient client, ActionType< } /** - * Sets the master node timeout in case the master has not yet been discovered. + * Sets the cluster-manager node timeout in case the cluster-manager has not yet been discovered. */ @SuppressWarnings("unchecked") public final RequestBuilder setMasterNodeTimeout(TimeValue timeout) { @@ -62,7 +62,7 @@ public final RequestBuilder setMasterNodeTimeout(TimeValue timeout) { } /** - * Sets the master node timeout in case the master has not yet been discovered. + * Sets the cluster-manager node timeout in case the cluster-manager has not yet been discovered. 
*/ @SuppressWarnings("unchecked") public final RequestBuilder setMasterNodeTimeout(String timeout) { diff --git a/server/src/main/java/org/opensearch/action/support/master/MasterNodeReadOperationRequestBuilder.java b/server/src/main/java/org/opensearch/action/support/master/MasterNodeReadOperationRequestBuilder.java index add5c5177df42..99e6b37debd8f 100644 --- a/server/src/main/java/org/opensearch/action/support/master/MasterNodeReadOperationRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/support/master/MasterNodeReadOperationRequestBuilder.java @@ -37,7 +37,7 @@ import org.opensearch.client.OpenSearchClient; /** - * Base request builder for master node read operations that can be executed on the local node as well + * Base request builder for cluster-manager node read operations that can be executed on the local node as well */ public abstract class MasterNodeReadOperationRequestBuilder< Request extends MasterNodeReadRequest, diff --git a/server/src/main/java/org/opensearch/action/support/master/MasterNodeReadRequest.java b/server/src/main/java/org/opensearch/action/support/master/MasterNodeReadRequest.java index eeafa148ca7c3..9842c47652a97 100644 --- a/server/src/main/java/org/opensearch/action/support/master/MasterNodeReadRequest.java +++ b/server/src/main/java/org/opensearch/action/support/master/MasterNodeReadRequest.java @@ -38,7 +38,7 @@ import java.io.IOException; /** - * Base request for master based read operations that allows to read the cluster state from the local node if needed + * Base request for cluster-manager based read operations that allows to read the cluster state from the local node if needed */ public abstract class MasterNodeReadRequest> extends MasterNodeRequest { @@ -64,9 +64,9 @@ public final Request local(boolean local) { } /** - * Return local information, do not retrieve the state from master node (default: false). 
+ * Return local information, do not retrieve the state from cluster-manager node (default: false). * @return true if local information is to be returned; - * false if information is to be retrieved from master node (default). + * false if information is to be retrieved from cluster-manager node (default). */ public final boolean local() { return local; diff --git a/server/src/main/java/org/opensearch/action/support/master/MasterNodeRequest.java b/server/src/main/java/org/opensearch/action/support/master/MasterNodeRequest.java index d5be6c48e23b8..f7ea962f7c4a1 100644 --- a/server/src/main/java/org/opensearch/action/support/master/MasterNodeRequest.java +++ b/server/src/main/java/org/opensearch/action/support/master/MasterNodeRequest.java @@ -40,7 +40,7 @@ import java.io.IOException; /** - * A based request for master based operation. + * A based request for cluster-manager based operation. */ public abstract class MasterNodeRequest> extends ActionRequest { @@ -62,7 +62,7 @@ public void writeTo(StreamOutput out) throws IOException { } /** - * A timeout value in case the master has not been discovered yet or disconnected. + * A timeout value in case the cluster-manager has not been discovered yet or disconnected. */ @SuppressWarnings("unchecked") public final Request masterNodeTimeout(TimeValue timeout) { @@ -71,7 +71,7 @@ public final Request masterNodeTimeout(TimeValue timeout) { } /** - * A timeout value in case the master has not been discovered yet or disconnected. + * A timeout value in case the cluster-manager has not been discovered yet or disconnected. 
*/ public final Request masterNodeTimeout(String timeout) { return masterNodeTimeout(TimeValue.parseTimeValue(timeout, null, getClass().getSimpleName() + ".masterNodeTimeout")); diff --git a/server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeAction.java b/server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeAction.java index 62d08c23534af..083bea079174c 100644 --- a/server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeAction.java +++ b/server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeAction.java @@ -67,7 +67,7 @@ import java.util.function.Predicate; /** - * A base class for operations that needs to be performed on the master node. + * A base class for operations that needs to be performed on the cluster-manager node. */ public abstract class TransportMasterNodeAction, Response extends ActionResponse> extends HandledTransportAction { @@ -198,13 +198,13 @@ protected void doStart(ClusterState clusterState) { } } else { if (nodes.getMasterNode() == null) { - logger.debug("no known master node, scheduling a retry"); + logger.debug("no known cluster-manager node, scheduling a retry"); retryOnMasterChange(clusterState, null); } else { - DiscoveryNode masterNode = nodes.getMasterNode(); - final String actionName = getMasterActionName(masterNode); + DiscoveryNode clusterManagerNode = nodes.getMasterNode(); + final String actionName = getMasterActionName(clusterManagerNode); transportService.sendRequest( - masterNode, + clusterManagerNode, actionName, request, new ActionListenerResponseHandler(listener, TransportMasterNodeAction.this::read) { @@ -213,7 +213,7 @@ public void handleException(final TransportException exp) { Throwable cause = exp.unwrapCause(); if (cause instanceof ConnectTransportException || (exp instanceof RemoteTransportException && cause instanceof NodeClosedException)) { - // we want to retry here a bit to see if a new master is elected + // we want to 
retry here a bit to see if a new cluster-manager is elected logger.debug( "connection exception while trying to forward request with action name [{}] to " + "master node [{}], scheduling a retry. Error: [{}]", @@ -279,7 +279,7 @@ public void onTimeout(TimeValue timeout) { } /** - * Allows to conditionally return a different master node action name in the case an action gets renamed. + * Allows to conditionally return a different cluster-manager node action name in the case an action gets renamed. * This mainly for backwards compatibility should be used rarely */ protected String getMasterActionName(DiscoveryNode node) { diff --git a/server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeReadAction.java b/server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeReadAction.java index b230901eb456e..b8be63dd6564b 100644 --- a/server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeReadAction.java +++ b/server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeReadAction.java @@ -41,7 +41,7 @@ import org.opensearch.transport.TransportService; /** - * A base class for read operations that needs to be performed on the master node. + * A base class for read operations that needs to be performed on the cluster-manager node. * Can also be executed on the local node if needed. 
*/ public abstract class TransportMasterNodeReadAction, Response extends ActionResponse> extends diff --git a/server/src/main/java/org/opensearch/action/support/nodes/BaseNodeRequest.java b/server/src/main/java/org/opensearch/action/support/nodes/BaseNodeRequest.java index e91a659d331d1..27d9cb8b6c002 100644 --- a/server/src/main/java/org/opensearch/action/support/nodes/BaseNodeRequest.java +++ b/server/src/main/java/org/opensearch/action/support/nodes/BaseNodeRequest.java @@ -39,7 +39,7 @@ import java.io.IOException; -// TODO: this class can be removed in master once 7.x is bumped to 7.4.0 +// TODO: this class can be removed in main once 7.x is bumped to 7.4.0 public abstract class BaseNodeRequest extends TransportRequest { public BaseNodeRequest() {} diff --git a/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java b/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java index 68c5416f3603e..f7fd6acf8be23 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java +++ b/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java @@ -263,7 +263,8 @@ public void onFailure(Exception replicaException) { ), replicaException ); - // Only report "critical" exceptions - TODO: Reach out to the master node to get the latest shard state then report. + // Only report "critical" exceptions + // TODO: Reach out to the cluster-manager node to get the latest shard state then report. 
if (TransportActions.isShardNotAvailableException(replicaException) == false) { RestStatus restStatus = ExceptionsHelper.status(replicaException); shardReplicaFailures.add( diff --git a/server/src/main/java/org/opensearch/action/update/TransportUpdateAction.java b/server/src/main/java/org/opensearch/action/update/TransportUpdateAction.java index 387c0d24ed4df..e554ebc0f8414 100644 --- a/server/src/main/java/org/opensearch/action/update/TransportUpdateAction.java +++ b/server/src/main/java/org/opensearch/action/update/TransportUpdateAction.java @@ -154,7 +154,7 @@ protected void doExecute(Task task, final UpdateRequest request, final ActionLis request.index() ); } - // if we don't have a master, we don't have metadata, that's fine, let it find a master using create index API + // if we don't have a master, we don't have metadata, that's fine, let it find a cluster-manager using create index API if (autoCreateIndex.shouldAutoCreate(request.index(), clusterService.state())) { client.admin() .indices() diff --git a/server/src/main/java/org/opensearch/cluster/ClusterChangedEvent.java b/server/src/main/java/org/opensearch/cluster/ClusterChangedEvent.java index db54016a61f6d..387a27da46820 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterChangedEvent.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterChangedEvent.java @@ -248,7 +248,7 @@ public boolean nodesChanged() { * Determines whether or not the current cluster state represents an entirely * new cluster, either when a node joins a cluster for the first time or when * the node receives a cluster state update from a brand new cluster (different - * UUID from the previous cluster), which will happen when a master node is + * UUID from the previous cluster), which will happen when a cluster-manager node is * elected that has never been part of the cluster before. 
*/ public boolean isNewCluster() { @@ -260,10 +260,10 @@ public boolean isNewCluster() { // Get the deleted indices by comparing the index metadatas in the previous and new cluster states. // If an index exists in the previous cluster state, but not in the new cluster state, it must have been deleted. private List indicesDeletedFromClusterState() { - // If the new cluster state has a new cluster UUID, the likely scenario is that a node was elected - // master that has had its data directory wiped out, in which case we don't want to delete the indices and lose data; + // If the new cluster state has a new cluster UUID, the likely scenario is that a node was elected cluster-manager + // that has had its data directory wiped out, in which case we don't want to delete the indices and lose data; // rather we want to import them as dangling indices instead. So we check here if the cluster UUID differs from the previous - // cluster UUID, in which case, we don't want to delete indices that the master erroneously believes shouldn't exist. + // cluster UUID, in which case, we don't want to delete indices that the cluster-manager erroneously believes shouldn't exist. // See test DiscoveryWithServiceDisruptionsIT.testIndicesDeleted() // See discussion on https://github.com/elastic/elasticsearch/pull/9952 and // https://github.com/elastic/elasticsearch/issues/11665 diff --git a/server/src/main/java/org/opensearch/cluster/ClusterState.java b/server/src/main/java/org/opensearch/cluster/ClusterState.java index 459c0b9502acf..3eaac99bad998 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterState.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterState.java @@ -81,7 +81,7 @@ *

    * The cluster state object is immutable with the exception of the {@link RoutingNodes} structure, which is * built on demand from the {@link RoutingTable}. - * The cluster state can be updated only on the master node. All updates are performed by on a + * The cluster state can be updated only on the cluster-manager node. All updates are performed by on a * single thread and controlled by the {@link ClusterService}. After every update the * {@link Discovery#publish} method publishes a new version of the cluster state to all other nodes in the * cluster. The actual publishing mechanism is delegated to the {@link Discovery#publish} method and depends on @@ -169,7 +169,7 @@ default boolean isPrivate() { private final boolean wasReadFromDiff; - private final int minimumMasterNodesOnPublishingMaster; + private final int minimumClusterManagerNodesOnPublishingClusterManager; // built on demand private volatile RoutingNodes routingNodes; @@ -198,7 +198,7 @@ public ClusterState( DiscoveryNodes nodes, ClusterBlocks blocks, ImmutableOpenMap customs, - int minimumMasterNodesOnPublishingMaster, + int minimumClusterManagerNodesOnPublishingClusterManager, boolean wasReadFromDiff ) { this.version = version; @@ -209,7 +209,7 @@ public ClusterState( this.nodes = nodes; this.blocks = blocks; this.customs = customs; - this.minimumMasterNodesOnPublishingMaster = minimumMasterNodesOnPublishingMaster; + this.minimumClusterManagerNodesOnPublishingClusterManager = minimumClusterManagerNodesOnPublishingClusterManager; this.wasReadFromDiff = wasReadFromDiff; } @@ -226,8 +226,9 @@ public long getVersion() { } public long getVersionOrMetadataVersion() { - // When following a Zen1 master, the cluster state version is not guaranteed to increase, so instead it is preferable to use the - // metadata version to determine the freshest node. However when following a Zen2 master the cluster state version should be used. 
+ // When following a Zen1 cluster-manager, the cluster state version is not guaranteed to increase, + // so instead it is preferable to use the metadata version to determine the freshest node. + // However when following a Zen2 cluster-manager the cluster state version should be used. return term() == ZEN1_BWC_TERM ? metadata().version() : version(); } @@ -388,7 +389,7 @@ public String toString() { } /** - * a cluster state supersedes another state if they are from the same master and the version of this state is higher than that of the + * a cluster state supersedes another state if they are from the same cluster-manager and the version of this state is higher than that of the * other state. *

    * In essence that means that all the changes from the other cluster state are also reflected by the current one @@ -590,7 +591,7 @@ public static class Builder { private ClusterBlocks blocks = ClusterBlocks.EMPTY_CLUSTER_BLOCK; private final ImmutableOpenMap.Builder customs; private boolean fromDiff; - private int minimumMasterNodesOnPublishingMaster = -1; + private int minimumClusterManagerNodesOnPublishingClusterManager = -1; public Builder(ClusterState state) { this.clusterName = state.clusterName; @@ -601,7 +602,7 @@ public Builder(ClusterState state) { this.metadata = state.metadata(); this.blocks = state.blocks(); this.customs = ImmutableOpenMap.builder(state.customs()); - this.minimumMasterNodesOnPublishingMaster = state.minimumMasterNodesOnPublishingMaster; + this.minimumClusterManagerNodesOnPublishingClusterManager = state.minimumClusterManagerNodesOnPublishingClusterManager; this.fromDiff = false; } @@ -662,8 +663,8 @@ public Builder stateUUID(String uuid) { return this; } - public Builder minimumMasterNodesOnPublishingMaster(int minimumMasterNodesOnPublishingMaster) { - this.minimumMasterNodesOnPublishingMaster = minimumMasterNodesOnPublishingMaster; + public Builder minimumClusterManagerNodesOnPublishingClusterManager(int minimumClusterManagerNodesOnPublishingClusterManager) { + this.minimumClusterManagerNodesOnPublishingClusterManager = minimumClusterManagerNodesOnPublishingClusterManager; return this; } @@ -701,7 +702,7 @@ public ClusterState build() { nodes, blocks, customs.build(), - minimumMasterNodesOnPublishingMaster, + minimumClusterManagerNodesOnPublishingClusterManager, fromDiff ); } @@ -746,7 +747,7 @@ public static ClusterState readFrom(StreamInput in, DiscoveryNode localNode) thr Custom customIndexMetadata = in.readNamedWriteable(Custom.class); builder.putCustom(customIndexMetadata.getWriteableName(), customIndexMetadata); } - builder.minimumMasterNodesOnPublishingMaster = in.readVInt(); + 
builder.minimumClusterManagerNodesOnPublishingClusterManager = in.readVInt(); return builder.build(); } @@ -772,7 +773,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeNamedWriteable(cursor.value); } } - out.writeVInt(minimumMasterNodesOnPublishingMaster); + out.writeVInt(minimumClusterManagerNodesOnPublishingClusterManager); } private static class ClusterStateDiff implements Diff { @@ -795,7 +796,7 @@ private static class ClusterStateDiff implements Diff { private final Diff> customs; - private final int minimumMasterNodesOnPublishingMaster; + private final int minimumClusterManagerNodesOnPublishingClusterManager; ClusterStateDiff(ClusterState before, ClusterState after) { fromUuid = before.stateUUID; @@ -807,7 +808,7 @@ private static class ClusterStateDiff implements Diff { metadata = after.metadata.diff(before.metadata); blocks = after.blocks.diff(before.blocks); customs = DiffableUtils.diff(before.customs, after.customs, DiffableUtils.getStringKeySerializer(), CUSTOM_VALUE_SERIALIZER); - minimumMasterNodesOnPublishingMaster = after.minimumMasterNodesOnPublishingMaster; + minimumClusterManagerNodesOnPublishingClusterManager = after.minimumClusterManagerNodesOnPublishingClusterManager; } ClusterStateDiff(StreamInput in, DiscoveryNode localNode) throws IOException { @@ -820,7 +821,7 @@ private static class ClusterStateDiff implements Diff { metadata = Metadata.readDiffFrom(in); blocks = ClusterBlocks.readDiffFrom(in); customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), CUSTOM_VALUE_SERIALIZER); - minimumMasterNodesOnPublishingMaster = in.readVInt(); + minimumClusterManagerNodesOnPublishingClusterManager = in.readVInt(); } @Override @@ -834,7 +835,7 @@ public void writeTo(StreamOutput out) throws IOException { metadata.writeTo(out); blocks.writeTo(out); customs.writeTo(out); - out.writeVInt(minimumMasterNodesOnPublishingMaster); + out.writeVInt(minimumClusterManagerNodesOnPublishingClusterManager); } 
@Override @@ -854,7 +855,7 @@ public ClusterState apply(ClusterState state) { builder.metadata(metadata.apply(state.metadata)); builder.blocks(blocks.apply(state.blocks)); builder.customs(customs.apply(state.customs)); - builder.minimumMasterNodesOnPublishingMaster(minimumMasterNodesOnPublishingMaster); + builder.minimumClusterManagerNodesOnPublishingClusterManager(minimumClusterManagerNodesOnPublishingClusterManager); builder.fromDiff(true); return builder.build(); } diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateObserver.java b/server/src/main/java/org/opensearch/cluster/ClusterStateObserver.java index 5d55ce70aec02..4f3372b4e9069 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateObserver.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateObserver.java @@ -187,7 +187,7 @@ public void waitForNextChange(Listener listener, Predicate statePr // sample a new state. This state maybe *older* than the supplied state if we are called from an applier, // which wants to wait for something else to happen ClusterState newState = clusterApplierService.state(); - if (lastObservedState.get().isOlderOrDifferentMaster(newState) && statePredicate.test(newState)) { + if (lastObservedState.get().isOlderOrDifferentClusterManager(newState) && statePredicate.test(newState)) { // good enough, let's go. 
logger.trace("observer: sampled state accepted by predicate ({})", newState); lastObservedState.set(new StoredState(newState)); @@ -241,7 +241,7 @@ public void postAdded() { return; } ClusterState newState = clusterApplierService.state(); - if (lastObservedState.get().isOlderOrDifferentMaster(newState) && context.statePredicate.test(newState)) { + if (lastObservedState.get().isOlderOrDifferentClusterManager(newState) && context.statePredicate.test(newState)) { // double check we're still listening if (observingContext.compareAndSet(context, null)) { logger.trace("observer: post adding listener: accepting current cluster state ({})", newState); @@ -295,22 +295,23 @@ public String toString() { } /** - * The observer considers two cluster states to be the same if they have the same version and master node id (i.e. null or set) + * The observer considers two cluster states to be the same if they have the same version and cluster-manager node id (i.e. null or set) */ private static class StoredState { - private final String masterNodeId; + private final String clusterManagerNodeId; private final long version; StoredState(ClusterState clusterState) { - this.masterNodeId = clusterState.nodes().getMasterNodeId(); + this.clusterManagerNodeId = clusterState.nodes().getMasterNodeId(); this.version = clusterState.version(); } /** - * returns true if stored state is older then given state or they are from a different master, meaning they can't be compared + * returns true if stored state is older then given state or they are from a different cluster-manager, meaning they can't be compared * */ - public boolean isOlderOrDifferentMaster(ClusterState clusterState) { - return version < clusterState.version() || Objects.equals(masterNodeId, clusterState.nodes().getMasterNodeId()) == false; + public boolean isOlderOrDifferentClusterManager(ClusterState clusterState) { + return version < clusterState.version() + || Objects.equals(clusterManagerNodeId, 
clusterState.nodes().getMasterNodeId()) == false; } } diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java index 04002b31a8b3e..48d3dd7d03cb5 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java @@ -45,7 +45,7 @@ public interface ClusterStateTaskExecutor { ClusterTasksResult execute(ClusterState currentState, List tasks) throws Exception; /** - * indicates whether this executor should only run if the current node is master + * indicates whether this executor should only run if the current node is cluster-manager */ default boolean runOnlyOnMaster() { return true; diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java index 718df33f8a2d2..d5b9eebbc3b5d 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java @@ -43,11 +43,11 @@ public interface ClusterStateTaskListener { void onFailure(String source, Exception e); /** - * called when the task was rejected because the local node is no longer master. + * called when the task was rejected because the local node is no longer cluster-manager. * Used only for tasks submitted to {@link MasterService}. */ default void onNoLongerMaster(String source) { - onFailure(source, new NotMasterException("no longer master. source: [" + source + "]")); + onFailure(source, new NotMasterException("no longer cluster-manager. 
source: [" + source + "]")); } /** diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateUpdateTask.java b/server/src/main/java/org/opensearch/cluster/ClusterStateUpdateTask.java index 72d72158a5f0b..9393663b309fc 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateUpdateTask.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateUpdateTask.java @@ -101,7 +101,7 @@ public Priority priority() { } /** - * Marked as final as cluster state update tasks should only run on master. + * Marked as final as cluster state update tasks should only run on cluster-manager. * For local requests, use {@link LocalClusterUpdateTask} instead. */ @Override diff --git a/server/src/main/java/org/opensearch/cluster/LocalClusterUpdateTask.java b/server/src/main/java/org/opensearch/cluster/LocalClusterUpdateTask.java index 06ed0c0580e2f..ffcd63b3b57c1 100644 --- a/server/src/main/java/org/opensearch/cluster/LocalClusterUpdateTask.java +++ b/server/src/main/java/org/opensearch/cluster/LocalClusterUpdateTask.java @@ -38,7 +38,7 @@ import java.util.List; /** - * Used to apply state updates on nodes that are not necessarily master + * Used to apply state updates on nodes that are not necessarily cluster-manager */ public abstract class LocalClusterUpdateTask implements diff --git a/server/src/main/java/org/opensearch/cluster/LocalNodeMasterListener.java b/server/src/main/java/org/opensearch/cluster/LocalNodeMasterListener.java index 1c35f7bbbe8a1..d4456b379237c 100644 --- a/server/src/main/java/org/opensearch/cluster/LocalNodeMasterListener.java +++ b/server/src/main/java/org/opensearch/cluster/LocalNodeMasterListener.java @@ -32,29 +32,29 @@ package org.opensearch.cluster; /** - * Enables listening to master changes events of the local node (when the local node becomes the master, and when the local - * node cease being a master). 
+ * Enables listening to cluster-manager changes events of the local node (when the local node becomes the cluster-manager, and when the local + * node cease being a cluster-manager). */ public interface LocalNodeMasterListener extends ClusterStateListener { /** - * Called when local node is elected to be the master + * Called when local node is elected to be the cluster-manager */ - void onMaster(); + void onClusterManager(); /** - * Called when the local node used to be the master, a new master was elected and it's no longer the local node. + * Called when the local node used to be the cluster-manager, a new cluster-manager was elected and it's no longer the local node. */ - void offMaster(); + void offClusterManager(); @Override default void clusterChanged(ClusterChangedEvent event) { - final boolean wasMaster = event.previousState().nodes().isLocalNodeElectedMaster(); - final boolean isMaster = event.localNodeMaster(); - if (wasMaster == false && isMaster) { - onMaster(); - } else if (wasMaster && isMaster == false) { - offMaster(); + final boolean wasClusterManager = event.previousState().nodes().isLocalNodeElectedMaster(); + final boolean isClusterManager = event.localNodeMaster(); + if (wasClusterManager == false && isClusterManager) { + onClusterManager(); + } else if (wasClusterManager && isClusterManager == false) { + offClusterManager(); } } } diff --git a/server/src/main/java/org/opensearch/cluster/MasterNodeChangePredicate.java b/server/src/main/java/org/opensearch/cluster/MasterNodeChangePredicate.java index 9d11fb84af801..8eeaedd83cb26 100644 --- a/server/src/main/java/org/opensearch/cluster/MasterNodeChangePredicate.java +++ b/server/src/main/java/org/opensearch/cluster/MasterNodeChangePredicate.java @@ -48,14 +48,14 @@ private MasterNodeChangePredicate() { */ public static Predicate build(ClusterState currentState) { final long currentVersion = currentState.version(); - final DiscoveryNode masterNode = currentState.nodes().getMasterNode(); - final 
String currentMasterId = masterNode == null ? null : masterNode.getEphemeralId(); + final DiscoveryNode clusterManagerNode = currentState.nodes().getMasterNode(); + final String currentMasterId = clusterManagerNode == null ? null : clusterManagerNode.getEphemeralId(); return newState -> { - final DiscoveryNode newMaster = newState.nodes().getMasterNode(); + final DiscoveryNode newClusterManager = newState.nodes().getMasterNode(); final boolean accept; - if (newMaster == null) { + if (newClusterManager == null) { accept = false; - } else if (newMaster.getEphemeralId().equals(currentMasterId) == false) { + } else if (newClusterManager.getEphemeralId().equals(currentMasterId) == false) { accept = true; } else { accept = newState.version() > currentVersion; diff --git a/server/src/main/java/org/opensearch/cluster/NodeConnectionsService.java b/server/src/main/java/org/opensearch/cluster/NodeConnectionsService.java index 02139ea21b483..696df6278dbb6 100644 --- a/server/src/main/java/org/opensearch/cluster/NodeConnectionsService.java +++ b/server/src/main/java/org/opensearch/cluster/NodeConnectionsService.java @@ -69,7 +69,7 @@ * This component is responsible for maintaining connections from this node to all the nodes listed in the cluster state, and for * disconnecting from nodes once they are removed from the cluster state. It periodically checks that all connections are still open and * restores them if needed. Note that this component is *not* responsible for removing nodes from the cluster state if they disconnect or - * are unresponsive: this is the job of the master's fault detection components, particularly {@link FollowersChecker}. + * are unresponsive: this is the job of the cluster-manager's fault detection components, particularly {@link FollowersChecker}. *

    * The {@link NodeConnectionsService#connectToNodes(DiscoveryNodes, Runnable)} and {@link * NodeConnectionsService#disconnectFromNodesExcept(DiscoveryNodes)} methods are called on the {@link ClusterApplier} thread. This component diff --git a/server/src/main/java/org/opensearch/cluster/NotMasterException.java b/server/src/main/java/org/opensearch/cluster/NotMasterException.java index 61772aa13233b..c8ec32ed77eb9 100644 --- a/server/src/main/java/org/opensearch/cluster/NotMasterException.java +++ b/server/src/main/java/org/opensearch/cluster/NotMasterException.java @@ -37,9 +37,9 @@ import java.io.IOException; /** - * Thrown when a node join request or a master ping reaches a node which is not - * currently acting as a master or when a cluster state update task is to be executed - * on a node that is no longer master. + * Thrown when a node join request or a cluster-manager ping reaches a node which is not + * currently acting as a cluster-manager or when a cluster state update task is to be executed + * on a node that is no longer cluster-manager. */ public class NotMasterException extends OpenSearchException { diff --git a/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java b/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java index 2c001833f46ce..d0d5aea9d036b 100644 --- a/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java +++ b/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java @@ -297,9 +297,9 @@ private Entry(StreamInput in) throws IOException { if (in.getVersion().onOrAfter(VERSION_IN_SNAPSHOT_VERSION)) { version = Version.readVersion(in); } else if (in.getVersion().onOrAfter(SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION)) { - // If an older master informs us that shard generations are supported we use the minimum shard generation compatible - // version. If shard generations are not supported yet we use a placeholder for a version that does not use shard - // generations. 
+ // If an older cluster-manager informs us that shard generations are supported + // we use the minimum shard generation compatible version. + // If shard generations are not supported yet we use a placeholder for a version that does not use shard generations. version = in.readBoolean() ? SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION : SnapshotsService.OLD_SNAPSHOT_FORMAT; } else { version = SnapshotsService.OLD_SNAPSHOT_FORMAT; diff --git a/server/src/main/java/org/opensearch/cluster/ack/AckedRequest.java b/server/src/main/java/org/opensearch/cluster/ack/AckedRequest.java index 23a9ed16e35d2..97d628e3231c9 100644 --- a/server/src/main/java/org/opensearch/cluster/ack/AckedRequest.java +++ b/server/src/main/java/org/opensearch/cluster/ack/AckedRequest.java @@ -45,7 +45,7 @@ public interface AckedRequest { TimeValue ackTimeout(); /** - * Returns the timeout for the request to be completed on the master node + * Returns the timeout for the request to be completed on the cluster-manager node */ TimeValue masterNodeTimeout(); } diff --git a/server/src/main/java/org/opensearch/cluster/ack/ClusterStateUpdateRequest.java b/server/src/main/java/org/opensearch/cluster/ack/ClusterStateUpdateRequest.java index d142c28086f70..0931086ab3ff0 100644 --- a/server/src/main/java/org/opensearch/cluster/ack/ClusterStateUpdateRequest.java +++ b/server/src/main/java/org/opensearch/cluster/ack/ClusterStateUpdateRequest.java @@ -62,7 +62,7 @@ public T ackTimeout(TimeValue ackTimeout) { /** * Returns the maximum time interval to wait for the request to - * be completed on the master node + * be completed on the cluster-manager node */ @Override public TimeValue masterNodeTimeout() { @@ -70,7 +70,7 @@ public TimeValue masterNodeTimeout() { } /** - * Sets the master node timeout + * Sets the cluster-manager node timeout */ @SuppressWarnings("unchecked") public T masterNodeTimeout(TimeValue masterNodeTimeout) { diff --git 
a/server/src/main/java/org/opensearch/cluster/action/index/MappingUpdatedAction.java b/server/src/main/java/org/opensearch/cluster/action/index/MappingUpdatedAction.java index f22d489ec6fd7..cf1f2d3141ccd 100644 --- a/server/src/main/java/org/opensearch/cluster/action/index/MappingUpdatedAction.java +++ b/server/src/main/java/org/opensearch/cluster/action/index/MappingUpdatedAction.java @@ -104,10 +104,10 @@ public void setClient(Client client) { } /** - * Update mappings on the master node, waiting for the change to be committed, + * Update mappings on the cluster-manager node, waiting for the change to be committed, * but not for the mapping update to be applied on all nodes. The timeout specified by - * {@code timeout} is the master node timeout ({@link MasterNodeRequest#masterNodeTimeout()}), - * potentially waiting for a master node to be available. + * {@code timeout} is the cluster-manager node timeout ({@link MasterNodeRequest#masterNodeTimeout()}), + * potentially waiting for a cluster-manager node to be available. 
*/ public void updateMappingOnMaster(Index index, Mapping mappingUpdate, ActionListener listener) { diff --git a/server/src/main/java/org/opensearch/cluster/action/index/NodeMappingRefreshAction.java b/server/src/main/java/org/opensearch/cluster/action/index/NodeMappingRefreshAction.java index 23ce218904d21..b40665a1bcf1b 100644 --- a/server/src/main/java/org/opensearch/cluster/action/index/NodeMappingRefreshAction.java +++ b/server/src/main/java/org/opensearch/cluster/action/index/NodeMappingRefreshAction.java @@ -74,12 +74,12 @@ public NodeMappingRefreshAction(TransportService transportService, MetadataMappi ); } - public void nodeMappingRefresh(final DiscoveryNode masterNode, final NodeMappingRefreshRequest request) { - if (masterNode == null) { - logger.warn("can't send mapping refresh for [{}], no master known.", request.index()); + public void nodeMappingRefresh(final DiscoveryNode clusterManagerNode, final NodeMappingRefreshRequest request) { + if (clusterManagerNode == null) { + logger.warn("can't send mapping refresh for [{}], no cluster-manager known.", request.index()); return; } - transportService.sendRequest(masterNode, ACTION_NAME, request, EmptyTransportResponseHandler.INSTANCE_SAME); + transportService.sendRequest(clusterManagerNode, ACTION_NAME, request, EmptyTransportResponseHandler.INSTANCE_SAME); } private class NodeMappingRefreshTransportHandler implements TransportRequestHandler { diff --git a/server/src/main/java/org/opensearch/cluster/action/shard/ShardStateAction.java b/server/src/main/java/org/opensearch/cluster/action/shard/ShardStateAction.java index 300067587b78b..fd6a5367146a4 100644 --- a/server/src/main/java/org/opensearch/cluster/action/shard/ShardStateAction.java +++ b/server/src/main/java/org/opensearch/cluster/action/shard/ShardStateAction.java @@ -177,14 +177,14 @@ private void sendShardAction( final ActionListener listener ) { ClusterStateObserver observer = new ClusterStateObserver(currentState, clusterService, null, logger, 
threadPool.getThreadContext()); - DiscoveryNode masterNode = currentState.nodes().getMasterNode(); + DiscoveryNode clusterManagerNode = currentState.nodes().getMasterNode(); Predicate changePredicate = MasterNodeChangePredicate.build(currentState); - if (masterNode == null) { - logger.warn("no master known for action [{}] for shard entry [{}]", actionName, request); - waitForNewMasterAndRetry(actionName, observer, request, listener, changePredicate); + if (clusterManagerNode == null) { + logger.warn("no cluster-manager known for action [{}] for shard entry [{}]", actionName, request); + waitForNewClusterManagerAndRetry(actionName, observer, request, listener, changePredicate); } else { - logger.debug("sending [{}] to [{}] for shard entry [{}]", actionName, masterNode.getId(), request); - transportService.sendRequest(masterNode, actionName, request, new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { + logger.debug("sending [{}] to [{}] for shard entry [{}]", actionName, clusterManagerNode.getId(), request); + transportService.sendRequest(clusterManagerNode, actionName, request, new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { @Override public void handleResponse(TransportResponse.Empty response) { listener.onResponse(null); @@ -192,14 +192,14 @@ public void handleResponse(TransportResponse.Empty response) { @Override public void handleException(TransportException exp) { - if (isMasterChannelException(exp)) { - waitForNewMasterAndRetry(actionName, observer, request, listener, changePredicate); + if (isClusterManagerChannelException(exp)) { + waitForNewClusterManagerAndRetry(actionName, observer, request, listener, changePredicate); } else { logger.warn( new ParameterizedMessage( "unexpected failure while sending request [{}]" + " to [{}] for shard entry [{}]", actionName, - masterNode, + clusterManagerNode, request ), exp @@ -217,17 +217,17 @@ public void handleException(TransportException exp) { } } - private static Class[] 
MASTER_CHANNEL_EXCEPTIONS = new Class[] { + private static Class[] CLUSTER_MANAGER_CHANNEL_EXCEPTIONS = new Class[] { NotMasterException.class, ConnectTransportException.class, FailedToCommitClusterStateException.class }; - private static boolean isMasterChannelException(TransportException exp) { - return ExceptionsHelper.unwrap(exp, MASTER_CHANNEL_EXCEPTIONS) != null; + private static boolean isClusterManagerChannelException(TransportException exp) { + return ExceptionsHelper.unwrap(exp, CLUSTER_MANAGER_CHANNEL_EXCEPTIONS) != null; } /** - * Send a shard failed request to the master node to update the cluster state with the failure of a shard on another node. This means + * Send a shard failed request to the cluster-manager node to update the cluster state with the failure of a shard on another node. This means * that the shard should be failed because a write made it into the primary but was not replicated to this shard copy. If the shard * does not exist anymore but still has an entry in the in-sync set, remove its allocation id from the in-sync set. * @@ -261,7 +261,7 @@ int remoteShardFailedCacheSize() { } /** - * Send a shard failed request to the master node to update the cluster state when a shard on the local node failed. + * Send a shard failed request to the cluster-manager node to update the cluster state when a shard on the local node failed. */ public void localShardFailed( final ShardRouting shardRouting, @@ -273,7 +273,7 @@ public void localShardFailed( } /** - * Send a shard failed request to the master node to update the cluster state when a shard on the local node failed. + * Send a shard failed request to the cluster-manager node to update the cluster state when a shard on the local node failed. 
*/ public void localShardFailed( final ShardRouting shardRouting, @@ -294,7 +294,7 @@ public void localShardFailed( } // visible for testing - protected void waitForNewMasterAndRetry( + protected void waitForNewClusterManagerAndRetry( String actionName, ClusterStateObserver observer, TransportRequest request, @@ -305,7 +305,7 @@ protected void waitForNewMasterAndRetry( @Override public void onNewClusterState(ClusterState state) { if (logger.isTraceEnabled()) { - logger.trace("new cluster state [{}] after waiting for master election for shard entry [{}]", state, request); + logger.trace("new cluster state [{}] after waiting for cluster-manager election for shard entry [{}]", state, request); } sendShardAction(actionName, state, request, listener); } @@ -318,7 +318,7 @@ public void onClusterServiceClose() { @Override public void onTimeout(TimeValue timeout) { - // we wait indefinitely for a new master + // we wait indefinitely for a new cluster-manager assert false; } }, changePredicate); @@ -376,13 +376,13 @@ public void onFailure(String source, Exception e) { @Override public void onNoLongerMaster(String source) { - logger.error("{} no longer master while failing shard [{}]", request.shardId, request); + logger.error("{} no longer cluster-manager while failing shard [{}]", request.shardId, request); try { channel.sendResponse(new NotMasterException(source)); } catch (Exception channelException) { logger.warn( () -> new ParameterizedMessage( - "{} failed to send no longer master while failing shard [{}]", + "{} failed to send no longer cluster-manager while failing shard [{}]", request.shardId, request ), @@ -714,7 +714,8 @@ public ClusterTasksResult execute(ClusterState currentState, if (matched == null) { // tasks that correspond to non-existent shards are marked as successful. The reason is that we resend shard started // events on every cluster state publishing that does not contain the shard as started yet. 
This means that old stale - // requests might still be in flight even after the shard has already been started or failed on the master. We just + // requests might still be in flight even after the shard has already been started or failed on the cluster-manager. We + // just // ignore these requests for now. logger.debug("{} ignoring shard started task [{}] (shard does not exist anymore)", task.shardId, task); builder.success(task); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/ApplyCommitRequest.java b/server/src/main/java/org/opensearch/cluster/coordination/ApplyCommitRequest.java index 40d6375d8d916..2ace3e86b31de 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/ApplyCommitRequest.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/ApplyCommitRequest.java @@ -38,7 +38,7 @@ import java.io.IOException; /** - * A master node sends this request to its peers to inform them that it could commit the + * A cluster-manager node sends this request to its peers to inform them that it could commit the * cluster state with the given term and version. Peers that have accepted the given cluster * state will then consider it as committed and proceed to apply the state locally. 
*/ diff --git a/server/src/main/java/org/opensearch/cluster/coordination/ClusterBootstrapService.java b/server/src/main/java/org/opensearch/cluster/coordination/ClusterBootstrapService.java index c7708a54f9031..979b36110b6a3 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/ClusterBootstrapService.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/ClusterBootstrapService.java @@ -141,11 +141,11 @@ public ClusterBootstrapService( bootstrapRequirements = Collections.singleton(Node.NODE_NAME_SETTING.get(settings)); unconfiguredBootstrapTimeout = null; } else { - final List initialMasterNodes = INITIAL_CLUSTER_MANAGER_NODES_SETTING.get(settings); - bootstrapRequirements = unmodifiableSet(new LinkedHashSet<>(initialMasterNodes)); - if (bootstrapRequirements.size() != initialMasterNodes.size()) { + final List initialClusterManagerNodes = INITIAL_CLUSTER_MANAGER_NODES_SETTING.get(settings); + bootstrapRequirements = unmodifiableSet(new LinkedHashSet<>(initialClusterManagerNodes)); + if (bootstrapRequirements.size() != initialClusterManagerNodes.size()) { throw new IllegalArgumentException( - "setting [" + initialClusterManagerSettingName + "] contains duplicates: " + initialMasterNodes + "setting [" + initialClusterManagerSettingName + "] contains duplicates: " + initialClusterManagerNodes ); } unconfiguredBootstrapTimeout = discoveryIsConfigured(settings) ? 
null : UNCONFIGURED_BOOTSTRAP_TIMEOUT_SETTING.get(settings); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/ClusterStatePublisher.java b/server/src/main/java/org/opensearch/cluster/coordination/ClusterStatePublisher.java index 47a18d5be1ec4..ef35c6f8b3249 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/ClusterStatePublisher.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/ClusterStatePublisher.java @@ -39,8 +39,8 @@ public interface ClusterStatePublisher { /** - * Publish all the changes to the cluster from the master (can be called just by the master). The publish - * process should apply this state to the master as well! + * Publish all the changes to the cluster from the cluster-manager (can be called just by the cluster-manager). The publish + * process should apply this state to the cluster-manager as well! * * The publishListener allows to wait for the publication to complete, which can be either successful completion, timing out or failing. * The method is guaranteed to pass back a {@link FailedToCommitClusterStateException} to the publishListener if the change is not diff --git a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java index b28fde5d9cc16..9713c841caaf7 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java @@ -610,7 +610,8 @@ default void markLastAcceptedStateAsCommitted() { metadataBuilder = Metadata.builder(lastAcceptedState.metadata()); metadataBuilder.coordinationMetadata(coordinationMetadata); } - // if we receive a commit from a Zen1 master that has not recovered its state yet, the cluster uuid might not been known yet. 
+ // if we receive a commit from a Zen1 cluster-manager that has not recovered its state yet, + // the cluster uuid might not been known yet. assert lastAcceptedState.metadata().clusterUUID().equals(Metadata.UNKNOWN_CLUSTER_UUID) == false || lastAcceptedState.term() == ZEN1_BWC_TERM : "received cluster state with empty cluster uuid but not Zen1 BWC term: " + lastAcceptedState; @@ -622,7 +623,8 @@ default void markLastAcceptedStateAsCommitted() { metadataBuilder.clusterUUIDCommitted(true); if (lastAcceptedState.term() != ZEN1_BWC_TERM) { - // Zen1 masters never publish a committed cluster UUID so if we logged this it'd happen on on every update. Let's just + // Zen1 cluster-managers never publish a committed cluster UUID so if we logged this it'd happen on on every update. + // Let's just // not log it at all in a 6.8/7.x rolling upgrade. logger.info("cluster UUID set to [{}]", lastAcceptedState.metadata().clusterUUID()); } diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java index 89e5b9b4cfbcc..ef578300cdbe2 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java @@ -136,12 +136,12 @@ public class Coordinator extends AbstractLifecycleComponent implements Discovery private final boolean singleNodeDiscovery; private final ElectionStrategy electionStrategy; private final TransportService transportService; - private final MasterService masterService; + private final MasterService clusterManagerService; private final AllocationService allocationService; private final JoinHelper joinHelper; private final NodeRemovalClusterStateTaskExecutor nodeRemovalExecutor; private final Supplier persistedStateSupplier; - private final NoMasterBlockService noMasterBlockService; + private final NoMasterBlockService noClusterManagerBlockService; final Object mutex = 
new Object(); // package-private to allow tests to call methods that assert that the mutex is held private final SetOnce coordinationState = new SetOnce<>(); // initialized on start-up (see doStart) private volatile ClusterState applierState; // the state that should be exposed to the cluster state applier @@ -186,7 +186,7 @@ public Coordinator( TransportService transportService, NamedWriteableRegistry namedWriteableRegistry, AllocationService allocationService, - MasterService masterService, + MasterService clusterManagerService, Supplier persistedStateSupplier, SeedHostsProvider seedHostsProvider, ClusterApplier clusterApplier, @@ -198,7 +198,7 @@ public Coordinator( ) { this.settings = settings; this.transportService = transportService; - this.masterService = masterService; + this.clusterManagerService = clusterManagerService; this.allocationService = allocationService; this.onJoinValidators = JoinTaskExecutor.addBuiltInJoinValidators(onJoinValidators); this.singleNodeDiscovery = DiscoveryModule.isSingleNodeDiscovery(settings); @@ -206,10 +206,10 @@ public Coordinator( this.joinHelper = new JoinHelper( settings, allocationService, - masterService, + clusterManagerService, transportService, this::getCurrentTerm, - this::getStateForMasterService, + this::getStateForClusterManagerService, this::handleJoinRequest, this::joinLeaderInTerm, this.onJoinValidators, @@ -217,7 +217,7 @@ public Coordinator( nodeHealthService ); this.persistedStateSupplier = persistedStateSupplier; - this.noMasterBlockService = new NoMasterBlockService(settings, clusterSettings); + this.noClusterManagerBlockService = new NoMasterBlockService(settings, clusterSettings); this.lastKnownLeader = Optional.empty(); this.lastJoin = Optional.empty(); this.joinAccumulator = new InitialJoinAccumulator(); @@ -255,7 +255,7 @@ public Coordinator( ); this.nodeRemovalExecutor = new NodeRemovalClusterStateTaskExecutor(allocationService, logger); this.clusterApplier = clusterApplier; - 
masterService.setClusterStateSupplier(this::getStateForMasterService); + clusterManagerService.setClusterStateSupplier(this::getStateForClusterManagerService); this.reconfigurator = new Reconfigurator(settings, clusterSettings); this.clusterBootstrapService = new ClusterBootstrapService( settings, @@ -282,7 +282,7 @@ public Coordinator( private ClusterFormationState getClusterFormationState() { return new ClusterFormationState( settings, - getStateForMasterService(), + getStateForClusterManagerService(), peerFinder.getLastResolvedAddresses(), Stream.concat(Stream.of(getLocalNode()), StreamSupport.stream(peerFinder.getFoundPeers().spliterator(), false)) .collect(Collectors.toList()), @@ -296,7 +296,7 @@ private void onLeaderFailure(Exception e) { synchronized (mutex) { if (mode != Mode.CANDIDATE) { assert lastKnownLeader.isPresent(); - logger.info(new ParameterizedMessage("master node [{}] failed, restarting discovery", lastKnownLeader.get()), e); + logger.info(new ParameterizedMessage("cluster-manager node [{}] failed, restarting discovery", lastKnownLeader.get()), e); } becomeCandidate("onLeaderFailure"); } @@ -305,7 +305,7 @@ private void onLeaderFailure(Exception e) { private void removeNode(DiscoveryNode discoveryNode, String reason) { synchronized (mutex) { if (mode == Mode.LEADER) { - masterService.submitStateUpdateTask( + clusterManagerService.submitStateUpdateTask( "node-left", new NodeRemovalClusterStateTaskExecutor.Task(discoveryNode, reason), ClusterStateTaskConfig.build(Priority.IMMEDIATE), @@ -336,11 +336,11 @@ void onFollowerCheckRequest(FollowerCheckRequest followerCheckRequest) { } else if (mode == Mode.FOLLOWER) { logger.trace("onFollowerCheckRequest: responding successfully to {}", followerCheckRequest); } else if (joinHelper.isJoinPending()) { - logger.trace("onFollowerCheckRequest: rejoining master, responding successfully to {}", followerCheckRequest); + logger.trace("onFollowerCheckRequest: rejoining cluster-manager, responding successfully to 
{}", followerCheckRequest); } else { - logger.trace("onFollowerCheckRequest: received check from faulty master, rejecting {}", followerCheckRequest); + logger.trace("onFollowerCheckRequest: received check from faulty cluster-manager, rejecting {}", followerCheckRequest); throw new CoordinationStateRejectedException( - "onFollowerCheckRequest: received check from faulty master, rejecting " + followerCheckRequest + "onFollowerCheckRequest: received check from faulty cluster-manager, rejecting " + followerCheckRequest ); } } @@ -352,9 +352,9 @@ private void handleApplyCommit(ApplyCommitRequest applyCommitRequest, ActionList coordinationState.get().handleCommit(applyCommitRequest); final ClusterState committedState = hideStateIfNotRecovered(coordinationState.get().getLastAcceptedState()); - applierState = mode == Mode.CANDIDATE ? clusterStateWithNoMasterBlock(committedState) : committedState; + applierState = mode == Mode.CANDIDATE ? clusterStateWithNoClusterManagerBlock(committedState) : committedState; if (applyCommitRequest.getSourceNode().equals(getLocalNode())) { - // master node applies the committed state at the end of the publication process, not here. + // cluster-manager node applies the committed state at the end of the publication process, not here. 
applyListener.onResponse(null); } else { clusterApplier.onNewClusterState(applyCommitRequest.toString(), () -> applierState, new ClusterApplyListener() { @@ -423,7 +423,7 @@ && getCurrentTerm() == ZEN1_BWC_TERM } if (publishRequest.getAcceptedState().term() > localState.term()) { - // only do join validation if we have not accepted state from this master yet + // only do join validation if we have not accepted state from this cluster manager yet onJoinValidators.forEach(a -> a.accept(getLocalNode(), publishRequest.getAcceptedState())); } @@ -507,12 +507,12 @@ private void startElection() { } } - private void abdicateTo(DiscoveryNode newMaster) { + private void abdicateTo(DiscoveryNode newClusterManager) { assert Thread.holdsLock(mutex); assert mode == Mode.LEADER : "expected to be leader on abdication but was " + mode; - assert newMaster.isMasterNode() : "should only abdicate to cluster-manager-eligible node but was " + newMaster; - final StartJoinRequest startJoinRequest = new StartJoinRequest(newMaster, Math.max(getCurrentTerm(), maxTermSeen) + 1); - logger.info("abdicating to {} with term {}", newMaster, startJoinRequest.getTerm()); + assert newClusterManager.isMasterNode() : "should only abdicate to cluster-manager-eligible node but was " + newClusterManager; + final StartJoinRequest startJoinRequest = new StartJoinRequest(newClusterManager, Math.max(getCurrentTerm(), maxTermSeen) + 1); + logger.info("abdicating to {} with term {}", newClusterManager, startJoinRequest.getTerm()); getLastAcceptedState().nodes().mastersFirstStream().forEach(node -> { if (isZen1Node(node) == false) { joinHelper.sendStartJoinRequest(startJoinRequest, node); @@ -521,7 +521,7 @@ private void abdicateTo(DiscoveryNode newMaster) { // handling of start join messages on the local node will be dispatched to the generic thread-pool assert mode == Mode.LEADER : "should still be leader after sending abdication messages " + mode; // explicitly move node to candidate state so that the next 
cluster state update task yields an onNoLongerMaster event - becomeCandidate("after abdicating to " + newMaster); + becomeCandidate("after abdicating to " + newClusterManager); } private static boolean localNodeMayWinElection(ClusterState lastAcceptedState) { @@ -580,7 +580,7 @@ private void handleJoinRequest(JoinRequest joinRequest, JoinHelper.JoinCallback } transportService.connectToNode(joinRequest.getSourceNode(), ActionListener.wrap(ignore -> { - final ClusterState stateForJoinValidation = getStateForMasterService(); + final ClusterState stateForJoinValidation = getStateForClusterManagerService(); if (stateForJoinValidation.nodes().isLocalNodeElectedMaster()) { onJoinValidators.forEach(a -> a.accept(joinRequest.getSourceNode(), stateForJoinValidation)); @@ -668,11 +668,11 @@ void becomeCandidate(String method) { lagDetector.clearTrackedNodes(); if (prevMode == Mode.LEADER) { - cleanMasterService(); + cleanClusterManagerService(); } if (applierState.nodes().getMasterNodeId() != null) { - applierState = clusterStateWithNoMasterBlock(applierState); + applierState = clusterStateWithNoClusterManagerBlock(applierState); clusterApplier.onNewClusterState("becoming candidate: " + method, () -> applierState, (source, e) -> {}); } } @@ -750,8 +750,8 @@ void becomeFollower(String method, DiscoveryNode leaderNode) { lagDetector.clearTrackedNodes(); } - private void cleanMasterService() { - masterService.submitStateUpdateTask("clean-up after stepping down as cluster-manager", new LocalClusterUpdateTask() { + private void cleanClusterManagerService() { + clusterManagerService.submitStateUpdateTask("clean-up after stepping down as cluster-manager", new LocalClusterUpdateTask() { @Override public void onFailure(String source, Exception e) { // ignore @@ -833,7 +833,7 @@ protected void doStart() { .blocks( ClusterBlocks.builder() .addGlobalBlock(STATE_NOT_RECOVERED_BLOCK) - .addGlobalBlock(noMasterBlockService.getNoMasterBlock()) + 
.addGlobalBlock(noClusterManagerBlockService.getNoMasterBlock()) ) .nodes(DiscoveryNodes.builder().add(getLocalNode()).localNodeId(getLocalNode().getId())) .build(); @@ -888,7 +888,7 @@ public void invariant() { + lagDetector.getTrackedNodes(); if (mode == Mode.LEADER) { - final boolean becomingMaster = getStateForMasterService().term() != getCurrentTerm(); + final boolean becomingClusterManager = getStateForClusterManagerService().term() != getCurrentTerm(); assert coordinationState.get().electionWon(); assert lastKnownLeader.isPresent() && lastKnownLeader.get().equals(getLocalNode()); @@ -896,7 +896,8 @@ public void invariant() { assert peerFinderLeader.equals(lastKnownLeader) : peerFinderLeader; assert electionScheduler == null : electionScheduler; assert prevotingRound == null : prevotingRound; - assert becomingMaster || getStateForMasterService().nodes().getMasterNodeId() != null : getStateForMasterService(); + assert becomingClusterManager || getStateForClusterManagerService().nodes().getMasterNodeId() != null + : getStateForClusterManagerService(); assert leaderChecker.leader() == null : leaderChecker.leader(); assert getLocalNode().equals(applierState.nodes().getMasterNode()) || (applierState.nodes().getMasterNodeId() == null && applierState.term() < getCurrentTerm()); @@ -904,8 +905,9 @@ assert getLocalNode().equals(applierState.nodes().getMasterNode()) assert clusterFormationFailureHelper.isRunning() == false; final boolean activePublication = currentPublication.map(CoordinatorPublication::isActiveForCurrentLeader).orElse(false); - if (becomingMaster && activePublication == false) { - // cluster state update task to become master is submitted to MasterService, but publication has not started yet + if (becomingClusterManager && activePublication == false) { + // cluster state update task to become cluster-manager is submitted to MasterService, + // but publication has not started yet assert followersChecker.getKnownFollowers().isEmpty() : 
followersChecker.getKnownFollowers(); } else { final ClusterState lastPublishedState; @@ -924,7 +926,7 @@ assert getLocalNode().equals(applierState.nodes().getMasterNode()) + followersChecker.getKnownFollowers(); } - assert becomingMaster + assert becomingClusterManager || activePublication || coordinationState.get() .getLastAcceptedConfiguration() @@ -939,8 +941,8 @@ assert getLocalNode().equals(applierState.nodes().getMasterNode()) assert peerFinderLeader.equals(lastKnownLeader) : peerFinderLeader; assert electionScheduler == null : electionScheduler; assert prevotingRound == null : prevotingRound; - assert getStateForMasterService().nodes().getMasterNodeId() == null : getStateForMasterService(); - assert leaderChecker.currentNodeIsMaster() == false; + assert getStateForClusterManagerService().nodes().getMasterNodeId() == null : getStateForClusterManagerService(); + assert leaderChecker.currentNodeIsClusterManager() == false; assert lastKnownLeader.equals(Optional.of(leaderChecker.leader())); assert followersChecker.getKnownFollowers().isEmpty(); assert lastKnownLeader.get().equals(applierState.nodes().getMasterNode()) @@ -954,8 +956,8 @@ assert getLocalNode().equals(applierState.nodes().getMasterNode()) assert joinAccumulator instanceof JoinHelper.CandidateJoinAccumulator; assert peerFinderLeader.isPresent() == false : peerFinderLeader; assert prevotingRound == null || electionScheduler != null; - assert getStateForMasterService().nodes().getMasterNodeId() == null : getStateForMasterService(); - assert leaderChecker.currentNodeIsMaster() == false; + assert getStateForClusterManagerService().nodes().getMasterNodeId() == null : getStateForClusterManagerService(); + assert leaderChecker.currentNodeIsClusterManager() == false; assert leaderChecker.leader() == null : leaderChecker.leader(); assert followersChecker.getKnownFollowers().isEmpty(); assert applierState.nodes().getMasterNodeId() == null; @@ -967,7 +969,7 @@ assert 
getLocalNode().equals(applierState.nodes().getMasterNode()) } public boolean isInitialConfigurationSet() { - return getStateForMasterService().getLastAcceptedConfiguration().isEmpty() == false; + return getStateForClusterManagerService().getLastAcceptedConfiguration().isEmpty() == false; } /** @@ -979,7 +981,7 @@ public boolean isInitialConfigurationSet() { */ public boolean setInitialConfiguration(final VotingConfiguration votingConfiguration) { synchronized (mutex) { - final ClusterState currentState = getStateForMasterService(); + final ClusterState currentState = getStateForClusterManagerService(); if (isInitialConfigurationSet()) { logger.debug("initial configuration already set, ignoring {}", votingConfiguration); @@ -1051,7 +1053,7 @@ ClusterState improveConfiguration(ClusterState clusterState) { // the voting config. We could exclude all the cluster-manager-ineligible nodes here, but there could be quite a few of them and // that makes // the logging much harder to follow. - final Stream masterIneligibleNodeIdsInVotingConfig = StreamSupport.stream(clusterState.nodes().spliterator(), false) + final Stream clusterManagerIneligibleNodeIdsInVotingConfig = StreamSupport.stream(clusterState.nodes().spliterator(), false) .filter( n -> n.isMasterNode() == false && (clusterState.getLastAcceptedConfiguration().getNodeIds().contains(n.getId()) @@ -1066,7 +1068,7 @@ ClusterState improveConfiguration(ClusterState clusterState) { .collect(Collectors.toSet()); final VotingConfiguration newConfig = reconfigurator.reconfigure( liveNodes, - Stream.concat(masterIneligibleNodeIdsInVotingConfig, excludedNodeIds).collect(Collectors.toSet()), + Stream.concat(clusterManagerIneligibleNodeIdsInVotingConfig, excludedNodeIds).collect(Collectors.toSet()), getLocalNode(), clusterState.getLastAcceptedConfiguration() ); @@ -1119,7 +1121,7 @@ private void scheduleReconfigurationIfNeeded() { final ClusterState state = getLastAcceptedState(); if (improveConfiguration(state) != state && 
reconfigurationTaskScheduled.compareAndSet(false, true)) { logger.trace("scheduling reconfiguration"); - masterService.submitStateUpdateTask("reconfigure", new ClusterStateUpdateTask(Priority.URGENT) { + clusterManagerService.submitStateUpdateTask("reconfigure", new ClusterStateUpdateTask(Priority.URGENT) { @Override public ClusterState execute(ClusterState currentState) { reconfigurationTaskScheduled.set(false); @@ -1148,13 +1150,14 @@ private void handleJoin(Join join) { if (coordinationState.get().electionWon()) { // If we have already won the election then the actual join does not matter for election purposes, so swallow any exception - final boolean isNewJoinFromMasterEligibleNode = handleJoinIgnoringExceptions(join); + final boolean isNewJoinFromClusterManagerEligibleNode = handleJoinIgnoringExceptions(join); - // If we haven't completely finished becoming master then there's already a publication scheduled which will, in turn, + // If we haven't completely finished becoming cluster-manager then there's already a publication scheduled which will, in + // turn, // schedule a reconfiguration if needed. 
It's benign to schedule a reconfiguration anyway, but it might fail if it wins the // race against the election-winning publication and log a big error message, which we can prevent by checking this here: - final boolean establishedAsMaster = mode == Mode.LEADER && getLastAcceptedState().term() == getCurrentTerm(); - if (isNewJoinFromMasterEligibleNode && establishedAsMaster && publicationInProgress() == false) { + final boolean establishedAsClusterManager = mode == Mode.LEADER && getLastAcceptedState().term() == getCurrentTerm(); + if (isNewJoinFromClusterManagerEligibleNode && establishedAsClusterManager && publicationInProgress() == false) { scheduleReconfigurationIfNeeded(); } } else { @@ -1193,27 +1196,28 @@ private List getDiscoveredNodes() { return nodes; } - ClusterState getStateForMasterService() { + ClusterState getStateForClusterManagerService() { synchronized (mutex) { - // expose last accepted cluster state as base state upon which the master service + // expose last accepted cluster state as base state upon which the cluster_manager service // speculatively calculates the next cluster state update final ClusterState clusterState = coordinationState.get().getLastAcceptedState(); if (mode != Mode.LEADER || clusterState.term() != getCurrentTerm()) { - // the master service checks if the local node is the master node in order to fail execution of the state update early - return clusterStateWithNoMasterBlock(clusterState); + // the cluster-manager service checks if the local node is the cluster-manager node in order to fail execution of the state + // update early + return clusterStateWithNoClusterManagerBlock(clusterState); } return clusterState; } } - private ClusterState clusterStateWithNoMasterBlock(ClusterState clusterState) { + private ClusterState clusterStateWithNoClusterManagerBlock(ClusterState clusterState) { if (clusterState.nodes().getMasterNodeId() != null) { // remove block if it already exists before adding new one assert 
clusterState.blocks().hasGlobalBlockWithId(NO_MASTER_BLOCK_ID) == false : "NO_MASTER_BLOCK should only be added by Coordinator"; final ClusterBlocks clusterBlocks = ClusterBlocks.builder() .blocks(clusterState.blocks()) - .addGlobalBlock(noMasterBlockService.getNoMasterBlock()) + .addGlobalBlock(noClusterManagerBlockService.getNoMasterBlock()) .build(); final DiscoveryNodes discoveryNodes = new DiscoveryNodes.Builder(clusterState.nodes()).masterNodeId(null).build(); return ClusterState.builder(clusterState).blocks(clusterBlocks).nodes(discoveryNodes).build(); @@ -1233,14 +1237,16 @@ public void publish( if (mode != Mode.LEADER || getCurrentTerm() != clusterChangedEvent.state().term()) { logger.debug( () -> new ParameterizedMessage( - "[{}] failed publication as node is no longer master for term {}", + "[{}] failed publication as node is no longer cluster-manager for term {}", clusterChangedEvent.source(), clusterChangedEvent.state().term() ) ); publishListener.onFailure( new FailedToCommitClusterStateException( - "node is no longer master for term " + clusterChangedEvent.state().term() + " while handling publication" + "node is no longer cluster-manager for term " + + clusterChangedEvent.state().term() + + " while handling publication" ) ); return; @@ -1302,12 +1308,12 @@ private boolean assertPreviousStateConsistency(ClusterChangedEvent event) { .equals( XContentHelper.convertToMap( JsonXContent.jsonXContent, - Strings.toString(clusterStateWithNoMasterBlock(coordinationState.get().getLastAcceptedState())), + Strings.toString(clusterStateWithNoClusterManagerBlock(coordinationState.get().getLastAcceptedState())), false ) ) : Strings.toString(event.previousState()) + " vs " - + Strings.toString(clusterStateWithNoMasterBlock(coordinationState.get().getLastAcceptedState())); + + Strings.toString(clusterStateWithNoClusterManagerBlock(coordinationState.get().getLastAcceptedState())); return true; } @@ -1363,10 +1369,10 @@ private class CoordinatorPeerFinder extends 
PeerFinder { } @Override - protected void onActiveMasterFound(DiscoveryNode masterNode, long term) { + protected void onActiveClusterManagerFound(DiscoveryNode clusterManagerNode, long term) { synchronized (mutex) { - ensureTermAtLeast(masterNode, term); - joinHelper.sendJoinRequest(masterNode, getCurrentTerm(), joinWithDestination(lastJoin, masterNode, term)); + ensureTermAtLeast(clusterManagerNode, term); + joinHelper.sendJoinRequest(clusterManagerNode, getCurrentTerm(), joinWithDestination(lastJoin, clusterManagerNode, term)); } } @@ -1613,12 +1619,12 @@ public void onSuccess(String source) { boolean attemptReconfiguration = true; final ClusterState state = getLastAcceptedState(); // committed state if (localNodeMayWinElection(state) == false) { - final List masterCandidates = completedNodes().stream() + final List clusterManagerCandidates = completedNodes().stream() .filter(DiscoveryNode::isMasterNode) .filter(node -> nodeMayWinElection(state, node)) .filter(node -> { - // check if master candidate would be able to get an election quorum if we were to - // abdicate to it. Assume that every node that completed the publication can provide + // check if cluster_manager candidate would be able to get an election quorum if we were + // to abdicate to it. Assume that every node that completed the publication can provide // a vote in that next election and has the latest state. 
final long futureElectionTerm = state.term() + 1; final VoteCollection futureVoteCollection = new VoteCollection(); @@ -1638,8 +1644,8 @@ public void onSuccess(String source) { ); }) .collect(Collectors.toList()); - if (masterCandidates.isEmpty() == false) { - abdicateTo(masterCandidates.get(random.nextInt(masterCandidates.size()))); + if (clusterManagerCandidates.isEmpty() == false) { + abdicateTo(clusterManagerCandidates.get(random.nextInt(clusterManagerCandidates.size()))); attemptReconfiguration = false; } } @@ -1665,7 +1671,7 @@ public void onFailure(Exception e) { cancelTimeoutHandlers(); final FailedToCommitClusterStateException exception = new FailedToCommitClusterStateException("publication failed", e); - ackListener.onNodeAck(getLocalNode(), exception); // other nodes have acked, but not the master. + ackListener.onNodeAck(getLocalNode(), exception); // other nodes have acked, but not the cluster manager. publishListener.onFailure(exception); } }, OpenSearchExecutors.newDirectExecutorService(), transportService.getThreadPool().getThreadContext()); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java index 5975e5b64214f..693a997d318cd 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java @@ -138,23 +138,28 @@ public class JoinHelper { @Override public ClusterTasksResult execute(ClusterState currentState, List joiningTasks) throws Exception { - // The current state that MasterService uses might have been updated by a (different) master in a higher term already + // The current state that MasterService uses might have been updated by a (different) cluster-manager in a higher term + // already // Stop processing the current cluster state update, as there's no point in continuing to compute it as // it will later be rejected by 
Coordinator.publish(...) anyhow if (currentState.term() > term) { - logger.trace("encountered higher term {} than current {}, there is a newer master", currentState.term(), term); + logger.trace("encountered higher term {} than current {}, there is a newer cluster-manager", currentState.term(), term); throw new NotMasterException( - "Higher term encountered (current: " + currentState.term() + " > used: " + term + "), there is a newer master" + "Higher term encountered (current: " + + currentState.term() + + " > used: " + + term + + "), there is a newer cluster-manager" ); } else if (currentState.nodes().getMasterNodeId() == null && joiningTasks.stream().anyMatch(Task::isBecomeMasterTask)) { - assert currentState.term() < term : "there should be at most one become master task per election (= by term)"; + assert currentState.term() < term : "there should be at most one become cluster-manager task per election (= by term)"; final CoordinationMetadata coordinationMetadata = CoordinationMetadata.builder(currentState.coordinationMetadata()) .term(term) .build(); final Metadata metadata = Metadata.builder(currentState.metadata()).coordinationMetadata(coordinationMetadata).build(); currentState = ClusterState.builder(currentState).metadata(metadata).build(); } else if (currentState.nodes().isLocalNodeElectedMaster()) { - assert currentState.term() == term : "term should be stable for the same master"; + assert currentState.term() == term : "term should be stable for the same cluster-manager"; } return super.execute(currentState, joiningTasks); } @@ -297,7 +302,7 @@ void logLastFailedJoinAttempt() { } public void sendJoinRequest(DiscoveryNode destination, long term, Optional optionalJoin, Runnable onCompletion) { - assert destination.isMasterNode() : "trying to join master-ineligible " + destination; + assert destination.isMasterNode() : "trying to join cluster-manager-ineligible " + destination; final StatusInfo statusInfo = nodeHealthService.getHealth(); if 
(statusInfo.getStatus() == UNHEALTHY) { logger.debug("dropping join request to [{}]: [{}]", destination, statusInfo.getInfo()); @@ -348,7 +353,7 @@ public String executor() { } public void sendStartJoinRequest(final StartJoinRequest startJoinRequest, final DiscoveryNode destination) { - assert startJoinRequest.getSourceNode().isMasterNode() : "sending start-join request for master-ineligible " + assert startJoinRequest.getSourceNode().isMasterNode() : "sending start-join request for cluster-manager-ineligible " + startJoinRequest.getSourceNode(); transportService.sendRequest(destination, START_JOIN_ACTION_NAME, startJoinRequest, new TransportResponseHandler() { @Override diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinRequest.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinRequest.java index f18396e78fbf9..84adf834d85e8 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinRequest.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinRequest.java @@ -50,15 +50,15 @@ public class JoinRequest extends TransportRequest { /** * The minimum term for which the joining node will accept any cluster state publications. If the joining node is in a strictly greater - * term than the master it wants to join then the master must enter a new term and hold another election. Doesn't necessarily match + * term than the cluster-manager it wants to join then the cluster-manager must enter a new term and hold another election. Doesn't necessarily match * {@link JoinRequest#optionalJoin} and may be zero in join requests sent prior to {@link LegacyESVersion#V_7_7_0}. */ private final long minimumTerm; /** - * A vote for the receiving node. This vote is optional since the sending node may have voted for a different master in this term. - * That's ok, the sender likely discovered that the master we voted for lost the election and now we're trying to join the winner. 
Once - * the sender has successfully joined the master, the lack of a vote in its term causes another election (see + * A vote for the receiving node. This vote is optional since the sending node may have voted for a different cluster-manager in this term. + * That's ok, the sender likely discovered that the cluster-manager we voted for lost the election and now we're trying to join the winner. Once + * the sender has successfully joined the cluster-manager, the lack of a vote in its term causes another election (see * {@link Publication#onMissingJoin(DiscoveryNode)}). */ private final Optional optionalJoin; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java index ea5c33b4300a5..b8f7dfd116b7e 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java @@ -129,16 +129,19 @@ public ClusterTasksResult execute(ClusterState currentState, List jo if (joiningNodes.size() == 1 && joiningNodes.get(0).isFinishElectionTask()) { return results.successes(joiningNodes).build(currentState); } else if (currentNodes.getMasterNode() == null && joiningNodes.stream().anyMatch(Task::isBecomeMasterTask)) { - assert joiningNodes.stream().anyMatch(Task::isFinishElectionTask) : "becoming a master but election is not finished " + assert joiningNodes.stream().anyMatch(Task::isFinishElectionTask) : "becoming a cluster-manager but election is not finished " + joiningNodes; - // use these joins to try and become the master. + // use these joins to try and become the cluster-manager. 
// Note that we don't have to do any validation of the amount of joining nodes - the commit // during the cluster state publishing guarantees that we have enough - newState = becomeMasterAndTrimConflictingNodes(currentState, joiningNodes); + newState = becomeClusterManagerAndTrimConflictingNodes(currentState, joiningNodes); nodesChanged = true; } else if (currentNodes.isLocalNodeElectedMaster() == false) { - logger.trace("processing node joins, but we are not the master. current master: {}", currentNodes.getMasterNode()); - throw new NotMasterException("Node [" + currentNodes.getLocalNode() + "] not master for join request"); + logger.trace( + "processing node joins, but we are not the cluster-manager. current cluster-manager: {}", + currentNodes.getMasterNode() + ); + throw new NotMasterException("Node [" + currentNodes.getLocalNode() + "] not cluster-manager for join request"); } else { newState = ClusterState.builder(currentState); } @@ -221,12 +224,12 @@ public ClusterTasksResult execute(ClusterState currentState, List jo return results.build(allocationService.adaptAutoExpandReplicas(newState.nodes(nodesBuilder).build())); } else { // we must return a new cluster state instance to force publishing. 
This is important - // for the joining node to finalize its join and set us as a master + // for the joining node to finalize its join and set us as a cluster-manager return results.build(newState.build()); } } - protected ClusterState.Builder becomeMasterAndTrimConflictingNodes(ClusterState currentState, List joiningNodes) { + protected ClusterState.Builder becomeClusterManagerAndTrimConflictingNodes(ClusterState currentState, List joiningNodes) { assert currentState.nodes().getMasterNodeId() == null : currentState; DiscoveryNodes currentNodes = currentState.nodes(); DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(currentNodes); @@ -256,13 +259,13 @@ protected ClusterState.Builder becomeMasterAndTrimConflictingNodes(ClusterState } } - // now trim any left over dead nodes - either left there when the previous master stepped down + // now trim any left over dead nodes - either left there when the previous cluster-manager stepped down // or removed by us above ClusterState tmpState = ClusterState.builder(currentState) .nodes(nodesBuilder) .blocks(ClusterBlocks.builder().blocks(currentState.blocks()).removeGlobalBlock(NoMasterBlockService.NO_MASTER_BLOCK_ID)) .build(); - logger.trace("becomeMasterAndTrimConflictingNodes: {}", tmpState.nodes()); + logger.trace("becomeClusterManagerAndTrimConflictingNodes: {}", tmpState.nodes()); allocationService.cleanCaches(); tmpState = PersistentTasksCustomMetadata.disassociateDeadNodes(tmpState); return ClusterState.builder(allocationService.disassociateDeadNodes(tmpState, false, "removed dead nodes on election")); @@ -277,7 +280,7 @@ private void refreshDiscoveryNodeVersionAfterUpgrade(DiscoveryNodes currentNodes // updating the version of those node which have connection with the new master. 
// Note: This should get deprecated with BWC mode logic if (null == transportService) { - // this logic is only applicable when OpenSearch node is master and is noop for zen discovery node + // this logic is only applicable when OpenSearch node is cluster-manager and is noop for zen discovery node return; } if (currentNodes.getMinNodeVersion().before(Version.V_1_0_0)) { @@ -310,7 +313,7 @@ private void refreshDiscoveryNodeVersionAfterUpgrade(DiscoveryNodes currentNodes } } else { // in case existing OpenSearch node is present in the cluster and but there is no connection to that node yet, - // either that node will send new JoinRequest to the master with version >=1.0, then no issue or + // either that node will send new JoinRequest to the cluster-manager/master with version >=1.0, then no issue or // there is an edge case if doesn't send JoinRequest and connection is established, // then it can continue to report version as 7.10.2 instead of actual OpenSearch version. So, // removing the node from cluster state to prevent stale version reporting and let it reconnect. @@ -409,7 +412,7 @@ public static void ensureNodesCompatibility(Version joiningNodeVersion, Version /** * ensures that the joining node's major version is equal or higher to the minClusterNodeVersion. 
This is needed - * to ensure that if the master is already fully operating under the new major version, it doesn't go back to mixed + * to ensure that if the cluster-manager/master is already fully operating under the new major version, it doesn't go back to mixed * version mode **/ public static void ensureMajorVersionBarrier(Version joiningNodeVersion, Version minClusterNodeVersion) { diff --git a/server/src/main/java/org/opensearch/cluster/coordination/LagDetector.java b/server/src/main/java/org/opensearch/cluster/coordination/LagDetector.java index e599fffa68ff1..70a1c4f3ec220 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/LagDetector.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/LagDetector.java @@ -102,7 +102,7 @@ public void clearTrackedNodes() { public void setAppliedVersion(final DiscoveryNode discoveryNode, final long appliedVersion) { final NodeAppliedStateTracker nodeAppliedStateTracker = appliedStateTrackersByNode.get(discoveryNode); if (nodeAppliedStateTracker == null) { - // Received an ack from a node that a later publication has removed (or we are no longer master). No big deal. + // Received an ack from a node that a later publication has removed (or we are no longer cluster-manager). No big deal. 
logger.trace("node {} applied version {} but this node's version is not being tracked", discoveryNode, appliedVersion); } else { nodeAppliedStateTracker.increaseAppliedVersion(appliedVersion); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/LeaderChecker.java b/server/src/main/java/org/opensearch/cluster/coordination/LeaderChecker.java index b4edc9401234d..fcf54aff7f478 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/LeaderChecker.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/LeaderChecker.java @@ -190,7 +190,7 @@ void setCurrentNodes(DiscoveryNodes discoveryNodes) { } // For assertions - boolean currentNodeIsMaster() { + boolean currentNodeIsClusterManager() { return discoveryNodes.isLocalNodeElectedMaster(); } @@ -208,9 +208,9 @@ private void handleLeaderCheck(LeaderCheckRequest request) { logger.debug(message); throw new NodeHealthCheckFailureException(message); } else if (discoveryNodes.isLocalNodeElectedMaster() == false) { - logger.debug("rejecting leader check on non-master {}", request); + logger.debug("rejecting leader check on non-cluster-manager {}", request); throw new CoordinationStateRejectedException( - "rejecting leader check from [" + request.getSender() + "] sent to a node that is no longer the master" + "rejecting leader check from [" + request.getSender() + "] sent to a node that is no longer the cluster-manager" ); } else if (discoveryNodes.nodeExists(request.getSender()) == false) { logger.debug("rejecting leader check from removed node: {}", request); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/NoMasterBlockService.java b/server/src/main/java/org/opensearch/cluster/coordination/NoMasterBlockService.java index 8cbb0446a1337..f6420bb32b5f3 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/NoMasterBlockService.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/NoMasterBlockService.java @@ -74,7 +74,7 @@ public class 
NoMasterBlockService { public static final Setting NO_MASTER_BLOCK_SETTING = new Setting<>( "cluster.no_master_block", "write", - NoMasterBlockService::parseNoMasterBlock, + NoMasterBlockService::parseNoClusterManagerBlock, Property.Dynamic, Property.NodeScope, Property.Deprecated @@ -84,19 +84,19 @@ public class NoMasterBlockService { public static final Setting NO_CLUSTER_MANAGER_BLOCK_SETTING = new Setting<>( "cluster.no_cluster_manager_block", NO_MASTER_BLOCK_SETTING, - NoMasterBlockService::parseNoMasterBlock, + NoMasterBlockService::parseNoClusterManagerBlock, Property.Dynamic, Property.NodeScope ); - private volatile ClusterBlock noMasterBlock; + private volatile ClusterBlock noClusterManagerBlock; public NoMasterBlockService(Settings settings, ClusterSettings clusterSettings) { - this.noMasterBlock = NO_CLUSTER_MANAGER_BLOCK_SETTING.get(settings); + this.noClusterManagerBlock = NO_CLUSTER_MANAGER_BLOCK_SETTING.get(settings); clusterSettings.addSettingsUpdateConsumer(NO_CLUSTER_MANAGER_BLOCK_SETTING, this::setNoMasterBlock); } - private static ClusterBlock parseNoMasterBlock(String value) { + private static ClusterBlock parseNoClusterManagerBlock(String value) { switch (value) { case "all": return NO_MASTER_BLOCK_ALL; @@ -105,15 +105,17 @@ private static ClusterBlock parseNoMasterBlock(String value) { case "metadata_write": return NO_MASTER_BLOCK_METADATA_WRITES; default: - throw new IllegalArgumentException("invalid no-master block [" + value + "], must be one of [all, write, metadata_write]"); + throw new IllegalArgumentException( + "invalid no-cluster-manager block [" + value + "], must be one of [all, write, metadata_write]" + ); } } public ClusterBlock getNoMasterBlock() { - return noMasterBlock; + return noClusterManagerBlock; } - private void setNoMasterBlock(ClusterBlock noMasterBlock) { - this.noMasterBlock = noMasterBlock; + private void setNoMasterBlock(ClusterBlock noClusterManagerBlock) { + this.noClusterManagerBlock = noClusterManagerBlock; } } 
diff --git a/server/src/main/java/org/opensearch/cluster/coordination/NodeRemovalClusterStateTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/coordination/NodeRemovalClusterStateTaskExecutor.java index 02bdb65c7edf2..e8ab2f8d53d3f 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/NodeRemovalClusterStateTaskExecutor.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/NodeRemovalClusterStateTaskExecutor.java @@ -127,7 +127,7 @@ public void onFailure(final String source, final Exception e) { @Override public void onNoLongerMaster(String source) { - logger.debug("no longer master while processing node removal [{}]", source); + logger.debug("no longer cluster-manager while processing node removal [{}]", source); } } diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PeersResponse.java b/server/src/main/java/org/opensearch/cluster/coordination/PeersResponse.java index 76be3ebd3a374..e667052ca5fdd 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/PeersResponse.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/PeersResponse.java @@ -43,27 +43,27 @@ import java.util.Optional; public class PeersResponse extends TransportResponse { - private final Optional masterNode; + private final Optional clusterManagerNode; private final List knownPeers; private final long term; - public PeersResponse(Optional masterNode, List knownPeers, long term) { - assert masterNode.isPresent() == false || knownPeers.isEmpty(); - this.masterNode = masterNode; + public PeersResponse(Optional clusterManagerNode, List knownPeers, long term) { + assert clusterManagerNode.isPresent() == false || knownPeers.isEmpty(); + this.clusterManagerNode = clusterManagerNode; this.knownPeers = knownPeers; this.term = term; } public PeersResponse(StreamInput in) throws IOException { - masterNode = Optional.ofNullable(in.readOptionalWriteable(DiscoveryNode::new)); + clusterManagerNode = 
Optional.ofNullable(in.readOptionalWriteable(DiscoveryNode::new)); knownPeers = in.readList(DiscoveryNode::new); term = in.readLong(); - assert masterNode.isPresent() == false || knownPeers.isEmpty(); + assert clusterManagerNode.isPresent() == false || knownPeers.isEmpty(); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeOptionalWriteable(masterNode.orElse(null)); + out.writeOptionalWriteable(clusterManagerNode.orElse(null)); out.writeList(knownPeers); out.writeLong(term); } @@ -72,7 +72,7 @@ public void writeTo(StreamOutput out) throws IOException { * @return the node that is currently leading, according to the responding node. */ public Optional getMasterNode() { - return masterNode; + return clusterManagerNode; } /** @@ -93,7 +93,7 @@ public long getTerm() { @Override public String toString() { - return "PeersResponse{" + "masterNode=" + masterNode + ", knownPeers=" + knownPeers + ", term=" + term + '}'; + return "PeersResponse{" + "clusterManagerNode=" + clusterManagerNode + ", knownPeers=" + knownPeers + ", term=" + term + '}'; } @Override @@ -101,11 +101,13 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; PeersResponse that = (PeersResponse) o; - return term == that.term && Objects.equals(masterNode, that.masterNode) && Objects.equals(knownPeers, that.knownPeers); + return term == that.term + && Objects.equals(clusterManagerNode, that.clusterManagerNode) + && Objects.equals(knownPeers, that.knownPeers); } @Override public int hashCode() { - return Objects.hash(masterNode, knownPeers, term); + return Objects.hash(clusterManagerNode, knownPeers, term); } } diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java b/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java index ee97c0e07eb48..9a1a392348660 100644 --- 
a/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java @@ -85,7 +85,7 @@ public class PublicationTransportHandler { private final AtomicReference lastSeenClusterState = new AtomicReference<>(); - // the master needs the original non-serialized state as the cluster state contains some volatile information that we + // the cluster-manager needs the original non-serialized state as the cluster state contains some volatile information that we // don't want to be replicated because it's not usable on another node (e.g. UnassignedInfo.unassignedTimeNanos) or // because it's mostly just debugging info that would unnecessarily blow up CS updates (I think there was one in // snapshot code). @@ -337,8 +337,9 @@ public void sendPublishRequest( if (destination.equals(discoveryNodes.getLocalNode())) { // if publishing to self, use original request instead (see currentPublishRequestToSelf for explanation) final PublishRequest previousRequest = currentPublishRequestToSelf.getAndSet(publishRequest); - // we might override an in-flight publication to self in case where we failed as master and became master again, - // and the new publication started before the previous one completed (which fails anyhow because of higher current term) + // we might override an in-flight publication to self in case where we failed as cluster-manager and + // became cluster-manager again, and the new publication started before the previous one completed + // (which fails anyhow because of higher current term) assert previousRequest == null || previousRequest.getAcceptedState().term() < publishRequest.getAcceptedState().term(); responseActionListener = new ActionListener() { @Override diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PublishClusterStateStats.java b/server/src/main/java/org/opensearch/cluster/coordination/PublishClusterStateStats.java 
index 77320810eba4c..b4adad898271e 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/PublishClusterStateStats.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/PublishClusterStateStats.java @@ -50,8 +50,8 @@ public class PublishClusterStateStats implements Writeable, ToXContentObject { private final long compatibleClusterStateDiffReceivedCount; /** - * @param fullClusterStateReceivedCount the number of times this node has received a full copy of the cluster state from the master. - * @param incompatibleClusterStateDiffReceivedCount the number of times this node has received a cluster-state diff from the master. + * @param fullClusterStateReceivedCount the number of times this node has received a full copy of the cluster state from the cluster-manager. + * @param incompatibleClusterStateDiffReceivedCount the number of times this node has received a cluster-state diff from the cluster-manager. * @param compatibleClusterStateDiffReceivedCount the number of times that received cluster-state diffs were compatible with */ public PublishClusterStateStats( diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PublishRequest.java b/server/src/main/java/org/opensearch/cluster/coordination/PublishRequest.java index 76517573115fd..86ae9ce8bc081 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/PublishRequest.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/PublishRequest.java @@ -36,7 +36,7 @@ import java.util.Objects; /** - * Request which is used by the master node to publish cluster state changes. + * Request which is used by the cluster-manager node to publish cluster state changes. 
* Actual serialization of this request is done by {@link PublicationTransportHandler} */ public class PublishRequest { diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Reconfigurator.java b/server/src/main/java/org/opensearch/cluster/coordination/Reconfigurator.java index b38b0cf0f4693..1c26dff45775f 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Reconfigurator.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/Reconfigurator.java @@ -53,8 +53,8 @@ public class Reconfigurator { private static final Logger logger = LogManager.getLogger(Reconfigurator.class); /** - * The cluster usually requires a vote from at least half of the master nodes in order to commit a cluster state update, and to achieve - * the best resilience it makes automatic adjustments to the voting configuration as master nodes join or leave the cluster. Adjustments + * The cluster usually requires a vote from at least half of the cluster-manager nodes in order to commit a cluster state update, and to achieve + * the best resilience it makes automatic adjustments to the voting configuration as cluster-manager nodes join or leave the cluster. Adjustments * that fix or increase the size of the voting configuration are always a good idea, but the wisdom of reducing the voting configuration * size is less clear. For instance, automatically reducing the voting configuration down to a single node means the cluster requires * this node to operate, which is not resilient: if it broke we could restore every other cluster-manager-eligible node in the cluster to health @@ -102,24 +102,24 @@ public String toString() { * @param retiredNodeIds Nodes that are leaving the cluster and which should not appear in the configuration if possible. 
Nodes that are * retired and not in the current configuration will never appear in the resulting configuration; this is useful * for shifting the vote in a 2-node cluster so one of the nodes can be restarted without harming availability. - * @param currentMaster The current master. Unless retired, we prefer to keep the current master in the config. + * @param currentClusterManager The current cluster-manager. Unless retired, we prefer to keep the current cluster-manager in the config. * @param currentConfig The current configuration. As far as possible, we prefer to keep the current config as-is. * @return An optimal configuration, or leave the current configuration unchanged if the optimal configuration has no live quorum. */ public VotingConfiguration reconfigure( Set liveNodes, Set retiredNodeIds, - DiscoveryNode currentMaster, + DiscoveryNode currentClusterManager, VotingConfiguration currentConfig ) { - assert liveNodes.contains(currentMaster) : "liveNodes = " + liveNodes + " master = " + currentMaster; + assert liveNodes.contains(currentClusterManager) : "liveNodes = " + liveNodes + " cluster-manager = " + currentClusterManager; logger.trace( - "{} reconfiguring {} based on liveNodes={}, retiredNodeIds={}, currentMaster={}", + "{} reconfiguring {} based on liveNodes={}, retiredNodeIds={}, currentClusterManager={}", this, currentConfig, liveNodes, retiredNodeIds, - currentMaster + currentClusterManager ); final Set liveNodeIds = liveNodes.stream() @@ -134,7 +134,12 @@ public VotingConfiguration reconfigure( .filter(n -> retiredNodeIds.contains(n.getId()) == false) .forEach( n -> orderedCandidateNodes.add( - new VotingConfigNode(n.getId(), true, n.getId().equals(currentMaster.getId()), currentConfigNodeIds.contains(n.getId())) + new VotingConfigNode( + n.getId(), + true, + n.getId().equals(currentClusterManager.getId()), + currentConfigNodeIds.contains(n.getId()) + ) ) ); currentConfigNodeIds.stream() @@ -166,22 +171,22 @@ public VotingConfiguration 
reconfigure( static class VotingConfigNode implements Comparable { final String id; final boolean live; - final boolean currentMaster; + final boolean currentClusterManager; final boolean inCurrentConfig; - VotingConfigNode(String id, boolean live, boolean currentMaster, boolean inCurrentConfig) { + VotingConfigNode(String id, boolean live, boolean currentClusterManager, boolean inCurrentConfig) { this.id = id; this.live = live; - this.currentMaster = currentMaster; + this.currentClusterManager = currentClusterManager; this.inCurrentConfig = inCurrentConfig; } @Override public int compareTo(VotingConfigNode other) { - // prefer current master - final int currentMasterComp = Boolean.compare(other.currentMaster, currentMaster); - if (currentMasterComp != 0) { - return currentMasterComp; + // prefer current cluster-manager + final int currentClusterManagerComp = Boolean.compare(other.currentClusterManager, currentClusterManager); + if (currentClusterManagerComp != 0) { + return currentClusterManagerComp; } // prefer nodes that are live final int liveComp = Boolean.compare(other.live, live); @@ -205,8 +210,8 @@ public String toString() { + '\'' + ", live=" + live - + ", currentMaster=" - + currentMaster + + ", currentClusterManager=" + + currentClusterManager + ", inCurrentConfig=" + inCurrentConfig + '}'; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapMasterCommand.java b/server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapMasterCommand.java index c6c7e75497e29..6b31c39d71eb3 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapMasterCommand.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapMasterCommand.java @@ -68,12 +68,12 @@ public class UnsafeBootstrapMasterCommand extends OpenSearchNodeCommand { + "\n" + "Do you want to proceed?\n"; - static final String NOT_MASTER_NODE_MSG = "unsafe-bootstrap tool can only be run on cluster-manager eligible 
node"; + static final String NOT_CLUSTER_MANAGER_NODE_MSG = "unsafe-bootstrap tool can only be run on cluster-manager eligible node"; static final String EMPTY_LAST_COMMITTED_VOTING_CONFIG_MSG = "last committed voting voting configuration is empty, cluster has never been bootstrapped?"; - static final String MASTER_NODE_BOOTSTRAPPED_MSG = "Master node was successfully bootstrapped"; + static final String CLUSTER_MANAGER_NODE_BOOTSTRAPPED_MSG = "Cluster-manager node was successfully bootstrapped"; static final Setting UNSAFE_BOOTSTRAP = ClusterService.USER_DEFINED_METADATA.getConcreteSetting( "cluster.metadata.unsafe-bootstrap" ); @@ -92,10 +92,10 @@ public class UnsafeBootstrapMasterCommand extends OpenSearchNodeCommand { @Override protected boolean validateBeforeLock(Terminal terminal, Environment env) { Settings settings = env.settings(); - terminal.println(Terminal.Verbosity.VERBOSE, "Checking node.master setting"); - Boolean master = DiscoveryNode.isMasterNode(settings); - if (master == false) { - throw new OpenSearchException(NOT_MASTER_NODE_MSG); + terminal.println(Terminal.Verbosity.VERBOSE, "Checking node.roles setting"); + Boolean clusterManager = DiscoveryNode.isMasterNode(settings); + if (clusterManager == false) { + throw new OpenSearchException(NOT_CLUSTER_MANAGER_NODE_MSG); } return true; @@ -171,6 +171,6 @@ protected void processNodePaths(Terminal terminal, Path[] dataPaths, int nodeLoc writer.writeFullStateAndCommit(state.v1(), newClusterState); } - terminal.println(MASTER_NODE_BOOTSTRAPPED_MSG); + terminal.println(CLUSTER_MANAGER_NODE_BOOTSTRAPPED_MSG); } } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexUpgradeService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexUpgradeService.java index f6f42e0d81063..eda4833a36c96 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexUpgradeService.java +++ 
b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexUpgradeService.java @@ -93,7 +93,7 @@ public MetadataIndexUpgradeService( } /** - * Checks that the index can be upgraded to the current version of the master node. + * Checks that the index can be upgraded to the current version of the cluster-manager node. * *

    * If the index does not need upgrade it returns the index metadata unchanged, otherwise it returns a modified index metadata. If index diff --git a/server/src/main/java/org/opensearch/cluster/metadata/SystemIndexMetadataUpgradeService.java b/server/src/main/java/org/opensearch/cluster/metadata/SystemIndexMetadataUpgradeService.java index 51a2557ef80bb..f07b74575950c 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/SystemIndexMetadataUpgradeService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/SystemIndexMetadataUpgradeService.java @@ -56,7 +56,7 @@ public class SystemIndexMetadataUpgradeService implements ClusterStateListener { private final SystemIndices systemIndices; private final ClusterService clusterService; - private boolean master = false; + private boolean clusterManager = false; private volatile ImmutableOpenMap lastIndexMetadataMap = ImmutableOpenMap.of(); private volatile boolean updateTaskPending = false; @@ -68,11 +68,11 @@ public SystemIndexMetadataUpgradeService(SystemIndices systemIndices, ClusterSer @Override public void clusterChanged(ClusterChangedEvent event) { - if (event.localNodeMaster() != master) { - this.master = event.localNodeMaster(); + if (event.localNodeMaster() != clusterManager) { + this.clusterManager = event.localNodeMaster(); } - if (master && updateTaskPending == false) { + if (clusterManager && updateTaskPending == false) { final ImmutableOpenMap indexMetadataMap = event.state().metadata().indices(); if (lastIndexMetadataMap != indexMetadataMap) { diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java index 6bd943c5e1d0d..740b7a80dd1d9 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java @@ -73,7 +73,7 @@ public static boolean nodeRequiresLocalStorage(Settings settings) { boolean 
localStorageEnable = Node.NODE_LOCAL_STORAGE_SETTING.get(settings); if (localStorageEnable == false && (isDataNode(settings) || isMasterNode(settings))) { // TODO: make this a proper setting validation logic, requiring multi-settings validation - throw new IllegalArgumentException("storage can not be disabled for master and data nodes"); + throw new IllegalArgumentException("storage can not be disabled for cluster-manager and data nodes"); } return localStorageEnable; } @@ -453,7 +453,7 @@ public boolean isDataNode() { } /** - * Can this node become master or not. + * Can this node become cluster-manager or not. */ public boolean isMasterNode() { return roles.contains(DiscoveryNodeRole.MASTER_ROLE) || roles.contains(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE); diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java index 8d84869bc8bec..9d79157ad5b22 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java @@ -70,10 +70,10 @@ public class DiscoveryNodes extends AbstractDiffable implements private final ImmutableOpenMap nodes; private final ImmutableOpenMap dataNodes; - private final ImmutableOpenMap masterNodes; + private final ImmutableOpenMap clusterManagerNodes; private final ImmutableOpenMap ingestNodes; - private final String masterNodeId; + private final String clusterManagerNodeId; private final String localNodeId; private final Version minNonClientNodeVersion; private final Version maxNonClientNodeVersion; @@ -83,9 +83,9 @@ public class DiscoveryNodes extends AbstractDiffable implements private DiscoveryNodes( ImmutableOpenMap nodes, ImmutableOpenMap dataNodes, - ImmutableOpenMap masterNodes, + ImmutableOpenMap clusterManagerNodes, ImmutableOpenMap ingestNodes, - String masterNodeId, + String clusterManagerNodeId, String localNodeId, Version minNonClientNodeVersion, 
Version maxNonClientNodeVersion, @@ -94,9 +94,9 @@ private DiscoveryNodes( ) { this.nodes = nodes; this.dataNodes = dataNodes; - this.masterNodes = masterNodes; + this.clusterManagerNodes = clusterManagerNodes; this.ingestNodes = ingestNodes; - this.masterNodeId = masterNodeId; + this.clusterManagerNodeId = clusterManagerNodeId; this.localNodeId = localNodeId; this.minNonClientNodeVersion = minNonClientNodeVersion; this.maxNonClientNodeVersion = maxNonClientNodeVersion; @@ -110,14 +110,14 @@ public Iterator iterator() { } /** - * Returns {@code true} if the local node is the elected master node. + * Returns {@code true} if the local node is the elected cluster-manager node. */ public boolean isLocalNodeElectedMaster() { if (localNodeId == null) { // we don't know yet the local node id, return false return false; } - return localNodeId.equals(masterNodeId); + return localNodeId.equals(clusterManagerNodeId); } /** @@ -148,12 +148,12 @@ public ImmutableOpenMap getDataNodes() { } /** - * Get a {@link Map} of the discovered master nodes arranged by their ids + * Get a {@link Map} of the discovered cluster-manager nodes arranged by their ids * - * @return {@link Map} of the discovered master nodes arranged by their ids + * @return {@link Map} of the discovered cluster-manager nodes arranged by their ids */ public ImmutableOpenMap getMasterNodes() { - return this.masterNodes; + return this.clusterManagerNodes; } /** @@ -164,35 +164,35 @@ public ImmutableOpenMap getIngestNodes() { } /** - * Get a {@link Map} of the discovered master and data nodes arranged by their ids + * Get a {@link Map} of the discovered cluster-manager and data nodes arranged by their ids * - * @return {@link Map} of the discovered master and data nodes arranged by their ids + * @return {@link Map} of the discovered cluster-manager and data nodes arranged by their ids */ public ImmutableOpenMap getMasterAndDataNodes() { ImmutableOpenMap.Builder nodes = ImmutableOpenMap.builder(dataNodes); - 
nodes.putAll(masterNodes); + nodes.putAll(clusterManagerNodes); return nodes.build(); } /** - * Get a {@link Map} of the coordinating only nodes (nodes which are neither master, nor data, nor ingest nodes) arranged by their ids + * Get a {@link Map} of the coordinating only nodes (nodes which are neither cluster-manager, nor data, nor ingest nodes) arranged by their ids * * @return {@link Map} of the coordinating only nodes arranged by their ids */ public ImmutableOpenMap getCoordinatingOnlyNodes() { ImmutableOpenMap.Builder nodes = ImmutableOpenMap.builder(this.nodes); - nodes.removeAll(masterNodes.keys()); + nodes.removeAll(clusterManagerNodes.keys()); nodes.removeAll(dataNodes.keys()); nodes.removeAll(ingestNodes.keys()); return nodes.build(); } /** - * Returns a stream of all nodes, with master nodes at the front + * Returns a stream of all nodes, with cluster-manager nodes at the front */ public Stream mastersFirstStream() { return Stream.concat( - StreamSupport.stream(masterNodes.spliterator(), false).map(cur -> cur.value), + StreamSupport.stream(clusterManagerNodes.spliterator(), false).map(cur -> cur.value), StreamSupport.stream(this.spliterator(), false).filter(n -> n.isMasterNode() == false) ); } @@ -230,7 +230,7 @@ public boolean nodeExists(DiscoveryNode node) { /** * Determine if the given node exists and has the right roles. Supported roles vary by version, and our local cluster state might - * have come via an older master, so the roles may differ even if the node is otherwise identical. + * have come via an older cluster-manager, so the roles may differ even if the node is otherwise identical. */ public boolean nodeExistsWithSameRoles(DiscoveryNode discoveryNode) { final DiscoveryNode existing = nodes.get(discoveryNode.getId()); @@ -239,7 +239,7 @@ public boolean nodeExistsWithSameRoles(DiscoveryNode discoveryNode) { /** * Determine if the given node exists and has the right version. 
During upgrade from Elasticsearch version as OpenSearch node run in - * BWC mode and can have the version as 7.10.2 in cluster state from older master to OpenSearch master. + * BWC mode and can have the version as 7.10.2 in cluster state from older cluster-manager to OpenSearch cluster-manager. */ public boolean nodeExistsWithBWCVersion(DiscoveryNode discoveryNode) { final DiscoveryNode existing = nodes.get(discoveryNode.getId()); @@ -250,12 +250,12 @@ public boolean nodeExistsWithBWCVersion(DiscoveryNode discoveryNode) { } /** - * Get the id of the master node + * Get the id of the cluster-manager node * - * @return id of the master + * @return id of the cluster-manager */ public String getMasterNodeId() { - return this.masterNodeId; + return this.clusterManagerNodeId; } /** @@ -277,12 +277,12 @@ public DiscoveryNode getLocalNode() { } /** - * Returns the master node, or {@code null} if there is no master node + * Returns the cluster-manager node, or {@code null} if there is no cluster-manager node */ @Nullable public DiscoveryNode getMasterNode() { - if (masterNodeId != null) { - return nodes.get(masterNodeId); + if (clusterManagerNodeId != null) { + return nodes.get(clusterManagerNodeId); } return null; } @@ -394,9 +394,9 @@ public String[] resolveNodes(String... nodes) { resolvedNodesIds.add(localNodeId); } } else if (nodeId.equals("_master") || nodeId.equals("_cluster_manager")) { - String masterNodeId = getMasterNodeId(); - if (masterNodeId != null) { - resolvedNodesIds.add(masterNodeId); + String clusterManagerNodeId = getMasterNodeId(); + if (clusterManagerNodeId != null) { + resolvedNodesIds.add(clusterManagerNodeId); } } else if (nodeExists(nodeId)) { resolvedNodesIds.add(nodeId); @@ -421,9 +421,9 @@ public String[] resolveNodes(String... 
nodes) { } } else if (roleNameIsClusterManager(matchAttrName)) { if (Booleans.parseBoolean(matchAttrValue, true)) { - resolvedNodesIds.addAll(masterNodes.keys()); + resolvedNodesIds.addAll(clusterManagerNodes.keys()); } else { - resolvedNodesIds.removeAll(masterNodes.keys()); + resolvedNodesIds.removeAll(clusterManagerNodes.keys()); } } else if (DiscoveryNodeRole.INGEST_ROLE.roleName().equals(matchAttrName)) { if (Booleans.parseBoolean(matchAttrValue, true)) { @@ -506,7 +506,7 @@ public String toString() { sb.append(", local"); } if (node == getMasterNode()) { - sb.append(", master"); + sb.append(", cluster-manager"); } sb.append("\n"); } @@ -517,21 +517,21 @@ public static class Delta { private final String localNodeId; @Nullable - private final DiscoveryNode previousMasterNode; + private final DiscoveryNode previousClusterManagerNode; @Nullable - private final DiscoveryNode newMasterNode; + private final DiscoveryNode newClusterManagerNode; private final List removed; private final List added; private Delta( - @Nullable DiscoveryNode previousMasterNode, - @Nullable DiscoveryNode newMasterNode, + @Nullable DiscoveryNode previousClusterManagerNode, + @Nullable DiscoveryNode newClusterManagerNode, String localNodeId, List removed, List added ) { - this.previousMasterNode = previousMasterNode; - this.newMasterNode = newMasterNode; + this.previousClusterManagerNode = previousClusterManagerNode; + this.newClusterManagerNode = newClusterManagerNode; this.localNodeId = localNodeId; this.removed = removed; this.added = added; @@ -542,17 +542,17 @@ public boolean hasChanges() { } public boolean masterNodeChanged() { - return Objects.equals(newMasterNode, previousMasterNode) == false; + return Objects.equals(newClusterManagerNode, previousClusterManagerNode) == false; } @Nullable - public DiscoveryNode previousMasterNode() { - return previousMasterNode; + public DiscoveryNode previousClusterManagerNode() { + return previousClusterManagerNode; } @Nullable public 
DiscoveryNode newMasterNode() { - return newMasterNode; + return newClusterManagerNode; } public boolean removed() { @@ -575,8 +575,8 @@ public String shortSummary() { final StringBuilder summary = new StringBuilder(); if (masterNodeChanged()) { summary.append("cluster-manager node changed {previous ["); - if (previousMasterNode() != null) { - summary.append(previousMasterNode()); + if (previousClusterManagerNode() != null) { + summary.append(previousClusterManagerNode()); } summary.append("], current ["); if (newMasterNode() != null) { @@ -609,11 +609,11 @@ public String shortSummary() { @Override public void writeTo(StreamOutput out) throws IOException { - if (masterNodeId == null) { + if (clusterManagerNodeId == null) { out.writeBoolean(false); } else { out.writeBoolean(true); - out.writeString(masterNodeId); + out.writeString(clusterManagerNodeId); } out.writeVInt(nodes.size()); for (DiscoveryNode node : this) { @@ -659,7 +659,7 @@ public static Builder builder(DiscoveryNodes nodes) { public static class Builder { private final ImmutableOpenMap.Builder nodes; - private String masterNodeId; + private String clusterManagerNodeId; private String localNodeId; public Builder() { @@ -667,7 +667,7 @@ public Builder() { } public Builder(DiscoveryNodes nodes) { - this.masterNodeId = nodes.getMasterNodeId(); + this.clusterManagerNodeId = nodes.getMasterNodeId(); this.localNodeId = nodes.getLocalNodeId(); this.nodes = ImmutableOpenMap.builder(nodes.getNodes()); } @@ -712,8 +712,8 @@ public Builder remove(DiscoveryNode node) { return this; } - public Builder masterNodeId(String masterNodeId) { - this.masterNodeId = masterNodeId; + public Builder masterNodeId(String clusterManagerNodeId) { + this.clusterManagerNodeId = clusterManagerNodeId; return this; } @@ -784,7 +784,7 @@ public DiscoveryNodes build() { dataNodesBuilder.build(), masterNodesBuilder.build(), ingestNodesBuilder.build(), - masterNodeId, + clusterManagerNodeId, localNodeId, minNonClientNodeVersion == null ? 
Version.CURRENT : minNonClientNodeVersion, maxNonClientNodeVersion == null ? Version.CURRENT : maxNonClientNodeVersion, @@ -794,7 +794,7 @@ public DiscoveryNodes build() { } public boolean isLocalNodeElectedMaster() { - return masterNodeId != null && masterNodeId.equals(localNodeId); + return clusterManagerNodeId != null && clusterManagerNodeId.equals(localNodeId); } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/BatchedRerouteService.java b/server/src/main/java/org/opensearch/cluster/routing/BatchedRerouteService.java index 05c11e112364a..9139c72577c9f 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/BatchedRerouteService.java +++ b/server/src/main/java/org/opensearch/cluster/routing/BatchedRerouteService.java @@ -50,7 +50,7 @@ /** * A {@link BatchedRerouteService} is a {@link RerouteService} that batches together reroute requests to avoid unnecessary extra reroutes. - * This component only does meaningful work on the elected master node. Reroute requests will fail with a {@link NotMasterException} on + * This component only does meaningful work on the elected cluster-manager node. Reroute requests will fail with a {@link NotMasterException} on * other nodes. 
*/ public class BatchedRerouteService implements RerouteService { @@ -146,7 +146,7 @@ public void onNoLongerMaster(String source) { } } ActionListener.onFailure(currentListeners, new NotMasterException("delayed reroute [" + reason + "] cancelled")); - // no big deal, the new master will reroute again + // no big deal, the new cluster-manager will reroute again } @Override diff --git a/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java b/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java index 543a6cba2e91b..1b0639bc98306 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java @@ -268,7 +268,7 @@ public Snapshot snapshot() { /** * Gets the {@link IndexId} of the recovery source. May contain {@link IndexMetadata#INDEX_UUID_NA_VALUE} as the index uuid if it - * was created by an older version master in a mixed version cluster. + * was created by an older version cluster-manager in a mixed version cluster. * * @return IndexId */ diff --git a/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java b/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java index 09f1708b01307..8f82d6dcee318 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java +++ b/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java @@ -301,7 +301,7 @@ public UnassignedInfo(StreamInput in) throws IOException { this.reason = Reason.values()[(int) in.readByte()]; this.unassignedTimeMillis = in.readLong(); // As System.nanoTime() cannot be compared across different JVMs, reset it to now. - // This means that in master fail-over situations, elapsed delay time is forgotten. + // This means that in cluster-manager fail-over situations, elapsed delay time is forgotten. 
this.unassignedTimeNanos = System.nanoTime(); this.delayed = in.readBoolean(); this.message = in.readOptionalString(); diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/ExistingShardsAllocator.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/ExistingShardsAllocator.java index ca0744f099f84..9286ca3dd533d 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/ExistingShardsAllocator.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/ExistingShardsAllocator.java @@ -84,7 +84,7 @@ void allocateUnassigned( AllocateUnassignedDecision explainUnassignedShardAllocation(ShardRouting unassignedShard, RoutingAllocation routingAllocation); /** - * Called when this node becomes the elected master and when it stops being the elected master, so that implementations can clean up any + * Called when this node becomes the elected cluster-manager and when it stops being the elected cluster-manager, so that implementations can clean up any * in-flight activity from an earlier mastership. 
*/ void cleanCaches(); diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java index 20f8c0a00e32b..221dd3ee55b21 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java @@ -270,7 +270,7 @@ public void removeTimeoutListener(TimeoutClusterStateListener listener) { } /** - * Add a listener for on/off local node master events + * Add a listener for on/off local node cluster-manager events */ public void addLocalNodeMasterListener(LocalNodeMasterListener listener) { addListener(listener); diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterService.java index 46d65f310a427..27b8e6d29ee49 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterService.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterService.java @@ -207,7 +207,7 @@ public void removeListener(ClusterStateListener listener) { } /** - * Add a listener for on/off local node master events + * Add a listener for on/off local node cluster-manager events */ public void addLocalNodeMasterListener(LocalNodeMasterListener listener) { clusterApplierService.addLocalNodeMasterListener(listener); diff --git a/server/src/main/java/org/opensearch/cluster/service/MasterService.java b/server/src/main/java/org/opensearch/cluster/service/MasterService.java index ad0bc599420f1..1aa2ea921e4b0 100644 --- a/server/src/main/java/org/opensearch/cluster/service/MasterService.java +++ b/server/src/main/java/org/opensearch/cluster/service/MasterService.java @@ -228,14 +228,14 @@ private static boolean isMasterUpdateThread() { } public static boolean assertMasterUpdateThread() { - assert isMasterUpdateThread() : "not called from the master service thread"; + assert 
isMasterUpdateThread() : "not called from the cluster-manager service thread"; return true; } public static boolean assertNotMasterUpdateThread(String reason) { assert isMasterUpdateThread() == false : "Expected current thread [" + Thread.currentThread() - + "] to not be the master service thread. Reason: [" + + "] to not be the cluster-manager service thread. Reason: [" + reason + "]"; return true; @@ -244,16 +244,16 @@ assert isMasterUpdateThread() == false : "Expected current thread [" private void runTasks(TaskInputs taskInputs) { final String summary = taskInputs.summary; if (!lifecycle.started()) { - logger.debug("processing [{}]: ignoring, master service not started", summary); + logger.debug("processing [{}]: ignoring, cluster-manager service not started", summary); return; } logger.debug("executing cluster state update for [{}]", summary); final ClusterState previousClusterState = state(); - if (!previousClusterState.nodes().isLocalNodeElectedMaster() && taskInputs.runOnlyWhenMaster()) { - logger.debug("failing [{}]: local node is no longer master", summary); - taskInputs.onNoLongerMaster(); + if (!previousClusterState.nodes().isLocalNodeElectedMaster() && taskInputs.runOnlyWhenClusterManager()) { + logger.debug("failing [{}]: local node is no longer cluster-manager", summary); + taskInputs.onNoLongerClusterManager(); return; } @@ -402,7 +402,7 @@ private ClusterState patchVersions(ClusterState previousClusterState, ClusterTas ClusterState newClusterState = executionResult.resultingState; if (previousClusterState != newClusterState) { - // only the master controls the version numbers + // only the cluster-manager controls the version numbers Builder builder = incrementVersion(newClusterState); if (previousClusterState.routingTable() != newClusterState.routingTable()) { builder.routingTable( @@ -616,7 +616,10 @@ public void onNoLongerMaster(String source) { listener.onNoLongerMaster(source); } catch (Exception e) { logger.error( - () -> new
ParameterizedMessage("exception thrown by listener while notifying no longer master from [{}]", source), + () -> new ParameterizedMessage( + "exception thrown by listener while notifying no longer cluster-manager from [{}]", + source + ), e ); } @@ -722,7 +725,7 @@ private static class AckCountDownListener implements Discovery.AckListener { private final AckedClusterStateTaskListener ackedTaskListener; private final CountDown countDown; - private final DiscoveryNode masterNode; + private final DiscoveryNode clusterManagerNode; private final ThreadPool threadPool; private final long clusterStateVersion; private volatile Scheduler.Cancellable ackTimeoutCallback; @@ -737,11 +740,11 @@ private static class AckCountDownListener implements Discovery.AckListener { this.ackedTaskListener = ackedTaskListener; this.clusterStateVersion = clusterStateVersion; this.threadPool = threadPool; - this.masterNode = nodes.getMasterNode(); + this.clusterManagerNode = nodes.getMasterNode(); int countDown = 0; for (DiscoveryNode node : nodes) { - // we always wait for at least the master node - if (node.equals(masterNode) || ackedTaskListener.mustAck(node)) { + // we always wait for at least the cluster-manager node + if (node.equals(clusterManagerNode) || ackedTaskListener.mustAck(node)) { countDown++; } } @@ -771,7 +774,7 @@ public void onCommit(TimeValue commitTime) { @Override public void onNodeAck(DiscoveryNode node, @Nullable Exception e) { - if (node.equals(masterNode) == false && ackedTaskListener.mustAck(node) == false) { + if (node.equals(clusterManagerNode) == false && ackedTaskListener.mustAck(node) == false) { return; } if (e == null) { @@ -879,11 +882,11 @@ private class TaskInputs { this.updateTasks = updateTasks; } - boolean runOnlyWhenMaster() { + boolean runOnlyWhenClusterManager() { return executor.runOnlyOnMaster(); } - void onNoLongerMaster() { + void onNoLongerClusterManager() { updateTasks.forEach(task -> task.listener.onNoLongerMaster(task.source())); } } diff 
--git a/server/src/main/java/org/opensearch/common/settings/ConsistentSettingsService.java b/server/src/main/java/org/opensearch/common/settings/ConsistentSettingsService.java index fbb37651198d4..8f3cb9e90ee56 100644 --- a/server/src/main/java/org/opensearch/common/settings/ConsistentSettingsService.java +++ b/server/src/main/java/org/opensearch/common/settings/ConsistentSettingsService.java @@ -87,7 +87,7 @@ public ConsistentSettingsService(Settings settings, ClusterService clusterServic /** * Returns a {@link LocalNodeMasterListener} that will publish hashes of all the settings passed in the constructor. These hashes are - * published by the master node only. Note that this is not designed for {@link SecureSettings} implementations that are mutable. + * published by the cluster-manager node only. Note that this is not designed for {@link SecureSettings} implementations that are mutable. */ public LocalNodeMasterListener newHashPublisher() { // eagerly compute hashes to be published @@ -116,7 +116,7 @@ public boolean areAllConsistent() { concreteSecureSetting.getKey() ); } else if (publishedSaltAndHash == null && localHash != null) { - // setting missing on master but present locally + // setting missing on cluster-manager but present locally logger.warn( "no published hash for the consistent secure setting [{}] but it exists on the local node", concreteSecureSetting.getKey() @@ -256,7 +256,7 @@ static final class HashesPublisher implements LocalNodeMasterListener { } @Override - public void onMaster() { + public void onClusterManager() { clusterService.submitStateUpdateTask("publish-secure-settings-hashes", new ClusterStateUpdateTask(Priority.URGENT) { @Override public ClusterState execute(ClusterState currentState) { @@ -282,7 +282,7 @@ public void onFailure(String source, Exception e) { } @Override - public void offMaster() { + public void offClusterManager() { logger.trace("I am no longer master, nothing to do"); } } diff --git 
a/server/src/main/java/org/opensearch/discovery/AckClusterStatePublishResponseHandler.java b/server/src/main/java/org/opensearch/discovery/AckClusterStatePublishResponseHandler.java index 161450308b384..e281bbbfacba1 100644 --- a/server/src/main/java/org/opensearch/discovery/AckClusterStatePublishResponseHandler.java +++ b/server/src/main/java/org/opensearch/discovery/AckClusterStatePublishResponseHandler.java @@ -53,11 +53,11 @@ public class AckClusterStatePublishResponseHandler extends BlockingClusterStateP * Creates a new AckClusterStatePublishResponseHandler * @param publishingToNodes the set of nodes to which the cluster state will be published and should respond * @param ackListener the {@link org.opensearch.discovery.Discovery.AckListener} to notify for each response - * gotten from non master nodes + * gotten from non cluster-manager nodes */ public AckClusterStatePublishResponseHandler(Set publishingToNodes, Discovery.AckListener ackListener) { - // Don't count the master as acknowledged, because it's not done yet - // otherwise we might end up with all the nodes but the master holding the latest cluster state + // Don't count the cluster-manager as acknowledged, because it's not done yet + // otherwise we might end up with all the nodes but the cluster-manager holding the latest cluster state super(publishingToNodes); this.ackListener = ackListener; } diff --git a/server/src/main/java/org/opensearch/discovery/BlockingClusterStatePublishResponseHandler.java b/server/src/main/java/org/opensearch/discovery/BlockingClusterStatePublishResponseHandler.java index c0cd390b66f78..6ee01d055be04 100644 --- a/server/src/main/java/org/opensearch/discovery/BlockingClusterStatePublishResponseHandler.java +++ b/server/src/main/java/org/opensearch/discovery/BlockingClusterStatePublishResponseHandler.java @@ -41,8 +41,8 @@ import java.util.concurrent.TimeUnit; /** - * Handles responses obtained when publishing a new cluster state from master to all non master nodes. 
- * Allows to await a reply from all non master nodes, up to a timeout + * Handles responses obtained when publishing a new cluster state from cluster-manager to all non cluster-manager nodes. + * Allows to await a reply from all non cluster-manager nodes, up to a timeout */ public class BlockingClusterStatePublishResponseHandler { @@ -62,7 +62,7 @@ public BlockingClusterStatePublishResponseHandler(Set publishingT } /** - * Called for each response obtained from non master nodes + * Called for each response obtained from non cluster-manager nodes * * @param node the node that replied to the publish event */ @@ -73,7 +73,7 @@ public void onResponse(DiscoveryNode node) { } /** - * Called for each failure obtained from non master nodes + * Called for each failure obtained from non cluster-manager nodes * @param node the node that replied to the publish event */ public void onFailure(DiscoveryNode node, Exception e) { @@ -85,7 +85,7 @@ public void onFailure(DiscoveryNode node, Exception e) { } /** - * Allows to wait for all non master nodes to reply to the publish event up to a timeout + * Allows to wait for all non cluster-manager nodes to reply to the publish event up to a timeout * @param timeout the timeout * @return true if the timeout expired or not, false otherwise */ diff --git a/server/src/main/java/org/opensearch/discovery/Discovery.java b/server/src/main/java/org/opensearch/discovery/Discovery.java index ac5028f6dfc51..25d136d8a2563 100644 --- a/server/src/main/java/org/opensearch/discovery/Discovery.java +++ b/server/src/main/java/org/opensearch/discovery/Discovery.java @@ -37,7 +37,7 @@ /** * A pluggable module allowing to implement discovery of other nodes, publishing of the cluster - * state to all nodes, electing a master of the cluster that raises cluster state change + * state to all nodes, electing a cluster-manager of the cluster that raises cluster state change * events. 
*/ public interface Discovery extends LifecycleComponent, ClusterStatePublisher { diff --git a/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java b/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java index 427615da7e4d0..af3d07a1b12d5 100644 --- a/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java +++ b/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java @@ -119,7 +119,7 @@ public DiscoveryModule( TransportService transportService, NamedWriteableRegistry namedWriteableRegistry, NetworkService networkService, - MasterService masterService, + MasterService clusterManagerService, ClusterApplier clusterApplier, ClusterSettings clusterSettings, List plugins, @@ -195,7 +195,7 @@ public DiscoveryModule( transportService, namedWriteableRegistry, allocationService, - masterService, + clusterManagerService, gatewayMetaState::getPersistedState, seedHostsProvider, clusterApplier, diff --git a/server/src/main/java/org/opensearch/discovery/PeerFinder.java b/server/src/main/java/org/opensearch/discovery/PeerFinder.java index fe669e7b6d073..c174016925696 100644 --- a/server/src/main/java/org/opensearch/discovery/PeerFinder.java +++ b/server/src/main/java/org/opensearch/discovery/PeerFinder.java @@ -208,7 +208,7 @@ private DiscoveryNode getLocalNode() { * Invoked on receipt of a PeersResponse from a node that believes it's an active leader, which this node should therefore try and join. * Note that invocations of this method are not synchronised. By the time it is called we may have been deactivated. */ - protected abstract void onActiveMasterFound(DiscoveryNode masterNode, long term); + protected abstract void onActiveClusterManagerFound(DiscoveryNode clusterManagerNode, long term); /** * Invoked when the set of found peers changes. 
Note that invocations of this method are not fully synchronised, so we only guarantee @@ -449,7 +449,7 @@ public void handleResponse(PeersResponse response) { if (response.getMasterNode().equals(Optional.of(discoveryNode))) { // Must not hold lock here to avoid deadlock assert holdsLock() == false : "PeerFinder mutex is held in error"; - onActiveMasterFound(discoveryNode, response.getTerm()); + onActiveClusterManagerFound(discoveryNode, response.getTerm()); } } diff --git a/server/src/main/java/org/opensearch/env/NodeRepurposeCommand.java b/server/src/main/java/org/opensearch/env/NodeRepurposeCommand.java index cb431a6a5d0de..dd4819f5804ac 100644 --- a/server/src/main/java/org/opensearch/env/NodeRepurposeCommand.java +++ b/server/src/main/java/org/opensearch/env/NodeRepurposeCommand.java @@ -92,13 +92,13 @@ protected void processNodePaths(Terminal terminal, Path[] dataPaths, int nodeLoc assert DiscoveryNode.isDataNode(env.settings()) == false; if (DiscoveryNode.isMasterNode(env.settings()) == false) { - processNoMasterNoDataNode(terminal, dataPaths, env); + processNoClusterManagerNoDataNode(terminal, dataPaths, env); } else { - processMasterNoDataNode(terminal, dataPaths, env); + processClusterManagerNoDataNode(terminal, dataPaths, env); } } - private void processNoMasterNoDataNode(Terminal terminal, Path[] dataPaths, Environment env) throws IOException { + private void processNoClusterManagerNoDataNode(Terminal terminal, Path[] dataPaths, Environment env) throws IOException { NodeEnvironment.NodePath[] nodePaths = toNodePaths(dataPaths); terminal.println(Terminal.Verbosity.VERBOSE, "Collecting shard data paths"); @@ -126,7 +126,7 @@ private void processNoMasterNoDataNode(Terminal terminal, Path[] dataPaths, Envi outputVerboseInformation(terminal, indexPaths, indexUUIDs, metadata); - terminal.println(noMasterMessage(indexUUIDs.size(), shardDataPaths.size(), indexMetadataPaths.size())); + terminal.println(noClusterManagerMessage(indexUUIDs.size(), 
shardDataPaths.size(), indexMetadataPaths.size())); outputHowToSeeVerboseInformation(terminal); terminal.println("Node is being re-purposed as no-cluster-manager and no-data. Clean-up of index data will be performed."); @@ -140,7 +140,7 @@ private void processNoMasterNoDataNode(Terminal terminal, Path[] dataPaths, Envi terminal.println("Node successfully repurposed to no-cluster-manager and no-data."); } - private void processMasterNoDataNode(Terminal terminal, Path[] dataPaths, Environment env) throws IOException { + private void processClusterManagerNoDataNode(Terminal terminal, Path[] dataPaths, Environment env) throws IOException { NodeEnvironment.NodePath[] nodePaths = toNodePaths(dataPaths); terminal.println(Terminal.Verbosity.VERBOSE, "Collecting shard data paths"); @@ -205,7 +205,7 @@ private Set indexUUIDsFor(Set indexPaths) { return indexPaths.stream().map(Path::getFileName).map(Path::toString).collect(Collectors.toSet()); } - static String noMasterMessage(int indexes, int shards, int indexMetadata) { + static String noClusterManagerMessage(int indexes, int shards, int indexMetadata) { return "Found " + indexes + " indices (" + shards + " shards and " + indexMetadata + " index meta data) to clean up"; } diff --git a/server/src/main/java/org/opensearch/gateway/DanglingIndicesState.java b/server/src/main/java/org/opensearch/gateway/DanglingIndicesState.java index 3baa5bfb9e410..25a1096919939 100644 --- a/server/src/main/java/org/opensearch/gateway/DanglingIndicesState.java +++ b/server/src/main/java/org/opensearch/gateway/DanglingIndicesState.java @@ -261,7 +261,7 @@ private IndexMetadata stripAliases(IndexMetadata indexMetadata) { } /** - * Allocates the detected list of dangling indices by sending them to the master node + * Allocates the detected list of dangling indices by sending them to the cluster-manager node * for allocation, provided auto-import is enabled via the * {@link #AUTO_IMPORT_DANGLING_INDICES_SETTING} setting. 
* @param metadata the current cluster metadata, used to filter out dangling indices that cannot be allocated diff --git a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java index 3081c4da8f7a7..0ca70f37afa83 100644 --- a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java +++ b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java @@ -90,12 +90,12 @@ * When started, ensures that this version is compatible with the state stored on disk, and performs a state upgrade if necessary. Note that * the state being loaded when constructing the instance of this class is not necessarily the state that will be used as {@link * ClusterState#metadata()} because it might be stale or incomplete. Cluster-manager-eligible nodes must perform an election to find a complete and - * non-stale state, and master-ineligible nodes receive the real cluster state from the elected master after joining the cluster. + * non-stale state, and cluster-manager-ineligible nodes receive the real cluster state from the elected cluster-manager after joining the cluster. */ public class GatewayMetaState implements Closeable { /** - * Fake node ID for a voting configuration written by a master-ineligible data node to indicate that its on-disk state is potentially + * Fake node ID for a voting configuration written by a cluster-manager-ineligible data node to indicate that its on-disk state is potentially * stale (since it is written asynchronously after application, rather than before acceptance). This node ID means that if the node is * restarted as a cluster-manager-eligible node then it does not win any elections until it has received a fresh cluster state. 
*/ @@ -502,8 +502,8 @@ static class LucenePersistedState implements PersistedState { // (2) the index is currently empty since it was opened with IndexWriterConfig.OpenMode.CREATE // In the common case it's actually sufficient to commit() the existing state and not do any indexing. For instance, - // this is true if there's only one data path on this master node, and the commit we just loaded was already written out - // by this version of OpenSearch. TODO TBD should we avoid indexing when possible? + // this is true if there's only one data path on this cluster-manager node, and the commit we just loaded was already written + // out by this version of OpenSearch. TODO TBD should we avoid indexing when possible? final PersistedClusterStateService.Writer writer = persistedClusterStateService.createWriter(); try { writer.writeFullStateAndCommit(currentTerm, lastAcceptedState); diff --git a/server/src/main/java/org/opensearch/gateway/GatewayService.java b/server/src/main/java/org/opensearch/gateway/GatewayService.java index 47347cea50e27..1a0efbcdf5bfb 100644 --- a/server/src/main/java/org/opensearch/gateway/GatewayService.java +++ b/server/src/main/java/org/opensearch/gateway/GatewayService.java @@ -132,8 +132,8 @@ public class GatewayService extends AbstractLifecycleComponent implements Cluste private final int expectedNodes; private final int recoverAfterDataNodes; private final int expectedDataNodes; - private final int recoverAfterMasterNodes; - private final int expectedMasterNodes; + private final int recoverAfterClusterManagerNodes; + private final int expectedClusterManagerNodes; private final Runnable recoveryRunnable; @@ -155,22 +155,22 @@ public GatewayService( // allow to control a delay of when indices will get created this.expectedNodes = EXPECTED_NODES_SETTING.get(settings); this.expectedDataNodes = EXPECTED_DATA_NODES_SETTING.get(settings); - this.expectedMasterNodes = EXPECTED_MASTER_NODES_SETTING.get(settings); + this.expectedClusterManagerNodes = 
EXPECTED_MASTER_NODES_SETTING.get(settings); if (RECOVER_AFTER_TIME_SETTING.exists(settings)) { recoverAfterTime = RECOVER_AFTER_TIME_SETTING.get(settings); - } else if (expectedNodes >= 0 || expectedDataNodes >= 0 || expectedMasterNodes >= 0) { + } else if (expectedNodes >= 0 || expectedDataNodes >= 0 || expectedClusterManagerNodes >= 0) { recoverAfterTime = DEFAULT_RECOVER_AFTER_TIME_IF_EXPECTED_NODES_IS_SET; } else { recoverAfterTime = null; } this.recoverAfterNodes = RECOVER_AFTER_NODES_SETTING.get(settings); this.recoverAfterDataNodes = RECOVER_AFTER_DATA_NODES_SETTING.get(settings); - // default the recover after master nodes to the minimum master nodes in the discovery + // default the recover after cluster-manager nodes to the minimum cluster-manager nodes in the discovery if (RECOVER_AFTER_MASTER_NODES_SETTING.exists(settings)) { - recoverAfterMasterNodes = RECOVER_AFTER_MASTER_NODES_SETTING.get(settings); + recoverAfterClusterManagerNodes = RECOVER_AFTER_MASTER_NODES_SETTING.get(settings); } else { - recoverAfterMasterNodes = -1; + recoverAfterClusterManagerNodes = -1; } if (discovery instanceof Coordinator) { @@ -216,7 +216,7 @@ public void clusterChanged(final ClusterChangedEvent event) { final DiscoveryNodes nodes = state.nodes(); if (state.nodes().getMasterNodeId() == null) { - logger.debug("not recovering from gateway, no master elected yet"); + logger.debug("not recovering from gateway, no cluster-manager elected yet"); } else if (recoverAfterNodes != -1 && (nodes.getMasterAndDataNodes().size()) < recoverAfterNodes) { logger.debug( "not recovering from gateway, nodes_size (data+master) [{}] < recover_after_nodes [{}]", @@ -229,16 +229,16 @@ public void clusterChanged(final ClusterChangedEvent event) { nodes.getDataNodes().size(), recoverAfterDataNodes ); - } else if (recoverAfterMasterNodes != -1 && nodes.getMasterNodes().size() < recoverAfterMasterNodes) { + } else if (recoverAfterClusterManagerNodes != -1 && nodes.getMasterNodes().size() < 
recoverAfterClusterManagerNodes) { logger.debug( "not recovering from gateway, nodes_size (master) [{}] < recover_after_master_nodes [{}]", nodes.getMasterNodes().size(), - recoverAfterMasterNodes + recoverAfterClusterManagerNodes ); } else { boolean enforceRecoverAfterTime; String reason; - if (expectedNodes == -1 && expectedMasterNodes == -1 && expectedDataNodes == -1) { + if (expectedNodes == -1 && expectedClusterManagerNodes == -1 && expectedDataNodes == -1) { // no expected is set, honor the setting if they are there enforceRecoverAfterTime = true; reason = "recover_after_time was set to [" + recoverAfterTime + "]"; @@ -252,10 +252,14 @@ public void clusterChanged(final ClusterChangedEvent event) { } else if (expectedDataNodes != -1 && (nodes.getDataNodes().size() < expectedDataNodes)) { // does not meet the expected... enforceRecoverAfterTime = true; reason = "expecting [" + expectedDataNodes + "] data nodes, but only have [" + nodes.getDataNodes().size() + "]"; - } else if (expectedMasterNodes != -1 && (nodes.getMasterNodes().size() < expectedMasterNodes)) { + } else if (expectedClusterManagerNodes != -1 && (nodes.getMasterNodes().size() < expectedClusterManagerNodes)) { // does not meet the expected... 
enforceRecoverAfterTime = true; - reason = "expecting [" + expectedMasterNodes + "] master nodes, but only have [" + nodes.getMasterNodes().size() + "]"; + reason = "expecting [" + + expectedClusterManagerNodes + + "] cluster-manager nodes, but only have [" + + nodes.getMasterNodes().size() + + "]"; } } performStateRecovery(enforceRecoverAfterTime, reason); @@ -333,7 +337,7 @@ public void clusterStateProcessed(final String source, final ClusterState oldSta @Override public void onNoLongerMaster(String source) { - logger.debug("stepped down as master before recovering state [{}]", source); + logger.debug("stepped down as cluster-manager before recovering state [{}]", source); resetRecoveredFlags(); } diff --git a/server/src/main/java/org/opensearch/gateway/LocalAllocateDangledIndices.java b/server/src/main/java/org/opensearch/gateway/LocalAllocateDangledIndices.java index c8ace3d218864..4c29bc6f2692f 100644 --- a/server/src/main/java/org/opensearch/gateway/LocalAllocateDangledIndices.java +++ b/server/src/main/java/org/opensearch/gateway/LocalAllocateDangledIndices.java @@ -103,9 +103,9 @@ public LocalAllocateDangledIndices( public void allocateDangled(Collection indices, ActionListener listener) { ClusterState clusterState = clusterService.state(); - DiscoveryNode masterNode = clusterState.nodes().getMasterNode(); - if (masterNode == null) { - listener.onFailure(new MasterNotDiscoveredException("no master to send allocate dangled request")); + DiscoveryNode clusterManagerNode = clusterState.nodes().getMasterNode(); + if (clusterManagerNode == null) { + listener.onFailure(new MasterNotDiscoveredException("no cluster-manager to send allocate dangled request")); return; } AllocateDangledRequest request = new AllocateDangledRequest( @@ -113,7 +113,7 @@ public void allocateDangled(Collection indices, ActionListener(listener, AllocateDangledResponse::new, ThreadPool.Names.SAME) diff --git a/server/src/main/java/org/opensearch/index/mapper/DocumentParser.java 
b/server/src/main/java/org/opensearch/index/mapper/DocumentParser.java index f9d6187d60eb8..df8eb8f38cfcb 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/opensearch/index/mapper/DocumentParser.java @@ -461,7 +461,7 @@ private static void nested(ParseContext context, ObjectMapper.Nested nested) { } if (nested.isIncludeInRoot()) { ParseContext.Document rootDoc = context.rootDoc(); - // don't add it twice, if its included in parent, and we are handling the master doc... + // don't add it twice, if its included in parent, and we are handling the cluster-manager doc... if (!nested.isIncludeInParent() || parentDoc != rootDoc) { addFields(indexVersion, nestedDoc, rootDoc); } diff --git a/server/src/main/java/org/opensearch/index/mapper/MapperService.java b/server/src/main/java/org/opensearch/index/mapper/MapperService.java index 819df4a6f396e..33c6ff2bc4391 100644 --- a/server/src/main/java/org/opensearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/opensearch/index/mapper/MapperService.java @@ -94,7 +94,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable { */ public enum MergeReason { /** - * Pre-flight check before sending a mapping update to the master + * Pre-flight check before sending a mapping update to the cluster-manager */ MAPPING_UPDATE_PREFLIGHT, /** @@ -303,7 +303,7 @@ public boolean updateMapping(final IndexMetadata currentIndexMetadata, final Ind } // refresh mapping can happen when the parsing/merging of the mapping from the metadata doesn't result in the same - // mapping, in this case, we send to the master to refresh its own version of the mappings (to conform with the + // mapping, in this case, we send to the cluster-manager to refresh its own version of the mappings (to conform with the // merge version of it, which it does when refreshing the mappings), and warn log it. 
if (documentMapper().mappingSource().equals(incomingMappingSource) == false) { logger.debug( diff --git a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java index 52e858bfdaa92..99509ad5d1da9 100644 --- a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java @@ -84,7 +84,7 @@ * This class is responsible for tracking the replication group with its progress and safety markers (local and global checkpoints). * * The global checkpoint is the highest sequence number for which all lower (or equal) sequence number have been processed - * on all shards that are currently active. Since shards count as "active" when the master starts + * on all shards that are currently active. Since shards count as "active" when the cluster-manager starts * them, and before this primary shard has been notified of this fact, we also include shards that have completed recovery. These shards * have received all old operations via the recovery mechanism and are kept up to date by the various replications actions. The set of * shards that are taken into account for the global checkpoint calculation are called the "in-sync shards". @@ -137,7 +137,7 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L * The reason for this is that the handoff might fail and can be aborted (using {@link #abortRelocationHandoff}), in which case * it is important that the global checkpoint tracker does not miss any state updates that might happened during the handoff attempt. * This means, however, that the global checkpoint can still advance after the primary relocation handoff has been initiated, but only - * because the master could have failed some of the in-sync shard copies and marked them as stale. 
That is ok though, as this + * because the cluster-manager could have failed some of the in-sync shard copies and marked them as stale. That is ok though, as this * information is conveyed through cluster state updates, and the new primary relocation target will also eventually learn about those. */ boolean handoffInProgress; @@ -1165,7 +1165,7 @@ private void addPeerRecoveryRetentionLeaseForSolePrimary() { /** * Notifies the tracker of the current allocation IDs in the cluster state. - * @param applyingClusterStateVersion the cluster state version being applied when updating the allocation IDs from the master + * @param applyingClusterStateVersion the cluster state version being applied when updating the allocation IDs from the cluster-manager * @param inSyncAllocationIds the allocation IDs of the currently in-sync shard copies * @param routingTable the shard routing table */ @@ -1176,14 +1176,14 @@ public synchronized void updateFromMaster( ) { assert invariant(); if (applyingClusterStateVersion > appliedClusterStateVersion) { - // check that the master does not fabricate new in-sync entries out of thin air once we are in primary mode + // check that the cluster-manager does not fabricate new in-sync entries out of thin air once we are in primary mode assert !primaryMode || inSyncAllocationIds.stream().allMatch(inSyncId -> checkpoints.containsKey(inSyncId) && checkpoints.get(inSyncId).inSync) - : "update from master in primary mode contains in-sync ids " + : "update from cluster-manager in primary mode contains in-sync ids " + inSyncAllocationIds + " that have no matching entries in " + checkpoints; - // remove entries which don't exist on master + // remove entries which don't exist on cluster-manager Set initializingAllocationIds = routingTable.getAllInitializingShards() .stream() .map(ShardRouting::allocationId) @@ -1197,7 +1197,7 @@ public synchronized void updateFromMaster( for (String initializingId : initializingAllocationIds) { if 
(checkpoints.containsKey(initializingId) == false) { final boolean inSync = inSyncAllocationIds.contains(initializingId); - assert inSync == false : "update from master in primary mode has " + assert inSync == false : "update from cluster-manager in primary mode has " + initializingId + " as in-sync but it does not exist locally"; final long localCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; @@ -1475,7 +1475,7 @@ public synchronized void activateWithPrimaryContext(PrimaryContext primaryContex assert indexSettings.getIndexVersionCreated().before(LegacyESVersion.V_7_3_0); throw new IllegalStateException("primary context [" + primaryContext + "] does not contain " + shardAllocationId); } - final Runnable runAfter = getMasterUpdateOperationFromCurrentState(); + final Runnable runAfter = getClusterManagerUpdateOperationFromCurrentState(); primaryMode = true; // capture current state to possibly replay missed cluster state update appliedClusterStateVersion = primaryContext.clusterStateVersion(); @@ -1541,7 +1541,7 @@ public synchronized void createMissingPeerRecoveryRetentionLeases(ActionListener } } - private Runnable getMasterUpdateOperationFromCurrentState() { + private Runnable getClusterManagerUpdateOperationFromCurrentState() { assert primaryMode == false; final long lastAppliedClusterStateVersion = appliedClusterStateVersion; final Set inSyncAllocationIds = new HashSet<>(); diff --git a/server/src/main/java/org/opensearch/index/shard/IndexEventListener.java b/server/src/main/java/org/opensearch/index/shard/IndexEventListener.java index 69f283a53ca79..c07798202144b 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexEventListener.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexEventListener.java @@ -161,7 +161,7 @@ default void beforeIndexShardDeleted(ShardId shardId, Settings indexSettings) {} default void afterIndexShardDeleted(ShardId shardId, Settings indexSettings) {} /** - * Called on the Master node only before the {@link 
IndexService} instances is created to simulate an index creation. + * This happens right before the index and it's metadata is registered in the cluster state */ default void beforeIndexAddedToCluster(Index index, Settings indexSettings) {} diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index f2630ad05b488..7a12952316c67 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -516,7 +516,7 @@ public void updateShardState( assert currentRouting.isRelocationTarget() == false || currentRouting.primary() == false || replicationTracker.isPrimaryMode() - : "a primary relocation is completed by the master, but primary mode is not active " + currentRouting; + : "a primary relocation is completed by the cluster-manager, but primary mode is not active " + currentRouting; changeState(IndexShardState.STARTED, "global state is [" + newRouting.state() + "]"); } else if (currentRouting.primary() @@ -539,7 +539,7 @@ public void updateShardState( if (newRouting.primary()) { if (newPrimaryTerm == pendingPrimaryTerm) { if (currentRouting.initializing() && currentRouting.isRelocationTarget() == false && newRouting.active()) { - // the master started a recovering primary, activate primary mode. + // the cluster-manager started a recovering primary, activate primary mode. replicationTracker.activatePrimaryMode(getLocalCheckpoint()); ensurePeerRecoveryRetentionLeasesExist(); } @@ -549,10 +549,10 @@ public void updateShardState( * in one state causing it's term to be incremented. Note that if both current shard state and new * shard state are initializing, we could replace the current shard and reinitialize it. It is however * possible that this shard is being started.
This can happen if: - * 1) Shard is post recovery and sends shard started to the master + * 1) Shard is post recovery and sends shard started to the cluster-manager * 2) Node gets disconnected and rejoins - * 3) Master assigns the shard back to the node - * 4) Master processes the shard started and starts the shard + * 3) Cluster-manager assigns the shard back to the node + * 4) Cluster-manager processes the shard started and starts the shard * 5) The node process the cluster state where the shard is both started and primary term is incremented. * * We could fail the shard in that case, but this will cause it to be removed from the insync allocations list @@ -757,7 +757,7 @@ private void verifyRelocatingState() { throw new IndexShardNotStartedException(shardId, state); } /* - * If the master cancelled recovery, the target will be removed and the recovery will be cancelled. However, it is still possible + * If the cluster-manager cancelled recovery, the target will be removed and the recovery will be cancelled. However, it is still possible * that we concurrently end up here and therefore have to protect that we do not mark the shard as relocated when its shard routing * says otherwise. 
*/ @@ -3398,7 +3398,7 @@ private void innerAcquireReplicaOperationPermit( final IndexShardState shardState = state(); // only roll translog and update primary term if shard has made it past recovery // Having a new primary term here means that the old primary failed and that there is a new primary, which again - // means that the master will fail this shard as all initializing shards are failed when a primary is selected + // means that the cluster-manager will fail this shard as all initializing shards are failed when a primary is selected // We abort early here to prevent an ongoing recovery from the failed primary to mess with the global / local checkpoint if (shardState != IndexShardState.POST_RECOVERY && shardState != IndexShardState.STARTED) { throw new IndexShardNotStartedException(shardId, shardState); diff --git a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java index 387f77a839d35..6eb7a29984e1c 100644 --- a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java @@ -561,7 +561,7 @@ private void restore( final StepListener indexIdListener = new StepListener<>(); // If the index UUID was not found in the recovery source we will have to load RepositoryData and resolve it by index name if (indexId.getId().equals(IndexMetadata.INDEX_UUID_NA_VALUE)) { - // BwC path, running against an old version master that did not add the IndexId to the recovery source + // BwC path, running against an old version cluster-manager that did not add the IndexId to the recovery source repository.getRepositoryData( ActionListener.map(indexIdListener, repositoryData -> repositoryData.resolveIndexId(indexId.getName())) ); diff --git a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java index 
9463b51ca3792..858cd238ad700 100644 --- a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java +++ b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java @@ -226,7 +226,7 @@ public synchronized void applyClusterState(final ClusterChangedEvent event) { final ClusterState state = event.state(); // we need to clean the shards and indices we have on this node, since we - // are going to recover them again once state persistence is disabled (no master / not recovered) + // are going to recover them again once state persistence is disabled (no cluster-manager / not recovered) // TODO: feels hacky, a block disables state persistence, and then we clean the allocated shards, maybe another flag in blocks? if (state.blocks().disableStatePersistence()) { for (AllocatedIndex indexService : indicesService) { @@ -244,7 +244,7 @@ public synchronized void applyClusterState(final ClusterChangedEvent event) { failMissingShards(state); - removeShards(state); // removes any local shards that doesn't match what the master expects + removeShards(state); // removes any local shards that doesn't match what the cluster-manager expects updateIndices(event); // can also fail shards, but these are then guaranteed to be in failedShardsCache @@ -267,17 +267,21 @@ private void updateFailedShardsCache(final ClusterState state) { return; } - DiscoveryNode masterNode = state.nodes().getMasterNode(); + DiscoveryNode clusterManagerNode = state.nodes().getMasterNode(); - // remove items from cache which are not in our routing table anymore and resend failures that have not executed on master yet + // remove items from cache which are not in our routing table anymore and + // resend failures that have not executed on cluster-manager yet for (Iterator> iterator = failedShardsCache.entrySet().iterator(); iterator.hasNext();) { ShardRouting failedShardRouting = iterator.next().getValue(); ShardRouting matchedRouting = 
localRoutingNode.getByShardId(failedShardRouting.shardId()); if (matchedRouting == null || matchedRouting.isSameAllocation(failedShardRouting) == false) { iterator.remove(); } else { - if (masterNode != null) { // TODO: can we remove this? Is resending shard failures the responsibility of shardStateAction? - String message = "master " + masterNode + " has not removed previously failed shard. resending shard failure"; + // TODO: can we remove this? Is resending shard failures the responsibility of shardStateAction? + if (clusterManagerNode != null) { + String message = "cluster-manager " + + clusterManagerNode + + " has not removed previously failed shard. resending shard failure"; logger.trace("[{}] re-sending failed shard [{}], reason [{}]", matchedRouting.shardId(), matchedRouting, message); shardStateAction.localShardFailed(matchedRouting, message, null, SHARD_STATE_ACTION_LISTENER, state); } @@ -401,7 +405,7 @@ private void removeIndices(final ClusterChangedEvent event) { } /** - * Notifies master about shards that don't exist but are supposed to be active on this node. + * Notifies cluster-manager about shards that don't exist but are supposed to be active on this node. 
* * @param state new cluster state */ @@ -415,7 +419,7 @@ private void failMissingShards(final ClusterState state) { if (shardRouting.initializing() == false && failedShardsCache.containsKey(shardId) == false && indicesService.getShardOrNull(shardId) == null) { - // the master thinks we are active, but we don't have this shard at all, mark it as failed + // the cluster-manager thinks we are active, but we don't have this shard at all, mark it as failed sendFailShard( shardRouting, "master marked shard as active, but shard has not been created, mark shard as failed", @@ -664,12 +668,12 @@ private void updateShard( final IndexShardState state = shard.state(); if (shardRouting.initializing() && (state == IndexShardState.STARTED || state == IndexShardState.POST_RECOVERY)) { - // the master thinks we are initializing, but we are already started or on POST_RECOVERY and waiting - // for master to confirm a shard started message (either master failover, or a cluster event before - // we managed to tell the master we started), mark us as started + // the cluster-manager thinks we are initializing, but we are already started or on POST_RECOVERY and waiting + // for cluster-manager to confirm a shard started message (either cluster-manager failover, or a cluster event before + // we managed to tell the cluster-manager we started), mark us as started if (logger.isTraceEnabled()) { logger.trace( - "{} master marked shard as initializing, but shard has state [{}], resending shard started to {}", + "{} cluster-manager marked shard as initializing, but shard has state [{}], resending shard started to {}", shardRouting.shardId(), state, nodes.getMasterNode() diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java b/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java index 127127f5feace..8d9eab24f6027 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java +++ 
b/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java @@ -159,7 +159,7 @@ public RecoverySettings(Settings settings, ClusterSettings clusterSettings) { this.maxConcurrentFileChunks = INDICES_RECOVERY_MAX_CONCURRENT_FILE_CHUNKS_SETTING.get(settings); this.maxConcurrentOperations = INDICES_RECOVERY_MAX_CONCURRENT_OPERATIONS_SETTING.get(settings); // doesn't have to be fast as nodes are reconnected every 10s by default (see InternalClusterService.ReconnectToNodes) - // and we want to give the master time to remove a faulty node + // and we want to give the cluster-manager time to remove a faulty node this.retryDelayNetwork = INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING.get(settings); this.internalActionTimeout = INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.get(settings); diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java index 394b093059385..6dbbf21eb9360 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java @@ -242,7 +242,7 @@ public void cancel(String reason) { * fail the recovery and call listener * * @param e exception that encapsulating the failure - * @param sendShardFailure indicates whether to notify the master of the shard failure + * @param sendShardFailure indicates whether to notify the cluster-manager of the shard failure */ public void fail(RecoveryFailedException e, boolean sendShardFailure) { if (finished.compareAndSet(false, true)) { diff --git a/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java b/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java index d385b2e6aa74e..538a8c871cb5f 100644 --- a/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java +++ 
b/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java @@ -203,8 +203,8 @@ private StoreFilesMetadata listStoreMetadata(NodeRequest request) throws IOExcep return new StoreFilesMetadata(shardId, Store.MetadataSnapshot.EMPTY, Collections.emptyList()); } // note that this may fail if it can't get access to the shard lock. Since we check above there is an active shard, this means: - // 1) a shard is being constructed, which means the master will not use a copy of this replica - // 2) A shard is shutting down and has not cleared it's content within lock timeout. In this case the master may not + // 1) a shard is being constructed, which means the cluster-manager will not use a copy of this replica + // 2) A shard is shutting down and has not cleared it's content within lock timeout. In this case the cluster-manager may not // reuse local resources. final Store.MetadataSnapshot metadataSnapshot = Store.readMetadataSnapshot( shardPath.resolveIndex(), diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 46400e5c8d269..c1152afd6fe44 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -1182,7 +1182,7 @@ private Node stop() { // stop any changes happening as a result of cluster state changes injector.getInstance(IndicesClusterStateService.class).stop(); // close discovery early to not react to pings anymore. - // This can confuse other nodes and delay things - mostly if we're the master and we're running tests. + // This can confuse other nodes and delay things - mostly if we're the cluster manager and we're running tests. 
injector.getInstance(Discovery.class).stop(); // we close indices first, so operations won't be allowed on it injector.getInstance(ClusterService.class).stop(); @@ -1458,7 +1458,7 @@ protected ClusterInfoService newClusterInfoService( ) { final InternalClusterInfoService service = new InternalClusterInfoService(settings, clusterService, threadPool, client); if (DiscoveryNode.isMasterNode(settings)) { - // listen for state changes (this node starts/stops being the elected master, or new nodes are added) + // listen for state changes (this node starts/stops being the elected cluster manager, or new nodes are added) clusterService.addListener(service); } return service; diff --git a/server/src/main/java/org/opensearch/persistent/PersistentTasksClusterService.java b/server/src/main/java/org/opensearch/persistent/PersistentTasksClusterService.java index eaa623b53ac1c..e6a9ae673211f 100644 --- a/server/src/main/java/org/opensearch/persistent/PersistentTasksClusterService.java +++ b/server/src/main/java/org/opensearch/persistent/PersistentTasksClusterService.java @@ -60,7 +60,7 @@ import java.util.Objects; /** - * Component that runs only on the master node and is responsible for assigning running tasks to nodes + * Component that runs only on the cluster-manager node and is responsible for assigning running tasks to nodes */ public class PersistentTasksClusterService implements ClusterStateListener, Closeable { @@ -114,7 +114,7 @@ public void close() { } /** - * Creates a new persistent task on master node + * Creates a new persistent task on cluster-manager node * * @param taskId the task's id * @param taskName the task's name diff --git a/server/src/main/java/org/opensearch/persistent/PersistentTasksNodeService.java b/server/src/main/java/org/opensearch/persistent/PersistentTasksNodeService.java index 4a12208d675e9..0a88204c7cfe9 100644 --- a/server/src/main/java/org/opensearch/persistent/PersistentTasksNodeService.java +++ 
b/server/src/main/java/org/opensearch/persistent/PersistentTasksNodeService.java @@ -87,7 +87,7 @@ public PersistentTasksNodeService( @Override public void clusterChanged(ClusterChangedEvent event) { if (event.state().blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) { - // wait until the gateway has recovered from disk, otherwise if the only master restarts + // wait until the gateway has recovered from disk, otherwise if the only cluster-manager restarts // we start cancelling all local tasks before cluster has a chance to recover. return; } @@ -103,18 +103,18 @@ public void clusterChanged(ClusterChangedEvent event) { // NULL STARTED Remove locally, Mark as PENDING_CANCEL, Cancel // NULL COMPLETED Remove locally - // Master states: + // Cluster-manager states: // NULL - doesn't exist in the cluster state // STARTED - exist in the cluster state // Local state: // NULL - we don't have task registered locally in runningTasks - // STARTED - registered in TaskManager, requires master notification when finishes - // PENDING_CANCEL - registered in TaskManager, doesn't require master notification when finishes - // COMPLETED - not registered in TaskManager, notified, waiting for master to remove it from CS so we can remove locally + // STARTED - registered in TaskManager, requires cluster-manager notification when finishes + // PENDING_CANCEL - registered in TaskManager, doesn't require cluster-manager notification when finishes + // COMPLETED - not registered in TaskManager, notified, waiting for cluster-manager to remove it from CS so we can remove locally // When task finishes if it is marked as STARTED or PENDING_CANCEL it is marked as COMPLETED and unregistered, - // If the task was STARTED, the master notification is also triggered (this is handled by unregisterTask() method, which is + // If the task was STARTED, the cluster-manager notification is also triggered (this is handled by unregisterTask() method, which is // triggered by 
PersistentTaskListener if (Objects.equals(tasks, previousTasks) == false || event.nodesChanged()) { @@ -162,7 +162,7 @@ public void clusterChanged(ClusterChangedEvent event) { ); runningTasks.remove(id); } else { - // task is running locally, but master doesn't know about it - that means that the persistent task was removed + // task is running locally, but cluster-manager doesn't know about it - that means that the persistent task was removed // cancel the task without notifying master logger.trace( "Found unregistered persistent task [{}] with id [{}] and allocation id [{}] - cancelling", @@ -286,7 +286,7 @@ public void onFailure(Exception notificationException) { } /** - * Unregisters and then cancels the locally running task using the task manager. No notification to master will be send upon + * Unregisters and then cancels the locally running task using the task manager. No notification to cluster-manager will be send upon * cancellation. */ private void cancelTask(Long allocationId) { diff --git a/server/src/main/java/org/opensearch/persistent/PersistentTasksService.java b/server/src/main/java/org/opensearch/persistent/PersistentTasksService.java index a52b623a7a843..c3d78bb614200 100644 --- a/server/src/main/java/org/opensearch/persistent/PersistentTasksService.java +++ b/server/src/main/java/org/opensearch/persistent/PersistentTasksService.java @@ -54,7 +54,7 @@ /** * This service is used by persistent tasks and allocated persistent tasks to communicate changes - * to the master node so that the master can update the cluster state and can track of the states + * to the cluster-manager node so that the cluster-manager can update the cluster state and can track of the states * of the persistent tasks. */ public class PersistentTasksService { @@ -74,7 +74,7 @@ public PersistentTasksService(ClusterService clusterService, ThreadPool threadPo } /** - * Notifies the master node to create new persistent task and to assign it to a node. 
+ * Notifies the cluster-manager node to create new persistent task and to assign it to a node. */ public void sendStartRequest( final String taskId, @@ -89,7 +89,7 @@ public void sendStartRequest( } /** - * Notifies the master node about the completion of a persistent task. + * Notifies the cluster-manager node about the completion of a persistent task. *

    * When {@code failure} is {@code null}, the persistent task is considered as successfully completed. */ @@ -118,7 +118,7 @@ void sendCancelRequest(final long taskId, final String reason, final ActionListe } /** - * Notifies the master node that the state of a persistent task has changed. + * Notifies the cluster-manager node that the state of a persistent task has changed. *

    * Persistent task implementers shouldn't call this method directly and use * {@link AllocatedPersistentTask#updatePersistentTaskState} instead @@ -138,7 +138,7 @@ void sendUpdateStateRequest( } /** - * Notifies the master node to remove a persistent task from the cluster state + * Notifies the cluster-manager node to remove a persistent task from the cluster state */ public void sendRemoveRequest(final String taskId, final ActionListener> listener) { RemovePersistentTaskAction.Request request = new RemovePersistentTaskAction.Request(taskId); diff --git a/server/src/main/java/org/opensearch/persistent/package-info.java b/server/src/main/java/org/opensearch/persistent/package-info.java index 00260b56d2ddb..3eff441642c90 100644 --- a/server/src/main/java/org/opensearch/persistent/package-info.java +++ b/server/src/main/java/org/opensearch/persistent/package-info.java @@ -32,12 +32,12 @@ * In order to be resilient to node restarts, the persistent tasks are using the cluster state instead of a transport service to send * requests and responses. The execution is done in six phases: *

    - * 1. The coordinating node sends an ordinary transport request to the master node to start a new persistent task. This task is handled + * 1. The coordinating node sends an ordinary transport request to the cluster-manager node to start a new persistent task. This task is handled * by the {@link org.opensearch.persistent.PersistentTasksService}, which is using * {@link org.opensearch.persistent.PersistentTasksClusterService} to update cluster state with the record about running persistent * task. *

    - * 2. The master node updates the {@link org.opensearch.persistent.PersistentTasksCustomMetadata} in the cluster state to indicate + * 2. The cluster-manager node updates the {@link org.opensearch.persistent.PersistentTasksCustomMetadata} in the cluster state to indicate * that there is a new persistent task running in the system. *

    * 3. The {@link org.opensearch.persistent.PersistentTasksNodeService} running on every node in the cluster monitors changes in diff --git a/server/src/main/java/org/opensearch/repositories/RepositoriesService.java b/server/src/main/java/org/opensearch/repositories/RepositoriesService.java index e7c5804f458a0..b521da8453bb2 100644 --- a/server/src/main/java/org/opensearch/repositories/RepositoriesService.java +++ b/server/src/main/java/org/opensearch/repositories/RepositoriesService.java @@ -140,7 +140,7 @@ public RepositoriesService( /** * Registers new repository in the cluster *

    - * This method can be only called on the master node. It tries to create a new repository on the master + * This method can be only called on the cluster-manager node. It tries to create a new repository on the master * and if it was successful it adds new repository to cluster metadata. * * @param request register repository request @@ -172,7 +172,7 @@ public void registerRepository(final PutRepositoryRequest request, final ActionL registrationListener = listener; } - // Trying to create the new repository on master to make sure it works + // Trying to create the new repository on cluster-manager to make sure it works try { closeRepository(createRepository(newRepositoryMetadata, typesRegistry)); } catch (Exception e) { @@ -235,7 +235,7 @@ public void onFailure(String source, Exception e) { @Override public boolean mustAck(DiscoveryNode discoveryNode) { - // repository is created on both master and data nodes + // repository is created on both cluster-manager and data nodes return discoveryNode.isMasterNode() || discoveryNode.isDataNode(); } } @@ -245,7 +245,7 @@ public boolean mustAck(DiscoveryNode discoveryNode) { /** * Unregisters repository in the cluster *

    - * This method can be only called on the master node. It removes repository information from cluster metadata. + * This method can be only called on the cluster-manager node. It removes repository information from cluster metadata. * * @param request unregister repository request * @param listener unregister repository listener @@ -290,7 +290,7 @@ public ClusterState execute(ClusterState currentState) { @Override public boolean mustAck(DiscoveryNode discoveryNode) { - // repository was created on both master and data nodes + // repository was created on both cluster-manager and data nodes return discoveryNode.isMasterNode() || discoveryNode.isDataNode(); } } @@ -457,7 +457,7 @@ public void getRepositoryData(final String repositoryName, final ActionListener< /** * Returns registered repository *

    - * This method is called only on the master node + * This method is called only on the cluster-manager node * * @param repositoryName repository name * @return registered repository diff --git a/server/src/main/java/org/opensearch/repositories/Repository.java b/server/src/main/java/org/opensearch/repositories/Repository.java index c8907393824c2..18ad02bab48f9 100644 --- a/server/src/main/java/org/opensearch/repositories/Repository.java +++ b/server/src/main/java/org/opensearch/repositories/Repository.java @@ -68,7 +68,7 @@ *

      *
    • Data nodes call {@link Repository#snapshotShard} * for each shard
    • - *
    • When all shard calls return master calls {@link #finalizeSnapshot} with possible list of failures
    • + *
    • When all shard calls return cluster-manager calls {@link #finalizeSnapshot} with possible list of failures
    • *
    */ public interface Repository extends LifecycleComponent { @@ -134,7 +134,7 @@ default Repository create(RepositoryMetadata metadata, Function - * This method is called on master after all shards are snapshotted. + * This method is called on cluster-manager after all shards are snapshotted. * * @param shardGenerations updated shard generations * @param repositoryStateId the unique id identifying the state of the repository when the snapshot began @@ -197,7 +197,7 @@ default RepositoryStats stats() { } /** - * Verifies repository on the master node and returns the verification token. + * Verifies repository on the cluster-manager node and returns the verification token. *

    * If the verification token is not null, it's passed to all data nodes for verification. If it's null - no * additional verification is required diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java index 7d6cdef76198f..d95612e31ca38 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java @@ -318,7 +318,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp *

  • All repositories that are read-only, i.e. for which {@link #isReadOnly()} returns {@code true} because there are no * guarantees that another cluster is not writing to the repository at the same time
  • *
  • The node finds itself in a mixed-version cluster containing nodes older than - * {@link RepositoryMetadata#REPO_GEN_IN_CS_VERSION} where the master node does not update the value of + * {@link RepositoryMetadata#REPO_GEN_IN_CS_VERSION} where the cluster-manager node does not update the value of * {@link RepositoryMetadata#generation()} when writing a new {@code index-N} blob
  • *
  • The value of {@link RepositoryMetadata#generation()} for this repository is {@link RepositoryData#UNKNOWN_REPO_GEN} * indicating that no consistent repository generation is tracked in the cluster state yet.
  • @@ -726,8 +726,8 @@ public void deleteSnapshots( protected void doRun() throws Exception { final Map rootBlobs = blobContainer().listBlobs(); final RepositoryData repositoryData = safeRepositoryData(repositoryStateId, rootBlobs); - // Cache the indices that were found before writing out the new index-N blob so that a stuck master will never - // delete an index that was created by another master node after writing this index-N blob. + // Cache the indices that were found before writing out the new index-N blob so that a stuck cluster-manager will never + // delete an index that was created by another cluster-manager node after writing this index-N blob. final Map foundIndices = blobStore().blobContainer(indicesPath()).children(); doDeleteShardSnapshots( snapshotIds, @@ -1371,9 +1371,9 @@ public void finalizeSnapshot( ); }, onUpdateFailure), 2 + indices.size()); - // We ignore all FileAlreadyExistsException when writing metadata since otherwise a master failover while in this method will - // mean that no snap-${uuid}.dat blob is ever written for this snapshot. This is safe because any updated version of the - // index or global metadata will be compatible with the segments written in this snapshot as well. + // We ignore all FileAlreadyExistsException when writing metadata since otherwise a cluster-manager failover + // while in this method will mean that no snap-${uuid}.dat blob is ever written for this snapshot. This is safe because + // any updated version of the index or global metadata will be compatible with the segments written in this snapshot as well. 
// Failing on an already existing index-${repoGeneration} below ensures that the index.latest blob is not updated in a way // that decrements the generation it points at @@ -1546,7 +1546,11 @@ public String startVerification() { return seed; } } catch (Exception exp) { - throw new RepositoryVerificationException(metadata.name(), "path " + basePath() + " is not accessible on master node", exp); + throw new RepositoryVerificationException( + metadata.name(), + "path " + basePath() + " is not accessible on cluster-manager node", + exp + ); } } @@ -2782,15 +2786,15 @@ public void verify(String seed, DiscoveryNode localNode) { } catch (NoSuchFileException e) { throw new RepositoryVerificationException( metadata.name(), - "a file written by master to the store [" + "a file written by cluster-manager to the store [" + blobStore() + "] cannot be accessed on the node [" + localNode + "]. " + "This might indicate that the store [" + blobStore() - + "] is not shared between this node and the master node or " - + "that permissions on the store don't allow reading files written by the master node", + + "] is not shared between this node and the cluster-manager node or " + + "that permissions on the store don't allow reading files written by the cluster-manager node", e ); } catch (Exception e) { diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/package-info.java b/server/src/main/java/org/opensearch/repositories/blobstore/package-info.java index a960cfe70aee7..aacd386cd4bd7 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/package-info.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/package-info.java @@ -39,7 +39,7 @@ * {@link org.opensearch.repositories.blobstore.BlobStoreRepository#getBlobContainer()}.

    * *

    The blob store is written to and read from by cluster-manager-eligible nodes and data nodes. All metadata related to a snapshot's - * scope and health is written by the master node.

    + * scope and health is written by the cluster-manager node.

    *

    The data-nodes on the other hand, write the data for each individual shard but do not write any blobs outside of shard directories for * shards that they hold the primary of. For each shard, the data-node holding the shard's primary writes the actual data in form of * the shard's segment files to the repository as well as metadata about all the segment files that the repository stores for the shard.

    @@ -131,19 +131,19 @@ *

    Writing Updated RepositoryData to the Repository

    * *

    Writing an updated {@link org.opensearch.repositories.RepositoryData} to a blob store repository is an operation that uses - * the cluster state to ensure that a specific {@code index-N} blob is never accidentally overwritten in a master failover scenario. + * the cluster state to ensure that a specific {@code index-N} blob is never accidentally overwritten in a cluster-manager failover scenario. * The specific steps to writing a new {@code index-N} blob and thus making changes from a snapshot-create or delete operation visible - * to read operations on the repository are as follows and all run on the master node:

    + * to read operations on the repository are as follows and all run on the cluster-manager node:

    * *
      *
    1. Write an updated value of {@link org.opensearch.cluster.metadata.RepositoryMetadata} for the repository that has the same * {@link org.opensearch.cluster.metadata.RepositoryMetadata#generation()} as the existing entry and has a value of * {@link org.opensearch.cluster.metadata.RepositoryMetadata#pendingGeneration()} one greater than the {@code pendingGeneration} of the * existing entry.
    2. - *
    3. On the same master node, after the cluster state has been updated in the first step, write the new {@code index-N} blob and + *
    4. On the same cluster-manager node, after the cluster state has been updated in the first step, write the new {@code index-N} blob and * also update the contents of the {@code index.latest} blob. Note that updating the index.latest blob is done on a best effort - * basis and that there is a chance for a stuck master-node to overwrite the contents of the {@code index.latest} blob after a newer - * {@code index-N} has been written by another master node. This is acceptable since the contents of {@code index.latest} are not used + * basis and that there is a chance for a stuck cluster-manager node to overwrite the contents of the {@code index.latest} blob after a newer + * {@code index-N} has been written by another cluster-manager node. This is acceptable since the contents of {@code index.latest} are not used * during normal operation of the repository and must only be correct for purposes of mounting the contents of a * {@link org.opensearch.repositories.blobstore.BlobStoreRepository} as a read-only url repository.
    5. *
    6. After the write has finished, set the value of {@code RepositoriesState.State#generation} to the value used for @@ -152,7 +152,7 @@ * last valid {@code index-N} blob in the repository.
    7. *
    * - *

    If either of the last two steps in the above fails or master fails over to a new node at any point, then a subsequent operation + *

    If either of the last two steps in the above fails or cluster-manager fails over to a new node at any point, then a subsequent operation * trying to write a new {@code index-N} blob will never use the same value of {@code N} used by a previous attempt. It will always start * over at the first of the above three steps, incrementing the {@code pendingGeneration} generation before attempting a write, thus * ensuring no overwriting of a {@code index-N} blob ever to occur. The use of the cluster state to track the latest repository generation @@ -208,7 +208,7 @@ * *

    Finalizing the Snapshot

    * - *

    After all primaries have finished writing the necessary segment files to the blob store in the previous step, the master node moves on + *

    After all primaries have finished writing the necessary segment files to the blob store in the previous step, the cluster-manager node moves on * to finalizing the snapshot by invoking {@link org.opensearch.repositories.Repository#finalizeSnapshot}. This method executes the * following actions in order:

    *
      @@ -222,7 +222,7 @@ * *

      Deleting a Snapshot

      * - *

      Deleting a snapshot is an operation that is exclusively executed on the master node that runs through the following sequence of + *

      Deleting a snapshot is an operation that is exclusively executed on the cluster-manager node that runs through the following sequence of * action when {@link org.opensearch.repositories.blobstore.BlobStoreRepository#deleteSnapshots} is invoked:

      * *
        diff --git a/server/src/main/java/org/opensearch/repositories/fs/FsRepository.java b/server/src/main/java/org/opensearch/repositories/fs/FsRepository.java index e57246265bb66..51417733ebe61 100644 --- a/server/src/main/java/org/opensearch/repositories/fs/FsRepository.java +++ b/server/src/main/java/org/opensearch/repositories/fs/FsRepository.java @@ -118,7 +118,7 @@ public FsRepository( if (location.isEmpty()) { logger.warn( "the repository location is missing, it should point to a shared file system location" - + " that is available on all master and data nodes" + + " that is available on all cluster-manager and data nodes" ); throw new RepositoryException(metadata.name(), "missing location"); } diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetAliasesAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetAliasesAction.java index 4ff519e81f9cd..44e2ace0f7cf4 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetAliasesAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetAliasesAction.java @@ -187,7 +187,7 @@ static RestResponse buildRestResponse( public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { // The TransportGetAliasesAction was improved do the same post processing as is happening here. // We can't remove this logic yet to support mixed clusters. We should be able to remove this logic here - // in when 8.0 becomes the new version in the master branch. + // in when 8.0 becomes the new version in the main branch. 
final boolean namesProvided = request.hasParam("name"); final String[] aliases = request.paramAsStringArrayOrEmptyIfAll("name"); diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java index 1b70603edf6e1..d6a620316f489 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java @@ -211,7 +211,7 @@ private void sendGetSettingsRequest( final String[] indices, final IndicesOptions indicesOptions, final boolean local, - final TimeValue masterNodeTimeout, + final TimeValue clusterManagerNodeTimeout, final NodeClient client, final ActionListener listener ) { @@ -219,7 +219,7 @@ private void sendGetSettingsRequest( request.indices(indices); request.indicesOptions(indicesOptions); request.local(local); - request.masterNodeTimeout(masterNodeTimeout); + request.masterNodeTimeout(clusterManagerNodeTimeout); request.names(IndexSettings.INDEX_SEARCH_THROTTLED.getKey()); client.admin().indices().getSettings(request, listener); @@ -229,7 +229,7 @@ private void sendClusterStateRequest( final String[] indices, final IndicesOptions indicesOptions, final boolean local, - final TimeValue masterNodeTimeout, + final TimeValue clusterManagerNodeTimeout, final NodeClient client, final ActionListener listener ) { @@ -238,7 +238,7 @@ private void sendClusterStateRequest( request.indices(indices); request.indicesOptions(indicesOptions); request.local(local); - request.masterNodeTimeout(masterNodeTimeout); + request.masterNodeTimeout(clusterManagerNodeTimeout); client.admin().cluster().state(request, listener); } @@ -247,7 +247,7 @@ private void sendClusterHealthRequest( final String[] indices, final IndicesOptions indicesOptions, final boolean local, - final TimeValue masterNodeTimeout, + final TimeValue clusterManagerNodeTimeout, final NodeClient client, final ActionListener 
listener ) { @@ -256,7 +256,7 @@ private void sendClusterHealthRequest( request.indices(indices); request.indicesOptions(indicesOptions); request.local(local); - request.masterNodeTimeout(masterNodeTimeout); + request.masterNodeTimeout(clusterManagerNodeTimeout); client.admin().cluster().health(request, listener); } diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestMasterAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestMasterAction.java index 4bcb16c741ecf..64103cba556eb 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestMasterAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestMasterAction.java @@ -106,17 +106,17 @@ private Table buildTable(RestRequest request, ClusterStateResponse state) { DiscoveryNodes nodes = state.getState().nodes(); table.startRow(); - DiscoveryNode master = nodes.get(nodes.getMasterNodeId()); - if (master == null) { + DiscoveryNode clusterManager = nodes.get(nodes.getMasterNodeId()); + if (clusterManager == null) { table.addCell("-"); table.addCell("-"); table.addCell("-"); table.addCell("-"); } else { - table.addCell(master.getId()); - table.addCell(master.getHostName()); - table.addCell(master.getHostAddress()); - table.addCell(master.getName()); + table.addCell(clusterManager.getId()); + table.addCell(clusterManager.getHostName()); + table.addCell(clusterManager.getHostAddress()); + table.addCell(clusterManager.getName()); } table.endRow(); diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java index 3052a9736f9a3..820031f9e57e4 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java @@ -334,7 +334,7 @@ Table buildTable( ) { DiscoveryNodes nodes = state.getState().nodes(); - String masterId = nodes.getMasterNodeId(); + String clusterManagerId = 
nodes.getMasterNodeId(); Table table = getTableWithHeader(req); for (DiscoveryNode node : nodes) { @@ -424,7 +424,7 @@ Table buildTable( roles = node.getRoles().stream().map(DiscoveryNodeRole::roleNameAbbreviation).sorted().collect(Collectors.joining()); } table.addCell(roles); - table.addCell(masterId == null ? "x" : masterId.equals(node.getId()) ? "*" : "-"); + table.addCell(clusterManagerId == null ? "x" : clusterManagerId.equals(node.getId()) ? "*" : "-"); table.addCell(node.getName()); CompletionStats completionStats = indicesStats == null ? null : stats.getIndices().getCompletion(); diff --git a/server/src/main/java/org/opensearch/snapshots/InternalSnapshotsInfoService.java b/server/src/main/java/org/opensearch/snapshots/InternalSnapshotsInfoService.java index 5b44b435042e5..2314cbd11dfdd 100644 --- a/server/src/main/java/org/opensearch/snapshots/InternalSnapshotsInfoService.java +++ b/server/src/main/java/org/opensearch/snapshots/InternalSnapshotsInfoService.java @@ -176,8 +176,8 @@ public void clusterChanged(ClusterChangedEvent event) { } } else if (event.previousState().nodes().isLocalNodeElectedMaster()) { - // TODO Maybe just clear out non-ongoing snapshot recoveries is the node is master eligible, so that we don't - // have to repopulate the data over and over in an unstable master situation? + // TODO Maybe just clear out non-ongoing snapshot recoveries is the node is cluster-manager eligible, so that we don't + // have to repopulate the data over and over in an unstable cluster-manager situation? 
synchronized (mutex) { // information only needed on current master knownSnapshotShards = ImmutableOpenMap.of(); diff --git a/server/src/main/java/org/opensearch/snapshots/RestoreService.java b/server/src/main/java/org/opensearch/snapshots/RestoreService.java index e1b143b5f5274..4e9f13408e547 100644 --- a/server/src/main/java/org/opensearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/opensearch/snapshots/RestoreService.java @@ -958,7 +958,7 @@ public void onFailure(final String source, final Exception e) { @Override public void onNoLongerMaster(String source) { - logger.debug("no longer master while processing restore state update [{}]", source); + logger.debug("no longer cluster-manager while processing restore state update [{}]", source); } } diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java index b6c0b63efe3d3..800728077472d 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java @@ -103,7 +103,7 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements private final Map> shardSnapshots = new HashMap<>(); - // A map of snapshots to the shardIds that we already reported to the master as failed + // A map of snapshots to the shardIds that we already reported to the cluster-manager as failed private final TransportRequestDeduplicator remoteFailedRequestDeduplicator = new TransportRequestDeduplicator<>(); @@ -148,9 +148,9 @@ public void clusterChanged(ClusterChangedEvent event) { } } - String previousMasterNodeId = event.previousState().nodes().getMasterNodeId(); + String previousClusterManagerNodeId = event.previousState().nodes().getMasterNodeId(); String currentMasterNodeId = event.state().nodes().getMasterNodeId(); - if (currentMasterNodeId != null && currentMasterNodeId.equals(previousMasterNodeId) == 
false) { + if (currentMasterNodeId != null && currentMasterNodeId.equals(previousClusterManagerNodeId) == false) { syncShardStatsOnNewMaster(event); } @@ -251,7 +251,7 @@ private void startNewSnapshots(SnapshotsInProgress snapshotsInProgress) { final IndexShardSnapshotStatus snapshotStatus = snapshotShards.get(shard.key); if (snapshotStatus == null) { // due to CS batching we might have missed the INIT state and straight went into ABORTED - // notify master that abort has completed by moving to FAILED + // notify cluster-manager that abort has completed by moving to FAILED if (shard.value.state() == ShardState.ABORTED && localNodeId.equals(shard.value.nodeId())) { notifyFailedSnapshotShard(snapshot, shard.key, shard.value.reason()); } @@ -424,7 +424,7 @@ private static String getShardStateId(IndexShard indexShard, IndexCommit snapsho } /** - * Checks if any shards were processed that the new master doesn't know about + * Checks if any shards were processed that the new cluster-manager doesn't know about */ private void syncShardStatsOnNewMaster(ClusterChangedEvent event) { SnapshotsInProgress snapshotsInProgress = event.state().custom(SnapshotsInProgress.TYPE); @@ -433,7 +433,7 @@ private void syncShardStatsOnNewMaster(ClusterChangedEvent event) { } // Clear request deduplicator since we need to send all requests that were potentially not handled by the previous - // master again + // cluster-manager again remoteFailedRequestDeduplicator.clear(); for (SnapshotsInProgress.Entry snapshot : snapshotsInProgress.entries()) { if (snapshot.state() == State.STARTED || snapshot.state() == State.ABORTED) { @@ -446,11 +446,11 @@ private void syncShardStatsOnNewMaster(ClusterChangedEvent event) { if (masterShard != null && masterShard.state().completed() == false) { final IndexShardSnapshotStatus.Copy indexShardSnapshotStatus = localShard.getValue().asCopy(); final Stage stage = indexShardSnapshotStatus.getStage(); - // Master knows about the shard and thinks it has not 
completed + // cluster-manager knows about the shard and thinks it has not completed if (stage == Stage.DONE) { - // but we think the shard is done - we need to make new master know that the shard is done + // but we think the shard is done - we need to make new cluster-manager know that the shard is done logger.debug( - "[{}] new master thinks the shard [{}] is not completed but the shard is done locally, " + "[{}] new cluster-manager thinks the shard [{}] is not completed but the shard is done locally, " + "updating status on the master", snapshot.snapshot(), shardId @@ -458,9 +458,9 @@ private void syncShardStatsOnNewMaster(ClusterChangedEvent event) { notifySuccessfulSnapshotShard(snapshot.snapshot(), shardId, localShard.getValue().generation()); } else if (stage == Stage.FAILURE) { - // but we think the shard failed - we need to make new master know that the shard failed + // but we think the shard failed - we need to make new cluster-manager know that the shard failed logger.debug( - "[{}] new master thinks the shard [{}] is not completed but the shard failed locally, " + "[{}] new cluster-manager thinks the shard [{}] is not completed but the shard failed locally, " + "updating status on master", snapshot.snapshot(), shardId @@ -474,7 +474,7 @@ private void syncShardStatsOnNewMaster(ClusterChangedEvent event) { } } - /** Notify the master node that the given shard has been successfully snapshotted **/ + /** Notify the cluster-manager node that the given shard has been successfully snapshotted **/ private void notifySuccessfulSnapshotShard(final Snapshot snapshot, final ShardId shardId, String generation) { assert generation != null; sendSnapshotShardUpdate( @@ -484,7 +484,7 @@ private void notifySuccessfulSnapshotShard(final Snapshot snapshot, final ShardI ); } - /** Notify the master node that the given shard failed to be snapshotted **/ + /** Notify the cluster-manager node that the given shard failed to be snapshotted **/ private void 
notifyFailedSnapshotShard(final Snapshot snapshot, final ShardId shardId, final String failure) { sendSnapshotShardUpdate( snapshot, @@ -493,7 +493,7 @@ private void notifyFailedSnapshotShard(final Snapshot snapshot, final ShardId sh ); } - /** Updates the shard snapshot status by sending a {@link UpdateIndexShardSnapshotStatusRequest} to the master node */ + /** Updates the shard snapshot status by sending a {@link UpdateIndexShardSnapshotStatusRequest} to the cluster-manager node */ private void sendSnapshotShardUpdate(final Snapshot snapshot, final ShardId shardId, final ShardSnapshotStatus status) { remoteFailedRequestDeduplicator.executeOnce( new UpdateIndexShardSnapshotStatusRequest(snapshot, shardId, status), diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java index 122c13337fa70..746cccef8e596 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java @@ -134,7 +134,7 @@ import static org.opensearch.cluster.SnapshotsInProgress.completed; /** - * Service responsible for creating snapshots. This service runs all the steps executed on the master node during snapshot creation and + * Service responsible for creating snapshots. This service runs all the steps executed on the cluster-manager node during snapshot creation and * deletion. * See package level documentation of {@link org.opensearch.snapshots} for details. */ @@ -303,8 +303,8 @@ public ClusterState execute(ClusterState currentState) { } SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE); // Fail if there are any concurrently running snapshots. The only exception to this being a snapshot in INIT state from a - // previous master that we can simply ignore and remove from the cluster state because we would clean it up from the - // cluster state anyway in #applyClusterState. 
+ // previous cluster-manager that we can simply ignore and remove from the cluster state because we would clean it up from + // the cluster state anyway in #applyClusterState. if (snapshots != null && snapshots.entries() .stream() @@ -452,7 +452,8 @@ public ClusterState execute(ClusterState currentState) { ); } // Fail if there are any concurrently running snapshots. The only exception to this being a snapshot in INIT state from a - // previous master that we can simply ignore and remove from the cluster state because we would clean it up from the + // previous cluster-manager that we can simply ignore and remove from the cluster state because we would clean it up from + // the // cluster state anyway in #applyClusterState. if (concurrentOperationsAllowed == false && runningSnapshots.stream().anyMatch(entry -> entry.state() != State.INIT)) { throw new ConcurrentSnapshotExecutionException(repositoryName, snapshotName, " a snapshot is already running"); @@ -807,7 +808,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS runReadyClone(target, sourceSnapshot, shardStatusBefore, repoShardId, repository); } } else { - // Extremely unlikely corner case of master failing over between between starting the clone and + // Extremely unlikely corner case of cluster-manager failing over between between starting the clone and // starting shard clones. 
logger.warn("Did not find expected entry [{}] in the cluster state", cloneEntry); } @@ -986,8 +987,10 @@ protected void doRun() { ); } if (clusterState.nodes().getMinNodeVersion().onOrAfter(NO_REPO_INITIALIZE_VERSION) == false) { - // In mixed version clusters we initialize the snapshot in the repository so that in case of a master failover to an - // older version master node snapshot finalization (that assumes initializeSnapshot was called) produces a valid + // In mixed version clusters we initialize the snapshot in the repository so that in case of a cluster-manager + // failover to an + // older version cluster-manager node snapshot finalization (that assumes initializeSnapshot was called) produces a + // valid // snapshot. repository.initializeSnapshot( snapshot.snapshot().getSnapshotId(), @@ -1116,11 +1119,14 @@ public void onFailure(String source, Exception e) { @Override public void onNoLongerMaster(String source) { - // We are not longer a master - we shouldn't try to do any cleanup - // The new master will take care of it - logger.warn("[{}] failed to create snapshot - no longer a master", snapshot.snapshot().getSnapshotId()); + // We are not longer a cluster-manager - we shouldn't try to do any cleanup + // The new cluster-manager will take care of it + logger.warn( + "[{}] failed to create snapshot - no longer a cluster-manager", + snapshot.snapshot().getSnapshotId() + ); userCreateSnapshotListener.onFailure( - new SnapshotException(snapshot.snapshot(), "master changed during snapshot initialization") + new SnapshotException(snapshot.snapshot(), "cluster-manager changed during snapshot initialization") ); } @@ -1238,7 +1244,7 @@ private static Metadata metadataForSnapshot(SnapshotsInProgress.Entry snapshot, /** * Returns status of the currently running snapshots *

        - * This method is executed on master node + * This method is executed on cluster-manager node *

        * * @param snapshotsInProgress snapshots in progress in the cluster state @@ -1298,20 +1304,22 @@ public static List currentSnapshots( public void applyClusterState(ClusterChangedEvent event) { try { if (event.localNodeMaster()) { - // We don't remove old master when master flips anymore. So, we need to check for change in master + // We don't remove old cluster-manager when cluster-manager flips anymore. So, we need to check for change in + // cluster-manager SnapshotsInProgress snapshotsInProgress = event.state().custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY); - final boolean newMaster = event.previousState().nodes().isLocalNodeElectedMaster() == false; + final boolean newClusterManager = event.previousState().nodes().isLocalNodeElectedMaster() == false; processExternalChanges( - newMaster || removedNodesCleanupNeeded(snapshotsInProgress, event.nodesDelta().removedNodes()), + newClusterManager || removedNodesCleanupNeeded(snapshotsInProgress, event.nodesDelta().removedNodes()), event.routingTableChanged() && waitingShardsStartedOrUnassigned(snapshotsInProgress, event) ); } else if (snapshotCompletionListeners.isEmpty() == false) { - // We have snapshot listeners but are not the master any more. Fail all waiting listeners except for those that already + // We have snapshot listeners but are not the cluster-manager any more. Fail all waiting listeners except for those that + // already // have their snapshots finalizing (those that are already finalizing will fail on their own from to update the cluster // state). 
for (Snapshot snapshot : new HashSet<>(snapshotCompletionListeners.keySet())) { if (endingSnapshots.add(snapshot)) { - failSnapshotCompletionListeners(snapshot, new SnapshotException(snapshot, "no longer master")); + failSnapshotCompletionListeners(snapshot, new SnapshotException(snapshot, "no longer cluster-manager")); } } } @@ -1326,7 +1334,7 @@ public void applyClusterState(ClusterChangedEvent event) { /** * Cleanup all snapshots found in the given cluster state that have no more work left: * 1. Completed snapshots - * 2. Snapshots in state INIT that a previous master of an older version failed to start + * 2. Snapshots in state INIT that a previous cluster-manager of an older version failed to start * 3. Snapshots in any other state that have all their shard tasks completed */ private void endCompletedSnapshots(ClusterState state) { @@ -1402,11 +1410,11 @@ private static boolean assertNoDanglingSnapshots(ClusterState state) { } /** - * Updates the state of in-progress snapshots in reaction to a change in the configuration of the cluster nodes (master fail-over or + * Updates the state of in-progress snapshots in reaction to a change in the configuration of the cluster nodes (cluster-manager fail-over or * disconnect of a data node that was executing a snapshot) or a routing change that started shards whose snapshot state is * {@link SnapshotsInProgress.ShardState#WAITING}. * - * @param changedNodes true iff either a master fail-over occurred or a data node that was doing snapshot work got removed from the + * @param changedNodes true iff either a cluster-manager fail-over occurred or a data node that was doing snapshot work got removed from the * cluster * @param startShards true iff any waiting shards were started due to a routing change */ @@ -1863,7 +1871,7 @@ private List>> endAndGetListe /** * Handles failure to finalize a snapshot. 
If the exception indicates that this node was unable to publish a cluster state and stopped - * being the master node, then fail all snapshot create and delete listeners executing on this node by delegating to + * being the cluster-manager node, then fail all snapshot create and delete listeners executing on this node by delegating to * {@link #failAllListenersOnMasterFailOver}. Otherwise, i.e. as a result of failing to write to the snapshot repository for some * reason, remove the snapshot's {@link SnapshotsInProgress.Entry} from the cluster state and move on with other queued snapshot * operations if there are any. @@ -1875,7 +1883,7 @@ private List>> endAndGetListe private void handleFinalizationFailure(Exception e, SnapshotsInProgress.Entry entry, RepositoryData repositoryData) { Snapshot snapshot = entry.snapshot(); if (ExceptionsHelper.unwrap(e, NotMasterException.class, FailedToCommitClusterStateException.class) != null) { - // Failure due to not being master any more, don't try to remove snapshot from cluster state the next master + // Failure due to not being cluster-manager any more, don't try to remove snapshot from cluster state the next cluster-manager // will try ending this snapshot again logger.debug(() -> new ParameterizedMessage("[{}] failed to update cluster state during snapshot finalization", snapshot), e); failSnapshotCompletionListeners( @@ -2082,7 +2090,7 @@ public void onFailure(String source, Exception e) { @Override public void onNoLongerMaster(String source) { - failure.addSuppressed(new SnapshotException(snapshot, "no longer master")); + failure.addSuppressed(new SnapshotException(snapshot, "no longer cluster-manager")); failSnapshotCompletionListeners(snapshot, failure); failAllListenersOnMasterFailOver(new NotMasterException(source)); if (listener != null) { @@ -2249,7 +2257,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { SnapshotsInProgress.of( snapshots.entries() .stream() - // remove init state 
snapshot we found from a previous master if there was one + // remove init state snapshot we found from a previous cluster-manager if there was one .filter(existing -> abortedDuringInit == false || existing.equals(snapshotEntry) == false) .map(existing -> { if (existing.equals(snapshotEntry)) { @@ -2297,8 +2305,8 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS ); }, e -> { if (ExceptionsHelper.unwrap(e, NotMasterException.class, FailedToCommitClusterStateException.class) != null) { - logger.warn("master failover before deleted snapshot could complete", e); - // Just pass the exception to the transport handler as is so it is retried on the new master + logger.warn("cluster-manager failover before deleted snapshot could complete", e); + // Just pass the exception to the transport handler as is so it is retried on the new cluster-manager listener.onFailure(e); } else { logger.warn("deleted snapshot failed", e); @@ -2588,7 +2596,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS */ private static boolean isWritingToRepository(SnapshotsInProgress.Entry entry) { if (entry.state().completed()) { - // Entry is writing to the repo because it's finalizing on master + // Entry is writing to the repo because it's finalizing on cluster-manager return true; } for (ObjectCursor value : entry.shards().values()) { @@ -2769,19 +2777,19 @@ protected void handleListeners(List> deleteListeners) { } /** - * Handle snapshot or delete failure due to not being master any more so we don't try to do run additional cluster state updates. - * The next master will try handling the missing operations. All we can do is fail all the listeners on this master node so that + * Handle snapshot or delete failure due to not being cluster-manager any more so we don't try to do run additional cluster state updates. + * The next cluster-manager will try handling the missing operations. 
All we can do is fail all the listeners on this cluster-manager node so that * transport requests return and we don't leak listeners. * - * @param e exception that caused us to realize we are not master any longer + * @param e exception that caused us to realize we are not cluster-manager any longer */ private void failAllListenersOnMasterFailOver(Exception e) { - logger.debug("Failing all snapshot operation listeners because this node is not master any longer", e); + logger.debug("Failing all snapshot operation listeners because this node is not cluster-manager any longer", e); synchronized (currentlyFinalizing) { if (ExceptionsHelper.unwrap(e, NotMasterException.class, FailedToCommitClusterStateException.class) != null) { repositoryOperations.clear(); for (Snapshot snapshot : new HashSet<>(snapshotCompletionListeners.keySet())) { - failSnapshotCompletionListeners(snapshot, new SnapshotException(snapshot, "no longer master")); + failSnapshotCompletionListeners(snapshot, new SnapshotException(snapshot, "no longer cluster-manager")); } final Exception wrapped = new RepositoryException("_all", "Failed to update cluster state during repository operation", e); for (Iterator>> iterator = snapshotDeletionListeners.values().iterator(); iterator.hasNext();) { @@ -3213,7 +3221,7 @@ public boolean assertAllListenersResolved() { * * If the inner loop finds that a shard update task applies to a given snapshot and either a shard-snapshot or shard-clone operation in * it then it will update the state of the snapshot entry accordingly. If that update was a noop, then the task is removed from the - * iteration as it was already applied before and likely just arrived on the master node again due to retries upstream. + * iteration as it was already applied before and likely just arrived on the cluster-manager node again due to retries upstream. 
* If the update was not a noop, then it means that the shard it applied to is now available for another snapshot or clone operation * to be re-assigned if there is another snapshot operation that is waiting for the shard to become available. We therefore record the * fact that a task was executed by adding it to a collection of executed tasks. If a subsequent execution of the outer loop finds that @@ -3267,7 +3275,8 @@ public boolean assertAllListenersResolved() { updateSnapshotState, entry ); - assert false : "This should never happen, master will not submit a state update for a non-existing clone"; + assert false + : "This should never happen, cluster-manager will not submit a state update for a non-existing clone"; continue; } if (existing.state().completed()) { @@ -3810,8 +3819,8 @@ synchronized void addFinalization(SnapshotsInProgress.Entry entry, Metadata meta } /** - * Clear all state associated with running snapshots. To be used on master-failover if the current node stops - * being master. + * Clear all state associated with running snapshots. To be used on cluster-manager-failover if the current node stops + * being cluster-manager. */ synchronized void clear() { snapshotsToFinalize.clear(); diff --git a/server/src/main/java/org/opensearch/snapshots/package-info.java b/server/src/main/java/org/opensearch/snapshots/package-info.java index 82d7a0d88ff00..f43509cf671f9 100644 --- a/server/src/main/java/org/opensearch/snapshots/package-info.java +++ b/server/src/main/java/org/opensearch/snapshots/package-info.java @@ -30,13 +30,13 @@ * *

        Preliminaries

        * - *

        There are two communication channels between all nodes and master in the snapshot functionality:

        + *

        There are two communication channels between all nodes and cluster-manager in the snapshot functionality:

        *
          - *
        • The master updates the cluster state by adding, removing or altering the contents of its custom entry + *
        • The cluster-manager updates the cluster state by adding, removing or altering the contents of its custom entry * {@link org.opensearch.cluster.SnapshotsInProgress}. All nodes consume the state of the {@code SnapshotsInProgress} and will start or * abort relevant shard snapshot tasks accordingly.
        • *
        • Nodes that are executing shard snapshot tasks report either success or failure of their snapshot task by submitting a - * {@link org.opensearch.snapshots.UpdateIndexShardSnapshotStatusRequest} to the master node that will update the + * {@link org.opensearch.snapshots.UpdateIndexShardSnapshotStatusRequest} to the cluster-manager node that will update the * snapshot's entry in the cluster state accordingly.
        • *
        * @@ -57,8 +57,8 @@ * the {@code SnapshotShardsService} will check if any local primary shards are to be snapshotted (signaled by the shard's snapshot state * being {@code INIT}). For those local primary shards found in state {@code INIT}) the snapshot process of writing the shard's data files * to the snapshot's {@link org.opensearch.repositories.Repository} is executed. Once the snapshot execution finishes for a shard an - * {@code UpdateIndexShardSnapshotStatusRequest} is sent to the master node signaling either status {@code SUCCESS} or {@code FAILED}. - * The master node will then update a shard's state in the snapshots {@code SnapshotsInProgress.Entry} whenever it receives such a + * {@code UpdateIndexShardSnapshotStatusRequest} is sent to the cluster-manager node signaling either status {@code SUCCESS} or {@code FAILED}. + * The cluster-manager node will then update a shard's state in the snapshots {@code SnapshotsInProgress.Entry} whenever it receives such a * {@code UpdateIndexShardSnapshotStatusRequest}. * *
      1. If as a result of the received status update requests, all shards in the cluster state are in a completed state, i.e are marked as @@ -82,12 +82,12 @@ *
      2. Aborting a snapshot starts by updating the state of the snapshot's {@code SnapshotsInProgress.Entry} to {@code ABORTED}.
      3. * *
      4. The snapshot's state change to {@code ABORTED} in cluster state is then picked up by the {@code SnapshotShardsService} on all nodes. - * Those nodes that have shard snapshot actions for the snapshot assigned to them, will abort them and notify master about the shards + * Those nodes that have shard snapshot actions for the snapshot assigned to them, will abort them and notify cluster-manager about the shards * snapshot status accordingly. If the shard snapshot action completed or was in state {@code FINALIZE} when the abort was registered by - * the {@code SnapshotShardsService}, then the shard's state will be reported to master as {@code SUCCESS}. + * the {@code SnapshotShardsService}, then the shard's state will be reported to cluster-manager as {@code SUCCESS}. * Otherwise, it will be reported as {@code FAILED}.
      5. * - *
      6. Once all the shards are reported to master as either {@code SUCCESS} or {@code FAILED} the {@code SnapshotsService} on the master + *
      7. Once all the shards are reported to cluster-manager as either {@code SUCCESS} or {@code FAILED} the {@code SnapshotsService} on the master * will finish the snapshot process as all shard's states are now completed and hence the snapshot can be completed as explained in point 4 * of the snapshot creation section above.
      8. *
      @@ -109,7 +109,7 @@ * *

      Cloning a Snapshot

      * - *

      Cloning part of a snapshot is a process executed entirely on the master node. On a high level, the process of cloning a snapshot is + *

      Cloning part of a snapshot is a process executed entirely on the cluster-manager node. On a high level, the process of cloning a snapshot is * analogous to that of creating a snapshot from data in the cluster except that the source of data files is the snapshot repository * instead of the data nodes. It begins with cloning all shards and then finalizes the cloned snapshot the same way a normal snapshot would * be finalized. Concretely, it is executed as follows:

      @@ -132,7 +132,7 @@ * failures of the relevant indices. *
    1. Once all shard counts are known and the health of all source indices data has been verified, we populate the * {@code SnapshotsInProgress.Entry#clones} map for the clone operation with the the relevant shard clone tasks.
    2. - *
    3. After the clone tasks have been added to the {@code SnapshotsInProgress.Entry}, master executes them on its snapshot thread-pool + *
    4. After the clone tasks have been added to the {@code SnapshotsInProgress.Entry}, cluster-manager executes them on its snapshot thread-pool * by invoking {@link org.opensearch.repositories.Repository#cloneShardSnapshot} for each shard that is to be cloned. Each completed * shard snapshot triggers a call to the {@link org.opensearch.snapshots.SnapshotsService#SHARD_STATE_EXECUTOR} which updates the * clone's {@code SnapshotsInProgress.Entry} to mark the shard clone operation completed.
    5. @@ -151,7 +151,7 @@ * * If multiple snapshot creation jobs are started at the same time, the data-node operations of multiple snapshots may run in parallel * across different shards. If multiple snapshots want to snapshot a certain shard, then the shard snapshots for that shard will be - * executed one by one. This is enforced by the master node setting the shard's snapshot state to + * executed one by one. This is enforced by the cluster-manager node setting the shard's snapshot state to * {@link org.opensearch.cluster.SnapshotsInProgress.ShardSnapshotStatus#UNASSIGNED_QUEUED} for all but one snapshot. The order of * operations on a single shard is given by the order in which the snapshots were started. * As soon as all shards for a given snapshot have finished, it will be finalized as explained above. Finalization will happen one snapshot diff --git a/server/src/main/java/org/opensearch/transport/ConnectionProfile.java b/server/src/main/java/org/opensearch/transport/ConnectionProfile.java index 8ef42436546f1..61129565b23f3 100644 --- a/server/src/main/java/org/opensearch/transport/ConnectionProfile.java +++ b/server/src/main/java/org/opensearch/transport/ConnectionProfile.java @@ -100,7 +100,7 @@ public static ConnectionProfile buildDefaultConnectionProfile(Settings settings) builder.setCompressionEnabled(TransportSettings.TRANSPORT_COMPRESS.get(settings)); builder.addConnections(connectionsPerNodeBulk, TransportRequestOptions.Type.BULK); builder.addConnections(connectionsPerNodePing, TransportRequestOptions.Type.PING); - // if we are not master eligible we don't need a dedicated channel to publish the state + // if we are not cluster-manager eligible we don't need a dedicated channel to publish the state builder.addConnections(DiscoveryNode.isMasterNode(settings) ? 
connectionsPerNodeState : 0, TransportRequestOptions.Type.STATE); // if we are not a data-node we don't need any dedicated channels for recovery builder.addConnections(DiscoveryNode.isDataNode(settings) ? connectionsPerNodeRecovery : 0, TransportRequestOptions.Type.RECOVERY); diff --git a/server/src/main/java/org/opensearch/transport/TransportRequestDeduplicator.java b/server/src/main/java/org/opensearch/transport/TransportRequestDeduplicator.java index 4d33f071328c1..ba58bb37d8d48 100644 --- a/server/src/main/java/org/opensearch/transport/TransportRequestDeduplicator.java +++ b/server/src/main/java/org/opensearch/transport/TransportRequestDeduplicator.java @@ -68,7 +68,7 @@ public void executeOnce(T request, ActionListener listener, BiConsumer changePredicate ) { onBeforeWaitForNewMasterAndRetry.run(); - super.waitForNewMasterAndRetry(actionName, observer, request, listener, changePredicate); + super.waitForNewClusterManagerAndRetry(actionName, observer, request, listener, changePredicate); onAfterWaitForNewMasterAndRetry.run(); } } diff --git a/server/src/test/java/org/opensearch/cluster/coordination/LeaderCheckerTests.java b/server/src/test/java/org/opensearch/cluster/coordination/LeaderCheckerTests.java index 69dc332e2bd29..b06799312d99a 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/LeaderCheckerTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/LeaderCheckerTests.java @@ -509,7 +509,7 @@ public void testLeaderBehaviour() { CoordinationStateRejectedException cause = (CoordinationStateRejectedException) handler.transportException.getRootCause(); assertThat( cause.getMessage(), - equalTo("rejecting leader check from [" + otherNode + "] sent to a node that is no longer the master") + equalTo("rejecting leader check from [" + otherNode + "] sent to a node that is no longer the cluster-manager") ); } } diff --git a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java 
b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java index 3b309908a1df0..f00361160f2d7 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java @@ -329,17 +329,17 @@ public void testJoinWithHigherTermElectsLeader() { () -> new StatusInfo(HEALTHY, "healthy-info") ); assertFalse(isLocalNodeElectedMaster()); - assertNull(coordinator.getStateForMasterService().nodes().getMasterNodeId()); + assertNull(coordinator.getStateForClusterManagerService().nodes().getMasterNodeId()); long newTerm = initialTerm + randomLongBetween(1, 10); SimpleFuture fut = joinNodeAsync( new JoinRequest(node1, newTerm, Optional.of(new Join(node1, node0, newTerm, initialTerm, initialVersion))) ); assertEquals(Coordinator.Mode.LEADER, coordinator.getMode()); - assertNull(coordinator.getStateForMasterService().nodes().getMasterNodeId()); + assertNull(coordinator.getStateForClusterManagerService().nodes().getMasterNodeId()); deterministicTaskQueue.runAllRunnableTasks(); assertTrue(fut.isDone()); assertTrue(isLocalNodeElectedMaster()); - assertTrue(coordinator.getStateForMasterService().nodes().isLocalNodeElectedMaster()); + assertTrue(coordinator.getStateForClusterManagerService().nodes().isLocalNodeElectedMaster()); } public void testJoinWithHigherTermButBetterStateGetsRejected() { diff --git a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodesTests.java b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodesTests.java index aff9e1cfe7a8c..bc36a57fed125 100644 --- a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodesTests.java +++ b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodesTests.java @@ -277,9 +277,9 @@ public void testDeltas() { DiscoveryNodes.Delta delta = discoNodesB.delta(discoNodesA); if (masterA == null) { - assertThat(delta.previousMasterNode(), nullValue()); + 
assertThat(delta.previousClusterManagerNode(), nullValue()); } else { - assertThat(delta.previousMasterNode().getId(), equalTo(masterAId)); + assertThat(delta.previousClusterManagerNode().getId(), equalTo(masterAId)); } if (masterB == null) { assertThat(delta.newMasterNode(), nullValue()); diff --git a/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java b/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java index b3c24ef55c3ba..04b4044864dbd 100644 --- a/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java @@ -298,12 +298,12 @@ public void testLocalNodeMasterListenerCallbacks() { AtomicBoolean isMaster = new AtomicBoolean(); timedClusterApplierService.addLocalNodeMasterListener(new LocalNodeMasterListener() { @Override - public void onMaster() { + public void onClusterManager() { isMaster.set(true); } @Override - public void offMaster() { + public void offClusterManager() { isMaster.set(false); } }); diff --git a/server/src/test/java/org/opensearch/common/settings/ConsistentSettingsServiceTests.java b/server/src/test/java/org/opensearch/common/settings/ConsistentSettingsServiceTests.java index e7873723bec22..8a872bc50aeb0 100644 --- a/server/src/test/java/org/opensearch/common/settings/ConsistentSettingsServiceTests.java +++ b/server/src/test/java/org/opensearch/common/settings/ConsistentSettingsServiceTests.java @@ -75,7 +75,7 @@ public void testSingleStringSetting() throws Exception { // hashes not yet published assertThat(new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting)).areAllConsistent(), is(false)); // publish - new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting)).newHashPublisher().onMaster(); + new ConsistentSettingsService(settings, clusterService, 
Arrays.asList(stringSetting)).newHashPublisher().onClusterManager(); ConsistentSettingsService consistentService = new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting)); assertThat(consistentService.areAllConsistent(), is(true)); // change value @@ -83,7 +83,7 @@ public void testSingleStringSetting() throws Exception { assertThat(consistentService.areAllConsistent(), is(false)); assertThat(new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting)).areAllConsistent(), is(false)); // publish change - new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting)).newHashPublisher().onMaster(); + new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting)).newHashPublisher().onClusterManager(); assertThat(consistentService.areAllConsistent(), is(true)); assertThat(new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting)).areAllConsistent(), is(true)); } @@ -108,7 +108,7 @@ public void testSingleAffixSetting() throws Exception { is(false) ); // publish - new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).newHashPublisher().onMaster(); + new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).newHashPublisher().onClusterManager(); ConsistentSettingsService consistentService = new ConsistentSettingsService( settings, clusterService, @@ -123,7 +123,7 @@ public void testSingleAffixSetting() throws Exception { is(false) ); // publish change - new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).newHashPublisher().onMaster(); + new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).newHashPublisher().onClusterManager(); assertThat(consistentService.areAllConsistent(), is(true)); assertThat(new ConsistentSettingsService(settings, clusterService, 
Arrays.asList(affixStringSetting)).areAllConsistent(), is(true)); // add value @@ -136,7 +136,7 @@ public void testSingleAffixSetting() throws Exception { is(false) ); // publish - new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).newHashPublisher().onMaster(); + new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).newHashPublisher().onClusterManager(); assertThat(new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).areAllConsistent(), is(true)); // remove value secureSettings = new MockSecureSettings(); @@ -173,7 +173,7 @@ public void testStringAndAffixSettings() throws Exception { is(false) ); // publish only the simple string setting - new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting)).newHashPublisher().onMaster(); + new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting)).newHashPublisher().onClusterManager(); assertThat(new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting)).areAllConsistent(), is(true)); assertThat( new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).areAllConsistent(), @@ -184,7 +184,7 @@ public void testStringAndAffixSettings() throws Exception { is(false) ); // publish only the affix string setting - new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).newHashPublisher().onMaster(); + new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).newHashPublisher().onClusterManager(); assertThat(new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting)).areAllConsistent(), is(false)); assertThat(new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).areAllConsistent(), is(true)); assertThat( @@ -193,7 +193,7 @@ public void testStringAndAffixSettings() throws 
Exception { ); // publish both settings new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting, affixStringSetting)).newHashPublisher() - .onMaster(); + .onClusterManager(); assertThat(new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting)).areAllConsistent(), is(true)); assertThat(new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).areAllConsistent(), is(true)); assertThat( diff --git a/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java b/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java index 6558f9d06c2f7..d6cafb3421f7d 100644 --- a/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java +++ b/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java @@ -173,7 +173,7 @@ class TestPeerFinder extends PeerFinder { } @Override - protected void onActiveMasterFound(DiscoveryNode masterNode, long term) { + protected void onActiveClusterManagerFound(DiscoveryNode masterNode, long term) { assert holdsLock() == false : "PeerFinder lock held in error"; assertThat(discoveredMasterNode, nullValue()); assertFalse(discoveredMasterTerm.isPresent()); diff --git a/server/src/test/java/org/opensearch/env/NodeRepurposeCommandTests.java b/server/src/test/java/org/opensearch/env/NodeRepurposeCommandTests.java index 9897ad1a3650b..7a346d4cf9fc5 100644 --- a/server/src/test/java/org/opensearch/env/NodeRepurposeCommandTests.java +++ b/server/src/test/java/org/opensearch/env/NodeRepurposeCommandTests.java @@ -160,7 +160,7 @@ public void testCleanupAll() throws Exception { boolean hasClusterState = randomBoolean(); createIndexDataFiles(dataMasterSettings, shardCount, hasClusterState); - String messageText = NodeRepurposeCommand.noMasterMessage(1, environment.dataFiles().length * shardCount, 0); + String messageText = NodeRepurposeCommand.noClusterManagerMessage(1, environment.dataFiles().length * shardCount, 0); Matcher outputMatcher = 
allOf( containsString(messageText), diff --git a/test/framework/src/main/java/org/opensearch/cluster/coordination/CoordinationStateTestCluster.java b/test/framework/src/main/java/org/opensearch/cluster/coordination/CoordinationStateTestCluster.java index 291eee501c4df..2f1e18058d544 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/coordination/CoordinationStateTestCluster.java +++ b/test/framework/src/main/java/org/opensearch/cluster/coordination/CoordinationStateTestCluster.java @@ -149,8 +149,8 @@ static class ClusterNode { void reboot() { if (localNode.isMasterNode() == false && rarely()) { - // master-ineligible nodes can't be trusted to persist the cluster state properly, but will not lose the fact that they - // were bootstrapped + // cluster-manager-ineligible nodes can't be trusted to persist the cluster state properly, + // but will not lose the fact that they were bootstrapped final CoordinationMetadata.VotingConfiguration votingConfiguration = persistedState.getLastAcceptedState() .getLastAcceptedConfiguration() .isEmpty() From dd7add25681b9fa206a92dceee97c69ca21948ef Mon Sep 17 00:00:00 2001 From: Tianli Feng Date: Wed, 27 Apr 2022 06:09:08 -0700 Subject: [PATCH 129/653] Replace internal usages of 'master' term in 'client' directory (#3088) * Replace internal usages of 'master' term in 'client' directory Signed-off-by: Tianli Feng * Add a uni test for NodeSelector to test deprecated master role Signed-off-by: Tianli Feng --- .../opensearch/client/RequestConverters.java | 4 +- .../org/opensearch/client/TimedRequest.java | 14 ++--- .../indices/GetComponentTemplatesRequest.java | 16 +++--- .../GetComposableIndexTemplateRequest.java | 16 +++--- .../client/indices/GetIndexRequest.java | 4 +- .../indices/GetIndexTemplatesRequest.java | 16 +++--- .../client/ClusterRequestConvertersTests.java | 10 ++-- .../opensearch/client/TimedRequestTests.java | 6 +-- .../indices/CloseIndexRequestTests.java | 6 +-- 
.../main/java/org/opensearch/client/Node.java | 2 +- .../org/opensearch/client/NodeSelector.java | 6 +-- .../opensearch/client/NodeSelectorTests.java | 53 ++++++++++++++----- .../client/RestClientMultipleHostsTests.java | 2 +- .../RestClientDocumentation.java | 2 +- .../section/ClientYamlTestSuiteTests.java | 6 +-- 15 files changed, 96 insertions(+), 67 deletions(-) diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java index 7a6227a7c2ec2..afecdc3eea1a3 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java @@ -895,8 +895,8 @@ Params withFields(String[] fields) { * @deprecated As of 2.0, because supporting inclusive language, replaced by {@link #withClusterManagerTimeout(TimeValue)} */ @Deprecated - Params withMasterTimeout(TimeValue masterTimeout) { - return putParam("master_timeout", masterTimeout); + Params withMasterTimeout(TimeValue clusterManagerTimeout) { + return putParam("master_timeout", clusterManagerTimeout); } Params withClusterManagerTimeout(TimeValue clusterManagerTimeout) { diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/TimedRequest.java b/client/rest-high-level/src/main/java/org/opensearch/client/TimedRequest.java index 3310425df4662..b5e7209a5212b 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/TimedRequest.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/TimedRequest.java @@ -47,7 +47,7 @@ public abstract class TimedRequest implements Validatable { public static final TimeValue DEFAULT_MASTER_NODE_TIMEOUT = TimeValue.timeValueSeconds(30); private TimeValue timeout = DEFAULT_ACK_TIMEOUT; - private TimeValue masterTimeout = DEFAULT_MASTER_NODE_TIMEOUT; + private TimeValue clusterManagerTimeout = DEFAULT_MASTER_NODE_TIMEOUT; /** * 
Sets the timeout to wait for the all the nodes to acknowledge @@ -58,11 +58,11 @@ public void setTimeout(TimeValue timeout) { } /** - * Sets the timeout to connect to the master node - * @param masterTimeout timeout as a {@link TimeValue} + * Sets the timeout to connect to the cluster-manager node + * @param clusterManagerTimeout timeout as a {@link TimeValue} */ - public void setMasterTimeout(TimeValue masterTimeout) { - this.masterTimeout = masterTimeout; + public void setMasterTimeout(TimeValue clusterManagerTimeout) { + this.clusterManagerTimeout = clusterManagerTimeout; } /** @@ -73,9 +73,9 @@ public TimeValue timeout() { } /** - * Returns the timeout for the request to be completed on the master node + * Returns the timeout for the request to be completed on the cluster-manager node */ public TimeValue masterNodeTimeout() { - return masterTimeout; + return clusterManagerTimeout; } } diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetComponentTemplatesRequest.java b/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetComponentTemplatesRequest.java index f70682fee3763..ba9702fd6f2f2 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetComponentTemplatesRequest.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetComponentTemplatesRequest.java @@ -44,7 +44,7 @@ public class GetComponentTemplatesRequest implements Validatable { private final String name; - private TimeValue masterNodeTimeout = TimedRequest.DEFAULT_MASTER_NODE_TIMEOUT; + private TimeValue clusterManagerNodeTimeout = TimedRequest.DEFAULT_MASTER_NODE_TIMEOUT; private boolean local = false; /** @@ -65,23 +65,23 @@ public String name() { } /** - * @return the timeout for waiting for the master node to respond + * @return the timeout for waiting for the cluster-manager node to respond */ public TimeValue getMasterNodeTimeout() { - return masterNodeTimeout; + return clusterManagerNodeTimeout; } - 
public void setMasterNodeTimeout(@Nullable TimeValue masterNodeTimeout) { - this.masterNodeTimeout = masterNodeTimeout; + public void setMasterNodeTimeout(@Nullable TimeValue clusterManagerNodeTimeout) { + this.clusterManagerNodeTimeout = clusterManagerNodeTimeout; } - public void setMasterNodeTimeout(String masterNodeTimeout) { - final TimeValue timeValue = TimeValue.parseTimeValue(masterNodeTimeout, getClass().getSimpleName() + ".masterNodeTimeout"); + public void setMasterNodeTimeout(String clusterManagerNodeTimeout) { + final TimeValue timeValue = TimeValue.parseTimeValue(clusterManagerNodeTimeout, getClass().getSimpleName() + ".masterNodeTimeout"); setMasterNodeTimeout(timeValue); } /** - * @return true if this request is to read from the local cluster state, rather than the master node - false otherwise + * @return true if this request is to read from the local cluster state, rather than the cluster-manager node - false otherwise */ public boolean isLocal() { return local; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetComposableIndexTemplateRequest.java b/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetComposableIndexTemplateRequest.java index 572a5eeec2d23..cc8e820d5929f 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetComposableIndexTemplateRequest.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetComposableIndexTemplateRequest.java @@ -44,7 +44,7 @@ public class GetComposableIndexTemplateRequest implements Validatable { private final String name; - private TimeValue masterNodeTimeout = TimedRequest.DEFAULT_MASTER_NODE_TIMEOUT; + private TimeValue clusterManagerNodeTimeout = TimedRequest.DEFAULT_MASTER_NODE_TIMEOUT; private boolean local = false; /** @@ -65,23 +65,23 @@ public String name() { } /** - * @return the timeout for waiting for the master node to respond + * @return the timeout for waiting for the cluster-manager node to 
respond */ public TimeValue getMasterNodeTimeout() { - return masterNodeTimeout; + return clusterManagerNodeTimeout; } - public void setMasterNodeTimeout(@Nullable TimeValue masterNodeTimeout) { - this.masterNodeTimeout = masterNodeTimeout; + public void setMasterNodeTimeout(@Nullable TimeValue clusterManagerNodeTimeout) { + this.clusterManagerNodeTimeout = clusterManagerNodeTimeout; } - public void setMasterNodeTimeout(String masterNodeTimeout) { - final TimeValue timeValue = TimeValue.parseTimeValue(masterNodeTimeout, getClass().getSimpleName() + ".masterNodeTimeout"); + public void setMasterNodeTimeout(String clusterManagerNodeTimeout) { + final TimeValue timeValue = TimeValue.parseTimeValue(clusterManagerNodeTimeout, getClass().getSimpleName() + ".masterNodeTimeout"); setMasterNodeTimeout(timeValue); } /** - * @return true if this request is to read from the local cluster state, rather than the master node - false otherwise + * @return true if this request is to read from the local cluster state, rather than the cluster-manager node - false otherwise */ public boolean isLocal() { return local; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetIndexRequest.java b/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetIndexRequest.java index 5e5ab6aeae305..c5ef5cb9c1795 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetIndexRequest.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetIndexRequest.java @@ -82,9 +82,9 @@ public final GetIndexRequest local(boolean local) { } /** - * Return local information, do not retrieve the state from master node (default: false). + * Return local information, do not retrieve the state from cluster-manager node (default: false). * @return true if local information is to be returned; - * false if information is to be retrieved from master node (default). 
+ * false if information is to be retrieved from cluster-manager node (default). */ public final boolean local() { return local; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetIndexTemplatesRequest.java b/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetIndexTemplatesRequest.java index 071bcc7a75a71..f46af130cc9b0 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetIndexTemplatesRequest.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetIndexTemplatesRequest.java @@ -51,7 +51,7 @@ public class GetIndexTemplatesRequest implements Validatable { private final List names; - private TimeValue masterNodeTimeout = TimedRequest.DEFAULT_MASTER_NODE_TIMEOUT; + private TimeValue clusterManagerNodeTimeout = TimedRequest.DEFAULT_MASTER_NODE_TIMEOUT; private boolean local = false; /** @@ -84,23 +84,23 @@ public List names() { } /** - * @return the timeout for waiting for the master node to respond + * @return the timeout for waiting for the cluster-manager node to respond */ public TimeValue getMasterNodeTimeout() { - return masterNodeTimeout; + return clusterManagerNodeTimeout; } - public void setMasterNodeTimeout(@Nullable TimeValue masterNodeTimeout) { - this.masterNodeTimeout = masterNodeTimeout; + public void setMasterNodeTimeout(@Nullable TimeValue clusterManagerNodeTimeout) { + this.clusterManagerNodeTimeout = clusterManagerNodeTimeout; } - public void setMasterNodeTimeout(String masterNodeTimeout) { - final TimeValue timeValue = TimeValue.parseTimeValue(masterNodeTimeout, getClass().getSimpleName() + ".masterNodeTimeout"); + public void setMasterNodeTimeout(String clusterManagerNodeTimeout) { + final TimeValue timeValue = TimeValue.parseTimeValue(clusterManagerNodeTimeout, getClass().getSimpleName() + ".masterNodeTimeout"); setMasterNodeTimeout(timeValue); } /** - * @return true if this request is to read from the local cluster state, rather than the 
master node - false otherwise + * @return true if this request is to read from the local cluster state, rather than the cluster-manager node - false otherwise */ public boolean isLocal() { return local; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/ClusterRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/ClusterRequestConvertersTests.java index ed0a973081b62..ec6847630dc92 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/ClusterRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/ClusterRequestConvertersTests.java @@ -89,9 +89,9 @@ public void testClusterHealth() { ClusterHealthRequest healthRequest = new ClusterHealthRequest(); Map expectedParams = new HashMap<>(); RequestConvertersTests.setRandomLocal(healthRequest::local, expectedParams); - String timeoutType = OpenSearchTestCase.randomFrom("timeout", "masterTimeout", "both", "none"); + String timeoutType = OpenSearchTestCase.randomFrom("timeout", "clusterManagerTimeout", "both", "none"); String timeout = OpenSearchTestCase.randomTimeValue(); - String masterTimeout = OpenSearchTestCase.randomTimeValue(); + String clusterManagerTimeout = OpenSearchTestCase.randomTimeValue(); switch (timeoutType) { case "timeout": healthRequest.timeout(timeout); @@ -99,10 +99,10 @@ public void testClusterHealth() { // If Cluster Manager Timeout wasn't set it uses the same value as Timeout expectedParams.put("cluster_manager_timeout", timeout); break; - case "masterTimeout": + case "clusterManagerTimeout": expectedParams.put("timeout", "30s"); - healthRequest.masterNodeTimeout(masterTimeout); - expectedParams.put("cluster_manager_timeout", masterTimeout); + healthRequest.masterNodeTimeout(clusterManagerTimeout); + expectedParams.put("cluster_manager_timeout", clusterManagerTimeout); break; case "both": healthRequest.timeout(timeout); diff --git 
a/client/rest-high-level/src/test/java/org/opensearch/client/TimedRequestTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/TimedRequestTests.java index 3026472bb8e53..659238debccad 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/TimedRequestTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/TimedRequestTests.java @@ -48,10 +48,10 @@ public void testNonDefaults() { TimedRequest timedRequest = new TimedRequest() { }; TimeValue timeout = TimeValue.timeValueSeconds(randomIntBetween(0, 1000)); - TimeValue masterTimeout = TimeValue.timeValueSeconds(randomIntBetween(0, 1000)); + TimeValue clusterManagerTimeout = TimeValue.timeValueSeconds(randomIntBetween(0, 1000)); timedRequest.setTimeout(timeout); - timedRequest.setMasterTimeout(masterTimeout); + timedRequest.setMasterTimeout(clusterManagerTimeout); assertEquals(timedRequest.timeout(), timeout); - assertEquals(timedRequest.masterNodeTimeout(), masterTimeout); + assertEquals(timedRequest.masterNodeTimeout(), clusterManagerTimeout); } } diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/indices/CloseIndexRequestTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/indices/CloseIndexRequestTests.java index c96e296891fb9..5bfb0abab9f37 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/indices/CloseIndexRequestTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/indices/CloseIndexRequestTests.java @@ -80,10 +80,10 @@ public void testTimeout() { final TimeValue timeout = TimeValue.timeValueSeconds(randomIntBetween(0, 1000)); request.setTimeout(timeout); - final TimeValue masterTimeout = TimeValue.timeValueSeconds(randomIntBetween(0, 1000)); - request.setMasterTimeout(masterTimeout); + final TimeValue clusterManagerTimeout = TimeValue.timeValueSeconds(randomIntBetween(0, 1000)); + request.setMasterTimeout(clusterManagerTimeout); assertEquals(request.timeout(), timeout); - 
assertEquals(request.masterNodeTimeout(), masterTimeout); + assertEquals(request.masterNodeTimeout(), clusterManagerTimeout); } } diff --git a/client/rest/src/main/java/org/opensearch/client/Node.java b/client/rest/src/main/java/org/opensearch/client/Node.java index c982ae8eb931f..952823cf29d6c 100644 --- a/client/rest/src/main/java/org/opensearch/client/Node.java +++ b/client/rest/src/main/java/org/opensearch/client/Node.java @@ -210,7 +210,7 @@ public Roles(final Set roles) { } /** - * Returns whether or not the node could be elected master. + * Returns whether or not the node could be elected cluster-manager. */ public boolean isMasterEligible() { return roles.contains("master") || roles.contains("cluster_manager"); diff --git a/client/rest/src/main/java/org/opensearch/client/NodeSelector.java b/client/rest/src/main/java/org/opensearch/client/NodeSelector.java index 09d5a2c1fe576..1d1c09f33fef7 100644 --- a/client/rest/src/main/java/org/opensearch/client/NodeSelector.java +++ b/client/rest/src/main/java/org/opensearch/client/NodeSelector.java @@ -36,7 +36,7 @@ /** * Selects nodes that can receive requests. Used to keep requests away - * from master nodes or to send them to nodes with a particular attribute. + * from cluster-manager nodes or to send them to nodes with a particular attribute. * Use with {@link RestClientBuilder#setNodeSelector(NodeSelector)}. */ public interface NodeSelector { @@ -80,10 +80,10 @@ public String toString() { /** * Selector that matches any node that has metadata and doesn't - * have the {@code master} role OR it has the data {@code data} + * have the {@code cluster_manager} role OR it has the data {@code data} * role. 
*/ - NodeSelector SKIP_DEDICATED_MASTERS = new NodeSelector() { + NodeSelector SKIP_DEDICATED_CLUSTER_MANAGERS = new NodeSelector() { @Override public void select(Iterable nodes) { for (Iterator itr = nodes.iterator(); itr.hasNext();) { diff --git a/client/rest/src/test/java/org/opensearch/client/NodeSelectorTests.java b/client/rest/src/test/java/org/opensearch/client/NodeSelectorTests.java index f7cb0733bb8c5..65a831e59bfb0 100644 --- a/client/rest/src/test/java/org/opensearch/client/NodeSelectorTests.java +++ b/client/rest/src/test/java/org/opensearch/client/NodeSelectorTests.java @@ -55,33 +55,33 @@ public void testAny() { assertEquals(expected, nodes); } - public void testNotMasterOnly() { - Node masterOnly = dummyNode(true, false, false); + public void testNotClusterManagerOnly() { + Node clusterManagerOnly = dummyNode(true, false, false); Node all = dummyNode(true, true, true); - Node masterAndData = dummyNode(true, true, false); - Node masterAndIngest = dummyNode(true, false, true); + Node clusterManagerAndData = dummyNode(true, true, false); + Node clusterManagerAndIngest = dummyNode(true, false, true); Node coordinatingOnly = dummyNode(false, false, false); Node ingestOnly = dummyNode(false, false, true); Node data = dummyNode(false, true, randomBoolean()); List nodes = new ArrayList<>(); - nodes.add(masterOnly); + nodes.add(clusterManagerOnly); nodes.add(all); - nodes.add(masterAndData); - nodes.add(masterAndIngest); + nodes.add(clusterManagerAndData); + nodes.add(clusterManagerAndIngest); nodes.add(coordinatingOnly); nodes.add(ingestOnly); nodes.add(data); Collections.shuffle(nodes, getRandom()); List expected = new ArrayList<>(nodes); - expected.remove(masterOnly); - NodeSelector.SKIP_DEDICATED_MASTERS.select(nodes); + expected.remove(clusterManagerOnly); + NodeSelector.SKIP_DEDICATED_CLUSTER_MANAGERS.select(nodes); assertEquals(expected, nodes); } - private static Node dummyNode(boolean master, boolean data, boolean ingest) { + private static Node 
dummyNode(boolean clusterManager, boolean data, boolean ingest) { final Set roles = new TreeSet<>(); - if (master) { - roles.add("master"); + if (clusterManager) { + roles.add("cluster_manager"); } if (data) { roles.add("data"); @@ -98,4 +98,33 @@ private static Node dummyNode(boolean master, boolean data, boolean ingest) { Collections.>emptyMap() ); } + + /* + * Validate SKIP_DEDICATED_CLUSTER_MANAGERS can filter both the deprecated "master" role and the new "cluster_manager" role. + * The test is a modified copy of the above testNotClusterManagerOnly(). + */ + public void testDeprecatedNotMasterOnly() { + Node clusterManagerOnly = dummyNode(true, false, false); + Node all = dummyNode(true, true, true); + Node data = dummyNode(false, true, randomBoolean()); + Node deprecatedMasterOnly = new Node( + new HttpHost("dummy"), + Collections.emptySet(), + randomAsciiAlphanumOfLength(5), + randomAsciiAlphanumOfLength(5), + new Roles(Collections.singleton("master")), + Collections.emptyMap() + ); + List nodes = new ArrayList<>(); + nodes.add(clusterManagerOnly); + nodes.add(all); + nodes.add(data); + nodes.add(deprecatedMasterOnly); + Collections.shuffle(nodes, getRandom()); + List expected = new ArrayList<>(nodes); + expected.remove(clusterManagerOnly); + expected.remove(deprecatedMasterOnly); + NodeSelector.SKIP_DEDICATED_CLUSTER_MANAGERS.select(nodes); + assertEquals(expected, nodes); + } } diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientMultipleHostsTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientMultipleHostsTests.java index 0b7d2881ccb54..d88d4f4afd9b1 100644 --- a/client/rest/src/test/java/org/opensearch/client/RestClientMultipleHostsTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientMultipleHostsTests.java @@ -297,7 +297,7 @@ public void testNodeSelector() throws Exception { } public void testSetNodes() throws Exception { - RestClient restClient = 
createRestClient(NodeSelector.SKIP_DEDICATED_MASTERS); + RestClient restClient = createRestClient(NodeSelector.SKIP_DEDICATED_CLUSTER_MANAGERS); List newNodes = new ArrayList<>(nodes.size()); for (int i = 0; i < nodes.size(); i++) { Node.Roles roles = i == 0 diff --git a/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java b/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java index 82c4fc2896213..066419844f048 100644 --- a/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java +++ b/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java @@ -133,7 +133,7 @@ public void usage() throws IOException, InterruptedException { //tag::rest-client-init-node-selector RestClientBuilder builder = RestClient.builder( new HttpHost("localhost", 9200, "http")); - builder.setNodeSelector(NodeSelector.SKIP_DEDICATED_MASTERS); // <1> + builder.setNodeSelector(NodeSelector.SKIP_DEDICATED_CLUSTER_MANAGERS); // <1> //end::rest-client-init-node-selector } { diff --git a/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java b/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java index fc4564ce55df7..f995e18d0f2df 100644 --- a/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java +++ b/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java @@ -486,7 +486,7 @@ public void testAddingDoWithNodeSelectorWithoutSkipNodeSelector() { int lineNumber = between(1, 10000); DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0)); ApiCallSection apiCall = new ApiCallSection("test"); - apiCall.setNodeSelector(NodeSelector.SKIP_DEDICATED_MASTERS); + apiCall.setNodeSelector(NodeSelector.SKIP_DEDICATED_CLUSTER_MANAGERS); doSection.setApiCallSection(apiCall); ClientYamlTestSuite 
testSuite = createTestSuite(SkipSection.EMPTY, doSection); Exception e = expectThrows(IllegalArgumentException.class, testSuite::validate); @@ -553,7 +553,7 @@ public void testMultipleValidationErrors() { { DoSection doSection = new DoSection(new XContentLocation(thirdLineNumber, 0)); ApiCallSection apiCall = new ApiCallSection("test"); - apiCall.setNodeSelector(NodeSelector.SKIP_DEDICATED_MASTERS); + apiCall.setNodeSelector(NodeSelector.SKIP_DEDICATED_CLUSTER_MANAGERS); doSection.setApiCallSection(apiCall); doSections.add(doSection); } @@ -593,7 +593,7 @@ public void testAddingDoWithNodeSelectorWithSkip() { SkipSection skipSection = new SkipSection(null, singletonList("node_selector"), null); DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0)); ApiCallSection apiCall = new ApiCallSection("test"); - apiCall.setNodeSelector(NodeSelector.SKIP_DEDICATED_MASTERS); + apiCall.setNodeSelector(NodeSelector.SKIP_DEDICATED_CLUSTER_MANAGERS); doSection.setApiCallSection(apiCall); createTestSuite(skipSection, doSection).validate(); } From da8077de4440add5adb762482efda1f13fccce17 Mon Sep 17 00:00:00 2001 From: Prudhvi Godithi Date: Wed, 27 Apr 2022 09:22:14 -0700 Subject: [PATCH 130/653] Custom plugin backport to main branch (#3095) Signed-off-by: pgodithi --- .../org/opensearch/gradle/PublishPlugin.java | 23 ++-- .../opensearch/gradle/pluginzip/Publish.java | 74 +++++++++++++ .../opensearch.pluginzip.properties | 1 + .../gradle/pluginzip/PublishTests.java | 104 ++++++++++++++++++ 4 files changed, 192 insertions(+), 10 deletions(-) create mode 100644 buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java create mode 100644 buildSrc/src/main/resources/META-INF/gradle-plugins/opensearch.pluginzip.properties create mode 100644 buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java diff --git a/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java index 
d164b54c7506c..2a0521b17d55e 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java @@ -125,18 +125,21 @@ public String call() throws Exception { // Add git origin info to generated POM files publication.getPom().withXml(PublishPlugin::addScmInfo); - // have to defer this until archivesBaseName is set - project.afterEvaluate(p -> publication.setArtifactId(getArchivesBaseName(project))); + if (!publication.getName().toLowerCase().contains("zip")) { - // publish sources and javadoc for Java projects. - if (project.getPluginManager().hasPlugin("opensearch.java")) { - publication.artifact(project.getTasks().getByName("sourcesJar")); - publication.artifact(project.getTasks().getByName("javadocJar")); - } + // have to defer this until archivesBaseName is set + project.afterEvaluate(p -> publication.setArtifactId(getArchivesBaseName(project))); + + // publish sources and javadoc for Java projects. + if (project.getPluginManager().hasPlugin("opensearch.java")) { + publication.artifact(project.getTasks().getByName("sourcesJar")); + publication.artifact(project.getTasks().getByName("javadocJar")); + } - generatePomTask.configure( - t -> t.dependsOn(String.format("generatePomFileFor%sPublication", Util.capitalize(publication.getName()))) - ); + generatePomTask.configure( + t -> t.dependsOn(String.format("generatePomFileFor%sPublication", Util.capitalize(publication.getName()))) + ); + } }); } diff --git a/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java b/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java new file mode 100644 index 0000000000000..e8b4ecec7a56d --- /dev/null +++ b/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java @@ -0,0 +1,74 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open 
source license. + */ +package org.opensearch.gradle.pluginzip; + +import java.util.*; +import org.gradle.api.Plugin; +import org.gradle.api.Project; +import org.gradle.api.publish.PublishingExtension; +import org.gradle.api.publish.maven.MavenPublication; +import org.gradle.api.publish.maven.plugins.MavenPublishPlugin; +import java.nio.file.Path; + +public class Publish implements Plugin { + private Project project; + + public final static String EXTENSION_NAME = "zipmavensettings"; + public final static String PUBLICATION_NAME = "pluginZip"; + public final static String STAGING_REPO = "zipStaging"; + public final static String PLUGIN_ZIP_PUBLISH_POM_TASK = "generatePomFileForPluginZipPublication"; + public final static String LOCALMAVEN = "publishToMavenLocal"; + public final static String LOCAL_STAGING_REPO_PATH = "/build/local-staging-repo"; + public String zipDistributionLocation = "/build/distributions/"; + + public static void configMaven(Project project) { + final Path buildDirectory = project.getRootDir().toPath(); + project.getPluginManager().apply(MavenPublishPlugin.class); + project.getExtensions().configure(PublishingExtension.class, publishing -> { + publishing.repositories(repositories -> { + repositories.maven(maven -> { + maven.setName(STAGING_REPO); + maven.setUrl(buildDirectory.toString() + LOCAL_STAGING_REPO_PATH); + }); + }); + publishing.publications(publications -> { + publications.create(PUBLICATION_NAME, MavenPublication.class, mavenZip -> { + String zipGroup = "org.opensearch.plugin"; + String zipArtifact = project.getName(); + String zipVersion = getProperty("version", project); + mavenZip.artifact(project.getTasks().named("bundlePlugin")); + mavenZip.setGroupId(zipGroup); + mavenZip.setArtifactId(zipArtifact); + mavenZip.setVersion(zipVersion); + }); + }); + }); + } + + static String getProperty(String name, Project project) { + if (project.hasProperty(name)) { + Object property = project.property(name); + if (property != null) { + return 
property.toString(); + } + } + return null; + } + + @Override + public void apply(Project project) { + this.project = project; + project.afterEvaluate(evaluatedProject -> { configMaven(project); }); + project.getGradle().getTaskGraph().whenReady(graph -> { + if (graph.hasTask(LOCALMAVEN)) { + project.getTasks().getByName(PLUGIN_ZIP_PUBLISH_POM_TASK).setEnabled(false); + } + + }); + } +} diff --git a/buildSrc/src/main/resources/META-INF/gradle-plugins/opensearch.pluginzip.properties b/buildSrc/src/main/resources/META-INF/gradle-plugins/opensearch.pluginzip.properties new file mode 100644 index 0000000000000..600218ff76835 --- /dev/null +++ b/buildSrc/src/main/resources/META-INF/gradle-plugins/opensearch.pluginzip.properties @@ -0,0 +1 @@ +implementation-class=org.opensearch.gradle.pluginzip.Publish diff --git a/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java b/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java new file mode 100644 index 0000000000000..ae94ace55e637 --- /dev/null +++ b/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java @@ -0,0 +1,104 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.gradle.pluginzip; + +import org.gradle.testkit.runner.BuildResult; +import org.gradle.testkit.runner.GradleRunner; +import org.gradle.testfixtures.ProjectBuilder; +import org.gradle.api.Project; +import org.opensearch.gradle.test.GradleUnitTestCase; +import org.junit.Test; +import java.io.IOException; +import org.gradle.api.publish.maven.tasks.PublishToMavenRepository; +import java.io.File; +import org.gradle.testkit.runner.BuildResult; +import java.io.FileWriter; +import java.io.Writer; +import static org.gradle.testkit.runner.TaskOutcome.SUCCESS; +import static org.junit.Assert.assertEquals; +import java.nio.file.Files; +import org.apache.maven.model.Model; +import org.apache.maven.model.io.xpp3.MavenXpp3Reader; +import org.codehaus.plexus.util.xml.pull.XmlPullParserException; +import java.io.FileReader; +import org.gradle.api.tasks.bundling.Zip; + +public class PublishTests extends GradleUnitTestCase { + + @Test + public void testZipPublish() throws IOException, XmlPullParserException { + Project project = ProjectBuilder.builder().build(); + String zipPublishTask = "publishPluginZipPublicationToZipStagingRepository"; + // Apply the opensearch.pluginzip plugin + project.getPluginManager().apply("opensearch.pluginzip"); + // Check if the plugin has been applied to the project + assertTrue(project.getPluginManager().hasPlugin("opensearch.pluginzip")); + // Check if the project has the task from class PublishToMavenRepository after plugin apply + assertNotNull(project.getTasks().withType(PublishToMavenRepository.class)); + // Create a mock bundlePlugin task + Zip task = project.getTasks().create("bundlePlugin", Zip.class); + Publish.configMaven(project); + // Check if the main task publishPluginZipPublicationToZipStagingRepository exists after plugin apply + assertTrue(project.getTasks().getNames().contains(zipPublishTask)); + assertNotNull("Task to generate: ", project.getTasks().getByName(zipPublishTask)); + // Run Gradle functional 
tests, but calling a build.gradle file, that resembles the plugin publish behavior + File projectDir = new File("build/functionalTest"); + // Create a sample plugin zip file + File sampleZip = new File("build/functionalTest/sample-plugin.zip"); + Files.createDirectories(projectDir.toPath()); + Files.createFile(sampleZip.toPath()); + writeString(new File(projectDir, "settings.gradle"), ""); + // Generate the build.gradle file + String buildFileContent = "apply plugin: 'maven-publish' \n" + + "publishing {\n" + + " repositories {\n" + + " maven {\n" + + " url = 'local-staging-repo/'\n" + + " name = 'zipStaging'\n" + + " }\n" + + " }\n" + + " publications {\n" + + " pluginZip(MavenPublication) {\n" + + " groupId = 'org.opensearch.plugin' \n" + + " artifactId = 'sample-plugin' \n" + + " version = '2.0.0.0' \n" + + " artifact('sample-plugin.zip') \n" + + " }\n" + + " }\n" + + "}"; + writeString(new File(projectDir, "build.gradle"), buildFileContent); + // Execute the task publishPluginZipPublicationToZipStagingRepository + GradleRunner runner = GradleRunner.create(); + runner.forwardOutput(); + runner.withPluginClasspath(); + runner.withArguments(zipPublishTask); + runner.withProjectDir(projectDir); + BuildResult result = runner.build(); + // Check if task publishMavenzipPublicationToZipstagingRepository has ran well + assertEquals(SUCCESS, result.task(":" + zipPublishTask).getOutcome()); + // check if the zip has been published to local staging repo + assertTrue( + new File("build/functionalTest/local-staging-repo/org/opensearch/plugin/sample-plugin/2.0.0.0/sample-plugin-2.0.0.0.zip") + .exists() + ); + // Parse the maven file and validate the groupID to org.opensearch.plugin + MavenXpp3Reader reader = new MavenXpp3Reader(); + Model model = reader.read( + new FileReader("build/functionalTest/local-staging-repo/org/opensearch/plugin/sample-plugin/2.0.0.0/sample-plugin-2.0.0.0.pom") + ); + assertEquals(model.getGroupId(), "org.opensearch.plugin"); + } + + private void 
writeString(File file, String string) throws IOException { + try (Writer writer = new FileWriter(file)) { + writer.write(string); + } + } + +} From d86c88fe591fb0cca139b62c046633ed22305d13 Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Thu, 28 Apr 2022 09:58:26 -0500 Subject: [PATCH 131/653] [Refactor] XContentType to parse Accept or Content-Type headers (#3077) Refactors XContentType.fromMediaTypeOrFormat to fromMediaType so Accept headers and Content-Type headers can be parsed separately. This helps in reusing the same parse logic in for REST Versioning API support. Signed-off-by: Nicholas Walter Knize --- .../client/RestHighLevelClient.java | 2 +- .../client/RequestConvertersTests.java | 2 +- .../opensearch/common/xcontent/MediaType.java | 65 +++++++++ .../common/xcontent/MediaTypeParser.java | 135 ++++++++++++++++++ .../common/xcontent/XContentType.java | 101 ++++++------- .../common/xcontent/MediaTypeParserTests.java | 84 +++++++++++ .../opensearch/rest/AbstractRestChannel.java | 10 +- .../opensearch/rest/action/cat/RestTable.java | 9 +- .../common/xcontent/XContentTypeTests.java | 70 +++++---- .../rest/BytesRestResponseTests.java | 2 +- .../test/rest/OpenSearchRestTestCase.java | 6 +- .../rest/yaml/ClientYamlTestResponse.java | 2 +- .../opensearch/test/rest/yaml/ObjectPath.java | 2 +- 13 files changed, 392 insertions(+), 98 deletions(-) create mode 100644 libs/x-content/src/main/java/org/opensearch/common/xcontent/MediaType.java create mode 100644 libs/x-content/src/main/java/org/opensearch/common/xcontent/MediaTypeParser.java create mode 100644 libs/x-content/src/test/java/org/opensearch/common/xcontent/MediaTypeParserTests.java diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java index 16e6648e7747e..d293b979debb5 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java +++ 
b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java @@ -2073,7 +2073,7 @@ protected final Resp parseEntity(final HttpEntity entity, final CheckedFu if (entity.getContentType() == null) { throw new IllegalStateException("OpenSearch didn't return the [Content-Type] header, unable to parse response body"); } - XContentType xContentType = XContentType.fromMediaTypeOrFormat(entity.getContentType().getValue()); + XContentType xContentType = XContentType.fromMediaType(entity.getContentType().getValue()); if (xContentType == null) { throw new IllegalStateException("Unsupported Content-Type: " + entity.getContentType().getValue()); } diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java index 66581fdc42c2b..0415b864ba35e 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java @@ -806,7 +806,7 @@ public void testUpdate() throws IOException { UpdateRequest parsedUpdateRequest = new UpdateRequest(); - XContentType entityContentType = XContentType.fromMediaTypeOrFormat(entity.getContentType().getValue()); + XContentType entityContentType = XContentType.fromMediaType(entity.getContentType().getValue()); try (XContentParser parser = createParser(entityContentType.xContent(), entity.getContent())) { parsedUpdateRequest.fromXContent(parser); } diff --git a/libs/x-content/src/main/java/org/opensearch/common/xcontent/MediaType.java b/libs/x-content/src/main/java/org/opensearch/common/xcontent/MediaType.java new file mode 100644 index 0000000000000..5cfc52b20bfc5 --- /dev/null +++ b/libs/x-content/src/main/java/org/opensearch/common/xcontent/MediaType.java @@ -0,0 +1,65 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * 
this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.common.xcontent; + +/** + * Abstracts a Media Type and a format parameter. + * Media types are used as values on Content-Type and Accept headers + * format is an URL parameter, specifies response media type. + */ +public interface MediaType { + /** + * Returns a type part of a MediaType + * i.e. application for application/json + */ + String type(); + + /** + * Returns a subtype part of a MediaType. + * i.e. json for application/json + */ + String subtype(); + + /** + * Returns a corresponding format for a MediaType. i.e. json for application/json media type + * Can differ from the MediaType's subtype i.e plain/text has a subtype of text but format is txt + */ + String format(); + + /** + * returns a string representation of a media type. 
+ */ + default String typeWithSubtype() { + return type() + "/" + subtype(); + } +} diff --git a/libs/x-content/src/main/java/org/opensearch/common/xcontent/MediaTypeParser.java b/libs/x-content/src/main/java/org/opensearch/common/xcontent/MediaTypeParser.java new file mode 100644 index 0000000000000..cbd3589f5b500 --- /dev/null +++ b/libs/x-content/src/main/java/org/opensearch/common/xcontent/MediaTypeParser.java @@ -0,0 +1,135 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.common.xcontent; + +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; + +public class MediaTypeParser { + private final Map formatToMediaType; + private final Map typeWithSubtypeToMediaType; + + public MediaTypeParser(T[] acceptedMediaTypes) { + this(acceptedMediaTypes, Map.of()); + } + + public MediaTypeParser(T[] acceptedMediaTypes, Map additionalMediaTypes) { + final int size = acceptedMediaTypes.length + additionalMediaTypes.size(); + Map formatMap = new HashMap<>(size); + Map typeMap = new HashMap<>(size); + for (T mediaType : acceptedMediaTypes) { + typeMap.put(mediaType.typeWithSubtype(), mediaType); + formatMap.put(mediaType.format(), mediaType); + } + for (Map.Entry entry : additionalMediaTypes.entrySet()) { + String typeWithSubtype = entry.getKey(); + T mediaType = entry.getValue(); + + typeMap.put(typeWithSubtype.toLowerCase(Locale.ROOT), mediaType); + formatMap.put(mediaType.format(), mediaType); + } + + this.formatToMediaType = Map.copyOf(formatMap); + this.typeWithSubtypeToMediaType = Map.copyOf(typeMap); + } + + public T fromMediaType(String mediaType) { + ParsedMediaType parsedMediaType = parseMediaType(mediaType); + return parsedMediaType != null ? 
parsedMediaType.getMediaType() : null; + } + + public T fromFormat(String format) { + if (format == null) { + return null; + } + return formatToMediaType.get(format.toLowerCase(Locale.ROOT)); + } + + /** + * parsing media type that follows https://tools.ietf.org/html/rfc7231#section-3.1.1.1 + * @param headerValue a header value from Accept or Content-Type + * @return a parsed media-type + */ + public ParsedMediaType parseMediaType(String headerValue) { + if (headerValue != null) { + String[] split = headerValue.toLowerCase(Locale.ROOT).split(";"); + + String[] typeSubtype = split[0].trim().split("/"); + if (typeSubtype.length == 2) { + String type = typeSubtype[0]; + String subtype = typeSubtype[1]; + T xContentType = typeWithSubtypeToMediaType.get(type + "/" + subtype); + if (xContentType != null) { + Map parameters = new HashMap<>(); + for (int i = 1; i < split.length; i++) { + // spaces are allowed between parameters, but not between '=' sign + String[] keyValueParam = split[i].trim().split("="); + if (keyValueParam.length != 2 || hasSpaces(keyValueParam[0]) || hasSpaces(keyValueParam[1])) { + return null; + } + parameters.put(keyValueParam[0], keyValueParam[1]); + } + return new ParsedMediaType(xContentType, parameters); + } + } + + } + return null; + } + + private boolean hasSpaces(String s) { + return s.trim().equals(s) == false; + } + + /** + * A media type object that contains all the information provided on a Content-Type or Accept header + */ + public class ParsedMediaType { + private final Map parameters; + private final T mediaType; + + public ParsedMediaType(T mediaType, Map parameters) { + this.parameters = parameters; + this.mediaType = mediaType; + } + + public T getMediaType() { + return mediaType; + } + + public Map getParameters() { + return parameters; + } + } +} diff --git a/libs/x-content/src/main/java/org/opensearch/common/xcontent/XContentType.java b/libs/x-content/src/main/java/org/opensearch/common/xcontent/XContentType.java index 
b0986d603ef23..1c745f591e38b 100644 --- a/libs/x-content/src/main/java/org/opensearch/common/xcontent/XContentType.java +++ b/libs/x-content/src/main/java/org/opensearch/common/xcontent/XContentType.java @@ -38,12 +38,12 @@ import org.opensearch.common.xcontent.yaml.YamlXContent; import java.util.Locale; -import java.util.Objects; +import java.util.Map; /** * The content type of {@link org.opensearch.common.xcontent.XContent}. */ -public enum XContentType { +public enum XContentType implements MediaType { /** * A JSON based content type. @@ -60,7 +60,7 @@ public String mediaType() { } @Override - public String shortName() { + public String subtype() { return "json"; } @@ -79,7 +79,7 @@ public String mediaTypeWithoutParameters() { } @Override - public String shortName() { + public String subtype() { return "smile"; } @@ -98,7 +98,7 @@ public String mediaTypeWithoutParameters() { } @Override - public String shortName() { + public String subtype() { return "yaml"; } @@ -117,7 +117,7 @@ public String mediaTypeWithoutParameters() { } @Override - public String shortName() { + public String subtype() { return "cbor"; } @@ -127,34 +127,42 @@ public XContent xContent() { } }; + /** a parser of media types */ + private static final MediaTypeParser MEDIA_TYPE_PARSER = new MediaTypeParser<>( + XContentType.values(), + Map.of("application/*", JSON, "application/x-ndjson", JSON) + ); + + /** gets the {@link MediaTypeParser} singleton for use outside class */ + @SuppressWarnings("rawtypes") + public static MediaTypeParser getMediaTypeParser() { + return MEDIA_TYPE_PARSER; + } + /** - * Accepts either a format string, which is equivalent to {@link XContentType#shortName()} or a media type that optionally has - * parameters and attempts to match the value to an {@link XContentType}. The comparisons are done in lower case format and this method - * also supports a wildcard accept for {@code application/*}. 
This method can be used to parse the {@code Accept} HTTP header or a - * format query string parameter. This method will return {@code null} if no match is found + * Accepts a format string, which is most of the time is equivalent to {@link XContentType#subtype()} + * and attempts to match the value to an {@link XContentType}. + * The comparisons are done in lower case format. + * This method will return {@code null} if no match is found */ - public static XContentType fromMediaTypeOrFormat(String mediaType) { - if (mediaType == null) { - return null; - } - - mediaType = removeVersionInMediaType(mediaType); - for (XContentType type : values()) { - if (isSameMediaTypeOrFormatAs(mediaType, type)) { - return type; - } - } - final String lowercaseMediaType = mediaType.toLowerCase(Locale.ROOT); - if (lowercaseMediaType.startsWith("application/*")) { - return JSON; - } + public static XContentType fromFormat(String mediaType) { + return MEDIA_TYPE_PARSER.fromFormat(mediaType); + } - return null; + /** + * Attempts to match the given media type with the known {@link XContentType} values. This match is done in a case-insensitive manner. + * The provided media type can optionally has parameters. + * This method is suitable for parsing of the {@code Content-Type} and {@code Accept} HTTP headers. + * This method will return {@code null} if no match is found + */ + public static XContentType fromMediaType(String mediaTypeHeaderValue) { + mediaTypeHeaderValue = removeVersionInMediaType(mediaTypeHeaderValue); + return MEDIA_TYPE_PARSER.fromMediaType(mediaTypeHeaderValue); } /** * Clients compatible with ES 7.x might start sending media types with versioned media type - * in a form of application/vnd.opensearch+json;compatible-with=7. + * in a form of application/vnd.elasticsearch+json;compatible-with=7. * This has to be removed in order to be used in 7.x server. * The same client connecting using that media type will be able to communicate with ES 8 thanks to compatible API. 
* @param mediaType - a media type used on Content-Type header, might contain versioned media type. @@ -162,38 +170,12 @@ public static XContentType fromMediaTypeOrFormat(String mediaType) { * @return a media type string without */ private static String removeVersionInMediaType(String mediaType) { - if (mediaType.contains("vnd.opensearch")) { + if (mediaType != null && (mediaType = mediaType.toLowerCase(Locale.ROOT)).contains("vnd.opensearch")) { return mediaType.replaceAll("vnd.opensearch\\+", "").replaceAll("\\s*;\\s*compatible-with=\\d+", ""); } return mediaType; } - /** - * Attempts to match the given media type with the known {@link XContentType} values. This match is done in a case-insensitive manner. - * The provided media type should not include any parameters. This method is suitable for parsing part of the {@code Content-Type} - * HTTP header. This method will return {@code null} if no match is found - */ - public static XContentType fromMediaType(String mediaType) { - final String lowercaseMediaType = Objects.requireNonNull(mediaType, "mediaType cannot be null").toLowerCase(Locale.ROOT); - for (XContentType type : values()) { - if (type.mediaTypeWithoutParameters().equals(lowercaseMediaType)) { - return type; - } - } - // we also support newline delimited JSON: http://specs.okfnlabs.org/ndjson/ - if (lowercaseMediaType.toLowerCase(Locale.ROOT).equals("application/x-ndjson")) { - return XContentType.JSON; - } - - return null; - } - - private static boolean isSameMediaTypeOrFormatAs(String stringType, XContentType type) { - return type.mediaTypeWithoutParameters().equalsIgnoreCase(stringType) - || stringType.toLowerCase(Locale.ROOT).startsWith(type.mediaTypeWithoutParameters().toLowerCase(Locale.ROOT) + ";") - || type.shortName().equalsIgnoreCase(stringType); - } - private int index; XContentType(int index) { @@ -208,10 +190,17 @@ public String mediaType() { return mediaTypeWithoutParameters(); } - public abstract String shortName(); - public abstract 
XContent xContent(); public abstract String mediaTypeWithoutParameters(); + @Override + public String type() { + return "application"; + } + + @Override + public String format() { + return subtype(); + } } diff --git a/libs/x-content/src/test/java/org/opensearch/common/xcontent/MediaTypeParserTests.java b/libs/x-content/src/test/java/org/opensearch/common/xcontent/MediaTypeParserTests.java new file mode 100644 index 0000000000000..06dbd4ebd24dc --- /dev/null +++ b/libs/x-content/src/test/java/org/opensearch/common/xcontent/MediaTypeParserTests.java @@ -0,0 +1,84 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.common.xcontent; + +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Collections; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; + +public class MediaTypeParserTests extends OpenSearchTestCase { + + @SuppressWarnings("unchecked") + MediaTypeParser mediaTypeParser = XContentType.getMediaTypeParser(); + + public void testJsonWithParameters() throws Exception { + String mediaType = "application/json"; + assertThat(mediaTypeParser.parseMediaType(mediaType).getParameters(), equalTo(Collections.emptyMap())); + assertThat(mediaTypeParser.parseMediaType(mediaType + ";").getParameters(), equalTo(Collections.emptyMap())); + assertThat(mediaTypeParser.parseMediaType(mediaType + "; charset=UTF-8").getParameters(), equalTo(Map.of("charset", "utf-8"))); + assertThat( + mediaTypeParser.parseMediaType(mediaType + "; custom=123;charset=UTF-8").getParameters(), + equalTo(Map.of("charset", "utf-8", "custom", "123")) + ); + } + + public void testWhiteSpaceInTypeSubtype() { + String mediaType = " application/json "; + assertThat(mediaTypeParser.parseMediaType(mediaType).getMediaType(), equalTo(XContentType.JSON)); + + assertThat( + mediaTypeParser.parseMediaType(mediaType + "; custom=123; charset=UTF-8").getParameters(), + equalTo(Map.of("charset", "utf-8", "custom", "123")) + ); + assertThat( + mediaTypeParser.parseMediaType(mediaType + "; custom=123;\n charset=UTF-8").getParameters(), + equalTo(Map.of("charset", "utf-8", "custom", "123")) + ); + + mediaType = " application / json "; + assertThat(mediaTypeParser.parseMediaType(mediaType), is(nullValue())); + } + + public void testInvalidParameters() { + String mediaType = "application/json"; + assertThat(mediaTypeParser.parseMediaType(mediaType + "; keyvalueNoEqualsSign"), is(nullValue())); + + assertThat(mediaTypeParser.parseMediaType(mediaType + "; key = value"), 
is(nullValue())); + assertThat(mediaTypeParser.parseMediaType(mediaType + "; key="), is(nullValue())); + } +} diff --git a/server/src/main/java/org/opensearch/rest/AbstractRestChannel.java b/server/src/main/java/org/opensearch/rest/AbstractRestChannel.java index 536972aa5a5d7..eb5fcccee3868 100644 --- a/server/src/main/java/org/opensearch/rest/AbstractRestChannel.java +++ b/server/src/main/java/org/opensearch/rest/AbstractRestChannel.java @@ -58,6 +58,7 @@ public abstract class AbstractRestChannel implements RestChannel { private final String filterPath; private final boolean pretty; private final boolean human; + private final String acceptHeader; private BytesStreamOutput bytesOut; @@ -71,7 +72,8 @@ public abstract class AbstractRestChannel implements RestChannel { protected AbstractRestChannel(RestRequest request, boolean detailedErrorsEnabled) { this.request = request; this.detailedErrorsEnabled = detailedErrorsEnabled; - this.format = request.param("format", request.header("Accept")); + this.format = request.param("format"); + this.acceptHeader = request.header("Accept"); this.filterPath = request.param("filter_path", null); this.pretty = request.paramAsBoolean("pretty", false); this.human = request.paramAsBoolean("human", false); @@ -112,7 +114,11 @@ public XContentBuilder newBuilder( boolean useFiltering ) throws IOException { if (responseContentType == null) { - responseContentType = XContentType.fromMediaTypeOrFormat(format); + // TODO should format vs acceptHeader always be the same, do we allow overriding? 
+ responseContentType = XContentType.fromFormat(format); + if (responseContentType == null) { + responseContentType = XContentType.fromMediaType(acceptHeader); + } } // try to determine the response content type from the media type or the format query string parameter, with the format parameter // taking precedence over the Accept header diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestTable.java b/server/src/main/java/org/opensearch/rest/action/cat/RestTable.java index 69977cb73645a..542c428901475 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestTable.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestTable.java @@ -64,13 +64,20 @@ public class RestTable { public static RestResponse buildResponse(Table table, RestChannel channel) throws Exception { RestRequest request = channel.request(); - XContentType xContentType = XContentType.fromMediaTypeOrFormat(request.param("format", request.header("Accept"))); + XContentType xContentType = getXContentType(request); if (xContentType != null) { return buildXContentBuilder(table, channel); } return buildTextPlainResponse(table, channel); } + private static XContentType getXContentType(RestRequest request) { + if (request.hasParam("format")) { + return XContentType.fromFormat(request.param("format")); + } + return XContentType.fromMediaType(request.header("Accept")); + } + public static RestResponse buildXContentBuilder(Table table, RestChannel channel) throws Exception { RestRequest request = channel.request(); XContentBuilder builder = channel.newBuilder(); diff --git a/server/src/test/java/org/opensearch/common/xcontent/XContentTypeTests.java b/server/src/test/java/org/opensearch/common/xcontent/XContentTypeTests.java index 4d4fea5a41b82..978db14225f00 100644 --- a/server/src/test/java/org/opensearch/common/xcontent/XContentTypeTests.java +++ b/server/src/test/java/org/opensearch/common/xcontent/XContentTypeTests.java @@ -43,72 +43,80 @@ public class 
XContentTypeTests extends OpenSearchTestCase { public void testFromJson() throws Exception { String mediaType = "application/json"; XContentType expectedXContentType = XContentType.JSON; - assertThat(XContentType.fromMediaTypeOrFormat(mediaType), equalTo(expectedXContentType)); - assertThat(XContentType.fromMediaTypeOrFormat(mediaType + ";"), equalTo(expectedXContentType)); - assertThat(XContentType.fromMediaTypeOrFormat(mediaType + "; charset=UTF-8"), equalTo(expectedXContentType)); + assertThat(XContentType.fromMediaType(mediaType), equalTo(expectedXContentType)); + assertThat(XContentType.fromMediaType(mediaType + ";"), equalTo(expectedXContentType)); + assertThat(XContentType.fromMediaType(mediaType + "; charset=UTF-8"), equalTo(expectedXContentType)); + } + + public void testFromNdJson() throws Exception { + String mediaType = "application/x-ndjson"; + XContentType expectedXContentType = XContentType.JSON; + assertThat(XContentType.fromMediaType(mediaType), equalTo(expectedXContentType)); + assertThat(XContentType.fromMediaType(mediaType + ";"), equalTo(expectedXContentType)); + assertThat(XContentType.fromMediaType(mediaType + "; charset=UTF-8"), equalTo(expectedXContentType)); } public void testFromJsonUppercase() throws Exception { String mediaType = "application/json".toUpperCase(Locale.ROOT); XContentType expectedXContentType = XContentType.JSON; - assertThat(XContentType.fromMediaTypeOrFormat(mediaType), equalTo(expectedXContentType)); - assertThat(XContentType.fromMediaTypeOrFormat(mediaType + ";"), equalTo(expectedXContentType)); - assertThat(XContentType.fromMediaTypeOrFormat(mediaType + "; charset=UTF-8"), equalTo(expectedXContentType)); + assertThat(XContentType.fromMediaType(mediaType), equalTo(expectedXContentType)); + assertThat(XContentType.fromMediaType(mediaType + ";"), equalTo(expectedXContentType)); + assertThat(XContentType.fromMediaType(mediaType + "; charset=UTF-8"), equalTo(expectedXContentType)); } public void testFromYaml() throws 
Exception { String mediaType = "application/yaml"; XContentType expectedXContentType = XContentType.YAML; - assertThat(XContentType.fromMediaTypeOrFormat(mediaType), equalTo(expectedXContentType)); - assertThat(XContentType.fromMediaTypeOrFormat(mediaType + ";"), equalTo(expectedXContentType)); - assertThat(XContentType.fromMediaTypeOrFormat(mediaType + "; charset=UTF-8"), equalTo(expectedXContentType)); + assertThat(XContentType.fromMediaType(mediaType), equalTo(expectedXContentType)); + assertThat(XContentType.fromMediaType(mediaType + ";"), equalTo(expectedXContentType)); + assertThat(XContentType.fromMediaType(mediaType + "; charset=UTF-8"), equalTo(expectedXContentType)); } public void testFromSmile() throws Exception { String mediaType = "application/smile"; XContentType expectedXContentType = XContentType.SMILE; - assertThat(XContentType.fromMediaTypeOrFormat(mediaType), equalTo(expectedXContentType)); - assertThat(XContentType.fromMediaTypeOrFormat(mediaType + ";"), equalTo(expectedXContentType)); + assertThat(XContentType.fromMediaType(mediaType), equalTo(expectedXContentType)); + assertThat(XContentType.fromMediaType(mediaType + ";"), equalTo(expectedXContentType)); } public void testFromCbor() throws Exception { String mediaType = "application/cbor"; XContentType expectedXContentType = XContentType.CBOR; - assertThat(XContentType.fromMediaTypeOrFormat(mediaType), equalTo(expectedXContentType)); - assertThat(XContentType.fromMediaTypeOrFormat(mediaType + ";"), equalTo(expectedXContentType)); + assertThat(XContentType.fromMediaType(mediaType), equalTo(expectedXContentType)); + assertThat(XContentType.fromMediaType(mediaType + ";"), equalTo(expectedXContentType)); } public void testFromWildcard() throws Exception { String mediaType = "application/*"; XContentType expectedXContentType = XContentType.JSON; - assertThat(XContentType.fromMediaTypeOrFormat(mediaType), equalTo(expectedXContentType)); - assertThat(XContentType.fromMediaTypeOrFormat(mediaType + 
";"), equalTo(expectedXContentType)); + assertThat(XContentType.fromMediaType(mediaType), equalTo(expectedXContentType)); + assertThat(XContentType.fromMediaType(mediaType + ";"), equalTo(expectedXContentType)); } public void testFromWildcardUppercase() throws Exception { String mediaType = "APPLICATION/*"; XContentType expectedXContentType = XContentType.JSON; - assertThat(XContentType.fromMediaTypeOrFormat(mediaType), equalTo(expectedXContentType)); - assertThat(XContentType.fromMediaTypeOrFormat(mediaType + ";"), equalTo(expectedXContentType)); + assertThat(XContentType.fromMediaType(mediaType), equalTo(expectedXContentType)); + assertThat(XContentType.fromMediaType(mediaType + ";"), equalTo(expectedXContentType)); } public void testFromRubbish() throws Exception { - assertThat(XContentType.fromMediaTypeOrFormat(null), nullValue()); - assertThat(XContentType.fromMediaTypeOrFormat(""), nullValue()); - assertThat(XContentType.fromMediaTypeOrFormat("text/plain"), nullValue()); - assertThat(XContentType.fromMediaTypeOrFormat("gobbly;goop"), nullValue()); + assertThat(XContentType.fromMediaType(null), nullValue()); + assertThat(XContentType.fromMediaType(""), nullValue()); + assertThat(XContentType.fromMediaType("text/plain"), nullValue()); + assertThat(XContentType.fromMediaType("gobbly;goop"), nullValue()); } public void testVersionedMediaType() throws Exception { - assertThat(XContentType.fromMediaTypeOrFormat("application/vnd.opensearch+json;compatible-with=7"), equalTo(XContentType.JSON)); - assertThat(XContentType.fromMediaTypeOrFormat("application/vnd.opensearch+yaml;compatible-with=7"), equalTo(XContentType.YAML)); - assertThat(XContentType.fromMediaTypeOrFormat("application/vnd.opensearch+cbor;compatible-with=7"), equalTo(XContentType.CBOR)); - assertThat(XContentType.fromMediaTypeOrFormat("application/vnd.opensearch+smile;compatible-with=7"), equalTo(XContentType.SMILE)); - - assertThat(XContentType.fromMediaTypeOrFormat("application/vnd.opensearch+json 
;compatible-with=7"), equalTo(XContentType.JSON)); - assertThat( - XContentType.fromMediaTypeOrFormat("application/vnd.opensearch+json ;compatible-with=7;charset=utf-8"), - equalTo(XContentType.JSON) - ); + assertThat(XContentType.fromMediaType("application/vnd.opensearch+json;compatible-with=7"), equalTo(XContentType.JSON)); + assertThat(XContentType.fromMediaType("application/vnd.opensearch+yaml;compatible-with=7"), equalTo(XContentType.YAML)); + assertThat(XContentType.fromMediaType("application/vnd.opensearch+cbor;compatible-with=7"), equalTo(XContentType.CBOR)); + assertThat(XContentType.fromMediaType("application/vnd.opensearch+smile;compatible-with=7"), equalTo(XContentType.SMILE)); + + assertThat(XContentType.fromMediaType("application/vnd.opensearch+json ;compatible-with=7"), equalTo(XContentType.JSON)); + + String mthv = "application/vnd.opensearch+json ;compatible-with=7;charset=utf-8"; + assertThat(XContentType.fromMediaType(mthv), equalTo(XContentType.JSON)); + assertThat(XContentType.fromMediaType(mthv.toUpperCase(Locale.ROOT)), equalTo(XContentType.JSON)); } } diff --git a/server/src/test/java/org/opensearch/rest/BytesRestResponseTests.java b/server/src/test/java/org/opensearch/rest/BytesRestResponseTests.java index 4e6d9b25409a8..1ea7f006cf482 100644 --- a/server/src/test/java/org/opensearch/rest/BytesRestResponseTests.java +++ b/server/src/test/java/org/opensearch/rest/BytesRestResponseTests.java @@ -325,7 +325,7 @@ public void testErrorToAndFromXContent() throws IOException { final XContentType xContentType = randomFrom(XContentType.values()); - Map params = Collections.singletonMap("format", xContentType.mediaType()); + Map params = Collections.singletonMap("format", xContentType.format()); RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withParams(params).build(); RestChannel channel = detailed ? 
new DetailedExceptionRestChannel(request) : new SimpleExceptionRestChannel(request); diff --git a/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java b/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java index 9624a9d3d0554..f2b68b6fdaca0 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java @@ -136,7 +136,7 @@ public abstract class OpenSearchRestTestCase extends OpenSearchTestCase { * Convert the entity from a {@link Response} into a map of maps. */ public static Map entityAsMap(Response response) throws IOException { - XContentType xContentType = XContentType.fromMediaTypeOrFormat(response.getEntity().getContentType().getValue()); + XContentType xContentType = XContentType.fromMediaType(response.getEntity().getContentType().getValue()); // EMPTY and THROW are fine here because `.map` doesn't use named x content or deprecation try ( XContentParser parser = xContentType.xContent() @@ -154,7 +154,7 @@ public static Map entityAsMap(Response response) throws IOExcept * Convert the entity from a {@link Response} into a list of maps. 
*/ public static List entityAsList(Response response) throws IOException { - XContentType xContentType = XContentType.fromMediaTypeOrFormat(response.getEntity().getContentType().getValue()); + XContentType xContentType = XContentType.fromMediaType(response.getEntity().getContentType().getValue()); // EMPTY and THROW are fine here because `.map` doesn't use named x content or deprecation try ( XContentParser parser = xContentType.xContent() @@ -1082,7 +1082,7 @@ protected static Map getAsMap(final String endpoint) throws IOEx } protected static Map responseAsMap(Response response) throws IOException { - XContentType entityContentType = XContentType.fromMediaTypeOrFormat(response.getEntity().getContentType().getValue()); + XContentType entityContentType = XContentType.fromMediaType(response.getEntity().getContentType().getValue()); Map responseEntity = XContentHelper.convertToMap( entityContentType.xContent(), response.getEntity().getContent(), diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestResponse.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestResponse.java index 4e8799b9a618e..8fc0554e2b31e 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestResponse.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestResponse.java @@ -66,7 +66,7 @@ public ClientYamlTestResponse(Response response) throws IOException { this.response = response; if (response.getEntity() != null) { String contentType = response.getHeader("Content-Type"); - this.bodyContentType = XContentType.fromMediaTypeOrFormat(contentType); + this.bodyContentType = XContentType.fromMediaType(contentType); try { byte[] bytes = EntityUtils.toByteArray(response.getEntity()); // skip parsing if we got text back (e.g. 
if we called _cat apis) diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ObjectPath.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ObjectPath.java index 0ff1b36d1f5ae..473511825ef60 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ObjectPath.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ObjectPath.java @@ -57,7 +57,7 @@ public class ObjectPath { public static ObjectPath createFromResponse(Response response) throws IOException { byte[] bytes = EntityUtils.toByteArray(response.getEntity()); String contentType = response.getHeader("Content-Type"); - XContentType xContentType = XContentType.fromMediaTypeOrFormat(contentType); + XContentType xContentType = XContentType.fromMediaType(contentType); return ObjectPath.createFromXContent(xContentType.xContent(), new BytesArray(bytes)); } From 017773c62d345edac607b4474bac99f200c560f8 Mon Sep 17 00:00:00 2001 From: Tianli Feng Date: Thu, 28 Apr 2022 10:57:49 -0700 Subject: [PATCH 132/653] Rename BecomeMasterTask to BecomeClusterManagerTask in JoinTaskExecutor (#3099) Signed-off-by: Tianli Feng --- .../coordination/JoinTaskExecutor.java | 19 ++++++++++++++++++- .../coordination/JoinTaskExecutorTests.java | 11 +++++++++++ 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java index b8f7dfd116b7e..f0edeeb9319c5 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java @@ -94,14 +94,19 @@ public String toString() { } public boolean isBecomeMasterTask() { - return reason.equals(BECOME_MASTER_TASK_REASON); + return reason.equals(BECOME_MASTER_TASK_REASON) || reason.equals(BECOME_CLUSTER_MANAGER_TASK_REASON); } public boolean isFinishElectionTask() { return 
reason.equals(FINISH_ELECTION_TASK_REASON); } + /** + * @deprecated As of 2.0, because supporting inclusive language, replaced by {@link #BECOME_CLUSTER_MANAGER_TASK_REASON} + */ + @Deprecated private static final String BECOME_MASTER_TASK_REASON = "_BECOME_MASTER_TASK_"; + private static final String BECOME_CLUSTER_MANAGER_TASK_REASON = "_BECOME_CLUSTER_MANAGER_TASK_"; private static final String FINISH_ELECTION_TASK_REASON = "_FINISH_ELECTION_"; } @@ -331,10 +336,22 @@ public boolean runOnlyOnMaster() { return false; } + /** + * a task indicates that the current node should become master + * @deprecated As of 2.0, because supporting inclusive language, replaced by {@link #newBecomeClusterManagerTask()} + */ + @Deprecated public static Task newBecomeMasterTask() { return new Task(null, Task.BECOME_MASTER_TASK_REASON); } + /** + * a task indicates that the current node should become cluster-manager + */ + public static Task newBecomeClusterManagerTask() { + return new Task(null, Task.BECOME_CLUSTER_MANAGER_TASK_REASON); + } + /** * a task that is used to signal the election is stopped and we should process pending joins. 
* it may be used in combination with {@link JoinTaskExecutor#newBecomeMasterTask()} diff --git a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java index 49ef48cd1e9c6..6bd2d1e70033a 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java @@ -50,6 +50,7 @@ import java.util.HashSet; +import static org.hamcrest.Matchers.is; import static org.opensearch.test.VersionUtils.allVersions; import static org.opensearch.test.VersionUtils.maxCompatibleVersion; import static org.opensearch.test.VersionUtils.randomCompatibleVersion; @@ -198,4 +199,14 @@ public void testUpdatesNodeWithNewRoles() throws Exception { assertThat(result.resultingState.getNodes().get(actualNode.getId()).getRoles(), equalTo(actualNode.getRoles())); } + + /** + * Validate isBecomeMasterTask() can identify "become cluster manager task" properly + */ + public void testIsBecomeClusterManagerTask() { + JoinTaskExecutor.Task joinTaskOfMaster = JoinTaskExecutor.newBecomeMasterTask(); + assertThat(joinTaskOfMaster.isBecomeMasterTask(), is(true)); + JoinTaskExecutor.Task joinTaskOfClusterManager = JoinTaskExecutor.newBecomeClusterManagerTask(); + assertThat(joinTaskOfClusterManager.isBecomeMasterTask(), is(true)); + } } From 8a19ccc851cd62f2647b9bd34820518e8d2ad8c1 Mon Sep 17 00:00:00 2001 From: Laurent Arnoud Date: Fri, 29 Apr 2022 19:28:34 +0000 Subject: [PATCH 133/653] Build: do not ship jdk when no-jdk option set (#3039) When using package distribution ``` ./gradlew :distribution:packages:no-jdk-deb:assemble ``` When `true` this include jdk switch the boolean to the correct value fix https://github.com/opensearch-project/OpenSearch/issues/3024 Signed-off-by: Laurent Arnoud --- distribution/packages/build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) 
diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle index 93a82ff324835..cd0cf6b9db64d 100644 --- a/distribution/packages/build.gradle +++ b/distribution/packages/build.gradle @@ -355,7 +355,7 @@ tasks.register('buildDeb', Deb) { } tasks.register('buildNoJdkDeb', Deb) { - configure(commonDebConfig(true, 'x64')) + configure(commonDebConfig(false, 'x64')) } Closure commonRpmConfig(boolean jdk, String architecture) { From 4fef5a3e8c98c7a1ba3bbe4ae06c81a582e035d6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 May 2022 10:22:57 -0700 Subject: [PATCH 134/653] Bump commons-cli from 1.2 to 1.5.0 in /plugins/repository-hdfs (#3125) * Bump commons-cli from 1.2 to 1.5.0 in /plugins/repository-hdfs Bumps commons-cli from 1.2 to 1.5.0. --- updated-dependencies: - dependency-name: commons-cli:commons-cli dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- plugins/repository-hdfs/build.gradle | 2 +- plugins/repository-hdfs/licenses/commons-cli-1.2.jar.sha1 | 1 - plugins/repository-hdfs/licenses/commons-cli-1.5.0.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-hdfs/licenses/commons-cli-1.2.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/commons-cli-1.5.0.jar.sha1 diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 0a1e0bde3af2f..9789bde9cd489 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -69,7 +69,7 @@ dependencies { runtimeOnly 'com.google.guava:guava:31.1-jre' api 'com.google.protobuf:protobuf-java:3.20.1' api "commons-logging:commons-logging:${versions.commonslogging}" - api 
'commons-cli:commons-cli:1.2' + api 'commons-cli:commons-cli:1.5.0' api "commons-codec:commons-codec:${versions.commonscodec}" api 'commons-collections:commons-collections:3.2.2' api 'org.apache.commons:commons-compress:1.21' diff --git a/plugins/repository-hdfs/licenses/commons-cli-1.2.jar.sha1 b/plugins/repository-hdfs/licenses/commons-cli-1.2.jar.sha1 deleted file mode 100644 index d38d00127e8cd..0000000000000 --- a/plugins/repository-hdfs/licenses/commons-cli-1.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2bf96b7aa8b611c177d329452af1dc933e14501c \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-cli-1.5.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-cli-1.5.0.jar.sha1 new file mode 100644 index 0000000000000..8f9e064eda2d0 --- /dev/null +++ b/plugins/repository-hdfs/licenses/commons-cli-1.5.0.jar.sha1 @@ -0,0 +1 @@ +dc98be5d5390230684a092589d70ea76a147925c \ No newline at end of file From 87e065ed1ede119598b88e4a85cecbd23c96ee82 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 May 2022 10:23:56 -0700 Subject: [PATCH 135/653] Bump wiremock-jre8-standalone from 2.32.0 to 2.33.2 in /buildSrc (#3124) Bumps [wiremock-jre8-standalone](https://github.com/wiremock/wiremock) from 2.32.0 to 2.33.2. - [Release notes](https://github.com/wiremock/wiremock/releases) - [Commits](https://github.com/wiremock/wiremock/compare/2.32.0...2.33.2) --- updated-dependencies: - dependency-name: com.github.tomakehurst:wiremock-jre8-standalone dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- buildSrc/build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index a8b97a110d19a..6ae4512415345 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -122,7 +122,7 @@ dependencies { testFixturesApi "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${props.getProperty('randomizedrunner')}" testFixturesApi gradleApi() testFixturesApi gradleTestKit() - testImplementation 'com.github.tomakehurst:wiremock-jre8-standalone:2.32.0' + testImplementation 'com.github.tomakehurst:wiremock-jre8-standalone:2.33.2' testImplementation "org.mockito:mockito-core:${props.getProperty('mockito')}" integTestImplementation('org.spockframework:spock-core:2.1-groovy-3.0') { exclude module: "groovy" From 783ba47e8c223132c8205ecb6bce110c21422b1d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 May 2022 10:24:23 -0700 Subject: [PATCH 136/653] Bump azure-storage-blob from 12.15.0 to 12.16.0 in /plugins/repository-azure (#3123) * Bump azure-storage-blob in /plugins/repository-azure Bumps [azure-storage-blob](https://github.com/Azure/azure-sdk-for-java) from 12.15.0 to 12.16.0. - [Release notes](https://github.com/Azure/azure-sdk-for-java/releases) - [Commits](https://github.com/Azure/azure-sdk-for-java/compare/azure-storage-blob_12.15.0...azure-storage-blob_12.16.0) --- updated-dependencies: - dependency-name: com.azure:azure-storage-blob dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- plugins/repository-azure/build.gradle | 2 +- .../licenses/azure-storage-blob-12.15.0.jar.sha1 | 1 - .../licenses/azure-storage-blob-12.16.0.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-azure/licenses/azure-storage-blob-12.15.0.jar.sha1 create mode 100644 plugins/repository-azure/licenses/azure-storage-blob-12.16.0.jar.sha1 diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index d5bbd23325cd0..661fce5a2c003 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -54,7 +54,7 @@ dependencies { api "io.netty:netty-resolver-dns:${versions.netty}" api "io.netty:netty-transport-native-unix-common:${versions.netty}" implementation project(':modules:transport-netty4') - api 'com.azure:azure-storage-blob:12.15.0' + api 'com.azure:azure-storage-blob:12.16.0' api 'org.reactivestreams:reactive-streams:1.0.3' api 'io.projectreactor:reactor-core:3.4.17' api 'io.projectreactor.netty:reactor-netty:1.0.17' diff --git a/plugins/repository-azure/licenses/azure-storage-blob-12.15.0.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-blob-12.15.0.jar.sha1 deleted file mode 100644 index 513cb017f798d..0000000000000 --- a/plugins/repository-azure/licenses/azure-storage-blob-12.15.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a53a6bdf7564f4e3a7b0b93cd96b7f5f95c03d36 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-storage-blob-12.16.0.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-blob-12.16.0.jar.sha1 new file mode 100644 index 0000000000000..349d190bbbac3 --- /dev/null +++ b/plugins/repository-azure/licenses/azure-storage-blob-12.16.0.jar.sha1 @@ -0,0 +1 @@ +74b92065815f7affb0cd7897b683369b9ed982fd \ No newline 
at end of file From 3e22e0938481667af9c4358db44d7b8ea3576d4d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 May 2022 10:24:53 -0700 Subject: [PATCH 137/653] Bump morfologik-fsa from 2.1.8 to 2.1.9 in /plugins/analysis-ukrainian (#3122) * Bump morfologik-fsa from 2.1.8 to 2.1.9 in /plugins/analysis-ukrainian Bumps [morfologik-fsa](https://github.com/morfologik/morfologik-stemming) from 2.1.8 to 2.1.9. - [Release notes](https://github.com/morfologik/morfologik-stemming/releases) - [Changelog](https://github.com/morfologik/morfologik-stemming/blob/master/CHANGES.txt) - [Commits](https://github.com/morfologik/morfologik-stemming/compare/2.1.8...2.1.9) --- updated-dependencies: - dependency-name: org.carrot2:morfologik-fsa dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- plugins/analysis-ukrainian/build.gradle | 2 +- .../analysis-ukrainian/licenses/morfologik-fsa-2.1.8.jar.sha1 | 1 - .../analysis-ukrainian/licenses/morfologik-fsa-2.1.9.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 plugins/analysis-ukrainian/licenses/morfologik-fsa-2.1.8.jar.sha1 create mode 100644 plugins/analysis-ukrainian/licenses/morfologik-fsa-2.1.9.jar.sha1 diff --git a/plugins/analysis-ukrainian/build.gradle b/plugins/analysis-ukrainian/build.gradle index 386452fcf8aeb..88cf900967db2 100644 --- a/plugins/analysis-ukrainian/build.gradle +++ b/plugins/analysis-ukrainian/build.gradle @@ -37,7 +37,7 @@ opensearchplugin { dependencies { api "org.apache.lucene:lucene-analysis-morfologik:${versions.lucene}" api "org.carrot2:morfologik-stemming:2.1.8" - api "org.carrot2:morfologik-fsa:2.1.8" + api "org.carrot2:morfologik-fsa:2.1.9" api 
"ua.net.nlp:morfologik-ukrainian-search:4.9.1" } diff --git a/plugins/analysis-ukrainian/licenses/morfologik-fsa-2.1.8.jar.sha1 b/plugins/analysis-ukrainian/licenses/morfologik-fsa-2.1.8.jar.sha1 deleted file mode 100644 index 0b81b8051a3ba..0000000000000 --- a/plugins/analysis-ukrainian/licenses/morfologik-fsa-2.1.8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -68e23e2c57fe5699d511b3a7a2f202f90020e214 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/morfologik-fsa-2.1.9.jar.sha1 b/plugins/analysis-ukrainian/licenses/morfologik-fsa-2.1.9.jar.sha1 new file mode 100644 index 0000000000000..117ce2e14c3a2 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/morfologik-fsa-2.1.9.jar.sha1 @@ -0,0 +1 @@ +a12005eacfbbbe256fd4d41f80f6e3675d7e314e \ No newline at end of file From 2c2c475641699fb64d36bb076295fa2e378e4b29 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 May 2022 10:25:21 -0700 Subject: [PATCH 138/653] Bump com.diffplug.spotless from 6.4.2 to 6.5.1 (#3121) Bumps com.diffplug.spotless from 6.4.2 to 6.5.1. --- updated-dependencies: - dependency-name: com.diffplug.spotless dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.gradle b/build.gradle index ae2247bb865d1..6209aeff64492 100644 --- a/build.gradle +++ b/build.gradle @@ -48,7 +48,7 @@ plugins { id 'lifecycle-base' id 'opensearch.docker-support' id 'opensearch.global-build-info' - id "com.diffplug.spotless" version "6.4.2" apply false + id "com.diffplug.spotless" version "6.5.1" apply false id "org.gradle.test-retry" version "1.3.2" apply false id "test-report-aggregation" id 'jacoco-report-aggregation' From e12b6b75f41abe3dd52efce321a4407d0c50967f Mon Sep 17 00:00:00 2001 From: "Daniel Doubrovkine (dB.)" Date: Mon, 2 May 2022 14:20:44 -0400 Subject: [PATCH 139/653] Sync maintainers with actual permissions. (#3127) Signed-off-by: dblock --- MAINTAINERS.md | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 7aa77cad0e713..ed20b6f5ab314 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -1,4 +1,7 @@ -## Maintainers +- [Current Maintainers](#current-maintainers) +- [Emeritus](#emeritus) + +## Current Maintainers | Maintainer | GitHub ID | Affiliation | | --------------- | --------- | ----------- | @@ -8,12 +11,10 @@ | Andriy Redko | [reta](https://github.com/reta) | Aiven | | Charlotte Henkle | [CEHENKLE](https://github.com/CEHENKLE) | Amazon | | Daniel "dB." 
Doubrovkine | [dblock](https://github.com/dblock) | Amazon | -| Gopala Krishna Ambareesh | [krishna-ggk](https://github.com/krishna-ggk) |Amazon | | Himanshu Setia | [setiah](https://github.com/setiah) | Amazon | -| Itiyama Sadana | [itiyamas](https://github.com/itiyamas) | Amazon | | Kartik Ganesh | [kartg](https://github.com/kartg) | Amazon | +| Kunal Kotwani | [kotwanikunal](https://github.com/kotwanikunal) | Amazon | | Marc Handalian | [mch2](https://github.com/mch2) | Amazon | -| Megha Sai Kavikondala | [meghasaik](https://github.com/meghasaik) | Amazon | | Nick Knize | [nknize](https://github.com/nknize) | Amazon | | Owais Kazi | [owaiskazi19](https://github.com/owaiskazi19) | Amazon | | Rabi Panda | [adnapibar](https://github.com/adnapibar) | Amazon | @@ -23,7 +24,11 @@ | Shweta Thareja |[shwetathareja](https://github.com/shwetathareja) | Amazon | | Tianli Feng | [tlfeng](https://github.com/tlfeng) | Amazon | | Vacha Shah | [VachaShah](https://github.com/VachaShah) | Amazon | -| Vengadanathan Srinivasan | [vengadanathan-s](https://github.com/vengadanathan-s) | Amazon | | Xue Zhou | [xuezhou25](https://github.com/xuezhou25) | Amazon | +## Emeritus + +| Maintainer | GitHub ID | Affiliation | +| Megha Sai Kavikondala | [meghasaik](https://github.com/meghasaik) | Amazon | + [This document](https://github.com/opensearch-project/.github/blob/main/MAINTAINERS.md) explains what maintainers do in this repo, and how they should be doing it. If you're interested in contributing, see [CONTRIBUTING](CONTRIBUTING.md). From 21468279b2731aea5556f290c6e4fca7ea0e291e Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Mon, 2 May 2022 14:30:53 -0500 Subject: [PATCH 140/653] [Javadoc] Add missing package-info.java files to server (#3128) In preparation for re-enabling the missingJavadoc gradle task this change adds in the missing package-info.java files to the server folder. 
For now general javadocs are added to these files with the intent to clean up with better descriptions over time. Signed-off-by: Nicholas Walter Knize --- gradle/missing-javadoc.gradle | 13 ++++++++++++- .../admin/cluster/allocation/package-info.java | 10 ++++++++++ .../admin/cluster/configuration/package-info.java | 10 ++++++++++ .../action/admin/cluster/health/package-info.java | 10 ++++++++++ .../admin/cluster/node/hotthreads/package-info.java | 10 ++++++++++ .../admin/cluster/node/info/package-info.java | 10 ++++++++++ .../admin/cluster/node/liveness/package-info.java | 10 ++++++++++ .../action/admin/cluster/node/package-info.java | 10 ++++++++++ .../admin/cluster/node/reload/package-info.java | 10 ++++++++++ .../admin/cluster/node/stats/package-info.java | 10 ++++++++++ .../cluster/node/tasks/cancel/package-info.java | 10 ++++++++++ .../admin/cluster/node/tasks/get/package-info.java | 10 ++++++++++ .../admin/cluster/node/tasks/list/package-info.java | 10 ++++++++++ .../admin/cluster/node/tasks/package-info.java | 10 ++++++++++ .../admin/cluster/node/usage/package-info.java | 10 ++++++++++ .../action/admin/cluster/remote/package-info.java | 10 ++++++++++ .../cluster/repositories/cleanup/package-info.java | 10 ++++++++++ .../cluster/repositories/delete/package-info.java | 10 ++++++++++ .../cluster/repositories/get/package-info.java | 10 ++++++++++ .../admin/cluster/repositories/package-info.java | 10 ++++++++++ .../cluster/repositories/put/package-info.java | 10 ++++++++++ .../cluster/repositories/verify/package-info.java | 10 ++++++++++ .../action/admin/cluster/reroute/package-info.java | 10 ++++++++++ .../action/admin/cluster/settings/package-info.java | 10 ++++++++++ .../action/admin/cluster/shards/package-info.java | 10 ++++++++++ .../admin/cluster/snapshots/clone/package-info.java | 10 ++++++++++ .../cluster/snapshots/create/package-info.java | 10 ++++++++++ .../cluster/snapshots/delete/package-info.java | 10 ++++++++++ 
.../admin/cluster/snapshots/get/package-info.java | 10 ++++++++++ .../admin/cluster/snapshots/package-info.java | 10 ++++++++++ .../cluster/snapshots/restore/package-info.java | 10 ++++++++++ .../cluster/snapshots/status/package-info.java | 10 ++++++++++ .../action/admin/cluster/state/package-info.java | 10 ++++++++++ .../action/admin/cluster/stats/package-info.java | 10 ++++++++++ .../admin/cluster/storedscripts/package-info.java | 10 ++++++++++ .../action/admin/cluster/tasks/package-info.java | 10 ++++++++++ .../admin/indices/alias/exists/package-info.java | 10 ++++++++++ .../admin/indices/alias/get/package-info.java | 10 ++++++++++ .../action/admin/indices/alias/package-info.java | 10 ++++++++++ .../action/admin/indices/analyze/package-info.java | 10 ++++++++++ .../admin/indices/cache/clear/package-info.java | 10 ++++++++++ .../action/admin/indices/cache/package-info.java | 10 ++++++++++ .../action/admin/indices/close/package-info.java | 10 ++++++++++ .../action/admin/indices/create/package-info.java | 6 +++--- .../admin/indices/dangling/delete/package-info.java | 10 ++++++++++ .../admin/indices/dangling/find/package-info.java | 10 ++++++++++ .../indices/dangling/import_index/package-info.java | 10 ++++++++++ .../admin/indices/dangling/list/package-info.java | 10 ++++++++++ .../action/admin/indices/dangling/package-info.java | 9 +++++---- .../admin/indices/datastream/package-info.java | 10 ++++++++++ .../admin/indices/exists/indices/package-info.java | 10 ++++++++++ .../action/admin/indices/exists/package-info.java | 10 ++++++++++ .../admin/indices/exists/types/package-info.java | 13 +++++++++++++ .../action/admin/indices/get/package-info.java | 10 ++++++++++ .../admin/indices/mapping/get/package-info.java | 10 ++++++++++ .../action/admin/indices/open/package-info.java | 10 ++++++++++ .../action/admin/indices/readonly/package-info.java | 10 ++++++++++ .../action/admin/indices/recovery/package-info.java | 10 ++++++++++ 
.../action/admin/indices/resolve/package-info.java | 10 ++++++++++ .../action/admin/indices/rollover/package-info.java | 10 ++++++++++ .../action/admin/indices/segments/package-info.java | 10 ++++++++++ .../admin/indices/settings/get/package-info.java | 10 ++++++++++ .../action/admin/indices/settings/package-info.java | 10 ++++++++++ .../admin/indices/settings/put/package-info.java | 10 ++++++++++ .../action/admin/indices/shards/package-info.java | 10 ++++++++++ .../action/admin/indices/shrink/package-info.java | 10 ++++++++++ .../action/admin/indices/stats/package-info.java | 10 ++++++++++ .../admin/indices/template/delete/package-info.java | 10 ++++++++++ .../admin/indices/template/get/package-info.java | 10 ++++++++++ .../action/admin/indices/template/package-info.java | 10 ++++++++++ .../admin/indices/template/post/package-info.java | 10 ++++++++++ .../admin/indices/template/put/package-info.java | 10 ++++++++++ .../admin/indices/upgrade/get/package-info.java | 10 ++++++++++ .../action/admin/indices/upgrade/package-info.java | 10 ++++++++++ .../admin/indices/upgrade/post/package-info.java | 10 ++++++++++ .../action/admin/indices/validate/package-info.java | 10 ++++++++++ .../org/opensearch/action/admin/package-info.java | 6 +++--- .../org/opensearch/action/bulk/package-info.java | 10 ++++++++++ .../opensearch/action/fieldcaps/package-info.java | 10 ++++++++++ .../org/opensearch/action/ingest/package-info.java | 10 ++++++++++ .../org/opensearch/action/main/package-info.java | 10 ++++++++++ .../org/opensearch/action/resync/package-info.java | 10 ++++++++++ .../action/support/broadcast/node/package-info.java | 10 ++++++++++ .../action/support/broadcast/package-info.java | 10 ++++++++++ .../action/support/master/info/package-info.java | 10 ++++++++++ .../action/support/master/package-info.java | 10 ++++++++++ .../action/support/nodes/package-info.java | 10 ++++++++++ .../org/opensearch/action/support/package-info.java | 10 ++++++++++ 
.../action/support/replication/package-info.java | 10 ++++++++++ .../support/single/instance/package-info.java | 10 ++++++++++ .../action/support/single/package-info.java | 10 ++++++++++ .../action/support/single/shard/package-info.java | 10 ++++++++++ .../action/support/tasks/package-info.java | 10 ++++++++++ .../org/opensearch/action/update/package-info.java | 10 ++++++++++ .../java/org/opensearch/bootstrap/package-info.java | 12 ++++++++++++ .../main/java/org/opensearch/cli/package-info.java | 12 ++++++++++++ .../org/opensearch/client/node/package-info.java | 12 ++++++++++++ .../java/org/opensearch/client/package-info.java | 6 +++--- .../org/opensearch/client/support/package-info.java | 12 ++++++++++++ .../opensearch/client/transport/package-info.java | 12 ++++++++++++ .../org/opensearch/cluster/ack/package-info.java | 10 ++++++++++ .../cluster/action/index/package-info.java | 10 ++++++++++ .../org/opensearch/cluster/action/package-info.java | 10 ++++++++++ .../cluster/action/shard/package-info.java | 10 ++++++++++ .../org/opensearch/cluster/block/package-info.java | 10 ++++++++++ .../cluster/coordination/package-info.java | 10 ++++++++++ .../org/opensearch/cluster/health/package-info.java | 10 ++++++++++ .../opensearch/cluster/metadata/package-info.java | 10 ++++++++++ .../org/opensearch/cluster/node/package-info.java | 10 ++++++++++ .../java/org/opensearch/cluster/package-info.java | 10 ++++++++++ .../routing/allocation/allocator/package-info.java | 10 ++++++++++ .../routing/allocation/command/package-info.java | 10 ++++++++++ .../routing/allocation/decider/package-info.java | 10 ++++++++++ .../cluster/routing/allocation/package-info.java | 10 ++++++++++ .../opensearch/cluster/routing/package-info.java | 10 ++++++++++ .../opensearch/cluster/service/package-info.java | 10 ++++++++++ .../common/blobstore/fs/package-info.java | 10 ++++++++++ .../opensearch/common/blobstore/package-info.java | 10 ++++++++++ .../common/blobstore/support/package-info.java | 10 
++++++++++ .../org/opensearch/common/breaker/package-info.java | 10 ++++++++++ .../org/opensearch/common/bytes/package-info.java | 10 ++++++++++ .../org/opensearch/common/cache/package-info.java | 10 ++++++++++ .../org/opensearch/common/collect/package-info.java | 10 ++++++++++ .../opensearch/common/component/package-info.java | 10 ++++++++++ .../opensearch/common/compress/package-info.java | 10 ++++++++++ .../opensearch/common/concurrent/package-info.java | 10 ++++++++++ .../opensearch/common/document/package-info.java | 10 ++++++++++ .../common/geo/builders/package-info.java | 10 ++++++++++ .../org/opensearch/common/geo/package-info.java | 10 ++++++++++ .../opensearch/common/geo/parsers/package-info.java | 10 ++++++++++ .../org/opensearch/common/hash/package-info.java | 10 ++++++++++ .../java/org/opensearch/common/io/package-info.java | 10 ++++++++++ .../org/opensearch/common/joda/package-info.java | 10 ++++++++++ .../org/opensearch/common/lease/package-info.java | 10 ++++++++++ .../org/opensearch/common/logging/package-info.java | 10 ++++++++++ .../common/lucene/index/package-info.java | 10 ++++++++++ .../org/opensearch/common/lucene/package-info.java | 10 ++++++++++ .../common/lucene/search/function/package-info.java | 10 ++++++++++ .../common/lucene/search/package-info.java | 10 ++++++++++ .../common/lucene/store/package-info.java | 10 ++++++++++ .../opensearch/common/lucene/uid/package-info.java | 10 ++++++++++ .../org/opensearch/common/metrics/package-info.java | 10 ++++++++++ .../org/opensearch/common/network/package-info.java | 10 ++++++++++ .../java/org/opensearch/common/package-info.java | 10 ++++++++++ .../org/opensearch/common/path/package-info.java | 10 ++++++++++ .../opensearch/common/recycler/package-info.java | 10 ++++++++++ .../org/opensearch/common/regex/package-info.java | 10 ++++++++++ .../opensearch/common/rounding/package-info.java | 10 ++++++++++ .../org/opensearch/common/text/package-info.java | 10 ++++++++++ 
.../org/opensearch/common/time/package-info.java | 10 ++++++++++ .../opensearch/common/transport/package-info.java | 10 ++++++++++ .../org/opensearch/common/unit/package-info.java | 10 ++++++++++ .../common/util/concurrent/package-info.java | 10 ++++++++++ .../common/util/iterable/package-info.java | 10 ++++++++++ .../org/opensearch/common/util/package-info.java | 10 ++++++++++ .../opensearch/common/util/set/package-info.java | 10 ++++++++++ .../opensearch/common/xcontent/package-info.java | 10 ++++++++++ .../common/xcontent/support/package-info.java | 10 ++++++++++ .../java/org/opensearch/discovery/package-info.java | 10 ++++++++++ .../main/java/org/opensearch/env/package-info.java | 10 ++++++++++ .../java/org/opensearch/gateway/package-info.java | 10 ++++++++++ .../main/java/org/opensearch/http/package-info.java | 10 ++++++++++ .../org/opensearch/index/analysis/package-info.java | 10 ++++++++++ .../opensearch/index/cache/bitset/package-info.java | 10 ++++++++++ .../org/opensearch/index/cache/package-info.java | 10 ++++++++++ .../opensearch/index/cache/query/package-info.java | 10 ++++++++++ .../index/cache/request/package-info.java | 10 ++++++++++ .../org/opensearch/index/codec/package-info.java | 10 ++++++++++ .../org/opensearch/index/engine/package-info.java | 10 ++++++++++ .../fielddata/fieldcomparator/package-info.java | 10 ++++++++++ .../index/fielddata/ordinals/package-info.java | 10 ++++++++++ .../opensearch/index/fielddata/package-info.java | 10 ++++++++++ .../index/fielddata/plain/package-info.java | 10 ++++++++++ .../opensearch/index/fieldvisitor/package-info.java | 10 ++++++++++ .../org/opensearch/index/flush/package-info.java | 10 ++++++++++ .../java/org/opensearch/index/get/package-info.java | 10 ++++++++++ .../org/opensearch/index/mapper/package-info.java | 10 ++++++++++ .../org/opensearch/index/merge/package-info.java | 10 ++++++++++ .../java/org/opensearch/index/package-info.java | 10 ++++++++++ .../index/query/functionscore/package-info.java | 10 
++++++++++ .../org/opensearch/index/query/package-info.java | 10 ++++++++++ .../index/query/support/package-info.java | 10 ++++++++++ .../org/opensearch/index/recovery/package-info.java | 10 ++++++++++ .../org/opensearch/index/refresh/package-info.java | 10 ++++++++++ .../org/opensearch/index/reindex/package-info.java | 10 ++++++++++ .../org/opensearch/index/search/package-info.java | 10 ++++++++++ .../opensearch/index/search/stats/package-info.java | 10 ++++++++++ .../org/opensearch/index/seqno/package-info.java | 10 ++++++++++ .../org/opensearch/index/shard/package-info.java | 10 ++++++++++ .../opensearch/index/similarity/package-info.java | 10 ++++++++++ .../index/snapshots/blobstore/package-info.java | 10 ++++++++++ .../opensearch/index/snapshots/package-info.java | 10 ++++++++++ .../org/opensearch/index/stats/package-info.java | 10 ++++++++++ .../org/opensearch/index/store/package-info.java | 13 +++++++++++++ .../opensearch/index/termvectors/package-info.java | 10 ++++++++++ .../org/opensearch/index/translog/package-info.java | 10 ++++++++++ .../org/opensearch/index/warmer/package-info.java | 10 ++++++++++ .../opensearch/indices/analysis/package-info.java | 10 ++++++++++ .../opensearch/indices/breaker/package-info.java | 10 ++++++++++ .../opensearch/indices/cluster/package-info.java | 10 ++++++++++ .../indices/fielddata/cache/package-info.java | 10 ++++++++++ .../opensearch/indices/fielddata/package-info.java | 10 ++++++++++ .../org/opensearch/indices/mapper/package-info.java | 10 ++++++++++ .../java/org/opensearch/indices/package-info.java | 10 ++++++++++ .../opensearch/indices/recovery/package-info.java | 10 ++++++++++ .../indices/replication/common/package-info.java | 10 ++++++++++ .../indices/replication/package-info.java | 10 ++++++++++ .../org/opensearch/indices/store/package-info.java | 10 ++++++++++ .../java/org/opensearch/ingest/package-info.java | 10 ++++++++++ .../java/org/opensearch/lucene/package-info.java | 10 ++++++++++ 
.../org/opensearch/lucene/queries/package-info.java | 10 ++++++++++ .../org/opensearch/monitor/fs/package-info.java | 10 ++++++++++ .../org/opensearch/monitor/jvm/package-info.java | 10 ++++++++++ .../org/opensearch/monitor/os/package-info.java | 10 ++++++++++ .../opensearch/monitor/process/package-info.java | 10 ++++++++++ .../opensearch/persistent/decider/package-info.java | 10 ++++++++++ .../opensearch/repositories/fs/package-info.java | 10 ++++++++++ .../aggregations/bucket/adjacency/package-info.java | 10 ++++++++++ .../aggregations/bucket/composite/package-info.java | 10 ++++++++++ .../aggregations/bucket/filter/package-info.java | 10 ++++++++++ .../aggregations/bucket/geogrid/package-info.java | 10 ++++++++++ .../aggregations/bucket/global/package-info.java | 10 ++++++++++ .../aggregations/bucket/missing/package-info.java | 10 ++++++++++ .../aggregations/bucket/nested/package-info.java | 10 ++++++++++ .../bucket/{package-info => package-info.java} | 0 .../aggregations/bucket/range/package-info.java | 10 ++++++++++ .../aggregations/bucket/sampler/package-info.java | 10 ++++++++++ .../bucket/terms/heuristic/package-info.java | 10 ++++++++++ .../aggregations/bucket/terms/package-info.java | 10 ++++++++++ .../search/aggregations/pipeline/package-info.java | 10 ++++++++++ .../search/aggregations/support/package-info.java | 3 +-- .../aggregations/support/values/package-info.java | 10 ++++++++++ .../org/opensearch/search/builder/package-info.java | 10 ++++++++++ .../opensearch/search/collapse/package-info.java | 10 ++++++++++ .../org/opensearch/search/dfs/package-info.java | 10 ++++++++++ .../opensearch/search/internal/package-info.java | 10 ++++++++++ .../org/opensearch/search/lookup/package-info.java | 10 ++++++++++ .../java/org/opensearch/search/package-info.java | 10 ++++++++++ .../org/opensearch/search/query/package-info.java | 10 ++++++++++ .../org/opensearch/search/rescore/package-info.java | 10 ++++++++++ .../opensearch/search/searchafter/package-info.java 
| 10 ++++++++++ .../org/opensearch/search/slice/package-info.java | 10 ++++++++++ .../org/opensearch/search/sort/package-info.java | 10 ++++++++++ .../org/opensearch/threadpool/package-info.java | 10 ++++++++++ .../java/org/opensearch/transport/package-info.java | 10 ++++++++++ .../java/org/opensearch/usage/package-info.java | 10 ++++++++++ .../java/org/opensearch/watcher/package-info.java | 10 ++++++++++ 247 files changed, 2443 insertions(+), 16 deletions(-) create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/allocation/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/configuration/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/health/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/node/info/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/node/liveness/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/node/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/node/reload/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/node/stats/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/node/usage/package-info.java create mode 100644 
server/src/main/java/org/opensearch/action/admin/cluster/remote/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/repositories/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/reroute/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/settings/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/shards/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/snapshots/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/state/package-info.java create mode 100644 
server/src/main/java/org/opensearch/action/admin/cluster/stats/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/tasks/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/alias/exists/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/alias/get/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/alias/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/analyze/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/cache/clear/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/cache/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/close/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/dangling/find/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/dangling/list/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/datastream/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/exists/indices/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/exists/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/exists/types/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/get/package-info.java create mode 100644 
server/src/main/java/org/opensearch/action/admin/indices/mapping/get/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/open/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/readonly/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/recovery/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/resolve/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/rollover/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/segments/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/settings/get/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/settings/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/settings/put/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/shards/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/shrink/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/stats/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/template/delete/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/template/get/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/template/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/template/post/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/template/put/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/package-info.java create mode 100644 
server/src/main/java/org/opensearch/action/admin/indices/upgrade/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/validate/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/bulk/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/fieldcaps/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/ingest/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/main/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/resync/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/support/broadcast/node/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/support/broadcast/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/support/master/info/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/support/master/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/support/nodes/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/support/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/support/replication/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/support/single/instance/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/support/single/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/support/single/shard/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/support/tasks/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/update/package-info.java create mode 100644 server/src/main/java/org/opensearch/bootstrap/package-info.java create mode 100644 
server/src/main/java/org/opensearch/cli/package-info.java create mode 100644 server/src/main/java/org/opensearch/client/node/package-info.java create mode 100644 server/src/main/java/org/opensearch/client/support/package-info.java create mode 100644 server/src/main/java/org/opensearch/client/transport/package-info.java create mode 100644 server/src/main/java/org/opensearch/cluster/ack/package-info.java create mode 100644 server/src/main/java/org/opensearch/cluster/action/index/package-info.java create mode 100644 server/src/main/java/org/opensearch/cluster/action/package-info.java create mode 100644 server/src/main/java/org/opensearch/cluster/action/shard/package-info.java create mode 100644 server/src/main/java/org/opensearch/cluster/block/package-info.java create mode 100644 server/src/main/java/org/opensearch/cluster/coordination/package-info.java create mode 100644 server/src/main/java/org/opensearch/cluster/health/package-info.java create mode 100644 server/src/main/java/org/opensearch/cluster/metadata/package-info.java create mode 100644 server/src/main/java/org/opensearch/cluster/node/package-info.java create mode 100644 server/src/main/java/org/opensearch/cluster/package-info.java create mode 100644 server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/package-info.java create mode 100644 server/src/main/java/org/opensearch/cluster/routing/allocation/command/package-info.java create mode 100644 server/src/main/java/org/opensearch/cluster/routing/allocation/decider/package-info.java create mode 100644 server/src/main/java/org/opensearch/cluster/routing/allocation/package-info.java create mode 100644 server/src/main/java/org/opensearch/cluster/routing/package-info.java create mode 100644 server/src/main/java/org/opensearch/cluster/service/package-info.java create mode 100644 server/src/main/java/org/opensearch/common/blobstore/fs/package-info.java create mode 100644 server/src/main/java/org/opensearch/common/blobstore/package-info.java 
create mode 100644 server/src/main/java/org/opensearch/common/blobstore/support/package-info.java create mode 100644 server/src/main/java/org/opensearch/common/breaker/package-info.java create mode 100644 server/src/main/java/org/opensearch/common/bytes/package-info.java create mode 100644 server/src/main/java/org/opensearch/common/cache/package-info.java create mode 100644 server/src/main/java/org/opensearch/common/collect/package-info.java create mode 100644 server/src/main/java/org/opensearch/common/component/package-info.java create mode 100644 server/src/main/java/org/opensearch/common/compress/package-info.java create mode 100644 server/src/main/java/org/opensearch/common/concurrent/package-info.java create mode 100644 server/src/main/java/org/opensearch/common/document/package-info.java create mode 100644 server/src/main/java/org/opensearch/common/geo/builders/package-info.java create mode 100644 server/src/main/java/org/opensearch/common/geo/package-info.java create mode 100644 server/src/main/java/org/opensearch/common/geo/parsers/package-info.java create mode 100644 server/src/main/java/org/opensearch/common/hash/package-info.java create mode 100644 server/src/main/java/org/opensearch/common/io/package-info.java create mode 100644 server/src/main/java/org/opensearch/common/joda/package-info.java create mode 100644 server/src/main/java/org/opensearch/common/lease/package-info.java create mode 100644 server/src/main/java/org/opensearch/common/logging/package-info.java create mode 100644 server/src/main/java/org/opensearch/common/lucene/index/package-info.java create mode 100644 server/src/main/java/org/opensearch/common/lucene/package-info.java create mode 100644 server/src/main/java/org/opensearch/common/lucene/search/function/package-info.java create mode 100644 server/src/main/java/org/opensearch/common/lucene/search/package-info.java create mode 100644 server/src/main/java/org/opensearch/common/lucene/store/package-info.java create mode 100644 
server/src/main/java/org/opensearch/common/lucene/uid/package-info.java create mode 100644 server/src/main/java/org/opensearch/common/metrics/package-info.java create mode 100644 server/src/main/java/org/opensearch/common/network/package-info.java create mode 100644 server/src/main/java/org/opensearch/common/package-info.java create mode 100644 server/src/main/java/org/opensearch/common/path/package-info.java create mode 100644 server/src/main/java/org/opensearch/common/recycler/package-info.java create mode 100644 server/src/main/java/org/opensearch/common/regex/package-info.java create mode 100644 server/src/main/java/org/opensearch/common/rounding/package-info.java create mode 100644 server/src/main/java/org/opensearch/common/text/package-info.java create mode 100644 server/src/main/java/org/opensearch/common/time/package-info.java create mode 100644 server/src/main/java/org/opensearch/common/transport/package-info.java create mode 100644 server/src/main/java/org/opensearch/common/unit/package-info.java create mode 100644 server/src/main/java/org/opensearch/common/util/concurrent/package-info.java create mode 100644 server/src/main/java/org/opensearch/common/util/iterable/package-info.java create mode 100644 server/src/main/java/org/opensearch/common/util/package-info.java create mode 100644 server/src/main/java/org/opensearch/common/util/set/package-info.java create mode 100644 server/src/main/java/org/opensearch/common/xcontent/package-info.java create mode 100644 server/src/main/java/org/opensearch/common/xcontent/support/package-info.java create mode 100644 server/src/main/java/org/opensearch/discovery/package-info.java create mode 100644 server/src/main/java/org/opensearch/env/package-info.java create mode 100644 server/src/main/java/org/opensearch/gateway/package-info.java create mode 100644 server/src/main/java/org/opensearch/http/package-info.java create mode 100644 server/src/main/java/org/opensearch/index/analysis/package-info.java create mode 100644 
server/src/main/java/org/opensearch/index/cache/bitset/package-info.java create mode 100644 server/src/main/java/org/opensearch/index/cache/package-info.java create mode 100644 server/src/main/java/org/opensearch/index/cache/query/package-info.java create mode 100644 server/src/main/java/org/opensearch/index/cache/request/package-info.java create mode 100644 server/src/main/java/org/opensearch/index/codec/package-info.java create mode 100644 server/src/main/java/org/opensearch/index/engine/package-info.java create mode 100644 server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/package-info.java create mode 100644 server/src/main/java/org/opensearch/index/fielddata/ordinals/package-info.java create mode 100644 server/src/main/java/org/opensearch/index/fielddata/package-info.java create mode 100644 server/src/main/java/org/opensearch/index/fielddata/plain/package-info.java create mode 100644 server/src/main/java/org/opensearch/index/fieldvisitor/package-info.java create mode 100644 server/src/main/java/org/opensearch/index/flush/package-info.java create mode 100644 server/src/main/java/org/opensearch/index/get/package-info.java create mode 100644 server/src/main/java/org/opensearch/index/mapper/package-info.java create mode 100644 server/src/main/java/org/opensearch/index/merge/package-info.java create mode 100644 server/src/main/java/org/opensearch/index/package-info.java create mode 100644 server/src/main/java/org/opensearch/index/query/functionscore/package-info.java create mode 100644 server/src/main/java/org/opensearch/index/query/package-info.java create mode 100644 server/src/main/java/org/opensearch/index/query/support/package-info.java create mode 100644 server/src/main/java/org/opensearch/index/recovery/package-info.java create mode 100644 server/src/main/java/org/opensearch/index/refresh/package-info.java create mode 100644 server/src/main/java/org/opensearch/index/reindex/package-info.java create mode 100644 
server/src/main/java/org/opensearch/index/search/package-info.java create mode 100644 server/src/main/java/org/opensearch/index/search/stats/package-info.java create mode 100644 server/src/main/java/org/opensearch/index/seqno/package-info.java create mode 100644 server/src/main/java/org/opensearch/index/shard/package-info.java create mode 100644 server/src/main/java/org/opensearch/index/similarity/package-info.java create mode 100644 server/src/main/java/org/opensearch/index/snapshots/blobstore/package-info.java create mode 100644 server/src/main/java/org/opensearch/index/snapshots/package-info.java create mode 100644 server/src/main/java/org/opensearch/index/stats/package-info.java create mode 100644 server/src/main/java/org/opensearch/index/store/package-info.java create mode 100644 server/src/main/java/org/opensearch/index/termvectors/package-info.java create mode 100644 server/src/main/java/org/opensearch/index/translog/package-info.java create mode 100644 server/src/main/java/org/opensearch/index/warmer/package-info.java create mode 100644 server/src/main/java/org/opensearch/indices/analysis/package-info.java create mode 100644 server/src/main/java/org/opensearch/indices/breaker/package-info.java create mode 100644 server/src/main/java/org/opensearch/indices/cluster/package-info.java create mode 100644 server/src/main/java/org/opensearch/indices/fielddata/cache/package-info.java create mode 100644 server/src/main/java/org/opensearch/indices/fielddata/package-info.java create mode 100644 server/src/main/java/org/opensearch/indices/mapper/package-info.java create mode 100644 server/src/main/java/org/opensearch/indices/package-info.java create mode 100644 server/src/main/java/org/opensearch/indices/recovery/package-info.java create mode 100644 server/src/main/java/org/opensearch/indices/replication/common/package-info.java create mode 100644 server/src/main/java/org/opensearch/indices/replication/package-info.java create mode 100644 
server/src/main/java/org/opensearch/indices/store/package-info.java create mode 100644 server/src/main/java/org/opensearch/ingest/package-info.java create mode 100644 server/src/main/java/org/opensearch/lucene/package-info.java create mode 100644 server/src/main/java/org/opensearch/lucene/queries/package-info.java create mode 100644 server/src/main/java/org/opensearch/monitor/fs/package-info.java create mode 100644 server/src/main/java/org/opensearch/monitor/jvm/package-info.java create mode 100644 server/src/main/java/org/opensearch/monitor/os/package-info.java create mode 100644 server/src/main/java/org/opensearch/monitor/process/package-info.java create mode 100644 server/src/main/java/org/opensearch/persistent/decider/package-info.java create mode 100644 server/src/main/java/org/opensearch/repositories/fs/package-info.java create mode 100644 server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/package-info.java create mode 100644 server/src/main/java/org/opensearch/search/aggregations/bucket/composite/package-info.java create mode 100644 server/src/main/java/org/opensearch/search/aggregations/bucket/filter/package-info.java create mode 100644 server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/package-info.java create mode 100644 server/src/main/java/org/opensearch/search/aggregations/bucket/global/package-info.java create mode 100644 server/src/main/java/org/opensearch/search/aggregations/bucket/missing/package-info.java create mode 100644 server/src/main/java/org/opensearch/search/aggregations/bucket/nested/package-info.java rename server/src/main/java/org/opensearch/search/aggregations/bucket/{package-info => package-info.java} (100%) create mode 100644 server/src/main/java/org/opensearch/search/aggregations/bucket/range/package-info.java create mode 100644 server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/package-info.java create mode 100644 
server/src/main/java/org/opensearch/search/aggregations/bucket/terms/heuristic/package-info.java create mode 100644 server/src/main/java/org/opensearch/search/aggregations/bucket/terms/package-info.java create mode 100644 server/src/main/java/org/opensearch/search/aggregations/pipeline/package-info.java create mode 100644 server/src/main/java/org/opensearch/search/aggregations/support/values/package-info.java create mode 100644 server/src/main/java/org/opensearch/search/builder/package-info.java create mode 100644 server/src/main/java/org/opensearch/search/collapse/package-info.java create mode 100644 server/src/main/java/org/opensearch/search/dfs/package-info.java create mode 100644 server/src/main/java/org/opensearch/search/internal/package-info.java create mode 100644 server/src/main/java/org/opensearch/search/lookup/package-info.java create mode 100644 server/src/main/java/org/opensearch/search/package-info.java create mode 100644 server/src/main/java/org/opensearch/search/query/package-info.java create mode 100644 server/src/main/java/org/opensearch/search/rescore/package-info.java create mode 100644 server/src/main/java/org/opensearch/search/searchafter/package-info.java create mode 100644 server/src/main/java/org/opensearch/search/slice/package-info.java create mode 100644 server/src/main/java/org/opensearch/search/sort/package-info.java create mode 100644 server/src/main/java/org/opensearch/threadpool/package-info.java create mode 100644 server/src/main/java/org/opensearch/transport/package-info.java create mode 100644 server/src/main/java/org/opensearch/usage/package-info.java create mode 100644 server/src/main/java/org/opensearch/watcher/package-info.java diff --git a/gradle/missing-javadoc.gradle b/gradle/missing-javadoc.gradle index 05531487f35f3..2dea9a0e11d91 100644 --- a/gradle/missing-javadoc.gradle +++ b/gradle/missing-javadoc.gradle @@ -162,7 +162,6 @@ configure([ project(":qa:os"), project(":qa:wildfly"), project(":rest-api-spec"), - 
project(":server"), project(":test:external-modules:test-delayed-aggs"), project(":test:fixtures:azure-fixture"), project(":test:fixtures:gcs-fixture"), @@ -177,6 +176,14 @@ configure([ } } +configure(project(":server")) { + project.tasks.withType(MissingJavadocTask) { + isExcluded = true + // TODO: reenable after fixing missing javadocs + // javadocMissingLevel = "class" + } +} + class MissingJavadocTask extends DefaultTask { @InputFiles @SkipWhenEmpty @@ -274,6 +281,10 @@ class MissingJavadocTask extends DefaultTask { if (!classpath.isEmpty()) { opts << [ '-classpath', classpath.asPath ] } + + opts << [ '-tag', 'opensearch.experimental:a:WARNING: This API is experimental and might change in incompatible ways in the next release.' ] + opts << [ '-tag', 'opensearch.internal:a:NOTE: This API is for internal purposes only and might change in incompatible ways in the next release.' ] + opts << [ '-doclet', "org.opensearch.missingdoclet.MissingDoclet" ] opts << [ '-docletpath', docletpath.asPath ] opts << [ '--missing-level', javadocMissingLevel ] diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/package-info.java new file mode 100644 index 0000000000000..ef38d39ede82a --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Cluster allocation transport handlers. 
*/ +package org.opensearch.action.admin.cluster.allocation; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/package-info.java new file mode 100644 index 0000000000000..5543e7a6bde2f --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Cluster configuration transport handlers. */ +package org.opensearch.action.admin.cluster.configuration; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/health/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/health/package-info.java new file mode 100644 index 0000000000000..7512ea5cdaafb --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/health/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Cluster Health transport handlers. 
*/ +package org.opensearch.action.admin.cluster.health; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/package-info.java new file mode 100644 index 0000000000000..a7e6b7c897fe5 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Hot threads at node level transport handlers. */ +package org.opensearch.action.admin.cluster.node.hotthreads; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/package-info.java new file mode 100644 index 0000000000000..4203adc8ed2d6 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Node Info transport handlers. 
*/ +package org.opensearch.action.admin.cluster.node.info; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/liveness/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/liveness/package-info.java new file mode 100644 index 0000000000000..97dae1e4a4ef4 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/liveness/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Node liveness transport handlers. */ +package org.opensearch.action.admin.cluster.node.liveness; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/package-info.java new file mode 100644 index 0000000000000..f92091587eb04 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Node level transport handlers. */ +package org.opensearch.action.admin.cluster.node; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/package-info.java new file mode 100644 index 0000000000000..a2ddf57796a4a --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** Node reloading secured settings transport handlers. */ +package org.opensearch.action.admin.cluster.node.reload; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/package-info.java new file mode 100644 index 0000000000000..14efa77f066d6 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Node Stats transport handlers. */ +package org.opensearch.action.admin.cluster.node.stats; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/package-info.java new file mode 100644 index 0000000000000..898aa26d16792 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** Transport handler for Cancelling Node Tasks */ +package org.opensearch.action.admin.cluster.node.tasks.cancel; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/package-info.java new file mode 100644 index 0000000000000..0eb044c2a29eb --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Transport handlers for getting Node Tasks. */ +package org.opensearch.action.admin.cluster.node.tasks.get; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/package-info.java new file mode 100644 index 0000000000000..df84d7fe19d02 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Transport handlers for listing node tasks. 
*/ +package org.opensearch.action.admin.cluster.node.tasks.list; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/package-info.java new file mode 100644 index 0000000000000..9d809646460f2 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Node tasks transport handlers. */ +package org.opensearch.action.admin.cluster.node.tasks; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/package-info.java new file mode 100644 index 0000000000000..37943b23dbe10 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Node Usage transport handlers. */ +package org.opensearch.action.admin.cluster.node.usage; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/remote/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/remote/package-info.java new file mode 100644 index 0000000000000..e5be56ff1d139 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/remote/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** Remote Node Information transport handlers. */ +package org.opensearch.action.admin.cluster.remote; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/package-info.java new file mode 100644 index 0000000000000..1a9c0f2ce113b --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Transport handler for cleaning up a snapshot repository. */ +package org.opensearch.action.admin.cluster.repositories.cleanup; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/package-info.java new file mode 100644 index 0000000000000..db9d5e7f8ceea --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Transport handler for deleting a snapshot repository. 
*/ +package org.opensearch.action.admin.cluster.repositories.delete; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/package-info.java new file mode 100644 index 0000000000000..9f053ae11d8bc --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Transport handler for getting a snapshot repository. */ +package org.opensearch.action.admin.cluster.repositories.get; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/package-info.java new file mode 100644 index 0000000000000..75eb53cb11748 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Repository transport handlers. 
*/ +package org.opensearch.action.admin.cluster.repositories; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/package-info.java new file mode 100644 index 0000000000000..e5b6305416174 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Transport handlers for putting a new snapshot repository */ +package org.opensearch.action.admin.cluster.repositories.put; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/package-info.java new file mode 100644 index 0000000000000..236e7a6b26369 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Transport handler for verifying a snapshot repository. 
*/ +package org.opensearch.action.admin.cluster.repositories.verify; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/package-info.java new file mode 100644 index 0000000000000..af26371347c0d --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Cluster reroute transport handlers. */ +package org.opensearch.action.admin.cluster.reroute; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/settings/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/settings/package-info.java new file mode 100644 index 0000000000000..a532b3e9f4158 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/settings/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Cluster settings transport handlers. */ +package org.opensearch.action.admin.cluster.settings; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/package-info.java new file mode 100644 index 0000000000000..94866d102ac15 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** Search Shards transport handlers. */ +package org.opensearch.action.admin.cluster.shards; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/package-info.java new file mode 100644 index 0000000000000..14738fe5e95b4 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Clone Snapshot transport handler. */ +package org.opensearch.action.admin.cluster.snapshots.clone; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/package-info.java new file mode 100644 index 0000000000000..58e1f4f9311ae --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Create Snapshot transport handler. 
*/ +package org.opensearch.action.admin.cluster.snapshots.create; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/package-info.java new file mode 100644 index 0000000000000..5ad5e58ebe1fd --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Delete Snapshot transport handler. */ +package org.opensearch.action.admin.cluster.snapshots.delete; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/package-info.java new file mode 100644 index 0000000000000..a00088759051c --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Get Snapshot transport handler. 
*/ +package org.opensearch.action.admin.cluster.snapshots.get; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/package-info.java new file mode 100644 index 0000000000000..9bfd68dd93178 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Snapshot transport handlers. */ +package org.opensearch.action.admin.cluster.snapshots; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/package-info.java new file mode 100644 index 0000000000000..69db3f77010ff --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Restore Snapshot transport handler. 
*/ +package org.opensearch.action.admin.cluster.snapshots.restore; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/package-info.java new file mode 100644 index 0000000000000..3244beaf97961 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Snapshot Status transport handlers. */ +package org.opensearch.action.admin.cluster.snapshots.status; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/package-info.java new file mode 100644 index 0000000000000..ab935d939e459 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Cluster State transport handler. */ +package org.opensearch.action.admin.cluster.state; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/package-info.java new file mode 100644 index 0000000000000..b10c976bbddf6 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** Cluster Stats transport handlers. */ +package org.opensearch.action.admin.cluster.stats; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/package-info.java new file mode 100644 index 0000000000000..8f2196ded7c23 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Stored Scripts transport handlers. */ +package org.opensearch.action.admin.cluster.storedscripts; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/tasks/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/tasks/package-info.java new file mode 100644 index 0000000000000..4169a343aaabf --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/tasks/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Pending Cluster Tasks transport handlers. 
*/ +package org.opensearch.action.admin.cluster.tasks; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/exists/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/exists/package-info.java new file mode 100644 index 0000000000000..dd4886e6b3419 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/exists/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Alias Exists transport handler. */ +package org.opensearch.action.admin.indices.alias.exists; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/package-info.java new file mode 100644 index 0000000000000..e4ea33da95743 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Get Alias transport handler. */ +package org.opensearch.action.admin.indices.alias.get; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/package-info.java new file mode 100644 index 0000000000000..3630775073c68 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** Alias transport handlers. */ +package org.opensearch.action.admin.indices.alias; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/analyze/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/analyze/package-info.java new file mode 100644 index 0000000000000..2de987da0f204 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/analyze/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Indices analyze transport handler. */ +package org.opensearch.action.admin.indices.analyze; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/package-info.java new file mode 100644 index 0000000000000..41e8437bc15c1 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Clear indices cache transport handler. 
*/ +package org.opensearch.action.admin.indices.cache.clear; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/cache/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/cache/package-info.java new file mode 100644 index 0000000000000..367c663bdc637 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/cache/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Indices Cache transport handlers. */ +package org.opensearch.action.admin.indices.cache; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/close/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/close/package-info.java new file mode 100644 index 0000000000000..fb2d419124461 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/close/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Close indices transport handlers. */ +package org.opensearch.action.admin.indices.close; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/create/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/create/package-info.java index e386817090285..07bdc0eaa5c64 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/create/package-info.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/create/package-info.java @@ -25,12 +25,12 @@ * under the License. */ -/** - * Create index action. - */ /* * Modifications Copyright OpenSearch Contributors. See * GitHub history for details. */ +/** + * Create index action. 
+ */ package org.opensearch.action.admin.indices.create; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/package-info.java new file mode 100644 index 0000000000000..15a966b35ab55 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Delete Dangling Indices transport handlers. */ +package org.opensearch.action.admin.indices.dangling.delete; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/package-info.java new file mode 100644 index 0000000000000..d5536d5794f3c --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Find Dangling Indices transport handlers. 
*/ +package org.opensearch.action.admin.indices.dangling.find; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/package-info.java new file mode 100644 index 0000000000000..fce0f5a130c2f --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Import Dangling Index transport handler. */ +package org.opensearch.action.admin.indices.dangling.import_index; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/package-info.java new file mode 100644 index 0000000000000..f6c37345fb73c --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** List Dangling Indices transport handler. */ +package org.opensearch.action.admin.indices.dangling.list; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/package-info.java index 1cfaf7f6de650..e69b6acf995e3 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/package-info.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/package-info.java @@ -25,6 +25,11 @@ * under the License. */ +/* + * Modifications Copyright OpenSearch Contributors. 
See + * GitHub history for details. + */ + /** * Dangling indices are indices that exist on disk on one or more nodes but * which do not currently exist in the cluster state. They arise in a @@ -41,9 +46,5 @@ * *

      The classes in this package form an API for managing dangling indices, allowing them to be listed, imported or deleted. */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ package org.opensearch.action.admin.indices.dangling; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/datastream/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/datastream/package-info.java new file mode 100644 index 0000000000000..a37c2bed1adff --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/datastream/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Data Stream transport handlers. */ +package org.opensearch.action.admin.indices.datastream; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/package-info.java new file mode 100644 index 0000000000000..e5b320dc1bdcd --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Index Exists transport handler. 
*/ +package org.opensearch.action.admin.indices.exists.indices; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/exists/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/exists/package-info.java new file mode 100644 index 0000000000000..0a91ac2fd9a20 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/exists/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Index Exists transport handlers. */ +package org.opensearch.action.admin.indices.exists; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/exists/types/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/exists/types/package-info.java new file mode 100644 index 0000000000000..30bc4569e221a --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/exists/types/package-info.java @@ -0,0 +1,13 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Index Type transport handlers. 
+ * @deprecated types are deprecated and will be removed + **/ +package org.opensearch.action.admin.indices.exists.types; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/get/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/get/package-info.java new file mode 100644 index 0000000000000..b438da7ef31dc --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/get/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Get Index transport handler. */ +package org.opensearch.action.admin.indices.get; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/package-info.java new file mode 100644 index 0000000000000..fcdd9846640bc --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Get Mapping transport handlers. 
*/ +package org.opensearch.action.admin.indices.mapping.get; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/open/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/open/package-info.java new file mode 100644 index 0000000000000..4fdea8fd18769 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/open/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Open Index transport handlers. */ +package org.opensearch.action.admin.indices.open; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/readonly/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/readonly/package-info.java new file mode 100644 index 0000000000000..d34d3c64eb1ad --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/readonly/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** ReadOnly Index transport handlers. */ +package org.opensearch.action.admin.indices.readonly; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/recovery/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/recovery/package-info.java new file mode 100644 index 0000000000000..51b7dd7997a15 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/recovery/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Index Recovery transport handlers. 
*/ +package org.opensearch.action.admin.indices.recovery; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/resolve/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/resolve/package-info.java new file mode 100644 index 0000000000000..a7732563cb041 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/resolve/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Index Resolve transport handler. */ +package org.opensearch.action.admin.indices.resolve; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/package-info.java new file mode 100644 index 0000000000000..23b5b18e3ae87 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Index Rollover transport handlers. */ +package org.opensearch.action.admin.indices.rollover; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/segments/package-info.java new file mode 100644 index 0000000000000..0bbd0164b8205 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** Segment level transport handlers. */ +package org.opensearch.action.admin.indices.segments; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/get/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/get/package-info.java new file mode 100644 index 0000000000000..e767b1a8870e0 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/get/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Get Index Settings transport handler. */ +package org.opensearch.action.admin.indices.settings.get; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/package-info.java new file mode 100644 index 0000000000000..862dfad419bd4 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Index Settings transport handlers. 
*/ +package org.opensearch.action.admin.indices.settings; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/package-info.java new file mode 100644 index 0000000000000..dc38136b72d57 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Put Settings transport handler. */ +package org.opensearch.action.admin.indices.settings.put; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shards/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/shards/package-info.java new file mode 100644 index 0000000000000..187ffc0714e7e --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/shards/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Index Shards transport handlers. */ +package org.opensearch.action.admin.indices.shards; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/package-info.java new file mode 100644 index 0000000000000..414095575f0d9 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** Shrink Index transport handlers. */ +package org.opensearch.action.admin.indices.shrink; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/package-info.java new file mode 100644 index 0000000000000..38241a0d3bcbb --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Index Stats transport handlers. */ +package org.opensearch.action.admin.indices.stats; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/package-info.java new file mode 100644 index 0000000000000..f331c9dae4a38 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Delete Index Templates transport handlers. 
*/ +package org.opensearch.action.admin.indices.template.delete; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/get/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/template/get/package-info.java new file mode 100644 index 0000000000000..e7173f4d6caaa --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/get/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Get Index Templates transport handlers. */ +package org.opensearch.action.admin.indices.template.get; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/template/package-info.java new file mode 100644 index 0000000000000..066a63f079232 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Index Templates transport handlers. 
*/ +package org.opensearch.action.admin.indices.template; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/post/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/template/post/package-info.java new file mode 100644 index 0000000000000..60b5a1d2abce8 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/post/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Post Index Templates transport handlers. */ +package org.opensearch.action.admin.indices.template.post; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/put/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/template/put/package-info.java new file mode 100644 index 0000000000000..a0033a434b7b3 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/put/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Put Index Templates transport handlers. 
*/ +package org.opensearch.action.admin.indices.template.put; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/package-info.java new file mode 100644 index 0000000000000..58ad55e715d94 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Get Upgrade Indices transport handlers. */ +package org.opensearch.action.admin.indices.upgrade.get; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/package-info.java new file mode 100644 index 0000000000000..ee5633c39fc2f --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Upgrade Indices transport handlers. */ +package org.opensearch.action.admin.indices.upgrade; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/package-info.java new file mode 100644 index 0000000000000..ae8c8839acaba --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** Post Upgrade Indices transport handlers. */ +package org.opensearch.action.admin.indices.upgrade.post; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/validate/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/validate/package-info.java new file mode 100644 index 0000000000000..ee210f7495e70 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/validate/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Query Validation transport handlers. */ +package org.opensearch.action.admin.indices.validate; diff --git a/server/src/main/java/org/opensearch/action/admin/package-info.java b/server/src/main/java/org/opensearch/action/admin/package-info.java index 1ee6ebacef3a5..4be3b7cf12e9c 100644 --- a/server/src/main/java/org/opensearch/action/admin/package-info.java +++ b/server/src/main/java/org/opensearch/action/admin/package-info.java @@ -25,12 +25,12 @@ * under the License. */ -/** - * Administrative Actions. - */ /* * Modifications Copyright OpenSearch Contributors. See * GitHub history for details. */ +/** + * Administrative Actions. + */ package org.opensearch.action.admin; diff --git a/server/src/main/java/org/opensearch/action/bulk/package-info.java b/server/src/main/java/org/opensearch/action/bulk/package-info.java new file mode 100644 index 0000000000000..45c42c0efa3f2 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/bulk/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Bulk API transport handlers. 
*/ +package org.opensearch.action.bulk; diff --git a/server/src/main/java/org/opensearch/action/fieldcaps/package-info.java b/server/src/main/java/org/opensearch/action/fieldcaps/package-info.java new file mode 100644 index 0000000000000..c7b7c01875d2b --- /dev/null +++ b/server/src/main/java/org/opensearch/action/fieldcaps/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Field Capabilities transport handlers. */ +package org.opensearch.action.fieldcaps; diff --git a/server/src/main/java/org/opensearch/action/ingest/package-info.java b/server/src/main/java/org/opensearch/action/ingest/package-info.java new file mode 100644 index 0000000000000..42414f2090b41 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/ingest/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Ingest API transport handlers. */ +package org.opensearch.action.ingest; diff --git a/server/src/main/java/org/opensearch/action/main/package-info.java b/server/src/main/java/org/opensearch/action/main/package-info.java new file mode 100644 index 0000000000000..dc08f0a7b6303 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/main/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Main Response transport handlers. 
*/ +package org.opensearch.action.main; diff --git a/server/src/main/java/org/opensearch/action/resync/package-info.java b/server/src/main/java/org/opensearch/action/resync/package-info.java new file mode 100644 index 0000000000000..61c46d85aecda --- /dev/null +++ b/server/src/main/java/org/opensearch/action/resync/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Resync action transport handlers. */ +package org.opensearch.action.resync; diff --git a/server/src/main/java/org/opensearch/action/support/broadcast/node/package-info.java b/server/src/main/java/org/opensearch/action/support/broadcast/node/package-info.java new file mode 100644 index 0000000000000..1d5ebaf244f77 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/broadcast/node/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Node Level Broadcast transport handlers. */ +package org.opensearch.action.support.broadcast.node; diff --git a/server/src/main/java/org/opensearch/action/support/broadcast/package-info.java b/server/src/main/java/org/opensearch/action/support/broadcast/package-info.java new file mode 100644 index 0000000000000..00b5c640fd3a1 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/broadcast/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Broadcast info transport handlers. 
*/ +package org.opensearch.action.support.broadcast; diff --git a/server/src/main/java/org/opensearch/action/support/master/info/package-info.java b/server/src/main/java/org/opensearch/action/support/master/info/package-info.java new file mode 100644 index 0000000000000..6ae2eb5465db5 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/master/info/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Master Node Information transport handlers. */ +package org.opensearch.action.support.master.info; diff --git a/server/src/main/java/org/opensearch/action/support/master/package-info.java b/server/src/main/java/org/opensearch/action/support/master/package-info.java new file mode 100644 index 0000000000000..b0f6f7942b688 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/master/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Master Node transport handlers. */ +package org.opensearch.action.support.master; diff --git a/server/src/main/java/org/opensearch/action/support/nodes/package-info.java b/server/src/main/java/org/opensearch/action/support/nodes/package-info.java new file mode 100644 index 0000000000000..f388527e30d8b --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/nodes/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Node request transport handlers. 
*/ +package org.opensearch.action.support.nodes; diff --git a/server/src/main/java/org/opensearch/action/support/package-info.java b/server/src/main/java/org/opensearch/action/support/package-info.java new file mode 100644 index 0000000000000..58dd4c5190d8b --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Transport handler Support Classes. */ +package org.opensearch.action.support; diff --git a/server/src/main/java/org/opensearch/action/support/replication/package-info.java b/server/src/main/java/org/opensearch/action/support/replication/package-info.java new file mode 100644 index 0000000000000..912cd9197b10f --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/replication/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Replication transport handlers. */ +package org.opensearch.action.support.replication; diff --git a/server/src/main/java/org/opensearch/action/support/single/instance/package-info.java b/server/src/main/java/org/opensearch/action/support/single/instance/package-info.java new file mode 100644 index 0000000000000..1acd5c1503ed0 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/single/instance/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Single Instance transport handlers. 
*/ +package org.opensearch.action.support.single.instance; diff --git a/server/src/main/java/org/opensearch/action/support/single/package-info.java b/server/src/main/java/org/opensearch/action/support/single/package-info.java new file mode 100644 index 0000000000000..918a755936fe3 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/single/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Single instance transport handlers. */ +package org.opensearch.action.support.single; diff --git a/server/src/main/java/org/opensearch/action/support/single/shard/package-info.java b/server/src/main/java/org/opensearch/action/support/single/shard/package-info.java new file mode 100644 index 0000000000000..0d93688ece5f1 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/single/shard/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Single Shard transport handlers. */ +package org.opensearch.action.support.single.shard; diff --git a/server/src/main/java/org/opensearch/action/support/tasks/package-info.java b/server/src/main/java/org/opensearch/action/support/tasks/package-info.java new file mode 100644 index 0000000000000..30924e1433c7b --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/tasks/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base Task Request transport handlers. 
*/ +package org.opensearch.action.support.tasks; diff --git a/server/src/main/java/org/opensearch/action/update/package-info.java b/server/src/main/java/org/opensearch/action/update/package-info.java new file mode 100644 index 0000000000000..a8c8720c9ac10 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/update/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Update Action transport handlers. */ +package org.opensearch.action.update; diff --git a/server/src/main/java/org/opensearch/bootstrap/package-info.java b/server/src/main/java/org/opensearch/bootstrap/package-info.java new file mode 100644 index 0000000000000..a1761d8d38315 --- /dev/null +++ b/server/src/main/java/org/opensearch/bootstrap/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * The bootstrap module implementing OpenSearch bootstrap operations. + */ +package org.opensearch.bootstrap; diff --git a/server/src/main/java/org/opensearch/cli/package-info.java b/server/src/main/java/org/opensearch/cli/package-info.java new file mode 100644 index 0000000000000..8e9f9f6360870 --- /dev/null +++ b/server/src/main/java/org/opensearch/cli/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * The command line interface module. 
+ */ +package org.opensearch.cli; diff --git a/server/src/main/java/org/opensearch/client/node/package-info.java b/server/src/main/java/org/opensearch/client/node/package-info.java new file mode 100644 index 0000000000000..0a3be1fad6bf0 --- /dev/null +++ b/server/src/main/java/org/opensearch/client/node/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * The node client module allowing to easily perform actions/operations at node level. + */ +package org.opensearch.client.node; diff --git a/server/src/main/java/org/opensearch/client/package-info.java b/server/src/main/java/org/opensearch/client/package-info.java index 24c744803a151..abfd9a9119971 100644 --- a/server/src/main/java/org/opensearch/client/package-info.java +++ b/server/src/main/java/org/opensearch/client/package-info.java @@ -25,12 +25,12 @@ * under the License. */ -/** - * The client module allowing to easily perform actions/operations. - */ /* * Modifications Copyright OpenSearch Contributors. See * GitHub history for details. */ +/** + * The client module allowing to easily perform actions/operations. + */ package org.opensearch.client; diff --git a/server/src/main/java/org/opensearch/client/support/package-info.java b/server/src/main/java/org/opensearch/client/support/package-info.java new file mode 100644 index 0000000000000..637f7f03101aa --- /dev/null +++ b/server/src/main/java/org/opensearch/client/support/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * The Abstract client for implementing new client types. 
+ */ +package org.opensearch.client.support; diff --git a/server/src/main/java/org/opensearch/client/transport/package-info.java b/server/src/main/java/org/opensearch/client/transport/package-info.java new file mode 100644 index 0000000000000..a775d93350b19 --- /dev/null +++ b/server/src/main/java/org/opensearch/client/transport/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Transport Client support classes. + */ +package org.opensearch.client.transport; diff --git a/server/src/main/java/org/opensearch/cluster/ack/package-info.java b/server/src/main/java/org/opensearch/cluster/ack/package-info.java new file mode 100644 index 0000000000000..e8206eeb37b26 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/ack/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Cluster Acknowledgment transport handlers. */ +package org.opensearch.cluster.ack; diff --git a/server/src/main/java/org/opensearch/cluster/action/index/package-info.java b/server/src/main/java/org/opensearch/cluster/action/index/package-info.java new file mode 100644 index 0000000000000..3594d858b2970 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/action/index/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Cluster Index Action transport handlers. 
*/ +package org.opensearch.cluster.action.index; diff --git a/server/src/main/java/org/opensearch/cluster/action/package-info.java b/server/src/main/java/org/opensearch/cluster/action/package-info.java new file mode 100644 index 0000000000000..acd8cf0f63592 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/action/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Cluster Action transport handlers. */ +package org.opensearch.cluster.action; diff --git a/server/src/main/java/org/opensearch/cluster/action/shard/package-info.java b/server/src/main/java/org/opensearch/cluster/action/shard/package-info.java new file mode 100644 index 0000000000000..5666176386d46 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/action/shard/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Cluster Shard State transport handlers. */ +package org.opensearch.cluster.action.shard; diff --git a/server/src/main/java/org/opensearch/cluster/block/package-info.java b/server/src/main/java/org/opensearch/cluster/block/package-info.java new file mode 100644 index 0000000000000..a51b4623d34af --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/block/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Cluster Block level classes. 
*/ +package org.opensearch.cluster.block; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/package-info.java b/server/src/main/java/org/opensearch/cluster/coordination/package-info.java new file mode 100644 index 0000000000000..2f0514f25f6d6 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/coordination/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Cluster Coordination foundation classes. */ +package org.opensearch.cluster.coordination; diff --git a/server/src/main/java/org/opensearch/cluster/health/package-info.java b/server/src/main/java/org/opensearch/cluster/health/package-info.java new file mode 100644 index 0000000000000..f022210548cfe --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/health/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Cluster Health Foundation classes. */ +package org.opensearch.cluster.health; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/package-info.java b/server/src/main/java/org/opensearch/cluster/metadata/package-info.java new file mode 100644 index 0000000000000..ef618b435b3fb --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/metadata/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Cluster metadata foundation classes. 
*/ +package org.opensearch.cluster.metadata; diff --git a/server/src/main/java/org/opensearch/cluster/node/package-info.java b/server/src/main/java/org/opensearch/cluster/node/package-info.java new file mode 100644 index 0000000000000..efb3e3d5f0248 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/node/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Cluster Node discovery foundation classes. */ +package org.opensearch.cluster.node; diff --git a/server/src/main/java/org/opensearch/cluster/package-info.java b/server/src/main/java/org/opensearch/cluster/package-info.java new file mode 100644 index 0000000000000..f9cd00f32a6a6 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Cluster level classes. */ +package org.opensearch.cluster; diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/package-info.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/package-info.java new file mode 100644 index 0000000000000..0481bbff1439c --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Foundation classes to ensure balanced shards. 
*/ +package org.opensearch.cluster.routing.allocation.allocator; diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/command/package-info.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/command/package-info.java new file mode 100644 index 0000000000000..8489c49c78752 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/command/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Allocation Command transport handlers. */ +package org.opensearch.cluster.routing.allocation.command; diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/package-info.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/package-info.java new file mode 100644 index 0000000000000..9429cb35c8adf --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** Foundation Classes to Decide how to allocate and balance shards */ +package org.opensearch.cluster.routing.allocation.decider; diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/package-info.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/package-info.java new file mode 100644 index 0000000000000..2f274f997f907 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Shard Allocation Foundation Classes. */ +package org.opensearch.cluster.routing.allocation; diff --git a/server/src/main/java/org/opensearch/cluster/routing/package-info.java b/server/src/main/java/org/opensearch/cluster/routing/package-info.java new file mode 100644 index 0000000000000..91989d2bc9c80 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/routing/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Cluster level shard routing and recovery classes. */ +package org.opensearch.cluster.routing; diff --git a/server/src/main/java/org/opensearch/cluster/service/package-info.java b/server/src/main/java/org/opensearch/cluster/service/package-info.java new file mode 100644 index 0000000000000..57ff5731355b1 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/service/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** Cluster Service foundation classes. */ +package org.opensearch.cluster.service; diff --git a/server/src/main/java/org/opensearch/common/blobstore/fs/package-info.java b/server/src/main/java/org/opensearch/common/blobstore/fs/package-info.java new file mode 100644 index 0000000000000..584d25f12738f --- /dev/null +++ b/server/src/main/java/org/opensearch/common/blobstore/fs/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base FileSystem Blobstore package. */ +package org.opensearch.common.blobstore.fs; diff --git a/server/src/main/java/org/opensearch/common/blobstore/package-info.java b/server/src/main/java/org/opensearch/common/blobstore/package-info.java new file mode 100644 index 0000000000000..bf7768b185084 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/blobstore/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base Blob Store package. */ +package org.opensearch.common.blobstore; diff --git a/server/src/main/java/org/opensearch/common/blobstore/support/package-info.java b/server/src/main/java/org/opensearch/common/blobstore/support/package-info.java new file mode 100644 index 0000000000000..f93e9583a4c8b --- /dev/null +++ b/server/src/main/java/org/opensearch/common/blobstore/support/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base Support classes package for BlobStore implementation. 
*/ +package org.opensearch.common.blobstore.support; diff --git a/server/src/main/java/org/opensearch/common/breaker/package-info.java b/server/src/main/java/org/opensearch/common/breaker/package-info.java new file mode 100644 index 0000000000000..30c5d8d981697 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/breaker/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Common Circuit Breaker package. */ +package org.opensearch.common.breaker; diff --git a/server/src/main/java/org/opensearch/common/bytes/package-info.java b/server/src/main/java/org/opensearch/common/bytes/package-info.java new file mode 100644 index 0000000000000..96e3db1b7cd06 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/bytes/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base BytesRef package. */ +package org.opensearch.common.bytes; diff --git a/server/src/main/java/org/opensearch/common/cache/package-info.java b/server/src/main/java/org/opensearch/common/cache/package-info.java new file mode 100644 index 0000000000000..174f719b33d17 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base Cache package. 
*/ +package org.opensearch.common.cache; diff --git a/server/src/main/java/org/opensearch/common/collect/package-info.java b/server/src/main/java/org/opensearch/common/collect/package-info.java new file mode 100644 index 0000000000000..6cd7b11582bb8 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/collect/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base collections package. */ +package org.opensearch.common.collect; diff --git a/server/src/main/java/org/opensearch/common/component/package-info.java b/server/src/main/java/org/opensearch/common/component/package-info.java new file mode 100644 index 0000000000000..34d034b5a3ffb --- /dev/null +++ b/server/src/main/java/org/opensearch/common/component/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base Lifecycle Component package. */ +package org.opensearch.common.component; diff --git a/server/src/main/java/org/opensearch/common/compress/package-info.java b/server/src/main/java/org/opensearch/common/compress/package-info.java new file mode 100644 index 0000000000000..38fbba9076ca8 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/compress/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base compression package. 
*/ +package org.opensearch.common.compress; diff --git a/server/src/main/java/org/opensearch/common/concurrent/package-info.java b/server/src/main/java/org/opensearch/common/concurrent/package-info.java new file mode 100644 index 0000000000000..886920af509ee --- /dev/null +++ b/server/src/main/java/org/opensearch/common/concurrent/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base Concurrency package. */ +package org.opensearch.common.concurrent; diff --git a/server/src/main/java/org/opensearch/common/document/package-info.java b/server/src/main/java/org/opensearch/common/document/package-info.java new file mode 100644 index 0000000000000..8bd90082dfc17 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/document/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base Document Level package. */ +package org.opensearch.common.document; diff --git a/server/src/main/java/org/opensearch/common/geo/builders/package-info.java b/server/src/main/java/org/opensearch/common/geo/builders/package-info.java new file mode 100644 index 0000000000000..bb4f9105835be --- /dev/null +++ b/server/src/main/java/org/opensearch/common/geo/builders/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base Geo Builders package. 
*/ +package org.opensearch.common.geo.builders; diff --git a/server/src/main/java/org/opensearch/common/geo/package-info.java b/server/src/main/java/org/opensearch/common/geo/package-info.java new file mode 100644 index 0000000000000..8debc310d9c72 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/geo/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base Geo package. */ +package org.opensearch.common.geo; diff --git a/server/src/main/java/org/opensearch/common/geo/parsers/package-info.java b/server/src/main/java/org/opensearch/common/geo/parsers/package-info.java new file mode 100644 index 0000000000000..a465462d5d7de --- /dev/null +++ b/server/src/main/java/org/opensearch/common/geo/parsers/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base Geo Parsers package. */ +package org.opensearch.common.geo.parsers; diff --git a/server/src/main/java/org/opensearch/common/hash/package-info.java b/server/src/main/java/org/opensearch/common/hash/package-info.java new file mode 100644 index 0000000000000..1babf570e7264 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/hash/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base hashing package. 
*/ +package org.opensearch.common.hash; diff --git a/server/src/main/java/org/opensearch/common/io/package-info.java b/server/src/main/java/org/opensearch/common/io/package-info.java new file mode 100644 index 0000000000000..28e34bcead549 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/io/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base IO package. */ +package org.opensearch.common.io; diff --git a/server/src/main/java/org/opensearch/common/joda/package-info.java b/server/src/main/java/org/opensearch/common/joda/package-info.java new file mode 100644 index 0000000000000..55ed8d9592a6d --- /dev/null +++ b/server/src/main/java/org/opensearch/common/joda/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base Joda Time package. */ +package org.opensearch.common.joda; diff --git a/server/src/main/java/org/opensearch/common/lease/package-info.java b/server/src/main/java/org/opensearch/common/lease/package-info.java new file mode 100644 index 0000000000000..f7097486a9c64 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/lease/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base Releasables package. 
*/ +package org.opensearch.common.lease; diff --git a/server/src/main/java/org/opensearch/common/logging/package-info.java b/server/src/main/java/org/opensearch/common/logging/package-info.java new file mode 100644 index 0000000000000..8440be23c5fee --- /dev/null +++ b/server/src/main/java/org/opensearch/common/logging/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base logger package. */ +package org.opensearch.common.logging; diff --git a/server/src/main/java/org/opensearch/common/lucene/index/package-info.java b/server/src/main/java/org/opensearch/common/lucene/index/package-info.java new file mode 100644 index 0000000000000..c605aec31752c --- /dev/null +++ b/server/src/main/java/org/opensearch/common/lucene/index/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** OpenSearch Lucene Index package. */ +package org.opensearch.common.lucene.index; diff --git a/server/src/main/java/org/opensearch/common/lucene/package-info.java b/server/src/main/java/org/opensearch/common/lucene/package-info.java new file mode 100644 index 0000000000000..845f99788cb83 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/lucene/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Common OpenSearch Lucene implementation package. 
*/ +package org.opensearch.common.lucene; diff --git a/server/src/main/java/org/opensearch/common/lucene/search/function/package-info.java b/server/src/main/java/org/opensearch/common/lucene/search/function/package-info.java new file mode 100644 index 0000000000000..c24e917137415 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/lucene/search/function/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base Lucene Function Scoring package. */ +package org.opensearch.common.lucene.search.function; diff --git a/server/src/main/java/org/opensearch/common/lucene/search/package-info.java b/server/src/main/java/org/opensearch/common/lucene/search/package-info.java new file mode 100644 index 0000000000000..e60b632a11d33 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/lucene/search/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base OpenSearch Specific Lucene Query package. */ +package org.opensearch.common.lucene.search; diff --git a/server/src/main/java/org/opensearch/common/lucene/store/package-info.java b/server/src/main/java/org/opensearch/common/lucene/store/package-info.java new file mode 100644 index 0000000000000..3d46f3be44b9b --- /dev/null +++ b/server/src/main/java/org/opensearch/common/lucene/store/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** OpenSearch specific lucene store package. 
*/ +package org.opensearch.common.lucene.store; diff --git a/server/src/main/java/org/opensearch/common/lucene/uid/package-info.java b/server/src/main/java/org/opensearch/common/lucene/uid/package-info.java new file mode 100644 index 0000000000000..46d28f9c416e9 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/lucene/uid/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** OpenSearch specific lucene sequence numbers package. */ +package org.opensearch.common.lucene.uid; diff --git a/server/src/main/java/org/opensearch/common/metrics/package-info.java b/server/src/main/java/org/opensearch/common/metrics/package-info.java new file mode 100644 index 0000000000000..58d66e44ef07d --- /dev/null +++ b/server/src/main/java/org/opensearch/common/metrics/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base metrics package. */ +package org.opensearch.common.metrics; diff --git a/server/src/main/java/org/opensearch/common/network/package-info.java b/server/src/main/java/org/opensearch/common/network/package-info.java new file mode 100644 index 0000000000000..250423e4fb486 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/network/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base networking support classes package. 
*/ +package org.opensearch.common.network; diff --git a/server/src/main/java/org/opensearch/common/package-info.java b/server/src/main/java/org/opensearch/common/package-info.java new file mode 100644 index 0000000000000..09e988590c22d --- /dev/null +++ b/server/src/main/java/org/opensearch/common/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base package for common classes used across the OpenSearch server codebase. */ +package org.opensearch.common; diff --git a/server/src/main/java/org/opensearch/common/path/package-info.java b/server/src/main/java/org/opensearch/common/path/package-info.java new file mode 100644 index 0000000000000..f171fd0ce8281 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/path/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base Path utility package. */ +package org.opensearch.common.path; diff --git a/server/src/main/java/org/opensearch/common/recycler/package-info.java b/server/src/main/java/org/opensearch/common/recycler/package-info.java new file mode 100644 index 0000000000000..f59d4db545200 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/recycler/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base Resource Recycler package. 
*/ +package org.opensearch.common.recycler; diff --git a/server/src/main/java/org/opensearch/common/regex/package-info.java b/server/src/main/java/org/opensearch/common/regex/package-info.java new file mode 100644 index 0000000000000..30140269a2726 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/regex/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base Regex utility package. */ +package org.opensearch.common.regex; diff --git a/server/src/main/java/org/opensearch/common/rounding/package-info.java b/server/src/main/java/org/opensearch/common/rounding/package-info.java new file mode 100644 index 0000000000000..5fa3e39c6a786 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/rounding/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base DateTime rounding package. */ +package org.opensearch.common.rounding; diff --git a/server/src/main/java/org/opensearch/common/text/package-info.java b/server/src/main/java/org/opensearch/common/text/package-info.java new file mode 100644 index 0000000000000..229d39aa4ff8d --- /dev/null +++ b/server/src/main/java/org/opensearch/common/text/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base Text utility package. 
*/ +package org.opensearch.common.text; diff --git a/server/src/main/java/org/opensearch/common/time/package-info.java b/server/src/main/java/org/opensearch/common/time/package-info.java new file mode 100644 index 0000000000000..8e946aa8a3b1b --- /dev/null +++ b/server/src/main/java/org/opensearch/common/time/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base Time utility package. */ +package org.opensearch.common.time; diff --git a/server/src/main/java/org/opensearch/common/transport/package-info.java b/server/src/main/java/org/opensearch/common/transport/package-info.java new file mode 100644 index 0000000000000..abb8dfbb4e4f0 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/transport/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base Transport utility package. */ +package org.opensearch.common.transport; diff --git a/server/src/main/java/org/opensearch/common/unit/package-info.java b/server/src/main/java/org/opensearch/common/unit/package-info.java new file mode 100644 index 0000000000000..5c5e1b17e55a6 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/unit/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base Units of Measure utility package. 
*/ +package org.opensearch.common.unit; diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/package-info.java b/server/src/main/java/org/opensearch/common/util/concurrent/package-info.java new file mode 100644 index 0000000000000..7ccd4b6d8a6e8 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/util/concurrent/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base concurrency utilities package. */ +package org.opensearch.common.util.concurrent; diff --git a/server/src/main/java/org/opensearch/common/util/iterable/package-info.java b/server/src/main/java/org/opensearch/common/util/iterable/package-info.java new file mode 100644 index 0000000000000..5b3c307dd0ea5 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/util/iterable/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base iterables utility package. */ +package org.opensearch.common.util.iterable; diff --git a/server/src/main/java/org/opensearch/common/util/package-info.java b/server/src/main/java/org/opensearch/common/util/package-info.java new file mode 100644 index 0000000000000..234c075d68d73 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/util/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base Utility Classes package. 
*/ +package org.opensearch.common.util; diff --git a/server/src/main/java/org/opensearch/common/util/set/package-info.java b/server/src/main/java/org/opensearch/common/util/set/package-info.java new file mode 100644 index 0000000000000..298e5f53ff215 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/util/set/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base Set collection utility package. */ +package org.opensearch.common.util.set; diff --git a/server/src/main/java/org/opensearch/common/xcontent/package-info.java b/server/src/main/java/org/opensearch/common/xcontent/package-info.java new file mode 100644 index 0000000000000..5f820fe4d973f --- /dev/null +++ b/server/src/main/java/org/opensearch/common/xcontent/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base XContent parsing and building utility class package. */ +package org.opensearch.common.xcontent; diff --git a/server/src/main/java/org/opensearch/common/xcontent/support/package-info.java b/server/src/main/java/org/opensearch/common/xcontent/support/package-info.java new file mode 100644 index 0000000000000..021ccf0a07a98 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/xcontent/support/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** XContent specific support classes package. 
*/ +package org.opensearch.common.xcontent.support; diff --git a/server/src/main/java/org/opensearch/discovery/package-info.java b/server/src/main/java/org/opensearch/discovery/package-info.java new file mode 100644 index 0000000000000..052a7d3027f07 --- /dev/null +++ b/server/src/main/java/org/opensearch/discovery/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Node Discovery Module Foundation Classes. */ +package org.opensearch.discovery; diff --git a/server/src/main/java/org/opensearch/env/package-info.java b/server/src/main/java/org/opensearch/env/package-info.java new file mode 100644 index 0000000000000..8675321616eaa --- /dev/null +++ b/server/src/main/java/org/opensearch/env/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** OpenSearch Environment Foundation Classes. */ +package org.opensearch.env; diff --git a/server/src/main/java/org/opensearch/gateway/package-info.java b/server/src/main/java/org/opensearch/gateway/package-info.java new file mode 100644 index 0000000000000..e89db75e04023 --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Gateway Foundation Classes. 
*/ +package org.opensearch.gateway; diff --git a/server/src/main/java/org/opensearch/http/package-info.java b/server/src/main/java/org/opensearch/http/package-info.java new file mode 100644 index 0000000000000..814c82450e5ad --- /dev/null +++ b/server/src/main/java/org/opensearch/http/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Core http module implementing http transport request and responses */ +package org.opensearch.http; diff --git a/server/src/main/java/org/opensearch/index/analysis/package-info.java b/server/src/main/java/org/opensearch/index/analysis/package-info.java new file mode 100644 index 0000000000000..28cd10ffae0ab --- /dev/null +++ b/server/src/main/java/org/opensearch/index/analysis/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Core classes responsible for handling all indexing analysis operations */ +package org.opensearch.index.analysis; diff --git a/server/src/main/java/org/opensearch/index/cache/bitset/package-info.java b/server/src/main/java/org/opensearch/index/cache/bitset/package-info.java new file mode 100644 index 0000000000000..cb15224dd7613 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/cache/bitset/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** Core classes responsible for handling all bitset operations for indexing cache */ +package org.opensearch.index.cache.bitset; diff --git a/server/src/main/java/org/opensearch/index/cache/package-info.java b/server/src/main/java/org/opensearch/index/cache/package-info.java new file mode 100644 index 0000000000000..6ea72a6e17f83 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/cache/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Core classes responsible for handling all indexing cache operations */ +package org.opensearch.index.cache; diff --git a/server/src/main/java/org/opensearch/index/cache/query/package-info.java b/server/src/main/java/org/opensearch/index/cache/query/package-info.java new file mode 100644 index 0000000000000..4843b1fb5c29b --- /dev/null +++ b/server/src/main/java/org/opensearch/index/cache/query/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Core classes responsible for handling all query cache operations */ +package org.opensearch.index.cache.query; diff --git a/server/src/main/java/org/opensearch/index/cache/request/package-info.java b/server/src/main/java/org/opensearch/index/cache/request/package-info.java new file mode 100644 index 0000000000000..d88c79d17b9fd --- /dev/null +++ b/server/src/main/java/org/opensearch/index/cache/request/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** Core transport classes responsible for handling all indexing cache requests */ +package org.opensearch.index.cache.request; diff --git a/server/src/main/java/org/opensearch/index/codec/package-info.java b/server/src/main/java/org/opensearch/index/codec/package-info.java new file mode 100644 index 0000000000000..2f96ecea4f434 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Core classes responsible for handling all codec service and operations */ +package org.opensearch.index.codec; diff --git a/server/src/main/java/org/opensearch/index/engine/package-info.java b/server/src/main/java/org/opensearch/index/engine/package-info.java new file mode 100644 index 0000000000000..283ef24f04d50 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/engine/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Core classes responsible for handling all indexing engine operations */ +package org.opensearch.index.engine; diff --git a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/package-info.java b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/package-info.java new file mode 100644 index 0000000000000..7735d4e8088dd --- /dev/null +++ b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** Core classes responsible for handling all indexing field comparator operations */ +package org.opensearch.index.fielddata.fieldcomparator; diff --git a/server/src/main/java/org/opensearch/index/fielddata/ordinals/package-info.java b/server/src/main/java/org/opensearch/index/fielddata/ordinals/package-info.java new file mode 100644 index 0000000000000..275bd283cf90f --- /dev/null +++ b/server/src/main/java/org/opensearch/index/fielddata/ordinals/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Core classes responsible for handling all field ordinals operations */ +package org.opensearch.index.fielddata.ordinals; diff --git a/server/src/main/java/org/opensearch/index/fielddata/package-info.java b/server/src/main/java/org/opensearch/index/fielddata/package-info.java new file mode 100644 index 0000000000000..6892a82fdbfaa --- /dev/null +++ b/server/src/main/java/org/opensearch/index/fielddata/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** Core classes responsible for handling all field data logic */ +package org.opensearch.index.fielddata; diff --git a/server/src/main/java/org/opensearch/index/fielddata/plain/package-info.java b/server/src/main/java/org/opensearch/index/fielddata/plain/package-info.java new file mode 100644 index 0000000000000..f719dfb72f842 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/fielddata/plain/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Core classes responsible for handling all core field data operations */ +package org.opensearch.index.fielddata.plain; diff --git a/server/src/main/java/org/opensearch/index/fieldvisitor/package-info.java b/server/src/main/java/org/opensearch/index/fieldvisitor/package-info.java new file mode 100644 index 0000000000000..d6ab3f388c16a --- /dev/null +++ b/server/src/main/java/org/opensearch/index/fieldvisitor/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** FieldVisitor classes for retrieving field values from lucene */ +package org.opensearch.index.fieldvisitor; diff --git a/server/src/main/java/org/opensearch/index/flush/package-info.java b/server/src/main/java/org/opensearch/index/flush/package-info.java new file mode 100644 index 0000000000000..c048eace8b798 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/flush/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** Core classes responsible for handling all indexing flush statistics */ +package org.opensearch.index.flush; diff --git a/server/src/main/java/org/opensearch/index/get/package-info.java b/server/src/main/java/org/opensearch/index/get/package-info.java new file mode 100644 index 0000000000000..7a2f40441c1b5 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/get/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Package containing support Classes for Getting data from an Index */ +package org.opensearch.index.get; diff --git a/server/src/main/java/org/opensearch/index/mapper/package-info.java b/server/src/main/java/org/opensearch/index/mapper/package-info.java new file mode 100644 index 0000000000000..431f5b0f4debb --- /dev/null +++ b/server/src/main/java/org/opensearch/index/mapper/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Package containing OpenSearch Field Mappers */ +package org.opensearch.index.mapper; diff --git a/server/src/main/java/org/opensearch/index/merge/package-info.java b/server/src/main/java/org/opensearch/index/merge/package-info.java new file mode 100644 index 0000000000000..b184c9e5443bd --- /dev/null +++ b/server/src/main/java/org/opensearch/index/merge/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** Core classes responsible for handling all merge stats requests */ +package org.opensearch.index.merge; diff --git a/server/src/main/java/org/opensearch/index/package-info.java b/server/src/main/java/org/opensearch/index/package-info.java new file mode 100644 index 0000000000000..0f7d564e76ee5 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Core classes responsible for handling all indexing operations in OpenSearch */ +package org.opensearch.index; diff --git a/server/src/main/java/org/opensearch/index/query/functionscore/package-info.java b/server/src/main/java/org/opensearch/index/query/functionscore/package-info.java new file mode 100644 index 0000000000000..e590fd4a605d0 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/query/functionscore/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Foundation classes for implementing Function Scoring */ +package org.opensearch.index.query.functionscore; diff --git a/server/src/main/java/org/opensearch/index/query/package-info.java b/server/src/main/java/org/opensearch/index/query/package-info.java new file mode 100644 index 0000000000000..fe9ea6e4ba1b6 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/query/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** Query implementations */ +package org.opensearch.index.query; diff --git a/server/src/main/java/org/opensearch/index/query/support/package-info.java b/server/src/main/java/org/opensearch/index/query/support/package-info.java new file mode 100644 index 0000000000000..f14412c0fde3c --- /dev/null +++ b/server/src/main/java/org/opensearch/index/query/support/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Support classes for OpenSearch Queries - todo remove this package? */ +package org.opensearch.index.query.support; diff --git a/server/src/main/java/org/opensearch/index/recovery/package-info.java b/server/src/main/java/org/opensearch/index/recovery/package-info.java new file mode 100644 index 0000000000000..44f4291157718 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/recovery/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Recovery stats implementation */ +package org.opensearch.index.recovery; diff --git a/server/src/main/java/org/opensearch/index/refresh/package-info.java b/server/src/main/java/org/opensearch/index/refresh/package-info.java new file mode 100644 index 0000000000000..fdd0f5658504f --- /dev/null +++ b/server/src/main/java/org/opensearch/index/refresh/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** Refresh stats implementation */ +package org.opensearch.index.refresh; diff --git a/server/src/main/java/org/opensearch/index/reindex/package-info.java b/server/src/main/java/org/opensearch/index/reindex/package-info.java new file mode 100644 index 0000000000000..b3374e4b1853d --- /dev/null +++ b/server/src/main/java/org/opensearch/index/reindex/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Reindex implementation */ +package org.opensearch.index.reindex; diff --git a/server/src/main/java/org/opensearch/index/search/package-info.java b/server/src/main/java/org/opensearch/index/search/package-info.java new file mode 100644 index 0000000000000..a421f5372264f --- /dev/null +++ b/server/src/main/java/org/opensearch/index/search/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Utility classes for building OpenSearch queries */ +package org.opensearch.index.search; diff --git a/server/src/main/java/org/opensearch/index/search/stats/package-info.java b/server/src/main/java/org/opensearch/index/search/stats/package-info.java new file mode 100644 index 0000000000000..9b664d5619ce0 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/search/stats/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** Classes for obtaining search statistics */ +package org.opensearch.index.search.stats; diff --git a/server/src/main/java/org/opensearch/index/seqno/package-info.java b/server/src/main/java/org/opensearch/index/seqno/package-info.java new file mode 100644 index 0000000000000..495cde64a67ae --- /dev/null +++ b/server/src/main/java/org/opensearch/index/seqno/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Sequence number and retention lease implementation */ +package org.opensearch.index.seqno; diff --git a/server/src/main/java/org/opensearch/index/shard/package-info.java b/server/src/main/java/org/opensearch/index/shard/package-info.java new file mode 100644 index 0000000000000..4b36049f27a55 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/shard/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Package containing classes to implement an Index Shard */ +package org.opensearch.index.shard; diff --git a/server/src/main/java/org/opensearch/index/similarity/package-info.java b/server/src/main/java/org/opensearch/index/similarity/package-info.java new file mode 100644 index 0000000000000..384dcf63f0298 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/similarity/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** Package containing the OpenSearch SimilarityService and Providers */ +package org.opensearch.index.similarity; diff --git a/server/src/main/java/org/opensearch/index/snapshots/blobstore/package-info.java b/server/src/main/java/org/opensearch/index/snapshots/blobstore/package-info.java new file mode 100644 index 0000000000000..0d1eb6f5942ea --- /dev/null +++ b/server/src/main/java/org/opensearch/index/snapshots/blobstore/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Classes implementing blob store for snapshot / restore */ +package org.opensearch.index.snapshots.blobstore; diff --git a/server/src/main/java/org/opensearch/index/snapshots/package-info.java b/server/src/main/java/org/opensearch/index/snapshots/package-info.java new file mode 100644 index 0000000000000..7cebfe5152d48 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/snapshots/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Foundation classes for snapshot / restore */ +package org.opensearch.index.snapshots; diff --git a/server/src/main/java/org/opensearch/index/stats/package-info.java b/server/src/main/java/org/opensearch/index/stats/package-info.java new file mode 100644 index 0000000000000..f431a013b82b1 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/stats/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** Classes responsible for handling ShardIndexingPressure statistics */ +package org.opensearch.index.stats; diff --git a/server/src/main/java/org/opensearch/index/store/package-info.java b/server/src/main/java/org/opensearch/index/store/package-info.java new file mode 100644 index 0000000000000..cec95f35dba06 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/store/package-info.java @@ -0,0 +1,13 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Package containing classes to access files written by an index shard. + * see {@link org.opensearch.index.store.Store} for details + */ +package org.opensearch.index.store; diff --git a/server/src/main/java/org/opensearch/index/termvectors/package-info.java b/server/src/main/java/org/opensearch/index/termvectors/package-info.java new file mode 100644 index 0000000000000..9c5ab1da547f6 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/termvectors/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** TermVectorsService package */ +package org.opensearch.index.termvectors; diff --git a/server/src/main/java/org/opensearch/index/translog/package-info.java b/server/src/main/java/org/opensearch/index/translog/package-info.java new file mode 100644 index 0000000000000..ba824031420ed --- /dev/null +++ b/server/src/main/java/org/opensearch/index/translog/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** Core classes responsible for handling all translog operations */ +package org.opensearch.index.translog; diff --git a/server/src/main/java/org/opensearch/index/warmer/package-info.java b/server/src/main/java/org/opensearch/index/warmer/package-info.java new file mode 100644 index 0000000000000..b3635b6ea286a --- /dev/null +++ b/server/src/main/java/org/opensearch/index/warmer/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Core classes responsible for handling all indexing warming operations */ +package org.opensearch.index.warmer; diff --git a/server/src/main/java/org/opensearch/indices/analysis/package-info.java b/server/src/main/java/org/opensearch/indices/analysis/package-info.java new file mode 100644 index 0000000000000..3f4103298b04e --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/analysis/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Analysis Module package. */ +package org.opensearch.indices.analysis; diff --git a/server/src/main/java/org/opensearch/indices/breaker/package-info.java b/server/src/main/java/org/opensearch/indices/breaker/package-info.java new file mode 100644 index 0000000000000..9e455c0a54c15 --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/breaker/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** CircuitBreaker Service package. 
*/ +package org.opensearch.indices.breaker; diff --git a/server/src/main/java/org/opensearch/indices/cluster/package-info.java b/server/src/main/java/org/opensearch/indices/cluster/package-info.java new file mode 100644 index 0000000000000..9d42103a9af41 --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/cluster/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Indices ClusterState Service package. */ +package org.opensearch.indices.cluster; diff --git a/server/src/main/java/org/opensearch/indices/fielddata/cache/package-info.java b/server/src/main/java/org/opensearch/indices/fielddata/cache/package-info.java new file mode 100644 index 0000000000000..fc52c884edeb7 --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/fielddata/cache/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** FieldData Cache package. */ +package org.opensearch.indices.fielddata.cache; diff --git a/server/src/main/java/org/opensearch/indices/fielddata/package-info.java b/server/src/main/java/org/opensearch/indices/fielddata/package-info.java new file mode 100644 index 0000000000000..465badca28de7 --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/fielddata/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Indices FieldData package. 
*/ +package org.opensearch.indices.fielddata; diff --git a/server/src/main/java/org/opensearch/indices/mapper/package-info.java b/server/src/main/java/org/opensearch/indices/mapper/package-info.java new file mode 100644 index 0000000000000..85c2927ae4c03 --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/mapper/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Field Mapper Registry package. */ +package org.opensearch.indices.mapper; diff --git a/server/src/main/java/org/opensearch/indices/package-info.java b/server/src/main/java/org/opensearch/indices/package-info.java new file mode 100644 index 0000000000000..0f2ea1e1a9848 --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Indices Module package. */ +package org.opensearch.indices; diff --git a/server/src/main/java/org/opensearch/indices/recovery/package-info.java b/server/src/main/java/org/opensearch/indices/recovery/package-info.java new file mode 100644 index 0000000000000..3c850b2a84122 --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/recovery/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Peer Recovery Package. 
*/ +package org.opensearch.indices.recovery; diff --git a/server/src/main/java/org/opensearch/indices/replication/common/package-info.java b/server/src/main/java/org/opensearch/indices/replication/common/package-info.java new file mode 100644 index 0000000000000..77a8422f83829 --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/replication/common/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Foundation classes for Replication Implementation. */ +package org.opensearch.indices.replication.common; diff --git a/server/src/main/java/org/opensearch/indices/replication/package-info.java b/server/src/main/java/org/opensearch/indices/replication/package-info.java new file mode 100644 index 0000000000000..270f8b820f7b3 --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/replication/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Document vs Segment Replication classes. */ +package org.opensearch.indices.replication; diff --git a/server/src/main/java/org/opensearch/indices/store/package-info.java b/server/src/main/java/org/opensearch/indices/store/package-info.java new file mode 100644 index 0000000000000..47b19d6d40141 --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/store/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base package for index store. 
*/ +package org.opensearch.indices.store; diff --git a/server/src/main/java/org/opensearch/ingest/package-info.java b/server/src/main/java/org/opensearch/ingest/package-info.java new file mode 100644 index 0000000000000..513d288495520 --- /dev/null +++ b/server/src/main/java/org/opensearch/ingest/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Ingest Processor base package. */ +package org.opensearch.ingest; diff --git a/server/src/main/java/org/opensearch/lucene/package-info.java b/server/src/main/java/org/opensearch/lucene/package-info.java new file mode 100644 index 0000000000000..b56e64b670cbd --- /dev/null +++ b/server/src/main/java/org/opensearch/lucene/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Package for OpenSearch Lucene Extensions. */ +package org.opensearch.lucene; diff --git a/server/src/main/java/org/opensearch/lucene/queries/package-info.java b/server/src/main/java/org/opensearch/lucene/queries/package-info.java new file mode 100644 index 0000000000000..4fee9a059b80b --- /dev/null +++ b/server/src/main/java/org/opensearch/lucene/queries/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** OpenSearch specific lucene query implementations. 
*/ +package org.opensearch.lucene.queries; diff --git a/server/src/main/java/org/opensearch/monitor/fs/package-info.java b/server/src/main/java/org/opensearch/monitor/fs/package-info.java new file mode 100644 index 0000000000000..bfd99560f8eb6 --- /dev/null +++ b/server/src/main/java/org/opensearch/monitor/fs/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** FileSystem monitoring package. For _cat APIs. */ +package org.opensearch.monitor.fs; diff --git a/server/src/main/java/org/opensearch/monitor/jvm/package-info.java b/server/src/main/java/org/opensearch/monitor/jvm/package-info.java new file mode 100644 index 0000000000000..bff07e3886af2 --- /dev/null +++ b/server/src/main/java/org/opensearch/monitor/jvm/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** JVM Monitoring base package. For _cat APIs. */ +package org.opensearch.monitor.jvm; diff --git a/server/src/main/java/org/opensearch/monitor/os/package-info.java b/server/src/main/java/org/opensearch/monitor/os/package-info.java new file mode 100644 index 0000000000000..73a912ead3fd5 --- /dev/null +++ b/server/src/main/java/org/opensearch/monitor/os/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** OperatingSystem monitoring base package. For _cat APIs. 
*/ +package org.opensearch.monitor.os; diff --git a/server/src/main/java/org/opensearch/monitor/process/package-info.java b/server/src/main/java/org/opensearch/monitor/process/package-info.java new file mode 100644 index 0000000000000..c99241bf534ba --- /dev/null +++ b/server/src/main/java/org/opensearch/monitor/process/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Process monitoring base package. For _cat APIs. */ +package org.opensearch.monitor.process; diff --git a/server/src/main/java/org/opensearch/persistent/decider/package-info.java b/server/src/main/java/org/opensearch/persistent/decider/package-info.java new file mode 100644 index 0000000000000..83f96dc23db19 --- /dev/null +++ b/server/src/main/java/org/opensearch/persistent/decider/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Persistent Task Decider package. */ +package org.opensearch.persistent.decider; diff --git a/server/src/main/java/org/opensearch/repositories/fs/package-info.java b/server/src/main/java/org/opensearch/repositories/fs/package-info.java new file mode 100644 index 0000000000000..2f3f9aedad933 --- /dev/null +++ b/server/src/main/java/org/opensearch/repositories/fs/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** FileSystem snapshot repository store package. 
*/ +package org.opensearch.repositories.fs; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/package-info.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/package-info.java new file mode 100644 index 0000000000000..93eed08c5dd2d --- /dev/null +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Adjacency Aggregation package. */ +package org.opensearch.search.aggregations.bucket.adjacency; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/package-info.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/package-info.java new file mode 100644 index 0000000000000..33a87b342d74d --- /dev/null +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Composite Aggregation package. 
*/ +package org.opensearch.search.aggregations.bucket.composite; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/package-info.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/package-info.java new file mode 100644 index 0000000000000..112f2f3221858 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** filter Aggregation package. */ +package org.opensearch.search.aggregations.bucket.filter; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/package-info.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/package-info.java new file mode 100644 index 0000000000000..c59685e06cf79 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** geo_grid Aggregation package. 
*/ +package org.opensearch.search.aggregations.bucket.geogrid; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/global/package-info.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/global/package-info.java new file mode 100644 index 0000000000000..b278c91af36ea --- /dev/null +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/global/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Global Aggregation package. */ +package org.opensearch.search.aggregations.bucket.global; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/missing/package-info.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/missing/package-info.java new file mode 100644 index 0000000000000..9e10d8fe090d7 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/missing/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Missing Aggregation package. 
*/ +package org.opensearch.search.aggregations.bucket.missing; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/package-info.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/package-info.java new file mode 100644 index 0000000000000..1eceaa248afb6 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Nested Aggregation package. */ +package org.opensearch.search.aggregations.bucket.nested; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/package-info b/server/src/main/java/org/opensearch/search/aggregations/bucket/package-info.java similarity index 100% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/package-info rename to server/src/main/java/org/opensearch/search/aggregations/bucket/package-info.java diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/range/package-info.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/range/package-info.java new file mode 100644 index 0000000000000..5567c241abcae --- /dev/null +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/range/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Range Aggregation package. 
*/ +package org.opensearch.search.aggregations.bucket.range; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/package-info.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/package-info.java new file mode 100644 index 0000000000000..8a4cc512da13a --- /dev/null +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Sampler Aggregation package. */ +package org.opensearch.search.aggregations.bucket.sampler; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/heuristic/package-info.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/heuristic/package-info.java new file mode 100644 index 0000000000000..2a9d4fe5de436 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/heuristic/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Heuristic package for the Terms Aggregations. 
*/ +package org.opensearch.search.aggregations.bucket.terms.heuristic; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/package-info.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/package-info.java new file mode 100644 index 0000000000000..808b7d065b01f --- /dev/null +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base Terms Aggregation package. */ +package org.opensearch.search.aggregations.bucket.terms; diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/package-info.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/package-info.java new file mode 100644 index 0000000000000..87172ec5f4ca6 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Pipeline Aggregation package. */ +package org.opensearch.search.aggregations.pipeline; diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/package-info.java b/server/src/main/java/org/opensearch/search/aggregations/support/package-info.java index b31c2e1d7b5c4..e16e8c91b3fd0 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/package-info.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/package-info.java @@ -30,8 +30,6 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.support; - /** *

      * This package holds shared code for the aggregations framework, especially around dealing with values. @@ -88,3 +86,4 @@ *

      * */ +package org.opensearch.search.aggregations.support; diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/values/package-info.java b/server/src/main/java/org/opensearch/search/aggregations/support/values/package-info.java new file mode 100644 index 0000000000000..d39782586fc1a --- /dev/null +++ b/server/src/main/java/org/opensearch/search/aggregations/support/values/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base Values Source for Aggregations package. */ +package org.opensearch.search.aggregations.support.values; diff --git a/server/src/main/java/org/opensearch/search/builder/package-info.java b/server/src/main/java/org/opensearch/search/builder/package-info.java new file mode 100644 index 0000000000000..7de5bce1f7c14 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/builder/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Search builders package. */ +package org.opensearch.search.builder; diff --git a/server/src/main/java/org/opensearch/search/collapse/package-info.java b/server/src/main/java/org/opensearch/search/collapse/package-info.java new file mode 100644 index 0000000000000..2a3c6e3a4f507 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/collapse/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base Collapse package. 
*/ +package org.opensearch.search.collapse; diff --git a/server/src/main/java/org/opensearch/search/dfs/package-info.java b/server/src/main/java/org/opensearch/search/dfs/package-info.java new file mode 100644 index 0000000000000..1469d4d0e9dc4 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/dfs/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base DFS search package. */ +package org.opensearch.search.dfs; diff --git a/server/src/main/java/org/opensearch/search/internal/package-info.java b/server/src/main/java/org/opensearch/search/internal/package-info.java new file mode 100644 index 0000000000000..fb49927753d9c --- /dev/null +++ b/server/src/main/java/org/opensearch/search/internal/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base Search Internals package. */ +package org.opensearch.search.internal; diff --git a/server/src/main/java/org/opensearch/search/lookup/package-info.java b/server/src/main/java/org/opensearch/search/lookup/package-info.java new file mode 100644 index 0000000000000..c50520ac84b62 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/lookup/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base Lookup package for Fields, Docs, Source, and Leaves. 
*/ +package org.opensearch.search.lookup; diff --git a/server/src/main/java/org/opensearch/search/package-info.java b/server/src/main/java/org/opensearch/search/package-info.java new file mode 100644 index 0000000000000..ee35b70defbb2 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base Search package. */ +package org.opensearch.search; diff --git a/server/src/main/java/org/opensearch/search/query/package-info.java b/server/src/main/java/org/opensearch/search/query/package-info.java new file mode 100644 index 0000000000000..f0d4b039fe192 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/query/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base Query Listeners package. */ +package org.opensearch.search.query; diff --git a/server/src/main/java/org/opensearch/search/rescore/package-info.java b/server/src/main/java/org/opensearch/search/rescore/package-info.java new file mode 100644 index 0000000000000..623f8ece8d24a --- /dev/null +++ b/server/src/main/java/org/opensearch/search/rescore/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base Query rescorer package. 
*/ +package org.opensearch.search.rescore; diff --git a/server/src/main/java/org/opensearch/search/searchafter/package-info.java b/server/src/main/java/org/opensearch/search/searchafter/package-info.java new file mode 100644 index 0000000000000..a4f14a3e6cc23 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/searchafter/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base search after package. */ +package org.opensearch.search.searchafter; diff --git a/server/src/main/java/org/opensearch/search/slice/package-info.java b/server/src/main/java/org/opensearch/search/slice/package-info.java new file mode 100644 index 0000000000000..b24c5ff7416a6 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/slice/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base slice query package. */ +package org.opensearch.search.slice; diff --git a/server/src/main/java/org/opensearch/search/sort/package-info.java b/server/src/main/java/org/opensearch/search/sort/package-info.java new file mode 100644 index 0000000000000..2c83ef57a3a60 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/sort/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base sorting package. 
*/ +package org.opensearch.search.sort; diff --git a/server/src/main/java/org/opensearch/threadpool/package-info.java b/server/src/main/java/org/opensearch/threadpool/package-info.java new file mode 100644 index 0000000000000..f1afdb705d6dd --- /dev/null +++ b/server/src/main/java/org/opensearch/threadpool/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base OpenSearch ThreadPool package. */ +package org.opensearch.threadpool; diff --git a/server/src/main/java/org/opensearch/transport/package-info.java b/server/src/main/java/org/opensearch/transport/package-info.java new file mode 100644 index 0000000000000..d7005c3e221aa --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** All Transport Classes needed for wire communication in OpenSearch */ +package org.opensearch.transport; diff --git a/server/src/main/java/org/opensearch/usage/package-info.java b/server/src/main/java/org/opensearch/usage/package-info.java new file mode 100644 index 0000000000000..708bd8af33b39 --- /dev/null +++ b/server/src/main/java/org/opensearch/usage/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base Usage Service package for tracking OpenSearch telemetry. 
*/ +package org.opensearch.usage; diff --git a/server/src/main/java/org/opensearch/watcher/package-info.java b/server/src/main/java/org/opensearch/watcher/package-info.java new file mode 100644 index 0000000000000..e10cd0fcb90dd --- /dev/null +++ b/server/src/main/java/org/opensearch/watcher/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base File / Directory Watcher package. */ +package org.opensearch.watcher; From f58d98d2613026cc080bf1b1dc753a4810a53643 Mon Sep 17 00:00:00 2001 From: Tianli Feng Date: Mon, 2 May 2022 20:23:11 -0700 Subject: [PATCH 141/653] Rename reserved node id '_must_join_elected_master_' to '_must_join_elected_cluster_manager_' that used by in DetachClusterCommand (#3116) Signed-off-by: Tianli Feng --- .../cluster/coordination/ClusterFormationFailureHelper.java | 2 +- .../opensearch/cluster/coordination/CoordinationMetadata.java | 4 ++-- .../opensearch/cluster/coordination/DetachClusterCommand.java | 4 ++-- .../coordination/ClusterFormationFailureHelperTests.java | 2 +- .../cluster/coordination/CoordinationStateTestCluster.java | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/server/src/main/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelper.java b/server/src/main/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelper.java index 0f419aa7a0937..9cfd3a6fc3697 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelper.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelper.java @@ -218,7 +218,7 @@ String getDescription() { assert clusterState.getLastCommittedConfiguration().isEmpty() == false; - if (clusterState.getLastCommittedConfiguration().equals(VotingConfiguration.MUST_JOIN_ELECTED_MASTER)) { + if 
(clusterState.getLastCommittedConfiguration().equals(VotingConfiguration.MUST_JOIN_ELECTED_CLUSTER_MANAGER)) { return String.format( Locale.ROOT, "cluster-manager not discovered yet and this node was detached from its previous cluster, have discovered %s; %s", diff --git a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationMetadata.java b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationMetadata.java index c7671cb9e9b2b..56f8d24b6a5c0 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationMetadata.java @@ -350,8 +350,8 @@ public String toString() { public static class VotingConfiguration implements Writeable, ToXContentFragment { public static final VotingConfiguration EMPTY_CONFIG = new VotingConfiguration(Collections.emptySet()); - public static final VotingConfiguration MUST_JOIN_ELECTED_MASTER = new VotingConfiguration( - Collections.singleton("_must_join_elected_master_") + public static final VotingConfiguration MUST_JOIN_ELECTED_CLUSTER_MANAGER = new VotingConfiguration( + Collections.singleton("_must_join_elected_cluster_manager_") ); private final Set nodeIds; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/DetachClusterCommand.java b/server/src/main/java/org/opensearch/cluster/coordination/DetachClusterCommand.java index efa5a5ee600ab..afc8a68a74e9c 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/DetachClusterCommand.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/DetachClusterCommand.java @@ -86,8 +86,8 @@ protected void processNodePaths(Terminal terminal, Path[] dataPaths, int nodeLoc // package-private for tests static Metadata updateMetadata(Metadata oldMetadata) { final CoordinationMetadata coordinationMetadata = CoordinationMetadata.builder() - 
.lastAcceptedConfiguration(CoordinationMetadata.VotingConfiguration.MUST_JOIN_ELECTED_MASTER) - .lastCommittedConfiguration(CoordinationMetadata.VotingConfiguration.MUST_JOIN_ELECTED_MASTER) + .lastAcceptedConfiguration(CoordinationMetadata.VotingConfiguration.MUST_JOIN_ELECTED_CLUSTER_MANAGER) + .lastCommittedConfiguration(CoordinationMetadata.VotingConfiguration.MUST_JOIN_ELECTED_CLUSTER_MANAGER) .term(0) .build(); return Metadata.builder(oldMetadata).coordinationMetadata(coordinationMetadata).clusterUUIDCommitted(false).build(); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelperTests.java b/server/src/test/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelperTests.java index 391d7b0e56332..c3be1a726d949 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelperTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelperTests.java @@ -428,7 +428,7 @@ public void testDescriptionAfterDetachCluster() { final ClusterState clusterState = state( localNode, - VotingConfiguration.MUST_JOIN_ELECTED_MASTER.getNodeIds().toArray(new String[0]) + VotingConfiguration.MUST_JOIN_ELECTED_CLUSTER_MANAGER.getNodeIds().toArray(new String[0]) ); assertThat( diff --git a/test/framework/src/main/java/org/opensearch/cluster/coordination/CoordinationStateTestCluster.java b/test/framework/src/main/java/org/opensearch/cluster/coordination/CoordinationStateTestCluster.java index 2f1e18058d544..1e7456e03ce6f 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/coordination/CoordinationStateTestCluster.java +++ b/test/framework/src/main/java/org/opensearch/cluster/coordination/CoordinationStateTestCluster.java @@ -155,7 +155,7 @@ void reboot() { .getLastAcceptedConfiguration() .isEmpty() ? 
CoordinationMetadata.VotingConfiguration.EMPTY_CONFIG - : CoordinationMetadata.VotingConfiguration.MUST_JOIN_ELECTED_MASTER; + : CoordinationMetadata.VotingConfiguration.MUST_JOIN_ELECTED_CLUSTER_MANAGER; persistedState = new InMemoryPersistedState( 0L, clusterState(0L, 0L, localNode, votingConfiguration, votingConfiguration, 0L) From bb870f70c8fdafbe4c01e44c34698d37a508b537 Mon Sep 17 00:00:00 2001 From: Tianli Feng Date: Tue, 3 May 2022 07:24:37 -0700 Subject: [PATCH 142/653] Rename ClusterBlock description 'no master' to 'no cluster-manager' (#3133) * Rename ClusterBlock description 'no master' to 'no cluster-manager' Signed-off-by: Tianli Feng * Adjust format by spotlessApply task Signed-off-by: Tianli Feng --- .../org/opensearch/cluster/MinimumMasterNodesIT.java | 12 ++++++------ .../opensearch/cluster/SpecificMasterNodesIT.java | 6 +++--- .../opensearch/discovery/DiscoveryDisruptionIT.java | 2 +- .../org/opensearch/discovery/MasterDisruptionIT.java | 12 ++++++++---- .../cluster/coordination/NoMasterBlockService.java | 6 +++--- .../org/opensearch/discovery/SeedHostsProvider.java | 2 +- .../org/opensearch/ExceptionSerializationTests.java | 2 +- .../org/opensearch/OpenSearchExceptionTests.java | 7 ++++--- .../org/opensearch/cluster/ClusterStateTests.java | 2 +- .../replication/ClusterStateCreationUtils.java | 2 +- .../coordination/AbstractCoordinatorTestCase.java | 6 +++++- .../org/opensearch/test/InternalTestCluster.java | 2 +- .../main/java/org/opensearch/test/RandomObjects.java | 3 ++- .../test/test/InternalTestClusterTests.java | 2 +- 14 files changed, 38 insertions(+), 28 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/MinimumMasterNodesIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/MinimumMasterNodesIT.java index c3dc686921eb6..32899690799d3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/MinimumMasterNodesIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/cluster/MinimumMasterNodesIT.java @@ -91,7 +91,7 @@ public void testTwoNodesNoMasterBlock() throws Exception { logger.info("--> start first node"); String node1Name = internalCluster().startNode(settings); - logger.info("--> should be blocked, no master..."); + logger.info("--> should be blocked, no cluster-manager..."); ClusterState state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState(); assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(true)); assertThat(state.nodes().getSize(), equalTo(1)); // verify that we still see the local node in the cluster state @@ -155,7 +155,7 @@ public void testTwoNodesNoMasterBlock() throws Exception { String otherNode = node1Name.equals(masterNode) ? node2Name : node1Name; logger.info("--> add voting config exclusion for non-master node, to be sure it's not elected"); client().execute(AddVotingConfigExclusionsAction.INSTANCE, new AddVotingConfigExclusionsRequest(otherNode)).get(); - logger.info("--> stop master node, no master block should appear"); + logger.info("--> stop master node, no cluster-manager block should appear"); Settings masterDataPathSettings = internalCluster().dataPathSettings(masterNode); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(masterNode)); @@ -166,7 +166,7 @@ public void testTwoNodesNoMasterBlock() throws Exception { state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState(); assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(true)); - // verify that both nodes are still in the cluster state but there is no master + // verify that both nodes are still in the cluster state but there is no cluster-manager assertThat(state.nodes().getSize(), equalTo(2)); assertThat(state.nodes().getMasterNode(), equalTo(null)); @@ -208,7 +208,7 @@ public void 
testTwoNodesNoMasterBlock() throws Exception { otherNode = node1Name.equals(masterNode) ? node2Name : node1Name; logger.info("--> add voting config exclusion for master node, to be sure it's not elected"); client().execute(AddVotingConfigExclusionsAction.INSTANCE, new AddVotingConfigExclusionsRequest(masterNode)).get(); - logger.info("--> stop non-master node, no master block should appear"); + logger.info("--> stop non-master node, no cluster-manager block should appear"); Settings otherNodeDataPathSettings = internalCluster().dataPathSettings(otherNode); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(otherNode)); @@ -317,7 +317,7 @@ public void testThreeNodesNoMasterBlock() throws Exception { internalCluster().stopRandomNonMasterNode(); internalCluster().stopRandomNonMasterNode(); - logger.info("--> verify that there is no master anymore on remaining node"); + logger.info("--> verify that there is no cluster-manager anymore on remaining node"); // spin here to wait till the state is set assertBusy(() -> { ClusterState st = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState(); @@ -386,7 +386,7 @@ public void onFailure(String source, Exception e) { assertThat(failure.get(), instanceOf(FailedToCommitClusterStateException.class)); - logger.debug("--> check that there is no master in minor partition"); + logger.debug("--> check that there is no cluster-manager in minor partition"); assertBusy(() -> assertThat(masterClusterService.state().nodes().getMasterNode(), nullValue())); // let major partition to elect new master, to ensure that old master is not elected once partition is restored, diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/SpecificMasterNodesIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/SpecificMasterNodesIT.java index fc193163f75cc..5380b61c446d4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/SpecificMasterNodesIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/cluster/SpecificMasterNodesIT.java @@ -74,7 +74,7 @@ public void testSimpleOnlyMasterNodeElection() throws IOException { ); fail("should not be able to find master"); } catch (MasterNotDiscoveredException e) { - // all is well, no master elected + // all is well, no cluster-manager elected } logger.info("--> start master node"); final String masterNodeName = internalCluster().startMasterOnlyNode(); @@ -124,7 +124,7 @@ public void testSimpleOnlyMasterNodeElection() throws IOException { ); fail("should not be able to find master"); } catch (MasterNotDiscoveredException e) { - // all is well, no master elected + // all is well, no cluster-manager elected } logger.info("--> start previous master node again"); @@ -178,7 +178,7 @@ public void testElectOnlyBetweenMasterNodes() throws Exception { ); fail("should not be able to find master"); } catch (MasterNotDiscoveredException e) { - // all is well, no master elected + // all is well, no cluster-manager elected } logger.info("--> start master node (1)"); final String masterNodeName = internalCluster().startMasterOnlyNode(); diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java index 079aaa714a15c..bea70bd0f5919 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java @@ -220,7 +220,7 @@ public void testNodeNotReachableFromMaster() throws Exception { logger.info("waiting for [{}] to be removed from cluster", nonMasterNode); ensureStableCluster(2, masterNode); - logger.info("waiting for [{}] to have no master", nonMasterNode); + logger.info("waiting for [{}] to have no cluster-manager", nonMasterNode); assertNoMaster(nonMasterNode); logger.info("healing partition and checking cluster reforms"); diff 
--git a/server/src/internalClusterTest/java/org/opensearch/discovery/MasterDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/MasterDisruptionIT.java index 14e7a26bb448e..2434537d7a424 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/MasterDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/discovery/MasterDisruptionIT.java @@ -217,7 +217,7 @@ public void testVerifyApiBlocksDuringPartition() throws Exception { // The unlucky node must report *no* master node, since it can't connect to master and in fact it should // continuously ping until network failures have been resolved. However // It may a take a bit before the node detects it has been cut off from the elected master - logger.info("waiting for isolated node [{}] to have no master", isolatedNode); + logger.info("waiting for isolated node [{}] to have no cluster-manager", isolatedNode); assertNoMaster(isolatedNode, NoMasterBlockService.NO_MASTER_BLOCK_WRITES, TimeValue.timeValueSeconds(30)); logger.info("wait until elected master has been removed and a new 2 node cluster was from (via [{}])", isolatedNode); @@ -236,7 +236,7 @@ public void testVerifyApiBlocksDuringPartition() throws Exception { fail( "node [" + node - + "] has no master or has blocks, despite of being on the right side of the partition. State dump:\n" + + "] has no cluster-manager or has blocks, despite of being on the right side of the partition. State dump:\n" + nodeState ); } @@ -247,7 +247,11 @@ public void testVerifyApiBlocksDuringPartition() throws Exception { // Wait until the master node sees al 3 nodes again. 
ensureStableCluster(3, new TimeValue(DISRUPTION_HEALING_OVERHEAD.millis() + networkDisruption.expectedTimeToHeal().millis())); - logger.info("Verify no master block with {} set to {}", NoMasterBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "all"); + logger.info( + "Verify no cluster-manager block with {} set to {}", + NoMasterBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), + "all" + ); client().admin() .cluster() .prepareUpdateSettings() @@ -259,7 +263,7 @@ public void testVerifyApiBlocksDuringPartition() throws Exception { // The unlucky node must report *no* master node, since it can't connect to master and in fact it should // continuously ping until network failures have been resolved. However // It may a take a bit before the node detects it has been cut off from the elected master - logger.info("waiting for isolated node [{}] to have no master", isolatedNode); + logger.info("waiting for isolated node [{}] to have no cluster-manager", isolatedNode); assertNoMaster(isolatedNode, NoMasterBlockService.NO_MASTER_BLOCK_ALL, TimeValue.timeValueSeconds(30)); // make sure we have stable cluster & cross partition recoveries are canceled by the removal of the missing node diff --git a/server/src/main/java/org/opensearch/cluster/coordination/NoMasterBlockService.java b/server/src/main/java/org/opensearch/cluster/coordination/NoMasterBlockService.java index f6420bb32b5f3..f020ae4081f06 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/NoMasterBlockService.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/NoMasterBlockService.java @@ -45,7 +45,7 @@ public class NoMasterBlockService { public static final int NO_MASTER_BLOCK_ID = 2; public static final ClusterBlock NO_MASTER_BLOCK_WRITES = new ClusterBlock( NO_MASTER_BLOCK_ID, - "no master", + "no cluster-manager", true, false, false, @@ -54,7 +54,7 @@ public class NoMasterBlockService { ); public static final ClusterBlock NO_MASTER_BLOCK_ALL = new ClusterBlock( 
NO_MASTER_BLOCK_ID, - "no master", + "no cluster-manager", true, true, false, @@ -63,7 +63,7 @@ public class NoMasterBlockService { ); public static final ClusterBlock NO_MASTER_BLOCK_METADATA_WRITES = new ClusterBlock( NO_MASTER_BLOCK_ID, - "no master", + "no cluster-manager", true, false, false, diff --git a/server/src/main/java/org/opensearch/discovery/SeedHostsProvider.java b/server/src/main/java/org/opensearch/discovery/SeedHostsProvider.java index ddea57db44476..23944f7bb122c 100644 --- a/server/src/main/java/org/opensearch/discovery/SeedHostsProvider.java +++ b/server/src/main/java/org/opensearch/discovery/SeedHostsProvider.java @@ -42,7 +42,7 @@ public interface SeedHostsProvider { /** - * Returns a list of seed hosts to use for discovery. Called repeatedly while discovery is active (i.e. while there is no master) + * Returns a list of seed hosts to use for discovery. Called repeatedly while discovery is active (i.e. while there is no cluster-manager) * so that this list may be dynamic. 
*/ List getSeedAddresses(HostsResolver hostsResolver); diff --git a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java index b5859e1fb18a9..888e855176fe6 100644 --- a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java @@ -511,7 +511,7 @@ public void testFailedNodeException() throws IOException { public void testClusterBlockException() throws IOException { ClusterBlockException ex = serialize(new ClusterBlockException(singleton(NoMasterBlockService.NO_MASTER_BLOCK_WRITES))); - assertEquals("blocked by: [SERVICE_UNAVAILABLE/2/no master];", ex.getMessage()); + assertEquals("blocked by: [SERVICE_UNAVAILABLE/2/no cluster-manager];", ex.getMessage()); assertTrue(ex.blocks().contains(NoMasterBlockService.NO_MASTER_BLOCK_WRITES)); assertEquals(1, ex.blocks().size()); } diff --git a/server/src/test/java/org/opensearch/OpenSearchExceptionTests.java b/server/src/test/java/org/opensearch/OpenSearchExceptionTests.java index 9f32af143ee2d..31c2d77370941 100644 --- a/server/src/test/java/org/opensearch/OpenSearchExceptionTests.java +++ b/server/src/test/java/org/opensearch/OpenSearchExceptionTests.java @@ -499,7 +499,7 @@ public void testToXContentWithHeadersAndMetadata() throws IOException { + "\"reason\":\"baz\"," + "\"caused_by\":{" + "\"type\":\"cluster_block_exception\"," - + "\"reason\":\"blocked by: [SERVICE_UNAVAILABLE/2/no master];\"" + + "\"reason\":\"blocked by: [SERVICE_UNAVAILABLE/2/no cluster-manager];\"" + "}" + "}" + "}," @@ -537,7 +537,7 @@ public void testToXContentWithHeadersAndMetadata() throws IOException { cause = (OpenSearchException) cause.getCause(); assertEquals( cause.getMessage(), - "OpenSearch exception [type=cluster_block_exception, reason=blocked by: [SERVICE_UNAVAILABLE/2/no master];]" + "OpenSearch exception [type=cluster_block_exception, reason=blocked by: 
[SERVICE_UNAVAILABLE/2/no cluster-manager];]" ); } @@ -1034,7 +1034,8 @@ public static Tuple randomExceptions() { case 0: actual = new ClusterBlockException(singleton(NoMasterBlockService.NO_MASTER_BLOCK_WRITES)); expected = new OpenSearchException( - "OpenSearch exception [type=cluster_block_exception, " + "reason=blocked by: [SERVICE_UNAVAILABLE/2/no master];]" + "OpenSearch exception [type=cluster_block_exception, " + + "reason=blocked by: [SERVICE_UNAVAILABLE/2/no cluster-manager];]" ); break; case 1: // Simple opensearch exception with headers (other metadata of type number are not parsed) diff --git a/server/src/test/java/org/opensearch/cluster/ClusterStateTests.java b/server/src/test/java/org/opensearch/cluster/ClusterStateTests.java index 7cbab104cd07a..8904e4391a89f 100644 --- a/server/src/test/java/org/opensearch/cluster/ClusterStateTests.java +++ b/server/src/test/java/org/opensearch/cluster/ClusterStateTests.java @@ -101,7 +101,7 @@ public void testSupersedes() { .nodes(DiscoveryNodes.builder(nodes).masterNodeId(node2.getId())) .build(); - // states with no master should never supersede anything + // states with no cluster-manager should never supersede anything assertFalse(noMaster1.supersedes(noMaster2)); assertFalse(noMaster1.supersedes(withMaster1a)); diff --git a/test/framework/src/main/java/org/opensearch/action/support/replication/ClusterStateCreationUtils.java b/test/framework/src/main/java/org/opensearch/action/support/replication/ClusterStateCreationUtils.java index 64b82f9fd1b92..72ca3617c40a3 100644 --- a/test/framework/src/main/java/org/opensearch/action/support/replication/ClusterStateCreationUtils.java +++ b/test/framework/src/main/java/org/opensearch/action/support/replication/ClusterStateCreationUtils.java @@ -429,7 +429,7 @@ public static ClusterState stateWithNoShard() { * Creates a cluster state where local node and master node can be specified * * @param localNode node in allNodes that is the local node - * @param masterNode node in 
allNodes that is the master node. Can be null if no master exists + * @param masterNode node in allNodes that is the master node. Can be null if no cluster-manager exists * @param allNodes all nodes in the cluster * @return cluster state */ diff --git a/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java b/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java index 6617102c12ffc..6178ead662870 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java @@ -632,7 +632,11 @@ void stabilise(long stabilisationDurationMillis) { ); } else { assertThat(nodeId + " is not following " + leaderId, clusterNode.coordinator.getMode(), is(CANDIDATE)); - assertThat(nodeId + " has no master", clusterNode.getLastAppliedClusterState().nodes().getMasterNode(), nullValue()); + assertThat( + nodeId + " has no cluster-manager", + clusterNode.getLastAppliedClusterState().nodes().getMasterNode(), + nullValue() + ); assertThat( nodeId + " has NO_MASTER_BLOCK", clusterNode.getLastAppliedClusterState().blocks().hasGlobalBlockWithId(NO_MASTER_BLOCK_ID), diff --git a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java index 9e3a2c3564a00..f37a1d68ec384 100644 --- a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java @@ -825,7 +825,7 @@ public Client masterClient() { if (randomNodeAndClient != null) { return randomNodeAndClient.nodeClient(); // ensure node client master is requested } - throw new AssertionError("No master client found"); + throw new AssertionError("No cluster-manager client found"); } /** diff --git 
a/test/framework/src/main/java/org/opensearch/test/RandomObjects.java b/test/framework/src/main/java/org/opensearch/test/RandomObjects.java index 55da731e8311f..8beac9e441787 100644 --- a/test/framework/src/main/java/org/opensearch/test/RandomObjects.java +++ b/test/framework/src/main/java/org/opensearch/test/RandomObjects.java @@ -333,7 +333,8 @@ private static Tuple randomShardInfoFailure(Random random) { case 0: actualException = new ClusterBlockException(singleton(NoMasterBlockService.NO_MASTER_BLOCK_WRITES)); expectedException = new OpenSearchException( - "OpenSearch exception [type=cluster_block_exception, " + "reason=blocked by: [SERVICE_UNAVAILABLE/2/no master];]" + "OpenSearch exception [type=cluster_block_exception, " + + "reason=blocked by: [SERVICE_UNAVAILABLE/2/no cluster-manager];]" ); break; case 1: diff --git a/test/framework/src/test/java/org/opensearch/test/test/InternalTestClusterTests.java b/test/framework/src/test/java/org/opensearch/test/test/InternalTestClusterTests.java index f1aee04d92a19..87cd98a717be6 100644 --- a/test/framework/src/test/java/org/opensearch/test/test/InternalTestClusterTests.java +++ b/test/framework/src/test/java/org/opensearch/test/test/InternalTestClusterTests.java @@ -404,7 +404,7 @@ public Path nodeConfigPath(int nodeOrdinal) { List roles = new ArrayList<>(); for (int i = 0; i < numNodes; i++) { final DiscoveryNodeRole role = i == numNodes - 1 && roles.contains(clusterManagerRole) == false - ? clusterManagerRole // last node and still no master + ? clusterManagerRole // last node and still no cluster-manager : randomFrom(clusterManagerRole, DiscoveryNodeRole.DATA_ROLE, DiscoveryNodeRole.INGEST_ROLE); roles.add(role); } From ae4c612580502a2b358de074b1d661ac162eceaa Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Tue, 3 May 2022 09:25:55 -0500 Subject: [PATCH 143/653] Fix Emeritus Table in MAINTAINERS.md (#3137) Fixes emeritus table formatting in Maintainers document. 
Signed-off-by: Nicholas Walter Knize --- MAINTAINERS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/MAINTAINERS.md b/MAINTAINERS.md index ed20b6f5ab314..988823682b6c1 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -29,6 +29,7 @@ ## Emeritus | Maintainer | GitHub ID | Affiliation | +| --------------- | --------- | ----------- | | Megha Sai Kavikondala | [meghasaik](https://github.com/meghasaik) | Amazon | [This document](https://github.com/opensearch-project/.github/blob/main/MAINTAINERS.md) explains what maintainers do in this repo, and how they should be doing it. If you're interested in contributing, see [CONTRIBUTING](CONTRIBUTING.md). From 15f035b4790f23fe65761f22129babfc6ccd7ce1 Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Tue, 3 May 2022 09:26:37 -0500 Subject: [PATCH 144/653] [Remove] TypesExist Action (#3139) The types exist transport action can be removed now that the TransportClient has been removed and types support has been removed. Signed-off-by: Nicholas Walter Knize --- .../indices/IndicesOptionsIntegrationIT.java | 20 --- .../org/opensearch/action/ActionModule.java | 3 - .../types/TransportTypesExistsAction.java | 125 ------------------ .../exists/types/TypesExistsAction.java | 44 ------ .../exists/types/TypesExistsRequest.java | 115 ---------------- .../types/TypesExistsRequestBuilder.java | 82 ------------ .../exists/types/TypesExistsResponse.java | 64 --------- .../indices/exists/types/package-info.java | 13 -- .../opensearch/client/IndicesAdminClient.java | 31 ----- .../client/support/AbstractClient.java | 20 --- 10 files changed, 517 deletions(-) delete mode 100644 server/src/main/java/org/opensearch/action/admin/indices/exists/types/TransportTypesExistsAction.java delete mode 100644 server/src/main/java/org/opensearch/action/admin/indices/exists/types/TypesExistsAction.java delete mode 100644 server/src/main/java/org/opensearch/action/admin/indices/exists/types/TypesExistsRequest.java delete mode 100644 
server/src/main/java/org/opensearch/action/admin/indices/exists/types/TypesExistsRequestBuilder.java delete mode 100644 server/src/main/java/org/opensearch/action/admin/indices/exists/types/TypesExistsResponse.java delete mode 100644 server/src/main/java/org/opensearch/action/admin/indices/exists/types/package-info.java diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesOptionsIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesOptionsIntegrationIT.java index 3432cc967bf22..0ea35a24d7c26 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesOptionsIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesOptionsIntegrationIT.java @@ -37,7 +37,6 @@ import org.opensearch.action.admin.indices.alias.exists.AliasesExistRequestBuilder; import org.opensearch.action.admin.indices.alias.get.GetAliasesRequestBuilder; import org.opensearch.action.admin.indices.cache.clear.ClearIndicesCacheRequestBuilder; -import org.opensearch.action.admin.indices.exists.types.TypesExistsRequestBuilder; import org.opensearch.action.admin.indices.flush.FlushRequestBuilder; import org.opensearch.action.admin.indices.forcemerge.ForceMergeRequestBuilder; import org.opensearch.action.admin.indices.mapping.get.GetFieldMappingsRequestBuilder; @@ -99,7 +98,6 @@ public void testSpecifiedIndexUnavailableMultipleIndices() throws Exception { verify(refreshBuilder("test1", "test2"), true); verify(validateQuery("test1", "test2"), true); verify(aliasExists("test1", "test2"), true); - verify(typesExists("test1", "test2"), true); verify(getAliases("test1", "test2"), true); verify(getFieldMapping("test1", "test2"), true); verify(getMapping("test1", "test2"), true); @@ -116,7 +114,6 @@ public void testSpecifiedIndexUnavailableMultipleIndices() throws Exception { verify(refreshBuilder("test1", "test2").setIndicesOptions(options), true); verify(validateQuery("test1", 
"test2").setIndicesOptions(options), true); verify(aliasExists("test1", "test2").setIndicesOptions(options), true); - verify(typesExists("test1", "test2").setIndicesOptions(options), true); verify(getAliases("test1", "test2").setIndicesOptions(options), true); verify(getFieldMapping("test1", "test2").setIndicesOptions(options), true); verify(getMapping("test1", "test2").setIndicesOptions(options), true); @@ -133,7 +130,6 @@ public void testSpecifiedIndexUnavailableMultipleIndices() throws Exception { verify(refreshBuilder("test1", "test2").setIndicesOptions(options), false); verify(validateQuery("test1", "test2").setIndicesOptions(options), false); verify(aliasExists("test1", "test2").setIndicesOptions(options), false); - verify(typesExists("test1", "test2").setIndicesOptions(options), false); verify(getAliases("test1", "test2").setIndicesOptions(options), false); verify(getFieldMapping("test1", "test2").setIndicesOptions(options), false); verify(getMapping("test1", "test2").setIndicesOptions(options), false); @@ -151,7 +147,6 @@ public void testSpecifiedIndexUnavailableMultipleIndices() throws Exception { verify(refreshBuilder("test1", "test2").setIndicesOptions(options), false); verify(validateQuery("test1", "test2").setIndicesOptions(options), false); verify(aliasExists("test1", "test2").setIndicesOptions(options), false); - verify(typesExists("test1", "test2").setIndicesOptions(options), false); verify(getAliases("test1", "test2").setIndicesOptions(options), false); verify(getFieldMapping("test1", "test2").setIndicesOptions(options), false); verify(getMapping("test1", "test2").setIndicesOptions(options), false); @@ -178,7 +173,6 @@ public void testSpecifiedIndexUnavailableSingleIndexThatIsClosed() throws Except verify(refreshBuilder("test1").setIndicesOptions(options), true); verify(validateQuery("test1").setIndicesOptions(options), true); verify(aliasExists("test1").setIndicesOptions(options), true); - verify(typesExists("test1").setIndicesOptions(options), 
true); verify(getAliases("test1").setIndicesOptions(options), true); verify(getFieldMapping("test1").setIndicesOptions(options), true); verify(getMapping("test1").setIndicesOptions(options), true); @@ -201,7 +195,6 @@ public void testSpecifiedIndexUnavailableSingleIndexThatIsClosed() throws Except verify(refreshBuilder("test1").setIndicesOptions(options), false); verify(validateQuery("test1").setIndicesOptions(options), false); verify(aliasExists("test1").setIndicesOptions(options), false); - verify(typesExists("test1").setIndicesOptions(options), false); verify(getAliases("test1").setIndicesOptions(options), false); verify(getFieldMapping("test1").setIndicesOptions(options), false); verify(getMapping("test1").setIndicesOptions(options), false); @@ -221,7 +214,6 @@ public void testSpecifiedIndexUnavailableSingleIndexThatIsClosed() throws Except verify(refreshBuilder("test1").setIndicesOptions(options), false); verify(validateQuery("test1").setIndicesOptions(options), false); verify(aliasExists("test1").setIndicesOptions(options), false); - verify(typesExists("test1").setIndicesOptions(options), false); verify(getAliases("test1").setIndicesOptions(options), false); verify(getFieldMapping("test1").setIndicesOptions(options), false); verify(getMapping("test1").setIndicesOptions(options), false); @@ -240,7 +232,6 @@ public void testSpecifiedIndexUnavailableSingleIndex() throws Exception { verify(refreshBuilder("test1").setIndicesOptions(options), true); verify(validateQuery("test1").setIndicesOptions(options), true); verify(aliasExists("test1").setIndicesOptions(options), true); - verify(typesExists("test1").setIndicesOptions(options), true); verify(getAliases("test1").setIndicesOptions(options), true); verify(getFieldMapping("test1").setIndicesOptions(options), true); verify(getMapping("test1").setIndicesOptions(options), true); @@ -263,7 +254,6 @@ public void testSpecifiedIndexUnavailableSingleIndex() throws Exception { 
verify(refreshBuilder("test1").setIndicesOptions(options), false); verify(validateQuery("test1").setIndicesOptions(options), false); verify(aliasExists("test1").setIndicesOptions(options), false); - verify(typesExists("test1").setIndicesOptions(options), false); verify(getAliases("test1").setIndicesOptions(options), false); verify(getFieldMapping("test1").setIndicesOptions(options), false); verify(getMapping("test1").setIndicesOptions(options), false); @@ -282,7 +272,6 @@ public void testSpecifiedIndexUnavailableSingleIndex() throws Exception { verify(refreshBuilder("test1").setIndicesOptions(options), false); verify(validateQuery("test1").setIndicesOptions(options), false); verify(aliasExists("test1").setIndicesOptions(options), false); - verify(typesExists("test1").setIndicesOptions(options), false); verify(getAliases("test1").setIndicesOptions(options), false); verify(getFieldMapping("test1").setIndicesOptions(options), false); verify(getMapping("test1").setIndicesOptions(options), false); @@ -336,7 +325,6 @@ public void testWildcardBehaviour() throws Exception { verify(refreshBuilder(indices), false); verify(validateQuery(indices), true); verify(aliasExists(indices), false); - verify(typesExists(indices), false); verify(getAliases(indices), false); verify(getFieldMapping(indices), false); verify(getMapping(indices), false); @@ -354,7 +342,6 @@ public void testWildcardBehaviour() throws Exception { verify(refreshBuilder(indices).setIndicesOptions(options), false); verify(validateQuery(indices).setIndicesOptions(options), false); verify(aliasExists(indices).setIndicesOptions(options), false); - verify(typesExists(indices).setIndicesOptions(options), false); verify(getAliases(indices).setIndicesOptions(options), false); verify(getFieldMapping(indices).setIndicesOptions(options), false); verify(getMapping(indices).setIndicesOptions(options), false); @@ -375,7 +362,6 @@ public void testWildcardBehaviour() throws Exception { verify(refreshBuilder(indices), false); 
verify(validateQuery(indices), false); verify(aliasExists(indices), false); - verify(typesExists(indices), false); verify(getAliases(indices), false); verify(getFieldMapping(indices), false); verify(getMapping(indices), false); @@ -393,7 +379,6 @@ public void testWildcardBehaviour() throws Exception { verify(refreshBuilder(indices), false); verify(validateQuery(indices), true); verify(aliasExists(indices), false); - verify(typesExists(indices), false); verify(getAliases(indices), false); verify(getFieldMapping(indices), false); verify(getMapping(indices), false); @@ -411,7 +396,6 @@ public void testWildcardBehaviour() throws Exception { verify(refreshBuilder(indices).setIndicesOptions(options), false); verify(validateQuery(indices).setIndicesOptions(options), false); verify(aliasExists(indices).setIndicesOptions(options), false); - verify(typesExists(indices).setIndicesOptions(options), false); verify(getAliases(indices).setIndicesOptions(options), false); verify(getFieldMapping(indices).setIndicesOptions(options), false); verify(getMapping(indices).setIndicesOptions(options), false); @@ -725,10 +709,6 @@ private static AliasesExistRequestBuilder aliasExists(String... indices) { return client().admin().indices().prepareAliasesExist("dummy").addIndices(indices); } - private static TypesExistsRequestBuilder typesExists(String... indices) { - return client().admin().indices().prepareTypesExists(indices).setTypes("dummy"); - } - static GetAliasesRequestBuilder getAliases(String... 
indices) { return client().admin().indices().prepareGetAliases("dummy").addIndices(indices); } diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index 8e31aa23d88cf..d5d9660dbcf1e 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -137,8 +137,6 @@ import org.opensearch.action.admin.indices.delete.TransportDeleteIndexAction; import org.opensearch.action.admin.indices.exists.indices.IndicesExistsAction; import org.opensearch.action.admin.indices.exists.indices.TransportIndicesExistsAction; -import org.opensearch.action.admin.indices.exists.types.TransportTypesExistsAction; -import org.opensearch.action.admin.indices.exists.types.TypesExistsAction; import org.opensearch.action.admin.indices.flush.FlushAction; import org.opensearch.action.admin.indices.flush.TransportFlushAction; import org.opensearch.action.admin.indices.forcemerge.ForceMergeAction; @@ -560,7 +558,6 @@ public void reg actions.register(OpenIndexAction.INSTANCE, TransportOpenIndexAction.class); actions.register(CloseIndexAction.INSTANCE, TransportCloseIndexAction.class); actions.register(IndicesExistsAction.INSTANCE, TransportIndicesExistsAction.class); - actions.register(TypesExistsAction.INSTANCE, TransportTypesExistsAction.class); actions.register(AddIndexBlockAction.INSTANCE, TransportAddIndexBlockAction.class); actions.register(GetMappingsAction.INSTANCE, TransportGetMappingsAction.class); actions.register( diff --git a/server/src/main/java/org/opensearch/action/admin/indices/exists/types/TransportTypesExistsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/exists/types/TransportTypesExistsAction.java deleted file mode 100644 index bf4e0375941e7..0000000000000 --- a/server/src/main/java/org/opensearch/action/admin/indices/exists/types/TransportTypesExistsAction.java +++ /dev/null @@ -1,125 +0,0 @@ -/* 
- * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.action.admin.indices.exists.types; - -import org.opensearch.action.ActionListener; -import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.TransportMasterNodeReadAction; -import org.opensearch.cluster.ClusterState; -import org.opensearch.cluster.block.ClusterBlockException; -import org.opensearch.cluster.block.ClusterBlockLevel; -import org.opensearch.cluster.metadata.IndexNameExpressionResolver; -import org.opensearch.cluster.metadata.MappingMetadata; -import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.inject.Inject; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.threadpool.ThreadPool; -import org.opensearch.transport.TransportService; - -import java.io.IOException; - -/** - * Types exists transport action. 
- */ -public class TransportTypesExistsAction extends TransportMasterNodeReadAction { - - @Inject - public TransportTypesExistsAction( - TransportService transportService, - ClusterService clusterService, - ThreadPool threadPool, - ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver - ) { - super( - TypesExistsAction.NAME, - transportService, - clusterService, - threadPool, - actionFilters, - TypesExistsRequest::new, - indexNameExpressionResolver - ); - } - - @Override - protected String executor() { - // lightweight check - return ThreadPool.Names.SAME; - } - - @Override - protected TypesExistsResponse read(StreamInput in) throws IOException { - return new TypesExistsResponse(in); - } - - @Override - protected ClusterBlockException checkBlock(TypesExistsRequest request, ClusterState state) { - return state.blocks() - .indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndexNames(state, request)); - } - - @Override - protected void masterOperation( - final TypesExistsRequest request, - final ClusterState state, - final ActionListener listener - ) { - String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request.indicesOptions(), request.indices()); - if (concreteIndices.length == 0) { - listener.onResponse(new TypesExistsResponse(false)); - return; - } - - for (String concreteIndex : concreteIndices) { - if (!state.metadata().hasConcreteIndex(concreteIndex)) { - listener.onResponse(new TypesExistsResponse(false)); - return; - } - - MappingMetadata mapping = state.metadata().getIndices().get(concreteIndex).mapping(); - if (mapping == null) { - listener.onResponse(new TypesExistsResponse(false)); - return; - } - - for (String type : request.types()) { - if (mapping.type().equals(type) == false) { - listener.onResponse(new TypesExistsResponse(false)); - return; - } - } - } - - listener.onResponse(new TypesExistsResponse(true)); - } -} diff --git 
a/server/src/main/java/org/opensearch/action/admin/indices/exists/types/TypesExistsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/exists/types/TypesExistsAction.java deleted file mode 100644 index df461559dda7c..0000000000000 --- a/server/src/main/java/org/opensearch/action/admin/indices/exists/types/TypesExistsAction.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
- */ - -package org.opensearch.action.admin.indices.exists.types; - -import org.opensearch.action.ActionType; - -public class TypesExistsAction extends ActionType { - - public static final TypesExistsAction INSTANCE = new TypesExistsAction(); - public static final String NAME = "indices:admin/types/exists"; - - private TypesExistsAction() { - super(NAME, TypesExistsResponse::new); - } -} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/exists/types/TypesExistsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/exists/types/TypesExistsRequest.java deleted file mode 100644 index eee8076a9b176..0000000000000 --- a/server/src/main/java/org/opensearch/action/admin/indices/exists/types/TypesExistsRequest.java +++ /dev/null @@ -1,115 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
- */ - -package org.opensearch.action.admin.indices.exists.types; - -import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.IndicesRequest; -import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.MasterNodeReadRequest; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; - -import java.io.IOException; - -import static org.opensearch.action.ValidateActions.addValidationError; - -public class TypesExistsRequest extends MasterNodeReadRequest implements IndicesRequest.Replaceable { - - private String[] indices; - private String[] types; - - private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpen(); - - public TypesExistsRequest() {} - - public TypesExistsRequest(String[] indices, String... types) { - this.indices = indices; - this.types = types; - } - - public TypesExistsRequest(StreamInput in) throws IOException { - super(in); - indices = in.readStringArray(); - types = in.readStringArray(); - indicesOptions = IndicesOptions.readIndicesOptions(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeStringArray(indices); - out.writeStringArray(types); - indicesOptions.writeIndicesOptions(out); - } - - @Override - public String[] indices() { - return indices; - } - - @Override - public TypesExistsRequest indices(String... 
indices) { - this.indices = indices; - return this; - } - - public String[] types() { - return types; - } - - public void types(String[] types) { - this.types = types; - } - - @Override - public IndicesOptions indicesOptions() { - return indicesOptions; - } - - public TypesExistsRequest indicesOptions(IndicesOptions indicesOptions) { - this.indicesOptions = indicesOptions; - return this; - } - - @Override - public ActionRequestValidationException validate() { - ActionRequestValidationException validationException = null; - if (indices == null) { // Specifying '*' via rest api results in an empty array - validationException = addValidationError("index/indices is missing", validationException); - } - if (types == null || types.length == 0) { - validationException = addValidationError("type/types is missing", validationException); - } - - return validationException; - } -} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/exists/types/TypesExistsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/exists/types/TypesExistsRequestBuilder.java deleted file mode 100644 index 12ed1b5766775..0000000000000 --- a/server/src/main/java/org/opensearch/action/admin/indices/exists/types/TypesExistsRequestBuilder.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.action.admin.indices.exists.types; - -import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.MasterNodeReadOperationRequestBuilder; -import org.opensearch.client.OpenSearchClient; -import org.opensearch.common.Strings; - -/** - * A builder for {@link TypesExistsRequest}. - */ -@Deprecated -public class TypesExistsRequestBuilder extends MasterNodeReadOperationRequestBuilder< - TypesExistsRequest, - TypesExistsResponse, - TypesExistsRequestBuilder> { - - /** - * @param indices What indices to check for types - */ - public TypesExistsRequestBuilder(OpenSearchClient client, TypesExistsAction action, String... indices) { - super(client, action, new TypesExistsRequest(indices, Strings.EMPTY_ARRAY)); - } - - TypesExistsRequestBuilder(OpenSearchClient client, TypesExistsAction action) { - super(client, action, new TypesExistsRequest()); - } - - /** - * @param indices What indices to check for types - */ - public TypesExistsRequestBuilder setIndices(String[] indices) { - request.indices(indices); - return this; - } - - /** - * @param types The types to check if they exist - */ - public TypesExistsRequestBuilder setTypes(String... 
types) { - request.types(types); - return this; - } - - /** - * @param indicesOptions Specifies how to resolve indices that aren't active / ready and indices wildcard expressions - */ - public TypesExistsRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) { - request.indicesOptions(indicesOptions); - return this; - } -} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/exists/types/TypesExistsResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/exists/types/TypesExistsResponse.java deleted file mode 100644 index d7e08b5c9cdc9..0000000000000 --- a/server/src/main/java/org/opensearch/action/admin/indices/exists/types/TypesExistsResponse.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
- */ - -package org.opensearch.action.admin.indices.exists.types; - -import org.opensearch.action.ActionResponse; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; - -import java.io.IOException; - -/** - * Whether all of the existed types exist. - */ -public class TypesExistsResponse extends ActionResponse { - - private boolean exists; - - TypesExistsResponse(StreamInput in) throws IOException { - super(in); - exists = in.readBoolean(); - } - - public TypesExistsResponse(boolean exists) { - this.exists = exists; - } - - public boolean isExists() { - return this.exists; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeBoolean(exists); - } -} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/exists/types/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/exists/types/package-info.java deleted file mode 100644 index 30bc4569e221a..0000000000000 --- a/server/src/main/java/org/opensearch/action/admin/indices/exists/types/package-info.java +++ /dev/null @@ -1,13 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/** - * Index Type transport handlers. 
- * @deprecated types are deprecated and will be removed - **/ -package org.opensearch.action.admin.indices.exists.types; diff --git a/server/src/main/java/org/opensearch/client/IndicesAdminClient.java b/server/src/main/java/org/opensearch/client/IndicesAdminClient.java index 7f51b8af19e4b..35f880dbff5e3 100644 --- a/server/src/main/java/org/opensearch/client/IndicesAdminClient.java +++ b/server/src/main/java/org/opensearch/client/IndicesAdminClient.java @@ -60,9 +60,6 @@ import org.opensearch.action.admin.indices.exists.indices.IndicesExistsRequest; import org.opensearch.action.admin.indices.exists.indices.IndicesExistsRequestBuilder; import org.opensearch.action.admin.indices.exists.indices.IndicesExistsResponse; -import org.opensearch.action.admin.indices.exists.types.TypesExistsRequest; -import org.opensearch.action.admin.indices.exists.types.TypesExistsRequestBuilder; -import org.opensearch.action.admin.indices.exists.types.TypesExistsResponse; import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.action.admin.indices.flush.FlushRequestBuilder; import org.opensearch.action.admin.indices.flush.FlushResponse; @@ -163,34 +160,6 @@ public interface IndicesAdminClient extends OpenSearchClient { */ IndicesExistsRequestBuilder prepareExists(String... indices); - /** - * Types exists. - * - * @deprecated Types are deprecated and are in the process of being removed. - * @param request The types exists request - * @return The result future - */ - @Deprecated - ActionFuture typesExists(TypesExistsRequest request); - - /** - * Types exists. - * - * @deprecated Types are deprecated and are in the process of being removed. - * @param request The types exists - * @param listener A listener to be notified with a result - */ - @Deprecated - void typesExists(TypesExistsRequest request, ActionListener listener); - - /** - * Types exists. - * - * @deprecated Types are deprecated and are in the process of being removed. 
- */ - @Deprecated - TypesExistsRequestBuilder prepareTypesExists(String... index); - /** * Indices stats. */ diff --git a/server/src/main/java/org/opensearch/client/support/AbstractClient.java b/server/src/main/java/org/opensearch/client/support/AbstractClient.java index a37d293ee5dd2..96380904304b5 100644 --- a/server/src/main/java/org/opensearch/client/support/AbstractClient.java +++ b/server/src/main/java/org/opensearch/client/support/AbstractClient.java @@ -192,10 +192,6 @@ import org.opensearch.action.admin.indices.exists.indices.IndicesExistsRequest; import org.opensearch.action.admin.indices.exists.indices.IndicesExistsRequestBuilder; import org.opensearch.action.admin.indices.exists.indices.IndicesExistsResponse; -import org.opensearch.action.admin.indices.exists.types.TypesExistsAction; -import org.opensearch.action.admin.indices.exists.types.TypesExistsRequest; -import org.opensearch.action.admin.indices.exists.types.TypesExistsRequestBuilder; -import org.opensearch.action.admin.indices.exists.types.TypesExistsResponse; import org.opensearch.action.admin.indices.flush.FlushAction; import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.action.admin.indices.flush.FlushRequestBuilder; @@ -1332,22 +1328,6 @@ public IndicesExistsRequestBuilder prepareExists(String... indices) { return new IndicesExistsRequestBuilder(this, IndicesExistsAction.INSTANCE, indices); } - @Deprecated - @Override - public ActionFuture typesExists(TypesExistsRequest request) { - return execute(TypesExistsAction.INSTANCE, request); - } - - @Override - public void typesExists(TypesExistsRequest request, ActionListener listener) { - execute(TypesExistsAction.INSTANCE, request, listener); - } - - @Override - public TypesExistsRequestBuilder prepareTypesExists(String... 
index) { - return new TypesExistsRequestBuilder(this, TypesExistsAction.INSTANCE, index); - } - @Override public ActionFuture aliases(final IndicesAliasesRequest request) { return execute(IndicesAliasesAction.INSTANCE, request); From 5f531b366efcf1db02baf48b7bf6dec1e0d9237a Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Tue, 3 May 2022 11:05:41 -0500 Subject: [PATCH 145/653] [Remove] AliasesExistAction (#3149) With the removal of Transport Client AliasesExistAction is no longer needed and is removed. Signed-off-by: Nicholas Walter Knize --- .../ValidateIndicesAliasesRequestIT.java | 5 +- .../opensearch/aliases/IndexAliasesIT.java | 59 +++++------- .../indices/IndicesOptionsIntegrationIT.java | 40 ++------ .../org/opensearch/action/ActionModule.java | 3 - .../alias/exists/AliasesExistAction.java | 45 --------- .../exists/AliasesExistRequestBuilder.java | 43 --------- .../alias/exists/AliasesExistResponse.java | 66 ------------- .../exists/TransportAliasesExistAction.java | 95 ------------------- .../indices/alias/exists/package-info.java | 10 -- .../opensearch/client/IndicesAdminClient.java | 22 ----- .../client/support/AbstractClient.java | 18 ---- .../opensearch/cluster/metadata/Metadata.java | 32 ------- .../test/hamcrest/OpenSearchAssertions.java | 16 ---- 13 files changed, 35 insertions(+), 419 deletions(-) delete mode 100644 server/src/main/java/org/opensearch/action/admin/indices/alias/exists/AliasesExistAction.java delete mode 100644 server/src/main/java/org/opensearch/action/admin/indices/alias/exists/AliasesExistRequestBuilder.java delete mode 100644 server/src/main/java/org/opensearch/action/admin/indices/alias/exists/AliasesExistResponse.java delete mode 100644 server/src/main/java/org/opensearch/action/admin/indices/alias/exists/TransportAliasesExistAction.java delete mode 100644 server/src/main/java/org/opensearch/action/admin/indices/alias/exists/package-info.java diff --git 
a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/alias/ValidateIndicesAliasesRequestIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/alias/ValidateIndicesAliasesRequestIT.java index 60243bd52ded3..bcbc93b5500bc 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/alias/ValidateIndicesAliasesRequestIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/alias/ValidateIndicesAliasesRequestIT.java @@ -33,7 +33,6 @@ package org.opensearch.action.admin.indices.alias; import org.opensearch.action.RequestValidators; -import org.opensearch.action.admin.indices.alias.exists.AliasesExistResponse; import org.opensearch.action.admin.indices.alias.get.GetAliasesRequest; import org.opensearch.action.admin.indices.alias.get.GetAliasesResponse; import org.opensearch.cluster.metadata.AliasMetadata; @@ -143,8 +142,6 @@ public void testSomeAllowed() { final Exception e = expectThrows(IllegalStateException.class, () -> client().admin().indices().aliases(request).actionGet()); final String index = "foo_allowed".equals(origin) ? 
"bar" : "foo"; assertThat(e, hasToString(containsString("origin [" + origin + "] not allowed for index [" + index + "]"))); - final AliasesExistResponse response = client().admin().indices().aliasesExist(new GetAliasesRequest("alias")).actionGet(); - assertFalse(response.exists()); + assertTrue(client().admin().indices().getAliases(new GetAliasesRequest("alias")).actionGet().getAliases().isEmpty()); } - } diff --git a/server/src/internalClusterTest/java/org/opensearch/aliases/IndexAliasesIT.java b/server/src/internalClusterTest/java/org/opensearch/aliases/IndexAliasesIT.java index ff64a2cd90cb8..574046509de75 100644 --- a/server/src/internalClusterTest/java/org/opensearch/aliases/IndexAliasesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/aliases/IndexAliasesIT.java @@ -35,7 +35,6 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.opensearch.action.admin.indices.alias.Alias; import org.opensearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions; -import org.opensearch.action.admin.indices.alias.exists.AliasesExistResponse; import org.opensearch.action.admin.indices.alias.get.GetAliasesResponse; import org.opensearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.opensearch.action.delete.DeleteResponse; @@ -684,8 +683,9 @@ public void testDeleteAliases() throws Exception { assertAliasesVersionIncreases(indices, () -> admin().indices().prepareAliases().removeAlias(indices, aliases).get()); - AliasesExistResponse response = admin().indices().prepareAliasesExist(aliases).get(); - assertThat(response.exists(), equalTo(false)); + for (String alias : aliases) { + assertTrue(admin().indices().prepareGetAliases(alias).get().getAliases().isEmpty()); + } logger.info("--> creating index [foo_foo] and [bar_bar]"); assertAcked(prepareCreate("foo_foo")); @@ -701,9 +701,9 @@ public void testDeleteAliases() throws Exception { () -> 
assertAcked(admin().indices().prepareAliases().addAliasAction(AliasActions.remove().index("foo*").alias("foo"))) ); - assertTrue(admin().indices().prepareAliasesExist("foo").get().exists()); - assertFalse(admin().indices().prepareAliasesExist("foo").setIndices("foo_foo").get().exists()); - assertTrue(admin().indices().prepareAliasesExist("foo").setIndices("bar_bar").get().exists()); + assertFalse(admin().indices().prepareGetAliases("foo").get().getAliases().isEmpty()); + assertTrue(admin().indices().prepareGetAliases("foo").setIndices("foo_foo").get().getAliases().isEmpty()); + assertFalse(admin().indices().prepareGetAliases("foo").setIndices("bar_bar").get().getAliases().isEmpty()); IllegalArgumentException iae = expectThrows( IllegalArgumentException.class, () -> admin().indices().prepareAliases().addAliasAction(AliasActions.remove().index("foo").alias("foo")).execute().actionGet() @@ -880,8 +880,7 @@ public void testIndicesGetAliases() throws Exception { assertThat(getResponse.getAliases().get("foobar").get(0).getFilter(), nullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).getIndexRouting(), nullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).getSearchRouting(), nullValue()); - AliasesExistResponse existsResponse = admin().indices().prepareAliasesExist("alias1").get(); - assertThat(existsResponse.exists(), equalTo(true)); + assertFalse(admin().indices().prepareGetAliases("alias1").get().getAliases().isEmpty()); logger.info("--> getting all aliases that start with alias*"); getResponse = admin().indices().prepareGetAliases("alias*").get(); @@ -898,8 +897,7 @@ public void testIndicesGetAliases() throws Exception { assertThat(getResponse.getAliases().get("foobar").get(1).getFilter(), nullValue()); assertThat(getResponse.getAliases().get("foobar").get(1).getIndexRouting(), nullValue()); assertThat(getResponse.getAliases().get("foobar").get(1).getSearchRouting(), nullValue()); - existsResponse = 
admin().indices().prepareAliasesExist("alias*").get(); - assertThat(existsResponse.exists(), equalTo(true)); + assertFalse(admin().indices().prepareGetAliases("alias*").get().getAliases().isEmpty()); logger.info("--> creating aliases [bar, baz, foo]"); assertAliasesVersionIncreases( @@ -937,8 +935,10 @@ public void testIndicesGetAliases() throws Exception { assertThat(getResponse.getAliases().get("bazbar").get(1).getFilter(), nullValue()); assertThat(getResponse.getAliases().get("bazbar").get(1).getIndexRouting(), nullValue()); assertThat(getResponse.getAliases().get("bazbar").get(1).getSearchRouting(), nullValue()); - existsResponse = admin().indices().prepareAliasesExist("bar", "bac").addIndices("bazbar").get(); - assertThat(existsResponse.exists(), equalTo(true)); + assertFalse(admin().indices().prepareGetAliases("bar").get().getAliases().isEmpty()); + assertFalse(admin().indices().prepareGetAliases("bac").get().getAliases().isEmpty()); + assertFalse(admin().indices().prepareGetAliases("bar").addIndices("bazbar").get().getAliases().isEmpty()); + assertFalse(admin().indices().prepareGetAliases("bac").addIndices("bazbar").get().getAliases().isEmpty()); logger.info("--> getting *b* for index baz*"); getResponse = admin().indices().prepareGetAliases("*b*").addIndices("baz*").get(); @@ -957,8 +957,7 @@ public void testIndicesGetAliases() throws Exception { assertThat(getResponse.getAliases().get("bazbar").get(1).getFilter(), nullValue()); assertThat(getResponse.getAliases().get("bazbar").get(1).getIndexRouting(), nullValue()); assertThat(getResponse.getAliases().get("bazbar").get(1).getSearchRouting(), nullValue()); - existsResponse = admin().indices().prepareAliasesExist("*b*").addIndices("baz*").get(); - assertThat(existsResponse.exists(), equalTo(true)); + assertFalse(admin().indices().prepareGetAliases("*b*").addIndices("baz*").get().getAliases().isEmpty()); logger.info("--> getting *b* for index *bar"); getResponse = 
admin().indices().prepareGetAliases("b*").addIndices("*bar").get(); @@ -982,8 +981,7 @@ public void testIndicesGetAliases() throws Exception { assertThat(getResponse.getAliases().get("foobar").get(0).getFilter(), nullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).getIndexRouting(), equalTo("bla")); assertThat(getResponse.getAliases().get("foobar").get(0).getSearchRouting(), equalTo("bla")); - existsResponse = admin().indices().prepareAliasesExist("b*").addIndices("*bar").get(); - assertThat(existsResponse.exists(), equalTo(true)); + assertFalse(admin().indices().prepareGetAliases("b*").addIndices("*bar").get().getAliases().isEmpty()); logger.info("--> getting f* for index *bar"); getResponse = admin().indices().prepareGetAliases("f*").addIndices("*bar").get(); @@ -994,8 +992,7 @@ public void testIndicesGetAliases() throws Exception { assertThat(getResponse.getAliases().get("foobar").get(0).getFilter(), nullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).getIndexRouting(), nullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).getSearchRouting(), nullValue()); - existsResponse = admin().indices().prepareAliasesExist("f*").addIndices("*bar").get(); - assertThat(existsResponse.exists(), equalTo(true)); + assertFalse(admin().indices().prepareGetAliases("f*").addIndices("*bar").get().getAliases().isEmpty()); // alias at work logger.info("--> getting f* for index *bac"); @@ -1008,8 +1005,7 @@ public void testIndicesGetAliases() throws Exception { assertThat(getResponse.getAliases().get("foobar").get(0).getFilter(), nullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).getIndexRouting(), nullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).getSearchRouting(), nullValue()); - existsResponse = admin().indices().prepareAliasesExist("foo").addIndices("*bac").get(); - assertThat(existsResponse.exists(), equalTo(true)); + 
assertFalse(admin().indices().prepareGetAliases("foo").addIndices("*bac").get().getAliases().isEmpty()); logger.info("--> getting foo for index foobar"); getResponse = admin().indices().prepareGetAliases("foo").addIndices("foobar").get(); @@ -1020,8 +1016,7 @@ public void testIndicesGetAliases() throws Exception { assertThat(getResponse.getAliases().get("foobar").get(0).getFilter(), nullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).getIndexRouting(), nullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).getSearchRouting(), nullValue()); - existsResponse = admin().indices().prepareAliasesExist("foo").addIndices("foobar").get(); - assertThat(existsResponse.exists(), equalTo(true)); + assertFalse(admin().indices().prepareGetAliases("foo").addIndices("foobar").get().getAliases().isEmpty()); for (String aliasName : new String[] { null, "_all", "*" }) { logger.info("--> getting {} alias for index foobar", aliasName); @@ -1044,8 +1039,7 @@ public void testIndicesGetAliases() throws Exception { assertThat(getResponse.getAliases().size(), equalTo(2)); assertThat(getResponse.getAliases().get("foobar").size(), equalTo(4)); assertThat(getResponse.getAliases().get("bazbar").size(), equalTo(2)); - existsResponse = admin().indices().prepareAliasesExist("*").addIndices("*bac").get(); - assertThat(existsResponse.exists(), equalTo(true)); + assertFalse(admin().indices().prepareGetAliases("*").addIndices("*bac").get().getAliases().isEmpty()); assertAcked(admin().indices().prepareAliases().removeAlias("foobar", "foo")); @@ -1053,8 +1047,7 @@ public void testIndicesGetAliases() throws Exception { for (final ObjectObjectCursor> entry : getResponse.getAliases()) { assertTrue(entry.value.isEmpty()); } - existsResponse = admin().indices().prepareAliasesExist("foo").addIndices("foobar").get(); - assertThat(existsResponse.exists(), equalTo(false)); + assertTrue(admin().indices().prepareGetAliases("foo").addIndices("foobar").get().getAliases().isEmpty()); 
} public void testGetAllAliasesWorks() { @@ -1226,7 +1219,7 @@ public void testAliasesWithBlocks() { ); assertAliasesVersionIncreases("test", () -> assertAcked(admin().indices().prepareAliases().removeAlias("test", "alias1"))); assertThat(admin().indices().prepareGetAliases("alias2").execute().actionGet().getAliases().get("test").size(), equalTo(1)); - assertThat(admin().indices().prepareAliasesExist("alias2").get().exists(), equalTo(true)); + assertFalse(admin().indices().prepareGetAliases("alias2").get().getAliases().isEmpty()); } finally { disableIndexBlock("test", block); } @@ -1244,8 +1237,7 @@ public void testAliasesWithBlocks() { () -> assertBlocked(admin().indices().prepareAliases().removeAlias("test", "alias2"), INDEX_READ_ONLY_BLOCK) ); assertThat(admin().indices().prepareGetAliases("alias2").execute().actionGet().getAliases().get("test").size(), equalTo(1)); - assertThat(admin().indices().prepareAliasesExist("alias2").get().exists(), equalTo(true)); - + assertFalse(admin().indices().prepareGetAliases("alias2").get().getAliases().isEmpty()); } finally { disableIndexBlock("test", SETTING_READ_ONLY); } @@ -1262,8 +1254,7 @@ public void testAliasesWithBlocks() { () -> assertBlocked(admin().indices().prepareAliases().removeAlias("test", "alias2"), INDEX_METADATA_BLOCK) ); assertBlocked(admin().indices().prepareGetAliases("alias2"), INDEX_METADATA_BLOCK); - assertBlocked(admin().indices().prepareAliasesExist("alias2"), INDEX_METADATA_BLOCK); - + assertBlocked(admin().indices().prepareGetAliases("alias2"), INDEX_METADATA_BLOCK); } finally { disableIndexBlock("test", SETTING_BLOCKS_METADATA); } @@ -1288,12 +1279,12 @@ public void testAliasActionRemoveIndex() throws InterruptedException, ExecutionE assertAcked(client().admin().indices().prepareAliases().removeIndex("foo*")); assertFalse(client().admin().indices().prepareExists("foo_foo").execute().actionGet().isExists()); - assertTrue(admin().indices().prepareAliasesExist("foo").get().exists()); + 
assertFalse(admin().indices().prepareGetAliases("foo").get().getAliases().isEmpty()); assertTrue(client().admin().indices().prepareExists("bar_bar").execute().actionGet().isExists()); - assertTrue(admin().indices().prepareAliasesExist("foo").setIndices("bar_bar").get().exists()); + assertFalse(admin().indices().prepareGetAliases("foo").setIndices("bar_bar").get().getAliases().isEmpty()); assertAcked(client().admin().indices().prepareAliases().removeIndex("bar_bar")); - assertFalse(admin().indices().prepareAliasesExist("foo").get().exists()); + assertTrue(admin().indices().prepareGetAliases("foo").get().getAliases().isEmpty()); assertFalse(client().admin().indices().prepareExists("bar_bar").execute().actionGet().isExists()); } diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesOptionsIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesOptionsIntegrationIT.java index 0ea35a24d7c26..1f3d865811939 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesOptionsIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesOptionsIntegrationIT.java @@ -34,7 +34,6 @@ import org.opensearch.action.ActionRequestBuilder; import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotRequestBuilder; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequestBuilder; -import org.opensearch.action.admin.indices.alias.exists.AliasesExistRequestBuilder; import org.opensearch.action.admin.indices.alias.get.GetAliasesRequestBuilder; import org.opensearch.action.admin.indices.cache.clear.ClearIndicesCacheRequestBuilder; import org.opensearch.action.admin.indices.flush.FlushRequestBuilder; @@ -97,7 +96,6 @@ public void testSpecifiedIndexUnavailableMultipleIndices() throws Exception { verify(forceMerge("test1", "test2"), true); verify(refreshBuilder("test1", "test2"), true); verify(validateQuery("test1", "test2"), true); - 
verify(aliasExists("test1", "test2"), true); verify(getAliases("test1", "test2"), true); verify(getFieldMapping("test1", "test2"), true); verify(getMapping("test1", "test2"), true); @@ -113,7 +111,6 @@ public void testSpecifiedIndexUnavailableMultipleIndices() throws Exception { verify(forceMerge("test1", "test2").setIndicesOptions(options), true); verify(refreshBuilder("test1", "test2").setIndicesOptions(options), true); verify(validateQuery("test1", "test2").setIndicesOptions(options), true); - verify(aliasExists("test1", "test2").setIndicesOptions(options), true); verify(getAliases("test1", "test2").setIndicesOptions(options), true); verify(getFieldMapping("test1", "test2").setIndicesOptions(options), true); verify(getMapping("test1", "test2").setIndicesOptions(options), true); @@ -129,7 +126,6 @@ public void testSpecifiedIndexUnavailableMultipleIndices() throws Exception { verify(forceMerge("test1", "test2").setIndicesOptions(options), false); verify(refreshBuilder("test1", "test2").setIndicesOptions(options), false); verify(validateQuery("test1", "test2").setIndicesOptions(options), false); - verify(aliasExists("test1", "test2").setIndicesOptions(options), false); verify(getAliases("test1", "test2").setIndicesOptions(options), false); verify(getFieldMapping("test1", "test2").setIndicesOptions(options), false); verify(getMapping("test1", "test2").setIndicesOptions(options), false); @@ -146,7 +142,6 @@ public void testSpecifiedIndexUnavailableMultipleIndices() throws Exception { verify(forceMerge("test1", "test2").setIndicesOptions(options), false); verify(refreshBuilder("test1", "test2").setIndicesOptions(options), false); verify(validateQuery("test1", "test2").setIndicesOptions(options), false); - verify(aliasExists("test1", "test2").setIndicesOptions(options), false); verify(getAliases("test1", "test2").setIndicesOptions(options), false); verify(getFieldMapping("test1", "test2").setIndicesOptions(options), false); verify(getMapping("test1", 
"test2").setIndicesOptions(options), false); @@ -172,7 +167,6 @@ public void testSpecifiedIndexUnavailableSingleIndexThatIsClosed() throws Except verify(forceMerge("test1").setIndicesOptions(options), true); verify(refreshBuilder("test1").setIndicesOptions(options), true); verify(validateQuery("test1").setIndicesOptions(options), true); - verify(aliasExists("test1").setIndicesOptions(options), true); verify(getAliases("test1").setIndicesOptions(options), true); verify(getFieldMapping("test1").setIndicesOptions(options), true); verify(getMapping("test1").setIndicesOptions(options), true); @@ -194,7 +188,6 @@ public void testSpecifiedIndexUnavailableSingleIndexThatIsClosed() throws Except verify(forceMerge("test1").setIndicesOptions(options), false); verify(refreshBuilder("test1").setIndicesOptions(options), false); verify(validateQuery("test1").setIndicesOptions(options), false); - verify(aliasExists("test1").setIndicesOptions(options), false); verify(getAliases("test1").setIndicesOptions(options), false); verify(getFieldMapping("test1").setIndicesOptions(options), false); verify(getMapping("test1").setIndicesOptions(options), false); @@ -213,7 +206,6 @@ public void testSpecifiedIndexUnavailableSingleIndexThatIsClosed() throws Except verify(forceMerge("test1").setIndicesOptions(options), false); verify(refreshBuilder("test1").setIndicesOptions(options), false); verify(validateQuery("test1").setIndicesOptions(options), false); - verify(aliasExists("test1").setIndicesOptions(options), false); verify(getAliases("test1").setIndicesOptions(options), false); verify(getFieldMapping("test1").setIndicesOptions(options), false); verify(getMapping("test1").setIndicesOptions(options), false); @@ -231,7 +223,6 @@ public void testSpecifiedIndexUnavailableSingleIndex() throws Exception { verify(forceMerge("test1").setIndicesOptions(options), true); verify(refreshBuilder("test1").setIndicesOptions(options), true); verify(validateQuery("test1").setIndicesOptions(options), true); - 
verify(aliasExists("test1").setIndicesOptions(options), true); verify(getAliases("test1").setIndicesOptions(options), true); verify(getFieldMapping("test1").setIndicesOptions(options), true); verify(getMapping("test1").setIndicesOptions(options), true); @@ -253,7 +244,6 @@ public void testSpecifiedIndexUnavailableSingleIndex() throws Exception { verify(forceMerge("test1").setIndicesOptions(options), false); verify(refreshBuilder("test1").setIndicesOptions(options), false); verify(validateQuery("test1").setIndicesOptions(options), false); - verify(aliasExists("test1").setIndicesOptions(options), false); verify(getAliases("test1").setIndicesOptions(options), false); verify(getFieldMapping("test1").setIndicesOptions(options), false); verify(getMapping("test1").setIndicesOptions(options), false); @@ -271,7 +261,6 @@ public void testSpecifiedIndexUnavailableSingleIndex() throws Exception { verify(forceMerge("test1").setIndicesOptions(options), false); verify(refreshBuilder("test1").setIndicesOptions(options), false); verify(validateQuery("test1").setIndicesOptions(options), false); - verify(aliasExists("test1").setIndicesOptions(options), false); verify(getAliases("test1").setIndicesOptions(options), false); verify(getFieldMapping("test1").setIndicesOptions(options), false); verify(getMapping("test1").setIndicesOptions(options), false); @@ -324,7 +313,6 @@ public void testWildcardBehaviour() throws Exception { verify(forceMerge(indices), false); verify(refreshBuilder(indices), false); verify(validateQuery(indices), true); - verify(aliasExists(indices), false); verify(getAliases(indices), false); verify(getFieldMapping(indices), false); verify(getMapping(indices), false); @@ -341,7 +329,6 @@ public void testWildcardBehaviour() throws Exception { verify(forceMerge(indices).setIndicesOptions(options), false); verify(refreshBuilder(indices).setIndicesOptions(options), false); verify(validateQuery(indices).setIndicesOptions(options), false); - 
verify(aliasExists(indices).setIndicesOptions(options), false); verify(getAliases(indices).setIndicesOptions(options), false); verify(getFieldMapping(indices).setIndicesOptions(options), false); verify(getMapping(indices).setIndicesOptions(options), false); @@ -361,7 +348,6 @@ public void testWildcardBehaviour() throws Exception { verify(forceMerge(indices), false); verify(refreshBuilder(indices), false); verify(validateQuery(indices), false); - verify(aliasExists(indices), false); verify(getAliases(indices), false); verify(getFieldMapping(indices), false); verify(getMapping(indices), false); @@ -378,7 +364,6 @@ public void testWildcardBehaviour() throws Exception { verify(forceMerge(indices), false); verify(refreshBuilder(indices), false); verify(validateQuery(indices), true); - verify(aliasExists(indices), false); verify(getAliases(indices), false); verify(getFieldMapping(indices), false); verify(getMapping(indices), false); @@ -395,7 +380,6 @@ public void testWildcardBehaviour() throws Exception { verify(forceMerge(indices).setIndicesOptions(options), false); verify(refreshBuilder(indices).setIndicesOptions(options), false); verify(validateQuery(indices).setIndicesOptions(options), false); - verify(aliasExists(indices).setIndicesOptions(options), false); verify(getAliases(indices).setIndicesOptions(options), false); verify(getFieldMapping(indices).setIndicesOptions(options), false); verify(getMapping(indices).setIndicesOptions(options), false); @@ -557,25 +541,23 @@ public void testDeleteIndexWildcard() throws Exception { public void testPutAlias() throws Exception { createIndex("foobar"); verify(client().admin().indices().prepareAliases().addAlias("foobar", "foobar_alias"), false); - assertThat(client().admin().indices().prepareAliasesExist("foobar_alias").setIndices("foobar").get().exists(), equalTo(true)); - + assertFalse(client().admin().indices().prepareGetAliases("foobar_alias").setIndices("foobar").get().getAliases().isEmpty()); } public void 
testPutAliasWildcard() throws Exception { createIndex("foo", "foobar", "bar", "barbaz"); verify(client().admin().indices().prepareAliases().addAlias("foo*", "foobar_alias"), false); - assertThat(client().admin().indices().prepareAliasesExist("foobar_alias").setIndices("foo").get().exists(), equalTo(true)); - assertThat(client().admin().indices().prepareAliasesExist("foobar_alias").setIndices("foobar").get().exists(), equalTo(true)); - assertThat(client().admin().indices().prepareAliasesExist("foobar_alias").setIndices("bar").get().exists(), equalTo(false)); - assertThat(client().admin().indices().prepareAliasesExist("foobar_alias").setIndices("barbaz").get().exists(), equalTo(false)); + assertFalse(client().admin().indices().prepareGetAliases("foobar_alias").setIndices("foo").get().getAliases().isEmpty()); + assertFalse(client().admin().indices().prepareGetAliases("foobar_alias").setIndices("foobar").get().getAliases().isEmpty()); + assertTrue(client().admin().indices().prepareGetAliases("foobar_alias").setIndices("bar").get().getAliases().isEmpty()); + assertTrue(client().admin().indices().prepareGetAliases("foobar_alias").setIndices("barbaz").get().getAliases().isEmpty()); verify(client().admin().indices().prepareAliases().addAlias("*", "foobar_alias"), false); - assertThat(client().admin().indices().prepareAliasesExist("foobar_alias").setIndices("foo").get().exists(), equalTo(true)); - assertThat(client().admin().indices().prepareAliasesExist("foobar_alias").setIndices("foobar").get().exists(), equalTo(true)); - assertThat(client().admin().indices().prepareAliasesExist("foobar_alias").setIndices("bar").get().exists(), equalTo(true)); - assertThat(client().admin().indices().prepareAliasesExist("foobar_alias").setIndices("barbaz").get().exists(), equalTo(true)); - + assertFalse(client().admin().indices().prepareGetAliases("foobar_alias").setIndices("foo").get().getAliases().isEmpty()); + 
assertFalse(client().admin().indices().prepareGetAliases("foobar_alias").setIndices("foobar").get().getAliases().isEmpty()); + assertFalse(client().admin().indices().prepareGetAliases("foobar_alias").setIndices("bar").get().getAliases().isEmpty()); + assertFalse(client().admin().indices().prepareGetAliases("foobar_alias").setIndices("barbaz").get().getAliases().isEmpty()); } public void testPutMapping() throws Exception { @@ -705,10 +687,6 @@ static ValidateQueryRequestBuilder validateQuery(String... indices) { return client().admin().indices().prepareValidateQuery(indices); } - private static AliasesExistRequestBuilder aliasExists(String... indices) { - return client().admin().indices().prepareAliasesExist("dummy").addIndices(indices); - } - static GetAliasesRequestBuilder getAliases(String... indices) { return client().admin().indices().prepareGetAliases("dummy").addIndices(indices); } diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index d5d9660dbcf1e..9d1926f0d5ae5 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -108,8 +108,6 @@ import org.opensearch.action.admin.indices.alias.IndicesAliasesAction; import org.opensearch.action.admin.indices.alias.IndicesAliasesRequest; import org.opensearch.action.admin.indices.alias.TransportIndicesAliasesAction; -import org.opensearch.action.admin.indices.alias.exists.AliasesExistAction; -import org.opensearch.action.admin.indices.alias.exists.TransportAliasesExistAction; import org.opensearch.action.admin.indices.alias.get.GetAliasesAction; import org.opensearch.action.admin.indices.alias.get.TransportGetAliasesAction; import org.opensearch.action.admin.indices.analyze.AnalyzeAction; @@ -590,7 +588,6 @@ public void reg actions.register(UpgradeSettingsAction.INSTANCE, TransportUpgradeSettingsAction.class); 
actions.register(ClearIndicesCacheAction.INSTANCE, TransportClearIndicesCacheAction.class); actions.register(GetAliasesAction.INSTANCE, TransportGetAliasesAction.class); - actions.register(AliasesExistAction.INSTANCE, TransportAliasesExistAction.class); actions.register(GetSettingsAction.INSTANCE, TransportGetSettingsAction.class); actions.register(IndexAction.INSTANCE, TransportIndexAction.class); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/exists/AliasesExistAction.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/exists/AliasesExistAction.java deleted file mode 100644 index 964648ab04705..0000000000000 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/exists/AliasesExistAction.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
- */ - -package org.opensearch.action.admin.indices.alias.exists; - -import org.opensearch.action.ActionType; - -public class AliasesExistAction extends ActionType { - - public static final AliasesExistAction INSTANCE = new AliasesExistAction(); - public static final String NAME = "indices:admin/aliases/exists"; - - private AliasesExistAction() { - super(NAME, AliasesExistResponse::new); - } -} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/exists/AliasesExistRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/exists/AliasesExistRequestBuilder.java deleted file mode 100644 index 20949360ddab8..0000000000000 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/exists/AliasesExistRequestBuilder.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
- */ - -package org.opensearch.action.admin.indices.alias.exists; - -import org.opensearch.action.admin.indices.alias.get.BaseAliasesRequestBuilder; -import org.opensearch.client.OpenSearchClient; - -public class AliasesExistRequestBuilder extends BaseAliasesRequestBuilder { - - public AliasesExistRequestBuilder(OpenSearchClient client, AliasesExistAction action, String... aliases) { - super(client, action, aliases); - } -} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/exists/AliasesExistResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/exists/AliasesExistResponse.java deleted file mode 100644 index 447f10e5ab34a..0000000000000 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/exists/AliasesExistResponse.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
- */ - -package org.opensearch.action.admin.indices.alias.exists; - -import org.opensearch.action.ActionResponse; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; - -import java.io.IOException; - -public class AliasesExistResponse extends ActionResponse { - - private boolean exists; - - public AliasesExistResponse(boolean exists) { - this.exists = exists; - } - - AliasesExistResponse(StreamInput in) throws IOException { - super(in); - exists = in.readBoolean(); - } - - public boolean exists() { - return exists; - } - - public boolean isExists() { - return exists(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeBoolean(exists); - } -} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/exists/TransportAliasesExistAction.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/exists/TransportAliasesExistAction.java deleted file mode 100644 index 8a86a27561527..0000000000000 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/exists/TransportAliasesExistAction.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.action.admin.indices.alias.exists; - -import org.opensearch.action.ActionListener; -import org.opensearch.action.admin.indices.alias.get.GetAliasesRequest; -import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.TransportMasterNodeReadAction; -import org.opensearch.cluster.ClusterState; -import org.opensearch.cluster.block.ClusterBlockException; -import org.opensearch.cluster.block.ClusterBlockLevel; -import org.opensearch.cluster.metadata.IndexNameExpressionResolver; -import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.inject.Inject; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.threadpool.ThreadPool; -import org.opensearch.transport.TransportService; - -import java.io.IOException; - -public class TransportAliasesExistAction extends TransportMasterNodeReadAction { - - @Inject - public TransportAliasesExistAction( - TransportService transportService, - ClusterService clusterService, - ThreadPool threadPool, - ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver - ) { - super( - AliasesExistAction.NAME, - transportService, - clusterService, - threadPool, - actionFilters, - GetAliasesRequest::new, - indexNameExpressionResolver - ); - } - - @Override - protected String executor() { - // very lightweight operation, no need to fork - return ThreadPool.Names.SAME; - } - - @Override - 
protected AliasesExistResponse read(StreamInput in) throws IOException { - return new AliasesExistResponse(in); - } - - @Override - protected ClusterBlockException checkBlock(GetAliasesRequest request, ClusterState state) { - return state.blocks() - .indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndexNames(state, request)); - } - - @Override - protected void masterOperation(GetAliasesRequest request, ClusterState state, ActionListener listener) { - String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request); - boolean result = state.metadata().hasAliases(request.aliases(), concreteIndices); - listener.onResponse(new AliasesExistResponse(result)); - } - -} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/exists/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/exists/package-info.java deleted file mode 100644 index dd4886e6b3419..0000000000000 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/exists/package-info.java +++ /dev/null @@ -1,10 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/** Alias Exists transport handler. 
*/ -package org.opensearch.action.admin.indices.alias.exists; diff --git a/server/src/main/java/org/opensearch/client/IndicesAdminClient.java b/server/src/main/java/org/opensearch/client/IndicesAdminClient.java index 35f880dbff5e3..50430e3eb8507 100644 --- a/server/src/main/java/org/opensearch/client/IndicesAdminClient.java +++ b/server/src/main/java/org/opensearch/client/IndicesAdminClient.java @@ -36,8 +36,6 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.alias.IndicesAliasesRequest; import org.opensearch.action.admin.indices.alias.IndicesAliasesRequestBuilder; -import org.opensearch.action.admin.indices.alias.exists.AliasesExistRequestBuilder; -import org.opensearch.action.admin.indices.alias.exists.AliasesExistResponse; import org.opensearch.action.admin.indices.alias.get.GetAliasesRequest; import org.opensearch.action.admin.indices.alias.get.GetAliasesRequestBuilder; import org.opensearch.action.admin.indices.alias.get.GetAliasesResponse; @@ -564,26 +562,6 @@ public interface IndicesAdminClient extends OpenSearchClient { */ GetAliasesRequestBuilder prepareGetAliases(String... aliases); - /** - * Allows to check to existence of aliases from indices. - */ - AliasesExistRequestBuilder prepareAliasesExist(String... aliases); - - /** - * Check to existence of index aliases. - * - * @param request The result future - */ - ActionFuture aliasesExist(GetAliasesRequest request); - - /** - * Check the existence of specified index aliases. - * - * @param request The index aliases request - * @param listener A listener to be notified with a result - */ - void aliasesExist(GetAliasesRequest request, ActionListener listener); - /** * Get index metadata for particular indices. 
* diff --git a/server/src/main/java/org/opensearch/client/support/AbstractClient.java b/server/src/main/java/org/opensearch/client/support/AbstractClient.java index 96380904304b5..4c8f5935e3680 100644 --- a/server/src/main/java/org/opensearch/client/support/AbstractClient.java +++ b/server/src/main/java/org/opensearch/client/support/AbstractClient.java @@ -154,9 +154,6 @@ import org.opensearch.action.admin.indices.alias.IndicesAliasesAction; import org.opensearch.action.admin.indices.alias.IndicesAliasesRequest; import org.opensearch.action.admin.indices.alias.IndicesAliasesRequestBuilder; -import org.opensearch.action.admin.indices.alias.exists.AliasesExistAction; -import org.opensearch.action.admin.indices.alias.exists.AliasesExistRequestBuilder; -import org.opensearch.action.admin.indices.alias.exists.AliasesExistResponse; import org.opensearch.action.admin.indices.alias.get.GetAliasesAction; import org.opensearch.action.admin.indices.alias.get.GetAliasesRequest; import org.opensearch.action.admin.indices.alias.get.GetAliasesRequestBuilder; @@ -1363,21 +1360,6 @@ public ActionFuture clearCache(final ClearIndicesCach return execute(ClearIndicesCacheAction.INSTANCE, request); } - @Override - public void aliasesExist(GetAliasesRequest request, ActionListener listener) { - execute(AliasesExistAction.INSTANCE, request, listener); - } - - @Override - public ActionFuture aliasesExist(GetAliasesRequest request) { - return execute(AliasesExistAction.INSTANCE, request); - } - - @Override - public AliasesExistRequestBuilder prepareAliasesExist(String... 
aliases) { - return new AliasesExistRequestBuilder(this, AliasesExistAction.INSTANCE, aliases); - } - @Override public ActionFuture getIndex(GetIndexRequest request) { return execute(GetIndexAction.INSTANCE, request); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java index 6e9c30877f9c2..7cf3700402b6b 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java @@ -421,38 +421,6 @@ private ImmutableOpenMap> findAliases(final String[] return mapBuilder.build(); } - /** - * Checks if at least one of the specified aliases exists in the specified concrete indices. Wildcards are supported in the - * alias names for partial matches. - * - * @param aliases The names of the index aliases to find - * @param concreteIndices The concrete indexes the index aliases must point to order to be returned. - * @return whether at least one of the specified aliases exists in one of the specified concrete indices. - */ - public boolean hasAliases(final String[] aliases, String[] concreteIndices) { - assert aliases != null; - assert concreteIndices != null; - if (concreteIndices.length == 0) { - return false; - } - - Iterable intersection = HppcMaps.intersection(ObjectHashSet.from(concreteIndices), indices.keys()); - for (String index : intersection) { - IndexMetadata indexMetadata = indices.get(index); - List filteredValues = new ArrayList<>(); - for (ObjectCursor cursor : indexMetadata.getAliases().values()) { - AliasMetadata value = cursor.value; - if (Regex.simpleMatch(aliases, value.alias())) { - filteredValues.add(value); - } - } - if (!filteredValues.isEmpty()) { - return true; - } - } - return false; - } - /** * Finds all mappings for concrete indices. 
Only fields that match the provided field * filter will be returned (default is a predicate that always returns true, which can be diff --git a/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchAssertions.java b/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchAssertions.java index 28afed1a50e59..16d44d1f8eeb4 100644 --- a/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchAssertions.java +++ b/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchAssertions.java @@ -40,7 +40,6 @@ import org.opensearch.action.ActionRequestBuilder; import org.opensearch.action.admin.cluster.health.ClusterHealthRequestBuilder; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; -import org.opensearch.action.admin.indices.alias.exists.AliasesExistResponse; import org.opensearch.action.admin.indices.create.CreateIndexResponse; import org.opensearch.action.admin.indices.delete.DeleteIndexRequestBuilder; import org.opensearch.action.admin.indices.template.get.GetIndexTemplatesResponse; @@ -104,7 +103,6 @@ import static org.hamcrest.Matchers.notNullValue; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -518,20 +516,6 @@ public static void assertIndexTemplateExists(GetIndexTemplatesResponse templates assertThat(templateNames, hasItem(name)); } - /** - * Assert that aliases are missing - */ - public static void assertAliasesMissing(AliasesExistResponse aliasesExistResponse) { - assertFalse("Aliases shouldn't exist", aliasesExistResponse.exists()); - } - - /** - * Assert that aliases exist - */ - public static void assertAliasesExist(AliasesExistResponse aliasesExistResponse) { - assertTrue("Aliases should exist", aliasesExistResponse.exists()); - } - /* * matchers */ From 
1b2f154a924c09634cbf5d081a909796ce552c1c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 May 2022 10:09:55 -0700 Subject: [PATCH 146/653] Bump gax-httpjson from 0.62.0 to 0.101.0 in /plugins/repository-gcs (#3058) * Bump gax-httpjson from 0.62.0 to 0.101.0 in /plugins/repository-gcs Bumps [gax-httpjson](https://github.com/googleapis/gax-java) from 0.62.0 to 0.101.0. - [Release notes](https://github.com/googleapis/gax-java/releases) - [Changelog](https://github.com/googleapis/gax-java/blob/main/CHANGELOG.md) - [Commits](https://github.com/googleapis/gax-java/commits) --- updated-dependencies: - dependency-name: com.google.api:gax-httpjson dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Added ignoreMissingClasses configuration for gax classes Signed-off-by: Kartik Ganesh Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] Co-authored-by: Kartik Ganesh --- plugins/repository-gcs/build.gradle | 6 +++++- .../repository-gcs/licenses/gax-httpjson-0.101.0.jar.sha1 | 1 + .../repository-gcs/licenses/gax-httpjson-0.62.0.jar.sha1 | 1 - 3 files changed, 6 insertions(+), 2 deletions(-) create mode 100644 plugins/repository-gcs/licenses/gax-httpjson-0.101.0.jar.sha1 delete mode 100644 plugins/repository-gcs/licenses/gax-httpjson-0.62.0.jar.sha1 diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index 241cd70eba071..aa901758889fc 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -74,7 +74,7 @@ dependencies { api 'com.google.http-client:google-http-client-appengine:1.35.0' api 'com.google.http-client:google-http-client-jackson2:1.35.0' api 'com.google.http-client:google-http-client-gson:1.41.4' - api 'com.google.api:gax-httpjson:0.62.0' + api 
'com.google.api:gax-httpjson:0.101.0' api 'io.grpc:grpc-context:1.45.1' api 'io.opencensus:opencensus-api:0.18.0' api 'io.opencensus:opencensus-contrib-http-util:0.18.0' @@ -145,6 +145,10 @@ thirdPartyAudit { 'com.google.appengine.api.urlfetch.HTTPResponse', 'com.google.appengine.api.urlfetch.URLFetchService', 'com.google.appengine.api.urlfetch.URLFetchServiceFactory', + 'com.google.api.gax.rpc.ApiCallContext$Key', + 'com.google.api.gax.rpc.internal.ApiCallContextOptions', + 'com.google.api.gax.rpc.mtls.MtlsProvider', + 'com.google.api.gax.tracing.BaseApiTracer', // commons-logging optional dependencies 'org.apache.avalon.framework.logger.Logger', 'org.apache.log.Hierarchy', diff --git a/plugins/repository-gcs/licenses/gax-httpjson-0.101.0.jar.sha1 b/plugins/repository-gcs/licenses/gax-httpjson-0.101.0.jar.sha1 new file mode 100644 index 0000000000000..f722ccbd86c54 --- /dev/null +++ b/plugins/repository-gcs/licenses/gax-httpjson-0.101.0.jar.sha1 @@ -0,0 +1 @@ +e056920e5df4086270e6c3d2e3a16d8a7585fd13 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/gax-httpjson-0.62.0.jar.sha1 b/plugins/repository-gcs/licenses/gax-httpjson-0.62.0.jar.sha1 deleted file mode 100644 index 161ca85ccfc0c..0000000000000 --- a/plugins/repository-gcs/licenses/gax-httpjson-0.62.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -05a1a4736acd1c4f30304be953532be6aecdc2c9 \ No newline at end of file From c13b679aad33f1f4b622c926cc468b6bb3fff7ef Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Tue, 3 May 2022 14:44:59 -0500 Subject: [PATCH 147/653] [Javadocs] add to o.o.bootstrap, cli, and client (#3163) Adds javadocs to o.o.bootstrap, o.o.cli, and o.o.client packages. 
Signed-off-by: Nicholas Walter Knize --- build.gradle | 9 +++++---- .../main/java/org/opensearch/bootstrap/Bootstrap.java | 2 ++ .../java/org/opensearch/bootstrap/BootstrapCheck.java | 2 ++ .../java/org/opensearch/bootstrap/BootstrapChecks.java | 2 ++ .../java/org/opensearch/bootstrap/BootstrapContext.java | 2 ++ .../org/opensearch/bootstrap/BootstrapException.java | 2 ++ .../java/org/opensearch/bootstrap/BootstrapInfo.java | 2 ++ .../java/org/opensearch/bootstrap/BootstrapSettings.java | 5 +++++ .../org/opensearch/bootstrap/ConsoleCtrlHandler.java | 5 +++++ .../org/opensearch/bootstrap/FilePermissionUtils.java | 5 +++++ .../main/java/org/opensearch/bootstrap/JNACLibrary.java | 2 ++ .../org/opensearch/bootstrap/JNAKernel32Library.java | 2 ++ .../main/java/org/opensearch/bootstrap/JNANatives.java | 2 ++ .../src/main/java/org/opensearch/bootstrap/Natives.java | 2 ++ .../main/java/org/opensearch/bootstrap/OpenSearch.java | 2 ++ .../java/org/opensearch/bootstrap/OpenSearchPolicy.java | 6 +++++- .../bootstrap/OpenSearchUncaughtExceptionHandler.java | 5 +++++ .../src/main/java/org/opensearch/bootstrap/Security.java | 2 ++ .../src/main/java/org/opensearch/bootstrap/Spawner.java | 2 ++ .../java/org/opensearch/bootstrap/StartupException.java | 2 ++ .../java/org/opensearch/bootstrap/SystemCallFilter.java | 2 ++ .../org/opensearch/cli/CommandLoggingConfigurator.java | 2 ++ .../java/org/opensearch/cli/EnvironmentAwareCommand.java | 6 +++++- .../java/org/opensearch/cli/KeyStoreAwareCommand.java | 2 ++ .../java/org/opensearch/cli/LoggingAwareCommand.java | 2 ++ .../org/opensearch/cli/LoggingAwareMultiCommand.java | 2 ++ .../src/main/java/org/opensearch/client/AdminClient.java | 3 ++- server/src/main/java/org/opensearch/client/Client.java | 2 ++ .../java/org/opensearch/client/ClusterAdminClient.java | 2 ++ .../main/java/org/opensearch/client/FilterClient.java | 2 ++ .../java/org/opensearch/client/IndicesAdminClient.java | 2 ++ .../java/org/opensearch/client/OpenSearchClient.java 
| 5 +++++ .../java/org/opensearch/client/OriginSettingClient.java | 2 ++ .../org/opensearch/client/ParentTaskAssigningClient.java | 2 ++ server/src/main/java/org/opensearch/client/Requests.java | 2 ++ .../main/java/org/opensearch/client/node/NodeClient.java | 2 ++ .../org/opensearch/client/support/AbstractClient.java | 5 +++++ .../client/transport/NoNodeAvailableException.java | 2 ++ 38 files changed, 103 insertions(+), 7 deletions(-) diff --git a/build.gradle b/build.gradle index 6209aeff64492..17d4bc864ca05 100644 --- a/build.gradle +++ b/build.gradle @@ -278,6 +278,7 @@ allprojects { // see https://discuss.gradle.org/t/add-custom-javadoc-option-that-does-not-take-an-argument/5959 javadoc.options.encoding = 'UTF8' javadoc.options.addStringOption('Xdoclint:all,-missing', '-quiet') + javadoc.options.tags = ["opensearch.internal", "opensearch.api", "opensearch.experimental"] } // support for reproducible builds @@ -387,7 +388,7 @@ gradle.projectsEvaluated { } } } - + dependencies { subprojects.findAll { it.pluginManager.hasPlugin('java') }.forEach { testReportAggregation it @@ -413,7 +414,7 @@ subprojects { // eclipse configuration allprojects { apply plugin: 'eclipse' - + // Name all the non-root projects after their path so that paths get grouped together when imported into eclipse. 
if (path != ':') { eclipse.project.name = path @@ -573,12 +574,12 @@ subprojects { reporting { reports { - testAggregateTestReport(AggregateTestReport) { + testAggregateTestReport(AggregateTestReport) { testType = TestSuiteType.UNIT_TEST } } } tasks.named(JavaBasePlugin.CHECK_TASK_NAME) { - dependsOn tasks.named('testAggregateTestReport', TestReport) + dependsOn tasks.named('testAggregateTestReport', TestReport) } diff --git a/server/src/main/java/org/opensearch/bootstrap/Bootstrap.java b/server/src/main/java/org/opensearch/bootstrap/Bootstrap.java index c0c0251538d01..8c4a550a9b5f5 100644 --- a/server/src/main/java/org/opensearch/bootstrap/Bootstrap.java +++ b/server/src/main/java/org/opensearch/bootstrap/Bootstrap.java @@ -82,6 +82,8 @@ /** * Internal startup code. + * + * @opensearch.internal */ final class Bootstrap { diff --git a/server/src/main/java/org/opensearch/bootstrap/BootstrapCheck.java b/server/src/main/java/org/opensearch/bootstrap/BootstrapCheck.java index 230b1bd05e579..429612ba1b93d 100644 --- a/server/src/main/java/org/opensearch/bootstrap/BootstrapCheck.java +++ b/server/src/main/java/org/opensearch/bootstrap/BootstrapCheck.java @@ -36,6 +36,8 @@ /** * Encapsulates a bootstrap check. + * + * @opensearch.internal */ public interface BootstrapCheck { diff --git a/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java b/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java index 6b75f2306431c..7953dee644ea4 100644 --- a/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java +++ b/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java @@ -71,6 +71,8 @@ * We enforce bootstrap checks once a node has the transport protocol bound to a non-loopback interface or if the system property {@code * opensearch.enforce.bootstrap.checks} is set to {@true}. In this case we assume the node is running in production and * all bootstrap checks must pass. 
+ * + * @opensearch.internal */ final class BootstrapChecks { diff --git a/server/src/main/java/org/opensearch/bootstrap/BootstrapContext.java b/server/src/main/java/org/opensearch/bootstrap/BootstrapContext.java index b07a544e8521e..1cfd8bf6dfc35 100644 --- a/server/src/main/java/org/opensearch/bootstrap/BootstrapContext.java +++ b/server/src/main/java/org/opensearch/bootstrap/BootstrapContext.java @@ -37,6 +37,8 @@ /** * Context that is passed to every bootstrap check to make decisions on. + * + * @opensearch.internal */ public class BootstrapContext { /** diff --git a/server/src/main/java/org/opensearch/bootstrap/BootstrapException.java b/server/src/main/java/org/opensearch/bootstrap/BootstrapException.java index 8daa29aa3578b..77b66553cb46d 100644 --- a/server/src/main/java/org/opensearch/bootstrap/BootstrapException.java +++ b/server/src/main/java/org/opensearch/bootstrap/BootstrapException.java @@ -41,6 +41,8 @@ * these checked exceptions so that * {@link Bootstrap#init(boolean, Path, boolean, org.opensearch.env.Environment)} * does not have to declare all of these checked exceptions. 
+ * + * @opensearch.internal */ class BootstrapException extends Exception { diff --git a/server/src/main/java/org/opensearch/bootstrap/BootstrapInfo.java b/server/src/main/java/org/opensearch/bootstrap/BootstrapInfo.java index d45d8ddab9c2c..0aa965ce46096 100644 --- a/server/src/main/java/org/opensearch/bootstrap/BootstrapInfo.java +++ b/server/src/main/java/org/opensearch/bootstrap/BootstrapInfo.java @@ -39,6 +39,8 @@ /** * Exposes system startup information + * + * @opensearch.internal */ @SuppressForbidden(reason = "exposes read-only view of system properties") public final class BootstrapInfo { diff --git a/server/src/main/java/org/opensearch/bootstrap/BootstrapSettings.java b/server/src/main/java/org/opensearch/bootstrap/BootstrapSettings.java index 208030b5d6d12..911bc92c433f1 100644 --- a/server/src/main/java/org/opensearch/bootstrap/BootstrapSettings.java +++ b/server/src/main/java/org/opensearch/bootstrap/BootstrapSettings.java @@ -35,6 +35,11 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; +/** + * Settings used for bootstrapping OpenSearch + * + * @opensearch.internal + */ public final class BootstrapSettings { private BootstrapSettings() {} diff --git a/server/src/main/java/org/opensearch/bootstrap/ConsoleCtrlHandler.java b/server/src/main/java/org/opensearch/bootstrap/ConsoleCtrlHandler.java index cbc1b6e761923..ae9b6ea7ce761 100644 --- a/server/src/main/java/org/opensearch/bootstrap/ConsoleCtrlHandler.java +++ b/server/src/main/java/org/opensearch/bootstrap/ConsoleCtrlHandler.java @@ -32,6 +32,11 @@ package org.opensearch.bootstrap; +/** + * Handler for ctrl events on the console + * + * @opensearch.internal + */ public interface ConsoleCtrlHandler { int CTRL_CLOSE_EVENT = 2; diff --git a/server/src/main/java/org/opensearch/bootstrap/FilePermissionUtils.java b/server/src/main/java/org/opensearch/bootstrap/FilePermissionUtils.java index b37aef5f9738d..18ff013b1e8cb 100644 --- 
a/server/src/main/java/org/opensearch/bootstrap/FilePermissionUtils.java +++ b/server/src/main/java/org/opensearch/bootstrap/FilePermissionUtils.java @@ -40,6 +40,11 @@ import java.nio.file.Path; import java.security.Permissions; +/** + * Utility for File Permissions during bootstrap + * + * @opensearch.internal + */ public class FilePermissionUtils { /** no instantiation */ diff --git a/server/src/main/java/org/opensearch/bootstrap/JNACLibrary.java b/server/src/main/java/org/opensearch/bootstrap/JNACLibrary.java index 7d3ce7728a9dc..aca2cb9656025 100644 --- a/server/src/main/java/org/opensearch/bootstrap/JNACLibrary.java +++ b/server/src/main/java/org/opensearch/bootstrap/JNACLibrary.java @@ -45,6 +45,8 @@ /** * java mapping to some libc functions + * + * @opensearch.internal */ final class JNACLibrary { diff --git a/server/src/main/java/org/opensearch/bootstrap/JNAKernel32Library.java b/server/src/main/java/org/opensearch/bootstrap/JNAKernel32Library.java index a0cd656f5fc01..8e556df4b2f9b 100644 --- a/server/src/main/java/org/opensearch/bootstrap/JNAKernel32Library.java +++ b/server/src/main/java/org/opensearch/bootstrap/JNAKernel32Library.java @@ -51,6 +51,8 @@ /** * Library for Windows/Kernel32 + * + * @opensearch.internal */ final class JNAKernel32Library { diff --git a/server/src/main/java/org/opensearch/bootstrap/JNANatives.java b/server/src/main/java/org/opensearch/bootstrap/JNANatives.java index 1e3c6c3af0386..033596033b0fd 100644 --- a/server/src/main/java/org/opensearch/bootstrap/JNANatives.java +++ b/server/src/main/java/org/opensearch/bootstrap/JNANatives.java @@ -48,6 +48,8 @@ /** * This class performs the actual work with JNA and library bindings to call native methods. 
It should only be used after * we are sure that the JNA classes are available to the JVM + * + * @opensearch.internal */ class JNANatives { diff --git a/server/src/main/java/org/opensearch/bootstrap/Natives.java b/server/src/main/java/org/opensearch/bootstrap/Natives.java index 65025e41958ce..aa5e29fbb2591 100644 --- a/server/src/main/java/org/opensearch/bootstrap/Natives.java +++ b/server/src/main/java/org/opensearch/bootstrap/Natives.java @@ -40,6 +40,8 @@ /** * The Natives class is a wrapper class that checks if the classes necessary for calling native methods are available on * startup. If they are not available, this class will avoid calling code that loads these classes. + * + * @opensearch.internal */ final class Natives { /** no instantiation */ diff --git a/server/src/main/java/org/opensearch/bootstrap/OpenSearch.java b/server/src/main/java/org/opensearch/bootstrap/OpenSearch.java index 7f96ea425d17e..ab52ae5a43a2a 100644 --- a/server/src/main/java/org/opensearch/bootstrap/OpenSearch.java +++ b/server/src/main/java/org/opensearch/bootstrap/OpenSearch.java @@ -55,6 +55,8 @@ /** * This class starts opensearch. 
+ * + * @opensearch.internal */ class OpenSearch extends EnvironmentAwareCommand { diff --git a/server/src/main/java/org/opensearch/bootstrap/OpenSearchPolicy.java b/server/src/main/java/org/opensearch/bootstrap/OpenSearchPolicy.java index d31f11f153ae8..14435db64274c 100644 --- a/server/src/main/java/org/opensearch/bootstrap/OpenSearchPolicy.java +++ b/server/src/main/java/org/opensearch/bootstrap/OpenSearchPolicy.java @@ -48,7 +48,11 @@ import java.util.Map; import java.util.function.Predicate; -/** custom policy for union of static and dynamic permissions */ +/** + * custom policy for union of static and dynamic permissions + * + * @opensearch.internal + **/ final class OpenSearchPolicy extends Policy { /** template policy file, the one used in tests */ diff --git a/server/src/main/java/org/opensearch/bootstrap/OpenSearchUncaughtExceptionHandler.java b/server/src/main/java/org/opensearch/bootstrap/OpenSearchUncaughtExceptionHandler.java index 4834f4cb211fe..2b28260097ce1 100644 --- a/server/src/main/java/org/opensearch/bootstrap/OpenSearchUncaughtExceptionHandler.java +++ b/server/src/main/java/org/opensearch/bootstrap/OpenSearchUncaughtExceptionHandler.java @@ -41,6 +41,11 @@ import java.security.AccessController; import java.security.PrivilegedAction; +/** + * UncaughtException Handler used during bootstrapping + * + * @opensearch.internal + */ class OpenSearchUncaughtExceptionHandler implements Thread.UncaughtExceptionHandler { private static final Logger logger = LogManager.getLogger(OpenSearchUncaughtExceptionHandler.class); diff --git a/server/src/main/java/org/opensearch/bootstrap/Security.java b/server/src/main/java/org/opensearch/bootstrap/Security.java index 59ca91a68e025..749c146de4f16 100644 --- a/server/src/main/java/org/opensearch/bootstrap/Security.java +++ b/server/src/main/java/org/opensearch/bootstrap/Security.java @@ -116,6 +116,8 @@ * * See * Troubleshooting Security for information. 
+ * + * @opensearch.internal */ final class Security { /** no instantiation */ diff --git a/server/src/main/java/org/opensearch/bootstrap/Spawner.java b/server/src/main/java/org/opensearch/bootstrap/Spawner.java index e2fae1f196412..e2df60af6c1d1 100644 --- a/server/src/main/java/org/opensearch/bootstrap/Spawner.java +++ b/server/src/main/java/org/opensearch/bootstrap/Spawner.java @@ -51,6 +51,8 @@ /** * Spawns native module controller processes if present. Will only work prior to a system call filter being installed. + * + * @opensearch.internal */ final class Spawner implements Closeable { diff --git a/server/src/main/java/org/opensearch/bootstrap/StartupException.java b/server/src/main/java/org/opensearch/bootstrap/StartupException.java index 735e8cfd287af..9ad89ab518006 100644 --- a/server/src/main/java/org/opensearch/bootstrap/StartupException.java +++ b/server/src/main/java/org/opensearch/bootstrap/StartupException.java @@ -44,6 +44,8 @@ * "reasonably". This means limits on stacktrace frames and * cleanup for guice, and some guidance about consulting full * logs for the whole exception. + * + * @opensearch.internal */ // TODO: remove this when guice is removed, and exceptions are cleaned up // this is horrible, but its what we must do diff --git a/server/src/main/java/org/opensearch/bootstrap/SystemCallFilter.java b/server/src/main/java/org/opensearch/bootstrap/SystemCallFilter.java index 8e179de9c28df..f8baee06c4315 100644 --- a/server/src/main/java/org/opensearch/bootstrap/SystemCallFilter.java +++ b/server/src/main/java/org/opensearch/bootstrap/SystemCallFilter.java @@ -102,6 +102,8 @@ * https://reverse.put.as/wp-content/uploads/2011/06/The-Apple-Sandbox-BHDC2011-Paper.pdf * @see * https://docs.oracle.com/cd/E23824_01/html/821-1456/prbac-2.html + * + * @opensearch.internal */ // not an example of how to write code!!! 
final class SystemCallFilter { diff --git a/server/src/main/java/org/opensearch/cli/CommandLoggingConfigurator.java b/server/src/main/java/org/opensearch/cli/CommandLoggingConfigurator.java index 1705e9ebdd4b9..8918725472160 100644 --- a/server/src/main/java/org/opensearch/cli/CommandLoggingConfigurator.java +++ b/server/src/main/java/org/opensearch/cli/CommandLoggingConfigurator.java @@ -39,6 +39,8 @@ /** * Holder class for method to configure logging without OpenSearch configuration files for use in CLI tools that will not read such * files. + * + * @opensearch.internal */ public final class CommandLoggingConfigurator { diff --git a/server/src/main/java/org/opensearch/cli/EnvironmentAwareCommand.java b/server/src/main/java/org/opensearch/cli/EnvironmentAwareCommand.java index e85af87213fd0..10c59ef673050 100644 --- a/server/src/main/java/org/opensearch/cli/EnvironmentAwareCommand.java +++ b/server/src/main/java/org/opensearch/cli/EnvironmentAwareCommand.java @@ -46,7 +46,11 @@ import java.util.Locale; import java.util.Map; -/** A cli command which requires an {@link org.opensearch.env.Environment} to use current paths and settings. */ +/** + * A cli command which requires an {@link org.opensearch.env.Environment} to use current paths and settings. + * + * @opensearch.internal + */ public abstract class EnvironmentAwareCommand extends Command { private final OptionSpec settingOption; diff --git a/server/src/main/java/org/opensearch/cli/KeyStoreAwareCommand.java b/server/src/main/java/org/opensearch/cli/KeyStoreAwareCommand.java index 404dacd504e8a..17067909315b2 100644 --- a/server/src/main/java/org/opensearch/cli/KeyStoreAwareCommand.java +++ b/server/src/main/java/org/opensearch/cli/KeyStoreAwareCommand.java @@ -44,6 +44,8 @@ /** * An {@link org.opensearch.cli.EnvironmentAwareCommand} that needs to access the opensearch keystore, possibly * decrypting it if it is password protected. 
+ * + * @opensearch.internal */ public abstract class KeyStoreAwareCommand extends EnvironmentAwareCommand { public KeyStoreAwareCommand(String description) { diff --git a/server/src/main/java/org/opensearch/cli/LoggingAwareCommand.java b/server/src/main/java/org/opensearch/cli/LoggingAwareCommand.java index 7aee5e3fa1f76..07a6b7d523a33 100644 --- a/server/src/main/java/org/opensearch/cli/LoggingAwareCommand.java +++ b/server/src/main/java/org/opensearch/cli/LoggingAwareCommand.java @@ -35,6 +35,8 @@ /** * A command that is aware of logging. This class should be preferred over the base {@link Command} class for any CLI tools that depend on * core OpenSearch as they could directly or indirectly touch classes that touch logging and as such logging needs to be configured. + * + * @opensearch.internal */ public abstract class LoggingAwareCommand extends Command { diff --git a/server/src/main/java/org/opensearch/cli/LoggingAwareMultiCommand.java b/server/src/main/java/org/opensearch/cli/LoggingAwareMultiCommand.java index 2354a4978aec9..8453ed05a12d3 100644 --- a/server/src/main/java/org/opensearch/cli/LoggingAwareMultiCommand.java +++ b/server/src/main/java/org/opensearch/cli/LoggingAwareMultiCommand.java @@ -36,6 +36,8 @@ * A multi-command that is aware of logging. This class should be preferred over the base {@link MultiCommand} class for any CLI tools that * depend on core OpenSearch as they could directly or indirectly touch classes that touch logging and as such logging needs to be * configured. 
+ * + * @opensearch.internal */ public abstract class LoggingAwareMultiCommand extends MultiCommand { diff --git a/server/src/main/java/org/opensearch/client/AdminClient.java b/server/src/main/java/org/opensearch/client/AdminClient.java index 995466ba5746c..0c6c97b795983 100644 --- a/server/src/main/java/org/opensearch/client/AdminClient.java +++ b/server/src/main/java/org/opensearch/client/AdminClient.java @@ -35,8 +35,9 @@ /** * Administrative actions/operations against the cluster or the indices. * - * * @see org.opensearch.client.Client#admin() + * + * @opensearch.internal */ public interface AdminClient { diff --git a/server/src/main/java/org/opensearch/client/Client.java b/server/src/main/java/org/opensearch/client/Client.java index bca68834ca3cf..50f8f52253815 100644 --- a/server/src/main/java/org/opensearch/client/Client.java +++ b/server/src/main/java/org/opensearch/client/Client.java @@ -93,6 +93,8 @@ * A client can be retrieved from a started {@link org.opensearch.node.Node}. * * @see org.opensearch.node.Node#client() + * + * @opensearch.internal */ public interface Client extends OpenSearchClient, Releasable { diff --git a/server/src/main/java/org/opensearch/client/ClusterAdminClient.java b/server/src/main/java/org/opensearch/client/ClusterAdminClient.java index 1088b85ae271f..f4eaa979ff18c 100644 --- a/server/src/main/java/org/opensearch/client/ClusterAdminClient.java +++ b/server/src/main/java/org/opensearch/client/ClusterAdminClient.java @@ -139,6 +139,8 @@ * Administrative actions/operations against indices. 
* * @see AdminClient#cluster() + * + * @opensearch.internal */ public interface ClusterAdminClient extends OpenSearchClient { diff --git a/server/src/main/java/org/opensearch/client/FilterClient.java b/server/src/main/java/org/opensearch/client/FilterClient.java index 4d7bf2ed30b00..d121fdd77dfeb 100644 --- a/server/src/main/java/org/opensearch/client/FilterClient.java +++ b/server/src/main/java/org/opensearch/client/FilterClient.java @@ -43,6 +43,8 @@ * A {@link Client} that contains another {@link Client} which it * uses as its basic source, possibly transforming the requests / responses along the * way or providing additional functionality. + * + * @opensearch.internal */ public abstract class FilterClient extends AbstractClient { diff --git a/server/src/main/java/org/opensearch/client/IndicesAdminClient.java b/server/src/main/java/org/opensearch/client/IndicesAdminClient.java index 50430e3eb8507..c9cd0d0900b5a 100644 --- a/server/src/main/java/org/opensearch/client/IndicesAdminClient.java +++ b/server/src/main/java/org/opensearch/client/IndicesAdminClient.java @@ -132,6 +132,8 @@ * Administrative actions/operations against indices. 
* * @see AdminClient#indices() + * + * @opensearch.internal */ public interface IndicesAdminClient extends OpenSearchClient { diff --git a/server/src/main/java/org/opensearch/client/OpenSearchClient.java b/server/src/main/java/org/opensearch/client/OpenSearchClient.java index 9b092c98aa5f5..755aca1f573e0 100644 --- a/server/src/main/java/org/opensearch/client/OpenSearchClient.java +++ b/server/src/main/java/org/opensearch/client/OpenSearchClient.java @@ -39,6 +39,11 @@ import org.opensearch.action.ActionResponse; import org.opensearch.threadpool.ThreadPool; +/** + * Interface for an OpenSearch client implementation + * + * @opensearch.internal + */ public interface OpenSearchClient { /** diff --git a/server/src/main/java/org/opensearch/client/OriginSettingClient.java b/server/src/main/java/org/opensearch/client/OriginSettingClient.java index acb6352b06185..41fe9741cc4e7 100644 --- a/server/src/main/java/org/opensearch/client/OriginSettingClient.java +++ b/server/src/main/java/org/opensearch/client/OriginSettingClient.java @@ -46,6 +46,8 @@ * {@link ThreadContext#stashWithOrigin origin} set to a particular * value and calls its {@linkplain ActionListener} in its original * {@link ThreadContext}. + * + * @opensearch.internal */ public final class OriginSettingClient extends FilterClient { diff --git a/server/src/main/java/org/opensearch/client/ParentTaskAssigningClient.java b/server/src/main/java/org/opensearch/client/ParentTaskAssigningClient.java index 444f0ea778b05..58ea99fa96bc3 100644 --- a/server/src/main/java/org/opensearch/client/ParentTaskAssigningClient.java +++ b/server/src/main/java/org/opensearch/client/ParentTaskAssigningClient.java @@ -43,6 +43,8 @@ /** * A {@linkplain Client} that sets the parent task on all requests that it makes. Use this to conveniently implement actions that cause * many other actions. 
+ * + * @opensearch.internal */ public class ParentTaskAssigningClient extends FilterClient { private final TaskId parentTask; diff --git a/server/src/main/java/org/opensearch/client/Requests.java b/server/src/main/java/org/opensearch/client/Requests.java index d89f55a37a9cf..b04de7830a780 100644 --- a/server/src/main/java/org/opensearch/client/Requests.java +++ b/server/src/main/java/org/opensearch/client/Requests.java @@ -79,6 +79,8 @@ /** * A handy one stop shop for creating requests (make sure to import static this class). + * + * @opensearch.internal */ public class Requests { diff --git a/server/src/main/java/org/opensearch/client/node/NodeClient.java b/server/src/main/java/org/opensearch/client/node/NodeClient.java index bda7cdca91015..56cb7c406744a 100644 --- a/server/src/main/java/org/opensearch/client/node/NodeClient.java +++ b/server/src/main/java/org/opensearch/client/node/NodeClient.java @@ -52,6 +52,8 @@ /** * Client that executes actions on the local node. + * + * @opensearch.internal */ public class NodeClient extends AbstractClient { diff --git a/server/src/main/java/org/opensearch/client/support/AbstractClient.java b/server/src/main/java/org/opensearch/client/support/AbstractClient.java index 4c8f5935e3680..4fdf4b1166bd6 100644 --- a/server/src/main/java/org/opensearch/client/support/AbstractClient.java +++ b/server/src/main/java/org/opensearch/client/support/AbstractClient.java @@ -366,6 +366,11 @@ import java.util.Map; +/** + * Base client used to create concrete client implementations + * + * @opensearch.internal + */ public abstract class AbstractClient implements Client { protected final Logger logger; diff --git a/server/src/main/java/org/opensearch/client/transport/NoNodeAvailableException.java b/server/src/main/java/org/opensearch/client/transport/NoNodeAvailableException.java index a4ffe2102037e..e3424689c5152 100644 --- a/server/src/main/java/org/opensearch/client/transport/NoNodeAvailableException.java +++ 
b/server/src/main/java/org/opensearch/client/transport/NoNodeAvailableException.java @@ -40,6 +40,8 @@ /** * An exception indicating no node is available to perform the operation. + * + * @opensearch.internal */ public class NoNodeAvailableException extends OpenSearchException { From ad1c8038b01d6d82e5393d73bcbf28a43bb97bc2 Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Tue, 3 May 2022 22:16:48 -0500 Subject: [PATCH 148/653] [Javadocs] add to o.o.action.admin (#3155) Adds javadocs to classes in package o.o.action.admin. Signed-off-by: Nicholas Walter Knize --- gradle/missing-javadoc.gradle | 2 +- .../cluster/allocation/ClusterAllocationExplainAction.java | 2 ++ .../allocation/ClusterAllocationExplainRequest.java | 2 ++ .../allocation/ClusterAllocationExplainRequestBuilder.java | 2 ++ .../allocation/ClusterAllocationExplainResponse.java | 2 ++ .../cluster/allocation/ClusterAllocationExplanation.java | 2 ++ .../TransportClusterAllocationExplainAction.java | 2 ++ .../configuration/AddVotingConfigExclusionsAction.java | 7 ++++++- .../configuration/AddVotingConfigExclusionsRequest.java | 2 ++ .../configuration/AddVotingConfigExclusionsResponse.java | 2 ++ .../configuration/ClearVotingConfigExclusionsAction.java | 5 +++++ .../configuration/ClearVotingConfigExclusionsRequest.java | 2 ++ .../configuration/ClearVotingConfigExclusionsResponse.java | 2 ++ .../TransportAddVotingConfigExclusionsAction.java | 5 +++++ .../TransportClearVotingConfigExclusionsAction.java | 5 +++++ .../action/admin/cluster/health/ClusterHealthAction.java | 5 +++++ .../action/admin/cluster/health/ClusterHealthRequest.java | 5 +++++ .../admin/cluster/health/ClusterHealthRequestBuilder.java | 5 +++++ .../action/admin/cluster/health/ClusterHealthResponse.java | 5 +++++ .../admin/cluster/health/TransportClusterHealthAction.java | 5 +++++ .../admin/cluster/node/hotthreads/NodeHotThreads.java | 5 +++++ .../cluster/node/hotthreads/NodesHotThreadsAction.java | 5 +++++ 
.../cluster/node/hotthreads/NodesHotThreadsRequest.java | 5 +++++ .../node/hotthreads/NodesHotThreadsRequestBuilder.java | 5 +++++ .../cluster/node/hotthreads/NodesHotThreadsResponse.java | 5 +++++ .../node/hotthreads/TransportNodesHotThreadsAction.java | 5 +++++ .../action/admin/cluster/node/info/NodeInfo.java | 2 ++ .../action/admin/cluster/node/info/NodesInfoAction.java | 5 +++++ .../action/admin/cluster/node/info/NodesInfoRequest.java | 2 ++ .../admin/cluster/node/info/NodesInfoRequestBuilder.java | 5 +++++ .../action/admin/cluster/node/info/NodesInfoResponse.java | 5 +++++ .../action/admin/cluster/node/info/PluginsAndModules.java | 2 ++ .../admin/cluster/node/info/TransportNodesInfoAction.java | 5 +++++ .../admin/cluster/node/liveness/LivenessRequest.java | 2 ++ .../admin/cluster/node/liveness/LivenessResponse.java | 2 ++ .../cluster/node/liveness/TransportLivenessAction.java | 5 +++++ .../node/reload/NodesReloadSecureSettingsAction.java | 5 +++++ .../node/reload/NodesReloadSecureSettingsRequest.java | 2 ++ .../reload/NodesReloadSecureSettingsRequestBuilder.java | 2 ++ .../node/reload/NodesReloadSecureSettingsResponse.java | 2 ++ .../reload/TransportNodesReloadSecureSettingsAction.java | 5 +++++ .../action/admin/cluster/node/stats/NodeStats.java | 2 ++ .../action/admin/cluster/node/stats/NodesStatsAction.java | 5 +++++ .../action/admin/cluster/node/stats/NodesStatsRequest.java | 2 ++ .../admin/cluster/node/stats/NodesStatsRequestBuilder.java | 5 +++++ .../admin/cluster/node/stats/NodesStatsResponse.java | 5 +++++ .../cluster/node/stats/TransportNodesStatsAction.java | 5 +++++ .../admin/cluster/node/tasks/cancel/CancelTasksAction.java | 2 ++ .../cluster/node/tasks/cancel/CancelTasksRequest.java | 2 ++ .../node/tasks/cancel/CancelTasksRequestBuilder.java | 2 ++ .../cluster/node/tasks/cancel/CancelTasksResponse.java | 2 ++ .../node/tasks/cancel/TransportCancelTasksAction.java | 2 ++ .../action/admin/cluster/node/tasks/get/GetTaskAction.java | 2 ++ 
.../admin/cluster/node/tasks/get/GetTaskRequest.java | 2 ++ .../cluster/node/tasks/get/GetTaskRequestBuilder.java | 2 ++ .../admin/cluster/node/tasks/get/GetTaskResponse.java | 2 ++ .../cluster/node/tasks/get/TransportGetTaskAction.java | 2 ++ .../admin/cluster/node/tasks/list/ListTasksAction.java | 2 ++ .../admin/cluster/node/tasks/list/ListTasksRequest.java | 2 ++ .../cluster/node/tasks/list/ListTasksRequestBuilder.java | 2 ++ .../admin/cluster/node/tasks/list/ListTasksResponse.java | 2 ++ .../action/admin/cluster/node/tasks/list/TaskGroup.java | 2 ++ .../cluster/node/tasks/list/TransportListTasksAction.java | 5 +++++ .../action/admin/cluster/node/usage/NodeUsage.java | 5 +++++ .../action/admin/cluster/node/usage/NodesUsageAction.java | 5 +++++ .../action/admin/cluster/node/usage/NodesUsageRequest.java | 5 +++++ .../admin/cluster/node/usage/NodesUsageRequestBuilder.java | 5 +++++ .../admin/cluster/node/usage/NodesUsageResponse.java | 2 ++ .../cluster/node/usage/TransportNodesUsageAction.java | 5 +++++ .../action/admin/cluster/remote/RemoteInfoAction.java | 5 +++++ .../action/admin/cluster/remote/RemoteInfoRequest.java | 5 +++++ .../admin/cluster/remote/RemoteInfoRequestBuilder.java | 5 +++++ .../action/admin/cluster/remote/RemoteInfoResponse.java | 5 +++++ .../admin/cluster/remote/TransportRemoteInfoAction.java | 5 +++++ .../repositories/cleanup/CleanupRepositoryAction.java | 5 +++++ .../repositories/cleanup/CleanupRepositoryRequest.java | 5 +++++ .../cleanup/CleanupRepositoryRequestBuilder.java | 5 +++++ .../repositories/cleanup/CleanupRepositoryResponse.java | 5 +++++ .../cleanup/TransportCleanupRepositoryAction.java | 2 ++ .../repositories/delete/DeleteRepositoryAction.java | 2 ++ .../repositories/delete/DeleteRepositoryRequest.java | 2 ++ .../delete/DeleteRepositoryRequestBuilder.java | 2 ++ .../delete/TransportDeleteRepositoryAction.java | 2 ++ .../cluster/repositories/get/GetRepositoriesAction.java | 2 ++ 
.../cluster/repositories/get/GetRepositoriesRequest.java | 2 ++ .../repositories/get/GetRepositoriesRequestBuilder.java | 2 ++ .../cluster/repositories/get/GetRepositoriesResponse.java | 2 ++ .../repositories/get/TransportGetRepositoriesAction.java | 2 ++ .../cluster/repositories/put/PutRepositoryAction.java | 2 ++ .../cluster/repositories/put/PutRepositoryRequest.java | 2 ++ .../repositories/put/PutRepositoryRequestBuilder.java | 2 ++ .../repositories/put/TransportPutRepositoryAction.java | 2 ++ .../verify/TransportVerifyRepositoryAction.java | 2 ++ .../repositories/verify/VerifyRepositoryAction.java | 2 ++ .../repositories/verify/VerifyRepositoryRequest.java | 2 ++ .../verify/VerifyRepositoryRequestBuilder.java | 2 ++ .../repositories/verify/VerifyRepositoryResponse.java | 2 ++ .../action/admin/cluster/reroute/ClusterRerouteAction.java | 5 +++++ .../admin/cluster/reroute/ClusterRerouteRequest.java | 2 ++ .../cluster/reroute/ClusterRerouteRequestBuilder.java | 2 ++ .../admin/cluster/reroute/ClusterRerouteResponse.java | 2 ++ .../cluster/reroute/TransportClusterRerouteAction.java | 5 +++++ .../admin/cluster/settings/ClusterGetSettingsRequest.java | 2 ++ .../admin/cluster/settings/ClusterGetSettingsResponse.java | 2 ++ .../cluster/settings/ClusterUpdateSettingsAction.java | 5 +++++ .../cluster/settings/ClusterUpdateSettingsRequest.java | 2 ++ .../settings/ClusterUpdateSettingsRequestBuilder.java | 2 ++ .../cluster/settings/ClusterUpdateSettingsResponse.java | 2 ++ .../action/admin/cluster/settings/SettingsUpdater.java | 2 ++ .../settings/TransportClusterUpdateSettingsAction.java | 5 +++++ .../admin/cluster/shards/ClusterSearchShardsAction.java | 5 +++++ .../admin/cluster/shards/ClusterSearchShardsGroup.java | 5 +++++ .../admin/cluster/shards/ClusterSearchShardsRequest.java | 5 +++++ .../cluster/shards/ClusterSearchShardsRequestBuilder.java | 5 +++++ .../admin/cluster/shards/ClusterSearchShardsResponse.java | 5 +++++ 
.../cluster/shards/TransportClusterSearchShardsAction.java | 5 +++++ .../admin/cluster/snapshots/clone/CloneSnapshotAction.java | 5 +++++ .../cluster/snapshots/clone/CloneSnapshotRequest.java | 5 +++++ .../snapshots/clone/CloneSnapshotRequestBuilder.java | 5 +++++ .../snapshots/clone/TransportCloneSnapshotAction.java | 2 ++ .../cluster/snapshots/create/CreateSnapshotAction.java | 2 ++ .../cluster/snapshots/create/CreateSnapshotRequest.java | 2 ++ .../snapshots/create/CreateSnapshotRequestBuilder.java | 2 ++ .../cluster/snapshots/create/CreateSnapshotResponse.java | 2 ++ .../snapshots/create/TransportCreateSnapshotAction.java | 2 ++ .../cluster/snapshots/delete/DeleteSnapshotAction.java | 2 ++ .../cluster/snapshots/delete/DeleteSnapshotRequest.java | 2 ++ .../snapshots/delete/DeleteSnapshotRequestBuilder.java | 2 ++ .../snapshots/delete/TransportDeleteSnapshotAction.java | 2 ++ .../admin/cluster/snapshots/get/GetSnapshotsAction.java | 2 ++ .../admin/cluster/snapshots/get/GetSnapshotsRequest.java | 2 ++ .../cluster/snapshots/get/GetSnapshotsRequestBuilder.java | 2 ++ .../admin/cluster/snapshots/get/GetSnapshotsResponse.java | 2 ++ .../cluster/snapshots/get/TransportGetSnapshotsAction.java | 2 ++ .../snapshots/restore/RestoreClusterStateListener.java | 5 +++++ .../cluster/snapshots/restore/RestoreSnapshotAction.java | 2 ++ .../cluster/snapshots/restore/RestoreSnapshotRequest.java | 2 ++ .../snapshots/restore/RestoreSnapshotRequestBuilder.java | 2 ++ .../cluster/snapshots/restore/RestoreSnapshotResponse.java | 2 ++ .../snapshots/restore/TransportRestoreSnapshotAction.java | 2 ++ .../cluster/snapshots/status/SnapshotIndexShardStage.java | 5 +++++ .../cluster/snapshots/status/SnapshotIndexShardStatus.java | 5 +++++ .../cluster/snapshots/status/SnapshotIndexStatus.java | 2 ++ .../cluster/snapshots/status/SnapshotShardsStats.java | 2 ++ .../admin/cluster/snapshots/status/SnapshotStats.java | 5 +++++ .../admin/cluster/snapshots/status/SnapshotStatus.java | 2 ++ 
.../cluster/snapshots/status/SnapshotsStatusAction.java | 2 ++ .../cluster/snapshots/status/SnapshotsStatusRequest.java | 2 ++ .../snapshots/status/SnapshotsStatusRequestBuilder.java | 2 ++ .../cluster/snapshots/status/SnapshotsStatusResponse.java | 2 ++ .../snapshots/status/TransportNodesSnapshotsStatus.java | 2 ++ .../snapshots/status/TransportSnapshotsStatusAction.java | 5 +++++ .../action/admin/cluster/state/ClusterStateAction.java | 5 +++++ .../action/admin/cluster/state/ClusterStateRequest.java | 5 +++++ .../admin/cluster/state/ClusterStateRequestBuilder.java | 5 +++++ .../action/admin/cluster/state/ClusterStateResponse.java | 2 ++ .../admin/cluster/state/TransportClusterStateAction.java | 5 +++++ .../action/admin/cluster/stats/AnalysisStats.java | 2 ++ .../action/admin/cluster/stats/ClusterStatsAction.java | 5 +++++ .../action/admin/cluster/stats/ClusterStatsIndices.java | 5 +++++ .../admin/cluster/stats/ClusterStatsNodeResponse.java | 5 +++++ .../action/admin/cluster/stats/ClusterStatsNodes.java | 5 +++++ .../action/admin/cluster/stats/ClusterStatsRequest.java | 2 ++ .../admin/cluster/stats/ClusterStatsRequestBuilder.java | 5 +++++ .../action/admin/cluster/stats/ClusterStatsResponse.java | 5 +++++ .../action/admin/cluster/stats/IndexFeatureStats.java | 2 ++ .../action/admin/cluster/stats/MappingStats.java | 2 ++ .../action/admin/cluster/stats/MappingVisitor.java | 5 +++++ .../admin/cluster/stats/TransportClusterStatsAction.java | 5 +++++ .../cluster/storedscripts/DeleteStoredScriptAction.java | 5 +++++ .../cluster/storedscripts/DeleteStoredScriptRequest.java | 5 +++++ .../storedscripts/DeleteStoredScriptRequestBuilder.java | 5 +++++ .../cluster/storedscripts/GetScriptContextAction.java | 5 +++++ .../cluster/storedscripts/GetScriptContextRequest.java | 5 +++++ .../cluster/storedscripts/GetScriptContextResponse.java | 5 +++++ .../cluster/storedscripts/GetScriptLanguageAction.java | 5 +++++ .../cluster/storedscripts/GetScriptLanguageRequest.java | 5 +++++ 
.../cluster/storedscripts/GetScriptLanguageResponse.java | 5 +++++ .../admin/cluster/storedscripts/GetStoredScriptAction.java | 5 +++++ .../cluster/storedscripts/GetStoredScriptRequest.java | 5 +++++ .../storedscripts/GetStoredScriptRequestBuilder.java | 5 +++++ .../cluster/storedscripts/GetStoredScriptResponse.java | 5 +++++ .../admin/cluster/storedscripts/PutStoredScriptAction.java | 5 +++++ .../cluster/storedscripts/PutStoredScriptRequest.java | 5 +++++ .../storedscripts/PutStoredScriptRequestBuilder.java | 5 +++++ .../storedscripts/TransportDeleteStoredScriptAction.java | 5 +++++ .../storedscripts/TransportGetScriptContextAction.java | 5 +++++ .../storedscripts/TransportGetScriptLanguageAction.java | 5 +++++ .../storedscripts/TransportGetStoredScriptAction.java | 5 +++++ .../storedscripts/TransportPutStoredScriptAction.java | 5 +++++ .../admin/cluster/tasks/PendingClusterTasksAction.java | 5 +++++ .../admin/cluster/tasks/PendingClusterTasksRequest.java | 5 +++++ .../cluster/tasks/PendingClusterTasksRequestBuilder.java | 5 +++++ .../admin/cluster/tasks/PendingClusterTasksResponse.java | 5 +++++ .../cluster/tasks/TransportPendingClusterTasksAction.java | 5 +++++ .../org/opensearch/action/admin/indices/alias/Alias.java | 2 ++ .../action/admin/indices/alias/IndicesAliasesAction.java | 5 +++++ .../alias/IndicesAliasesClusterStateUpdateRequest.java | 2 ++ .../action/admin/indices/alias/IndicesAliasesRequest.java | 2 ++ .../admin/indices/alias/IndicesAliasesRequestBuilder.java | 2 ++ .../admin/indices/alias/TransportIndicesAliasesAction.java | 2 ++ .../admin/indices/alias/get/BaseAliasesRequestBuilder.java | 5 +++++ .../action/admin/indices/alias/get/GetAliasesAction.java | 5 +++++ .../action/admin/indices/alias/get/GetAliasesRequest.java | 5 +++++ .../admin/indices/alias/get/GetAliasesRequestBuilder.java | 5 +++++ .../action/admin/indices/alias/get/GetAliasesResponse.java | 5 +++++ .../admin/indices/alias/get/TransportGetAliasesAction.java | 5 +++++ 
.../action/admin/indices/analyze/AnalyzeAction.java | 5 +++++ .../admin/indices/analyze/AnalyzeRequestBuilder.java | 5 +++++ .../admin/indices/analyze/TransportAnalyzeAction.java | 2 ++ .../admin/indices/cache/clear/ClearIndicesCacheAction.java | 5 +++++ .../indices/cache/clear/ClearIndicesCacheRequest.java | 5 +++++ .../cache/clear/ClearIndicesCacheRequestBuilder.java | 5 +++++ .../indices/cache/clear/ClearIndicesCacheResponse.java | 2 ++ .../cache/clear/TransportClearIndicesCacheAction.java | 2 ++ .../action/admin/indices/close/CloseIndexAction.java | 5 +++++ .../indices/close/CloseIndexClusterStateUpdateRequest.java | 2 ++ .../action/admin/indices/close/CloseIndexRequest.java | 2 ++ .../admin/indices/close/CloseIndexRequestBuilder.java | 2 ++ .../action/admin/indices/close/CloseIndexResponse.java | 5 +++++ .../admin/indices/close/TransportCloseIndexAction.java | 2 ++ .../close/TransportVerifyShardBeforeCloseAction.java | 5 +++++ .../action/admin/indices/create/AutoCreateAction.java | 2 ++ .../action/admin/indices/create/CreateIndexAction.java | 5 +++++ .../create/CreateIndexClusterStateUpdateRequest.java | 2 ++ .../action/admin/indices/create/CreateIndexRequest.java | 2 ++ .../admin/indices/create/CreateIndexRequestBuilder.java | 2 ++ .../action/admin/indices/create/CreateIndexResponse.java | 2 ++ .../admin/indices/create/TransportCreateIndexAction.java | 2 ++ .../action/admin/indices/dangling/DanglingIndexInfo.java | 2 ++ .../indices/dangling/delete/DeleteDanglingIndexAction.java | 2 ++ .../dangling/delete/DeleteDanglingIndexRequest.java | 2 ++ .../delete/TransportDeleteDanglingIndexAction.java | 2 ++ .../indices/dangling/find/FindDanglingIndexAction.java | 2 ++ .../indices/dangling/find/FindDanglingIndexRequest.java | 5 +++++ .../indices/dangling/find/FindDanglingIndexResponse.java | 2 ++ .../dangling/find/NodeFindDanglingIndexRequest.java | 2 ++ .../dangling/find/NodeFindDanglingIndexResponse.java | 2 ++ .../dangling/find/TransportFindDanglingIndexAction.java 
| 2 ++ .../dangling/import_index/ImportDanglingIndexAction.java | 2 ++ .../dangling/import_index/ImportDanglingIndexRequest.java | 2 ++ .../import_index/TransportImportDanglingIndexAction.java | 2 ++ .../indices/dangling/list/ListDanglingIndicesAction.java | 2 ++ .../indices/dangling/list/ListDanglingIndicesRequest.java | 5 +++++ .../indices/dangling/list/ListDanglingIndicesResponse.java | 2 ++ .../dangling/list/NodeListDanglingIndicesRequest.java | 2 ++ .../dangling/list/NodeListDanglingIndicesResponse.java | 2 ++ .../dangling/list/TransportListDanglingIndicesAction.java | 2 ++ .../admin/indices/datastream/CreateDataStreamAction.java | 5 +++++ .../admin/indices/datastream/DataStreamsStatsAction.java | 5 +++++ .../admin/indices/datastream/DeleteDataStreamAction.java | 5 +++++ .../admin/indices/datastream/GetDataStreamAction.java | 5 +++++ .../action/admin/indices/delete/DeleteIndexAction.java | 5 +++++ .../delete/DeleteIndexClusterStateUpdateRequest.java | 2 ++ .../action/admin/indices/delete/DeleteIndexRequest.java | 2 ++ .../admin/indices/delete/DeleteIndexRequestBuilder.java | 5 +++++ .../admin/indices/delete/TransportDeleteIndexAction.java | 2 ++ .../admin/indices/exists/indices/IndicesExistsAction.java | 5 +++++ .../admin/indices/exists/indices/IndicesExistsRequest.java | 5 +++++ .../exists/indices/IndicesExistsRequestBuilder.java | 5 +++++ .../indices/exists/indices/IndicesExistsResponse.java | 5 +++++ .../exists/indices/TransportIndicesExistsAction.java | 2 ++ .../opensearch/action/admin/indices/flush/FlushAction.java | 5 +++++ .../action/admin/indices/flush/FlushRequest.java | 2 ++ .../action/admin/indices/flush/FlushRequestBuilder.java | 5 +++++ .../action/admin/indices/flush/FlushResponse.java | 2 ++ .../action/admin/indices/flush/ShardFlushRequest.java | 5 +++++ .../action/admin/indices/flush/TransportFlushAction.java | 2 ++ .../admin/indices/flush/TransportShardFlushAction.java | 5 +++++ .../action/admin/indices/forcemerge/ForceMergeAction.java | 5 
+++++ .../action/admin/indices/forcemerge/ForceMergeRequest.java | 2 ++ .../admin/indices/forcemerge/ForceMergeRequestBuilder.java | 2 ++ .../admin/indices/forcemerge/ForceMergeResponse.java | 2 ++ .../indices/forcemerge/TransportForceMergeAction.java | 2 ++ .../action/admin/indices/get/GetIndexAction.java | 5 +++++ .../action/admin/indices/get/GetIndexRequest.java | 2 ++ .../action/admin/indices/get/GetIndexRequestBuilder.java | 5 +++++ .../action/admin/indices/get/GetIndexResponse.java | 2 ++ .../action/admin/indices/get/TransportGetIndexAction.java | 2 ++ .../admin/indices/mapping/get/GetFieldMappingsAction.java | 5 +++++ .../indices/mapping/get/GetFieldMappingsIndexRequest.java | 5 +++++ .../admin/indices/mapping/get/GetFieldMappingsRequest.java | 2 ++ .../mapping/get/GetFieldMappingsRequestBuilder.java | 6 +++++- .../indices/mapping/get/GetFieldMappingsResponse.java | 2 ++ .../admin/indices/mapping/get/GetMappingsAction.java | 5 +++++ .../admin/indices/mapping/get/GetMappingsRequest.java | 5 +++++ .../indices/mapping/get/GetMappingsRequestBuilder.java | 5 +++++ .../admin/indices/mapping/get/GetMappingsResponse.java | 5 +++++ .../mapping/get/TransportGetFieldMappingsAction.java | 5 +++++ .../mapping/get/TransportGetFieldMappingsIndexAction.java | 2 ++ .../indices/mapping/get/TransportGetMappingsAction.java | 5 +++++ .../admin/indices/mapping/put/AutoPutMappingAction.java | 5 +++++ .../action/admin/indices/mapping/put/PutMappingAction.java | 5 +++++ .../mapping/put/PutMappingClusterStateUpdateRequest.java | 2 ++ .../admin/indices/mapping/put/PutMappingRequest.java | 2 ++ .../indices/mapping/put/PutMappingRequestBuilder.java | 2 ++ .../indices/mapping/put/TransportAutoPutMappingAction.java | 5 +++++ .../indices/mapping/put/TransportPutMappingAction.java | 2 ++ .../action/admin/indices/open/OpenIndexAction.java | 5 +++++ .../indices/open/OpenIndexClusterStateUpdateRequest.java | 2 ++ .../action/admin/indices/open/OpenIndexRequest.java | 2 ++ 
.../action/admin/indices/open/OpenIndexRequestBuilder.java | 2 ++ .../action/admin/indices/open/OpenIndexResponse.java | 2 ++ .../admin/indices/open/TransportOpenIndexAction.java | 2 ++ .../action/admin/indices/readonly/AddIndexBlockAction.java | 5 +++++ .../readonly/AddIndexBlockClusterStateUpdateRequest.java | 2 ++ .../admin/indices/readonly/AddIndexBlockRequest.java | 2 ++ .../indices/readonly/AddIndexBlockRequestBuilder.java | 2 ++ .../admin/indices/readonly/AddIndexBlockResponse.java | 5 +++++ .../indices/readonly/TransportAddIndexBlockAction.java | 2 ++ .../readonly/TransportVerifyShardIndexBlockAction.java | 2 ++ .../action/admin/indices/recovery/RecoveryAction.java | 2 ++ .../action/admin/indices/recovery/RecoveryRequest.java | 2 ++ .../admin/indices/recovery/RecoveryRequestBuilder.java | 2 ++ .../action/admin/indices/recovery/RecoveryResponse.java | 2 ++ .../admin/indices/recovery/TransportRecoveryAction.java | 2 ++ .../action/admin/indices/refresh/RefreshAction.java | 5 +++++ .../action/admin/indices/refresh/RefreshRequest.java | 2 ++ .../admin/indices/refresh/RefreshRequestBuilder.java | 2 ++ .../action/admin/indices/refresh/RefreshResponse.java | 2 ++ .../admin/indices/refresh/TransportRefreshAction.java | 2 ++ .../admin/indices/refresh/TransportShardRefreshAction.java | 5 +++++ .../action/admin/indices/resolve/ResolveIndexAction.java | 5 +++++ .../action/admin/indices/rollover/Condition.java | 2 ++ .../action/admin/indices/rollover/MaxAgeCondition.java | 2 ++ .../action/admin/indices/rollover/MaxDocsCondition.java | 2 ++ .../action/admin/indices/rollover/MaxSizeCondition.java | 2 ++ .../admin/indices/rollover/MetadataRolloverService.java | 2 ++ .../action/admin/indices/rollover/RolloverAction.java | 5 +++++ .../action/admin/indices/rollover/RolloverInfo.java | 2 ++ .../action/admin/indices/rollover/RolloverRequest.java | 2 ++ .../admin/indices/rollover/RolloverRequestBuilder.java | 5 +++++ .../action/admin/indices/rollover/RolloverResponse.java | 2 ++ 
.../admin/indices/rollover/TransportRolloverAction.java | 2 ++ .../action/admin/indices/segments/IndexSegments.java | 5 +++++ .../action/admin/indices/segments/IndexShardSegments.java | 5 +++++ .../admin/indices/segments/IndicesSegmentResponse.java | 5 +++++ .../admin/indices/segments/IndicesSegmentsAction.java | 5 +++++ .../admin/indices/segments/IndicesSegmentsRequest.java | 5 +++++ .../indices/segments/IndicesSegmentsRequestBuilder.java | 5 +++++ .../action/admin/indices/segments/ShardSegments.java | 5 +++++ .../indices/segments/TransportIndicesSegmentsAction.java | 5 +++++ .../admin/indices/settings/get/GetSettingsAction.java | 5 +++++ .../admin/indices/settings/get/GetSettingsRequest.java | 5 +++++ .../indices/settings/get/GetSettingsRequestBuilder.java | 5 +++++ .../admin/indices/settings/get/GetSettingsResponse.java | 5 +++++ .../indices/settings/get/TransportGetSettingsAction.java | 5 +++++ .../settings/put/TransportUpdateSettingsAction.java | 5 +++++ .../admin/indices/settings/put/UpdateSettingsAction.java | 5 +++++ .../put/UpdateSettingsClusterStateUpdateRequest.java | 2 ++ .../admin/indices/settings/put/UpdateSettingsRequest.java | 2 ++ .../indices/settings/put/UpdateSettingsRequestBuilder.java | 2 ++ .../indices/shards/IndicesShardStoreRequestBuilder.java | 2 ++ .../admin/indices/shards/IndicesShardStoresAction.java | 2 ++ .../admin/indices/shards/IndicesShardStoresRequest.java | 2 ++ .../admin/indices/shards/IndicesShardStoresResponse.java | 2 ++ .../indices/shards/TransportIndicesShardStoresAction.java | 2 ++ .../action/admin/indices/shrink/ResizeAction.java | 5 +++++ .../action/admin/indices/shrink/ResizeRequest.java | 2 ++ .../action/admin/indices/shrink/ResizeRequestBuilder.java | 5 +++++ .../action/admin/indices/shrink/ResizeResponse.java | 2 ++ .../opensearch/action/admin/indices/shrink/ResizeType.java | 2 ++ .../action/admin/indices/shrink/TransportResizeAction.java | 2 ++ .../opensearch/action/admin/indices/stats/CommonStats.java | 5 +++++ 
.../action/admin/indices/stats/CommonStatsFlags.java | 5 +++++ .../action/admin/indices/stats/IndexShardStats.java | 5 +++++ .../opensearch/action/admin/indices/stats/IndexStats.java | 5 +++++ .../action/admin/indices/stats/IndicesStatsAction.java | 5 +++++ .../action/admin/indices/stats/IndicesStatsRequest.java | 2 ++ .../admin/indices/stats/IndicesStatsRequestBuilder.java | 2 ++ .../action/admin/indices/stats/IndicesStatsResponse.java | 5 +++++ .../opensearch/action/admin/indices/stats/ShardStats.java | 5 +++++ .../admin/indices/stats/TransportIndicesStatsAction.java | 5 +++++ .../template/delete/DeleteComponentTemplateAction.java | 5 +++++ .../delete/DeleteComposableIndexTemplateAction.java | 5 +++++ .../indices/template/delete/DeleteIndexTemplateAction.java | 5 +++++ .../template/delete/DeleteIndexTemplateRequest.java | 2 ++ .../template/delete/DeleteIndexTemplateRequestBuilder.java | 5 +++++ .../delete/TransportDeleteComponentTemplateAction.java | 5 +++++ .../TransportDeleteComposableIndexTemplateAction.java | 5 +++++ .../delete/TransportDeleteIndexTemplateAction.java | 2 ++ .../indices/template/get/GetComponentTemplateAction.java | 2 ++ .../template/get/GetComposableIndexTemplateAction.java | 5 +++++ .../indices/template/get/GetIndexTemplatesAction.java | 5 +++++ .../indices/template/get/GetIndexTemplatesRequest.java | 2 ++ .../template/get/GetIndexTemplatesRequestBuilder.java | 5 +++++ .../indices/template/get/GetIndexTemplatesResponse.java | 5 +++++ .../template/get/TransportGetComponentTemplateAction.java | 5 +++++ .../get/TransportGetComposableIndexTemplateAction.java | 5 +++++ .../template/get/TransportGetIndexTemplatesAction.java | 5 +++++ .../indices/template/post/SimulateIndexTemplateAction.java | 6 ++++++ .../template/post/SimulateIndexTemplateRequest.java | 6 ++++++ .../template/post/SimulateIndexTemplateResponse.java | 2 ++ .../indices/template/post/SimulateTemplateAction.java | 2 ++ .../post/TransportSimulateIndexTemplateAction.java | 6 ++++++ 
.../template/post/TransportSimulateTemplateAction.java | 2 ++ .../indices/template/put/PutComponentTemplateAction.java | 2 ++ .../template/put/PutComposableIndexTemplateAction.java | 5 +++++ .../admin/indices/template/put/PutIndexTemplateAction.java | 5 +++++ .../indices/template/put/PutIndexTemplateRequest.java | 2 ++ .../template/put/PutIndexTemplateRequestBuilder.java | 5 +++++ .../template/put/TransportPutComponentTemplateAction.java | 5 +++++ .../put/TransportPutComposableIndexTemplateAction.java | 5 +++++ .../template/put/TransportPutIndexTemplateAction.java | 2 ++ .../admin/indices/upgrade/get/IndexShardUpgradeStatus.java | 5 +++++ .../admin/indices/upgrade/get/IndexUpgradeStatus.java | 5 +++++ .../admin/indices/upgrade/get/ShardUpgradeStatus.java | 5 +++++ .../indices/upgrade/get/TransportUpgradeStatusAction.java | 5 +++++ .../admin/indices/upgrade/get/UpgradeStatusAction.java | 5 +++++ .../admin/indices/upgrade/get/UpgradeStatusRequest.java | 5 +++++ .../indices/upgrade/get/UpgradeStatusRequestBuilder.java | 5 +++++ .../admin/indices/upgrade/get/UpgradeStatusResponse.java | 5 +++++ .../admin/indices/upgrade/post/ShardUpgradeResult.java | 5 +++++ .../admin/indices/upgrade/post/TransportUpgradeAction.java | 2 ++ .../upgrade/post/TransportUpgradeSettingsAction.java | 5 +++++ .../action/admin/indices/upgrade/post/UpgradeAction.java | 2 ++ .../action/admin/indices/upgrade/post/UpgradeRequest.java | 2 ++ .../admin/indices/upgrade/post/UpgradeRequestBuilder.java | 2 ++ .../action/admin/indices/upgrade/post/UpgradeResponse.java | 2 +- .../admin/indices/upgrade/post/UpgradeSettingsAction.java | 5 +++++ .../post/UpgradeSettingsClusterStateUpdateRequest.java | 2 ++ .../admin/indices/upgrade/post/UpgradeSettingsRequest.java | 2 ++ .../admin/indices/validate/query/QueryExplanation.java | 5 +++++ .../indices/validate/query/ShardValidateQueryRequest.java | 2 ++ .../indices/validate/query/ShardValidateQueryResponse.java | 2 +- 
.../validate/query/TransportValidateQueryAction.java | 5 +++++ .../admin/indices/validate/query/ValidateQueryAction.java | 5 +++++ .../admin/indices/validate/query/ValidateQueryRequest.java | 2 ++ .../validate/query/ValidateQueryRequestBuilder.java | 5 +++++ .../indices/validate/query/ValidateQueryResponse.java | 2 +- .../src/main/java/org/opensearch/plugins/ActionPlugin.java | 5 +++++ .../main/java/org/opensearch/plugins/ExtensiblePlugin.java | 5 +++++ .../main/java/org/opensearch/plugins/PluginsService.java | 5 +++++ .../org/opensearch/transport/RequestHandlerRegistry.java | 7 ++++++- .../main/java/org/opensearch/transport/StatsTracker.java | 7 ++++++- 435 files changed, 1526 insertions(+), 8 deletions(-) diff --git a/gradle/missing-javadoc.gradle b/gradle/missing-javadoc.gradle index 2dea9a0e11d91..29fede8967a59 100644 --- a/gradle/missing-javadoc.gradle +++ b/gradle/missing-javadoc.gradle @@ -180,7 +180,7 @@ configure(project(":server")) { project.tasks.withType(MissingJavadocTask) { isExcluded = true // TODO: reenable after fixing missing javadocs - // javadocMissingLevel = "class" + javadocMissingLevel = "class" } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainAction.java index 4c95d1e336b3a..0e99513a8fc7e 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainAction.java @@ -36,6 +36,8 @@ /** * ActionType for explaining shard allocation for a shard in the cluster + * + * @opensearch.internal */ public class ClusterAllocationExplainAction extends ActionType { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java 
b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java index 1a055fa0d14c2..0102cc517dbcd 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java @@ -47,6 +47,8 @@ /** * A request to explain the allocation of a shard in the cluster + * + * @opensearch.internal */ public class ClusterAllocationExplainRequest extends MasterNodeRequest { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequestBuilder.java index 78a22cd59b284..240520241c42b 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequestBuilder.java @@ -37,6 +37,8 @@ /** * Builder for requests to explain the allocation of a shard in the cluster + * + * @opensearch.internal */ public class ClusterAllocationExplainRequestBuilder extends MasterNodeOperationRequestBuilder< ClusterAllocationExplainRequest, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainResponse.java index 2fc72ccb13e28..5ce26aba3b395 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainResponse.java @@ -40,6 +40,8 @@ /** * Explanation response for a shard in the cluster + * + * @opensearch.internal */ public class ClusterAllocationExplainResponse extends 
ActionResponse { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java index e9505c7fc83c2..e0fb92560ebf4 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java @@ -57,6 +57,8 @@ * A {@code ClusterAllocationExplanation} is an explanation of why a shard is unassigned, * or if it is not unassigned, then which nodes it could possibly be relocated to. * It is an immutable class. + * + * @opensearch.internal */ public final class ClusterAllocationExplanation implements ToXContentObject, Writeable { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java index baa2ce0847501..233cc506a32c1 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java @@ -65,6 +65,8 @@ /** * The {@code TransportClusterAllocationExplainAction} is responsible for actually executing the explanation of a shard's allocation on the * cluster-manager node in the cluster. 
+ * + * @opensearch.internal */ public class TransportClusterAllocationExplainAction extends TransportMasterNodeAction< ClusterAllocationExplainRequest, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsAction.java index 963a5d567c0bc..4a4f28b360801 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsAction.java @@ -33,7 +33,12 @@ import org.opensearch.action.ActionType; -public class AddVotingConfigExclusionsAction extends ActionType { +/** + * Transport endpoint for adding exclusions to voting config + * + * @opensearch.internal + */ +public final class AddVotingConfigExclusionsAction extends ActionType { public static final AddVotingConfigExclusionsAction INSTANCE = new AddVotingConfigExclusionsAction(); public static final String NAME = "cluster:admin/voting_config/add_exclusions"; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java index e0e5bf622b99e..ba44fdfeb8ff6 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java @@ -56,6 +56,8 @@ /** * A request to add voting config exclusions for certain cluster-manager-eligible nodes, and wait for these nodes to be removed from the voting * configuration. 
+ * + * @opensearch.internal */ public class AddVotingConfigExclusionsRequest extends MasterNodeRequest { public static final String DEPRECATION_MESSAGE = "nodeDescription is deprecated and will be removed, use nodeIds or nodeNames instead"; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsResponse.java index 0493de7c439de..22b2d54bfd69d 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsResponse.java @@ -42,6 +42,8 @@ /** * A response to {@link AddVotingConfigExclusionsRequest} indicating that voting config exclusions have been added for the requested nodes * and these nodes have been removed from the voting configuration. + * + * @opensearch.internal */ public class AddVotingConfigExclusionsResponse extends ActionResponse implements ToXContentObject { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsAction.java index ce31abceb6738..e0fcad8f8ccd8 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsAction.java @@ -33,6 +33,11 @@ import org.opensearch.action.ActionType; +/** + * Transport endpoint for clearing exclusions to voting config + * + * @opensearch.internal + */ public class ClearVotingConfigExclusionsAction extends ActionType { public static final ClearVotingConfigExclusionsAction INSTANCE = new ClearVotingConfigExclusionsAction(); public static final String NAME = 
"cluster:admin/voting_config/clear_exclusions"; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsRequest.java index 9ccccc88f3365..cbe19abe069b2 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsRequest.java @@ -42,6 +42,8 @@ /** * A request to clear the voting config exclusions from the cluster state, optionally waiting for these nodes to be removed from the * cluster first. + * + * @opensearch.internal */ public class ClearVotingConfigExclusionsRequest extends MasterNodeRequest { private boolean waitForRemoval = true; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsResponse.java index ed07f33b6c6a6..a8744bc8f666f 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsResponse.java @@ -42,6 +42,8 @@ /** * A response to {@link ClearVotingConfigExclusionsRequest} indicating that voting config exclusions have been cleared from the * cluster state. 
+ * + * @opensearch.internal */ public class ClearVotingConfigExclusionsResponse extends ActionResponse implements ToXContentObject { public ClearVotingConfigExclusionsResponse() {} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsAction.java index 30ba799db044a..c9b27f4822fcd 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsAction.java @@ -66,6 +66,11 @@ import java.util.function.Predicate; import java.util.stream.Collectors; +/** + * Transport endpoint action for adding exclusions to voting config + * + * @opensearch.internal + */ public class TransportAddVotingConfigExclusionsAction extends TransportMasterNodeAction< AddVotingConfigExclusionsRequest, AddVotingConfigExclusionsResponse> { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsAction.java index 31a1d07608071..3791b3e8301ee 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsAction.java @@ -60,6 +60,11 @@ import java.io.IOException; import java.util.function.Predicate; +/** + * Transport endpoint action for clearing exclusions to voting config + * + * @opensearch.internal + */ public class TransportClearVotingConfigExclusionsAction extends TransportMasterNodeAction< ClearVotingConfigExclusionsRequest, ClearVotingConfigExclusionsResponse> { diff --git 
a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthAction.java index a37c0b0f1cb88..5c712b801f927 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthAction.java @@ -34,6 +34,11 @@ import org.opensearch.action.ActionType; +/** + * Transport endpoint action for obtaining cluster health + * + * @opensearch.internal + */ public class ClusterHealthAction extends ActionType { public static final ClusterHealthAction INSTANCE = new ClusterHealthAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequest.java index 204a182edafab..d6d494c4261bd 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequest.java @@ -48,6 +48,11 @@ import java.util.Objects; import java.util.concurrent.TimeUnit; +/** + * Transport request for requesting cluster health + * + * @opensearch.internal + */ public class ClusterHealthRequest extends MasterNodeReadRequest implements IndicesRequest.Replaceable { private String[] indices; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java index 86fee83130bb3..d1e68e0a22510 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java @@ -40,6 +40,11 @@ import org.opensearch.common.Priority; import org.opensearch.common.unit.TimeValue; 
+/** + * Builder for requesting cluster health + * + * @opensearch.internal + */ public class ClusterHealthRequestBuilder extends MasterNodeReadOperationRequestBuilder< ClusterHealthRequest, ClusterHealthResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java index ce731fd1c8aca..e4ec75fb7045a 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java @@ -60,6 +60,11 @@ import static org.opensearch.common.xcontent.ConstructingObjectParser.constructorArg; import static org.opensearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; +/** + * Transport response for Cluster Health + * + * @opensearch.internal + */ public class ClusterHealthResponse extends ActionResponse implements StatusToXContentObject { private static final String CLUSTER_NAME = "cluster_name"; private static final String STATUS = "status"; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java index 98c264e54a1d0..09082536dfbbb 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java @@ -67,6 +67,11 @@ import java.util.function.Consumer; import java.util.function.Predicate; +/** + * Transport action for obtaining Cluster Health + * + * @opensearch.internal + */ public class TransportClusterHealthAction extends TransportMasterNodeReadAction { private static final Logger logger = LogManager.getLogger(TransportClusterHealthAction.class); diff --git 
a/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodeHotThreads.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodeHotThreads.java index 7d873bd22719d..7de148b0a1000 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodeHotThreads.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodeHotThreads.java @@ -39,6 +39,11 @@ import java.io.IOException; +/** + * Transport response for OpenSearch Hot Threads + * + * @opensearch.internal + */ public class NodeHotThreads extends BaseNodeResponse { private String hotThreads; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsAction.java index 1a09bccf0b34e..64689b2e3bc6d 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsAction.java @@ -34,6 +34,11 @@ import org.opensearch.action.ActionType; +/** + * Transport action for requesting OpenSearch Hot Threads + * + * @opensearch.internal + */ public class NodesHotThreadsAction extends ActionType { public static final NodesHotThreadsAction INSTANCE = new NodesHotThreadsAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java index 5f5c566e7b6d2..34d0d812ae609 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java @@ -40,6 +40,11 @@ import java.io.IOException; import java.util.concurrent.TimeUnit; +/** + * Transport request for 
OpenSearch Hot Threads + * + * @opensearch.internal + */ public class NodesHotThreadsRequest extends BaseNodesRequest { int threads = 3; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequestBuilder.java index 54a99de61c854..3639439dd3fb8 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequestBuilder.java @@ -36,6 +36,11 @@ import org.opensearch.client.OpenSearchClient; import org.opensearch.common.unit.TimeValue; +/** + * Builder class for requesting OpenSearch Hot Threads + * + * @opensearch.internal + */ public class NodesHotThreadsRequestBuilder extends NodesOperationRequestBuilder< NodesHotThreadsRequest, NodesHotThreadsResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsResponse.java index 62c73101eddbb..a5789cee8b740 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsResponse.java @@ -41,6 +41,11 @@ import java.io.IOException; import java.util.List; +/** + * Transport response for OpenSearch Hot Threads + * + * @opensearch.internal + */ public class NodesHotThreadsResponse extends BaseNodesResponse { public NodesHotThreadsResponse(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java index 
4c4519c890d90..5706b7772805f 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java @@ -48,6 +48,11 @@ import java.io.IOException; import java.util.List; +/** + * Transport action for OpenSearch Hot Threads + * + * @opensearch.internal + */ public class TransportNodesHotThreadsAction extends TransportNodesAction< NodesHotThreadsRequest, NodesHotThreadsResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodeInfo.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodeInfo.java index ddef959e66473..192815af1908f 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodeInfo.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodeInfo.java @@ -58,6 +58,8 @@ /** * Node information (static, does not change over time). 
+ * + * @opensearch.internal */ public class NodeInfo extends BaseNodeResponse { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoAction.java index 6850cbee55412..ea0c9d9d7f3c9 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoAction.java @@ -34,6 +34,11 @@ import org.opensearch.action.ActionType; +/** + * Transport action for OpenSearch Node Information + * + * @opensearch.internal + */ public class NodesInfoAction extends ActionType { public static final NodesInfoAction INSTANCE = new NodesInfoAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequest.java index 4024c9b0c12a9..d51be9bc27ac9 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequest.java @@ -47,6 +47,8 @@ /** * A request to get node (cluster) level information. 
+ * + * @opensearch.internal */ public class NodesInfoRequest extends BaseNodesRequest { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequestBuilder.java index 179239cac5257..76ef75b77a1cf 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequestBuilder.java @@ -35,6 +35,11 @@ import org.opensearch.action.support.nodes.NodesOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +/** + * Builder for OpenSearch Node Information requests + * + * @opensearch.internal + */ public class NodesInfoRequestBuilder extends NodesOperationRequestBuilder { public NodesInfoRequestBuilder(OpenSearchClient client, NodesInfoAction action) { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoResponse.java index a483a2fa1f845..08ca022b026ee 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoResponse.java @@ -56,6 +56,11 @@ import java.util.List; import java.util.Map; +/** + * Transport response for OpenSearch Node Information + * + * @opensearch.internal + */ public class NodesInfoResponse extends BaseNodesResponse implements ToXContentFragment { public NodesInfoResponse(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/PluginsAndModules.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/PluginsAndModules.java index a51a0ab7b6ea6..b2d94b7a97c22 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/PluginsAndModules.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/PluginsAndModules.java @@ -46,6 +46,8 @@ /** * Information about plugins and modules + * + * @opensearch.internal */ public class PluginsAndModules implements ReportingService.Info { private final List plugins; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/TransportNodesInfoAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/TransportNodesInfoAction.java index 52f53f4c1368e..71ed085beda5f 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/TransportNodesInfoAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/TransportNodesInfoAction.java @@ -48,6 +48,11 @@ import java.util.List; import java.util.Set; +/** + * Transport action for OpenSearch Node Information + * + * @opensearch.internal + */ public class TransportNodesInfoAction extends TransportNodesAction< NodesInfoRequest, NodesInfoResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/liveness/LivenessRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/liveness/LivenessRequest.java index c75e020c269eb..fc67ccb0f5574 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/liveness/LivenessRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/liveness/LivenessRequest.java @@ -40,6 +40,8 @@ /** * Transport level private response for the transport handler registered under * {@value TransportLivenessAction#NAME} + * + * @opensearch.internal */ public final class LivenessRequest extends ActionRequest { public LivenessRequest() {} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/liveness/LivenessResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/liveness/LivenessResponse.java index 
1737d868f0afb..8fdbf66f10e64 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/liveness/LivenessResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/liveness/LivenessResponse.java @@ -43,6 +43,8 @@ /** * Transport level private response for the transport handler registered under * {@value TransportLivenessAction#NAME} + * + * @opensearch.internal */ public final class LivenessResponse extends ActionResponse { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/liveness/TransportLivenessAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/liveness/TransportLivenessAction.java index 83f945ab85557..8a075cbee6e90 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/liveness/TransportLivenessAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/liveness/TransportLivenessAction.java @@ -40,6 +40,11 @@ import org.opensearch.transport.TransportRequestHandler; import org.opensearch.transport.TransportService; +/** + * Transport action for OpenSearch Node Liveness + * + * @opensearch.internal + */ public final class TransportLivenessAction implements TransportRequestHandler { private final ClusterService clusterService; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsAction.java index 252c4491bc56c..0f7c847582349 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsAction.java @@ -34,6 +34,11 @@ import org.opensearch.action.ActionType; +/** + * Transport action for reloading OpenSearch Secure Settings + * + * @opensearch.internal + */ public class NodesReloadSecureSettingsAction extends ActionType { 
public static final NodesReloadSecureSettingsAction INSTANCE = new NodesReloadSecureSettingsAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java index 5a6c4c2ae7c83..e31f5f304c836 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java @@ -49,6 +49,8 @@ /** * Request for a reload secure settings action + * + * @opensearch.internal */ public class NodesReloadSecureSettingsRequest extends BaseNodesRequest { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java index 8bb67f5042893..36a4a22460126 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java @@ -38,6 +38,8 @@ /** * Builder for the reload secure settings nodes request + * + * @opensearch.internal */ public class NodesReloadSecureSettingsRequestBuilder extends NodesOperationRequestBuilder< NodesReloadSecureSettingsRequest, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsResponse.java index 9dd8596b86260..052631df85c89 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsResponse.java +++ 
b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsResponse.java @@ -49,6 +49,8 @@ /** * The response for the reload secure settings action + * + * @opensearch.internal */ public class NodesReloadSecureSettingsResponse extends BaseNodesResponse implements diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java index d4e1a936263c9..d9173b85f569a 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java @@ -60,6 +60,11 @@ import java.util.ArrayList; import java.util.List; +/** + * Transport action for reloading OpenSearch Secure Settings + * + * @opensearch.internal + */ public class TransportNodesReloadSecureSettingsAction extends TransportNodesAction< NodesReloadSecureSettingsRequest, NodesReloadSecureSettingsResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java index dd2c649e07c28..7f0ac615cc449 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java @@ -64,6 +64,8 @@ /** * Node statistics (dynamic, changes depending on when created). 
+ * + * @opensearch.internal */ public class NodeStats extends BaseNodeResponse implements ToXContentFragment { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsAction.java index 77bb141966651..fc039778f05af 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsAction.java @@ -34,6 +34,11 @@ import org.opensearch.action.ActionType; +/** + * Transport action for obtaining OpenSearch Node Stats + * + * @opensearch.internal + */ public class NodesStatsAction extends ActionType { public static final NodesStatsAction INSTANCE = new NodesStatsAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java index 2ebf2ca424c27..babec0b7c119f 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java @@ -47,6 +47,8 @@ /** * A request to get node (cluster) level stats. 
+ * + * @opensearch.internal */ public class NodesStatsRequest extends BaseNodesRequest { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java index 40d6946823727..e382278f5ddb8 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java @@ -36,6 +36,11 @@ import org.opensearch.action.support.nodes.NodesOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +/** + * Transport builder for obtaining OpenSearch Node Stats + * + * @opensearch.internal + */ public class NodesStatsRequestBuilder extends NodesOperationRequestBuilder< NodesStatsRequest, NodesStatsResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsResponse.java index 8c84775e356c4..8ba39b6e0b8f4 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsResponse.java @@ -45,6 +45,11 @@ import java.io.IOException; import java.util.List; +/** + * Transport response for obtaining OpenSearch Node Stats + * + * @opensearch.internal + */ public class NodesStatsResponse extends BaseNodesResponse implements ToXContentFragment { public NodesStatsResponse(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java index b7ea714de4eb4..18eebd69736ba 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java @@ -48,6 +48,11 @@ import java.util.List; import java.util.Set; +/** + * Transport action for obtaining OpenSearch Node Stats + * + * @opensearch.internal + */ public class TransportNodesStatsAction extends TransportNodesAction< NodesStatsRequest, NodesStatsResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksAction.java index 6d6098adaa92c..d4f6dc39c93f7 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksAction.java @@ -36,6 +36,8 @@ /** * ActionType for cancelling running tasks + * + * @opensearch.internal */ public class CancelTasksAction extends ActionType { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequest.java index ea7e225051b80..a4b24a7a91f1f 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequest.java @@ -44,6 +44,8 @@ /** * A request to cancel tasks + * + * @opensearch.internal */ public class CancelTasksRequest extends BaseTasksRequest { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequestBuilder.java index c0686f166c859..ee19e8b104603 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequestBuilder.java @@ -37,6 +37,8 @@ /** * Builder for the request to cancel tasks running on the specified nodes + * + * @opensearch.internal */ public class CancelTasksRequestBuilder extends TasksRequestBuilder { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksResponse.java index 056463b85fe80..78aea59bcd10e 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksResponse.java @@ -47,6 +47,8 @@ /** * Returns the list of tasks that were cancelled + * + * @opensearch.internal */ public class CancelTasksResponse extends ListTasksResponse { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java index 963ebe24ae8e2..909fb009aa100 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java @@ -54,6 +54,8 @@ *

      * For a task to be cancellable it has to return an instance of * {@link CancellableTask} from {@link TransportRequest#createTask} + * + * @opensearch.internal */ public class TransportCancelTasksAction extends TransportTasksAction { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskAction.java index 4297e911d6bde..3b429d61c31b3 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskAction.java @@ -36,6 +36,8 @@ /** * ActionType for retrieving a list of currently running tasks + * + * @opensearch.internal */ public class GetTaskAction extends ActionType { public static final String TASKS_ORIGIN = "tasks"; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskRequest.java index a8fee2dc22d30..663fe51f35adc 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskRequest.java @@ -45,6 +45,8 @@ /** * A request to get node tasks + * + * @opensearch.internal */ public class GetTaskRequest extends ActionRequest { private TaskId taskId = TaskId.EMPTY_TASK_ID; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskRequestBuilder.java index d7d0be125bee3..17b2a21b2863b 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskRequestBuilder.java @@ -39,6 +39,8 @@ /** * 
Builder for the request to retrieve the list of tasks running on the specified nodes + * + * @opensearch.internal */ public class GetTaskRequestBuilder extends ActionRequestBuilder { public GetTaskRequestBuilder(OpenSearchClient client, GetTaskAction action) { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskResponse.java index 32d7062449a71..1097831113971 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskResponse.java @@ -46,6 +46,8 @@ /** * Returns the list of tasks currently running on the nodes + * + * @opensearch.internal */ public class GetTaskResponse extends ActionResponse implements ToXContentObject { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java index 80049b5e30fdf..0d6926fe9a2fa 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java @@ -74,6 +74,8 @@ *

    6. Look up the task and return it if it exists *
    7. If it doesn't then look up the task from the results index * + * + * @opensearch.internal */ public class TransportGetTaskAction extends HandledTransportAction { private final ThreadPool threadPool; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksAction.java index 76c07cd570622..7c8b83ad9c913 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksAction.java @@ -36,6 +36,8 @@ /** * ActionType for retrieving a list of currently running tasks + * + * @opensearch.internal */ public class ListTasksAction extends ActionType { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksRequest.java index 4f1a5765d1eea..decd26e3058f5 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksRequest.java @@ -40,6 +40,8 @@ /** * A request to get node tasks + * + * @opensearch.internal */ public class ListTasksRequest extends BaseTasksRequest { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksRequestBuilder.java index dc10fb99e90b9..45beb0dd899b5 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksRequestBuilder.java @@ -37,6 +37,8 @@ /** * Builder for the request to retrieve the list of tasks running on the specified nodes + * + 
* @opensearch.internal */ public class ListTasksRequestBuilder extends TasksRequestBuilder { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java index db0b1491fd693..8d2ad55df1a8c 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java @@ -62,6 +62,8 @@ /** * Returns the list of tasks currently running on the nodes + * + * @opensearch.internal */ public class ListTasksResponse extends BaseTasksResponse implements ToXContentObject { private static final String TASKS = "tasks"; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/TaskGroup.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/TaskGroup.java index 6d1d96bfd7869..56e89224ed780 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/TaskGroup.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/TaskGroup.java @@ -44,6 +44,8 @@ /** * Information about a currently running task and all its subtasks. 
+ * + * @opensearch.internal */ public class TaskGroup implements ToXContentObject { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java index b7875c5f99774..796ea023edd40 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java @@ -50,6 +50,11 @@ import static org.opensearch.common.unit.TimeValue.timeValueSeconds; +/** + * Transport action for listing tasks currently running on the nodes + * + * @opensearch.internal + */ public class TransportListTasksAction extends TransportTasksAction { public static long waitForCompletionTimeout(TimeValue timeout) { if (timeout == null) { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodeUsage.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodeUsage.java index 2094861185b50..58e43ca9f3568 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodeUsage.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodeUsage.java @@ -43,6 +43,11 @@ import java.io.IOException; import java.util.Map; +/** + * Concrete class for collecting OpenSearch telemetry + * + * @opensearch.internal + */ public class NodeUsage extends BaseNodeResponse implements ToXContentFragment { private final long timestamp; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageAction.java index 2a3c6be6e0bbe..8a3aec7679709 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageAction.java +++ 
b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageAction.java @@ -34,6 +34,11 @@ import org.opensearch.action.ActionType; +/** + * Transport action for collecting OpenSearch telemetry + * + * @opensearch.internal + */ public class NodesUsageAction extends ActionType { public static final NodesUsageAction INSTANCE = new NodesUsageAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageRequest.java index 0a94f2a9bfca7..01f66bd843642 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageRequest.java @@ -39,6 +39,11 @@ import java.io.IOException; +/** + * Transport request for collecting OpenSearch telemetry + * + * @opensearch.internal + */ public class NodesUsageRequest extends BaseNodesRequest { private boolean restActions; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageRequestBuilder.java index fefccf4368230..7d1823b59dc04 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageRequestBuilder.java @@ -36,6 +36,11 @@ import org.opensearch.action.support.nodes.NodesOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +/** + * Transport builder for collecting OpenSearch telemetry + * + * @opensearch.internal + */ public class NodesUsageRequestBuilder extends NodesOperationRequestBuilder< NodesUsageRequest, NodesUsageResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageResponse.java 
b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageResponse.java index d3e5aeea72193..77b6cf067a1d2 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageResponse.java @@ -48,6 +48,8 @@ /** * The response for the nodes usage api which contains the individual usage * statistics for all nodes queried. + * + * @opensearch.internal */ public class NodesUsageResponse extends BaseNodesResponse implements ToXContentFragment { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/TransportNodesUsageAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/TransportNodesUsageAction.java index 207a5eb41c85d..828d9a30df5db 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/TransportNodesUsageAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/TransportNodesUsageAction.java @@ -49,6 +49,11 @@ import java.util.List; import java.util.Map; +/** + * Transport action for collecting OpenSearch telemetry + * + * @opensearch.internal + */ public class TransportNodesUsageAction extends TransportNodesAction< NodesUsageRequest, NodesUsageResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/remote/RemoteInfoAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/remote/RemoteInfoAction.java index 099b3abc7438c..55f75a142a53c 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/remote/RemoteInfoAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/remote/RemoteInfoAction.java @@ -34,6 +34,11 @@ import org.opensearch.action.ActionType; +/** + * Transport action for remote monitoring + * + * @opensearch.internal + */ public final class RemoteInfoAction extends ActionType { public static final String NAME = "cluster:monitor/remote/info"; 
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/remote/RemoteInfoRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/remote/RemoteInfoRequest.java index c140a639ac3ae..e022ef504bd46 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/remote/RemoteInfoRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/remote/RemoteInfoRequest.java @@ -38,6 +38,11 @@ import java.io.IOException; +/** + * Transport request for remote monitoring + * + * @opensearch.internal + */ public final class RemoteInfoRequest extends ActionRequest { public RemoteInfoRequest() {} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/remote/RemoteInfoRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/remote/RemoteInfoRequestBuilder.java index 5fceced383731..03fd2a3f778ec 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/remote/RemoteInfoRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/remote/RemoteInfoRequestBuilder.java @@ -35,6 +35,11 @@ import org.opensearch.action.ActionRequestBuilder; import org.opensearch.client.OpenSearchClient; +/** + * Transport builder for remote monitoring + * + * @opensearch.internal + */ public final class RemoteInfoRequestBuilder extends ActionRequestBuilder { public RemoteInfoRequestBuilder(OpenSearchClient client, RemoteInfoAction action) { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/remote/RemoteInfoResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/remote/RemoteInfoResponse.java index 415e71e6d1010..08ff58d6cbee1 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/remote/RemoteInfoResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/remote/RemoteInfoResponse.java @@ -45,6 +45,11 @@ import java.util.Collections; import java.util.List; +/** + * Transport response for remote monitoring + * + * 
@opensearch.internal + */ public final class RemoteInfoResponse extends ActionResponse implements ToXContentObject { private List infos; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/remote/TransportRemoteInfoAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/remote/TransportRemoteInfoAction.java index 10b1917b0c94b..794d21f1cbc7c 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/remote/TransportRemoteInfoAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/remote/TransportRemoteInfoAction.java @@ -43,6 +43,11 @@ import static java.util.stream.Collectors.toList; +/** + * Transport action for remote monitoring + * + * @opensearch.internal + */ public final class TransportRemoteInfoAction extends HandledTransportAction { private final RemoteClusterService remoteClusterService; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryAction.java index 4feb755c3e5a1..0ec1281648cdb 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryAction.java @@ -33,6 +33,11 @@ import org.opensearch.action.ActionType; +/** + * Transport action for cleaning up snapshot repositories + * + * @opensearch.internal + */ public final class CleanupRepositoryAction extends ActionType { public static final CleanupRepositoryAction INSTANCE = new CleanupRepositoryAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequest.java index d1fc914201390..0f265681cd241 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequest.java @@ -40,6 +40,11 @@ import static org.opensearch.action.ValidateActions.addValidationError; +/** + * Transport request for cleaning up snapshot repositories + * + * @opensearch.internal + */ public class CleanupRepositoryRequest extends AcknowledgedRequest { private String repository; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequestBuilder.java index b5db8d7eca669..fc5365e7e836d 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequestBuilder.java @@ -35,6 +35,11 @@ import org.opensearch.action.support.master.MasterNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +/** + * Transport builder for cleaning up snapshot repositories + * + * @opensearch.internal + */ public class CleanupRepositoryRequestBuilder extends MasterNodeOperationRequestBuilder< CleanupRepositoryRequest, CleanupRepositoryResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryResponse.java index e6bb65e732082..3f6c9000dedac 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryResponse.java @@ -43,6 +43,11 @@ import java.io.IOException; +/** + * Transport 
response for cleaning up snapshot repositories + * + * @opensearch.internal + */ public final class CleanupRepositoryResponse extends ActionResponse implements ToXContentObject { private static final ObjectParser PARSER = new ObjectParser<>( diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java index c56b2fd2b2205..fb972136bf695 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java @@ -82,6 +82,8 @@ * {@link BlobStoreRepository#cleanup} ensures that the repository state id has not changed between creation of the cluster state entry * and any delete/write operations. TODO: This will not work if we also want to clean up at the shard level as those will involve writes * as well as deletes. 
+ * + * @opensearch.internal */ public final class TransportCleanupRepositoryAction extends TransportMasterNodeAction { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryAction.java index 3e6cbcbae8a8f..2031e4f7a716f 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryAction.java @@ -37,6 +37,8 @@ /** * Unregister repository action + * + * @opensearch.internal */ public class DeleteRepositoryAction extends ActionType { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequest.java index 610b985eb0484..a3f4bb768c649 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequest.java @@ -45,6 +45,8 @@ * Unregister repository request. *

      * The unregister repository command just unregisters the repository. No data is getting deleted from the repository. + * + * @opensearch.internal */ public class DeleteRepositoryRequest extends AcknowledgedRequest { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java index 3a390dd1df2ab..ffef8d5b41979 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java @@ -38,6 +38,8 @@ /** * Builder for unregister repository request + * + * @opensearch.internal */ public class DeleteRepositoryRequestBuilder extends AcknowledgedRequestBuilder< DeleteRepositoryRequest, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java index c7369cabbc75d..97a0463df0e41 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java @@ -51,6 +51,8 @@ /** * Transport action for unregister repository operation + * + * @opensearch.internal */ public class TransportDeleteRepositoryAction extends TransportMasterNodeAction { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesAction.java index 86bc21ef8fd41..8d0fa4dc5010c 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesAction.java @@ -36,6 +36,8 @@ /** * Get repositories action + * + * @opensearch.internal */ public class GetRepositoriesAction extends ActionType { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java index 02b42d82e6702..9e93b7ab68dc3 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java @@ -44,6 +44,8 @@ /** * Get repository request + * + * @opensearch.internal */ public class GetRepositoriesRequest extends MasterNodeReadRequest { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java index 8c4555ce54f1e..2174d02c6852e 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java @@ -38,6 +38,8 @@ /** * Get repository request builder + * + * @opensearch.internal */ public class GetRepositoriesRequestBuilder extends MasterNodeReadOperationRequestBuilder< GetRepositoriesRequest, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java index ffb78f9622228..a599474290061 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java @@ -49,6 +49,8 @@ /** * Get repositories response + * + * @opensearch.internal */ public class GetRepositoriesResponse extends ActionResponse implements ToXContentObject { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java index d3a66671b585b..e7cef381a2346 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java @@ -59,6 +59,8 @@ /** * Transport action for get repositories operation + * + * @opensearch.internal */ public class TransportGetRepositoriesAction extends TransportMasterNodeReadAction { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryAction.java index 35bcdb6444ed7..c2f90d869d873 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryAction.java @@ -37,6 +37,8 @@ /** * Register repository action + * + * @opensearch.internal */ public class PutRepositoryAction extends ActionType { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java index a3561f6b3052a..1bdc8e024447d 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java @@ -54,6 +54,8 @@ *

      * Registers a repository with given name, type and settings. If the repository with the same name already * exists in the cluster, the new repository will replace the existing repository. + * + * @opensearch.internal */ public class PutRepositoryRequest extends AcknowledgedRequest implements ToXContentObject { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java index 09a33533c8889..6e1b2795b6375 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java @@ -42,6 +42,8 @@ /** * Register repository request builder + * + * @opensearch.internal */ public class PutRepositoryRequestBuilder extends AcknowledgedRequestBuilder< PutRepositoryRequest, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java index c91f492209bdd..1d47dbb0fd194 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java @@ -51,6 +51,8 @@ /** * Transport action for register repository operation + * + * @opensearch.internal */ public class TransportPutRepositoryAction extends TransportMasterNodeAction { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java index 2c727ece7f130..661e99aa1dee3 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java @@ -51,6 +51,8 @@ /** * Transport action for verifying repository operation + * + * @opensearch.internal */ public class TransportVerifyRepositoryAction extends TransportMasterNodeAction { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryAction.java index 98239c9da0f80..10b82f2d5c0f0 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryAction.java @@ -36,6 +36,8 @@ /** * Verify repository action + * + * @opensearch.internal */ public class VerifyRepositoryAction extends ActionType { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequest.java index 12788a421c75f..001030f6a67f5 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequest.java @@ -43,6 +43,8 @@ /** * Verify repository request. 
+ * + * @opensearch.internal */ public class VerifyRepositoryRequest extends AcknowledgedRequest { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java index db01ac2268b9a..85c6d4e341e72 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java @@ -37,6 +37,8 @@ /** * Builder for verify repository request + * + * @opensearch.internal */ public class VerifyRepositoryRequestBuilder extends MasterNodeOperationRequestBuilder< VerifyRepositoryRequest, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryResponse.java index f31189f3073e0..c191622955e2d 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryResponse.java @@ -52,6 +52,8 @@ /** * Verify repository response + * + * @opensearch.internal */ public class VerifyRepositoryResponse extends ActionResponse implements ToXContentObject { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteAction.java index f52169911e3a9..d6a09f47cbf12 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteAction.java @@ -34,6 +34,11 @@ import org.opensearch.action.ActionType; +/** + * 
Transport action for rerouting allocation commands + * + * @opensearch.internal + */ public class ClusterRerouteAction extends ActionType { public static final ClusterRerouteAction INSTANCE = new ClusterRerouteAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequest.java index ddef4f62c5298..1ca5ca1148b87 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequest.java @@ -44,6 +44,8 @@ /** * Request to submit cluster reroute allocation commands + * + * @opensearch.internal */ public class ClusterRerouteRequest extends AcknowledgedRequest { private AllocationCommands commands = new AllocationCommands(); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestBuilder.java index 8e0116bb3646e..01d52cb43320d 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestBuilder.java @@ -38,6 +38,8 @@ /** * Builder for a cluster reroute request + * + * @opensearch.internal */ public class ClusterRerouteRequestBuilder extends AcknowledgedRequestBuilder< ClusterRerouteRequest, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteResponse.java index da4cfb4333b3d..dcddc98bdc43a 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteResponse.java @@ 
-45,6 +45,8 @@ /** * Response returned after a cluster reroute request + * + * @opensearch.internal */ public class ClusterRerouteResponse extends AcknowledgedResponse implements ToXContentObject { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java index c0e3d30fdb702..a5d4935a3caf8 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java @@ -70,6 +70,11 @@ import java.util.List; import java.util.Map; +/** + * Transport action for rerouting cluster allocation commands + * + * @opensearch.internal + */ public class TransportClusterRerouteAction extends TransportMasterNodeAction { private static final Logger logger = LogManager.getLogger(TransportClusterRerouteAction.class); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterGetSettingsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterGetSettingsRequest.java index b490f172ce7ad..01aeb0f6ec988 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterGetSettingsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterGetSettingsRequest.java @@ -39,6 +39,8 @@ /** * This request is specific to the REST client. {@link ClusterStateRequest} * is used on the transport layer. 
+ * + * @opensearch.internal */ public class ClusterGetSettingsRequest extends MasterNodeReadRequest { private boolean includeDefaults = false; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterGetSettingsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterGetSettingsResponse.java index 95c148e9fe5d3..93d921c7b0f3a 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterGetSettingsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterGetSettingsResponse.java @@ -52,6 +52,8 @@ /** * This response is specific to the REST client. {@link ClusterStateResponse} * is used on the transport layer. + * + * @opensearch.internal */ public class ClusterGetSettingsResponse extends ActionResponse implements ToXContentObject { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsAction.java index dcc7c786cc3ae..12134e6936ddb 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsAction.java @@ -34,6 +34,11 @@ import org.opensearch.action.ActionType; +/** + * Transport action for updating cluster settings + * + * @opensearch.internal + */ public class ClusterUpdateSettingsAction extends ActionType { public static final ClusterUpdateSettingsAction INSTANCE = new ClusterUpdateSettingsAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java index 12cce7cf74efa..f3f7db03ac67e 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java 
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java @@ -54,6 +54,8 @@ /** * Request for an update cluster settings action + * + * @opensearch.internal */ public class ClusterUpdateSettingsRequest extends AcknowledgedRequest implements ToXContentObject { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java index 1ec7bd6cfb47d..4d08c94f78b6a 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java @@ -41,6 +41,8 @@ /** * Builder for a cluster update settings request + * + * @opensearch.internal */ public class ClusterUpdateSettingsRequestBuilder extends AcknowledgedRequestBuilder< ClusterUpdateSettingsRequest, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java index 91955126dd745..a4edd1d99148a 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java @@ -48,6 +48,8 @@ /** * A response for a cluster update settings action. 
+ * + * @opensearch.internal */ public class ClusterUpdateSettingsResponse extends AcknowledgedResponse { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/settings/SettingsUpdater.java b/server/src/main/java/org/opensearch/action/admin/cluster/settings/SettingsUpdater.java index bcc95c01b4189..340d868c25853 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/settings/SettingsUpdater.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/settings/SettingsUpdater.java @@ -50,6 +50,8 @@ /** * Updates transient and persistent cluster state settings if there are any changes * due to the update. + * + * @opensearch.internal */ final class SettingsUpdater { final Settings.Builder transientUpdates = Settings.builder(); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java index 3bfdf2a0cbd5a..af5da6f538d67 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java @@ -58,6 +58,11 @@ import java.io.IOException; +/** + * Transport action for updating cluster settings + * + * @opensearch.internal + */ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAction< ClusterUpdateSettingsRequest, ClusterUpdateSettingsResponse> { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsAction.java index 9d6b0f9677a95..176d52d3eeaab 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsAction.java +++ 
b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsAction.java @@ -34,6 +34,11 @@ import org.opensearch.action.ActionType; +/** + * Transport action for searching shards + * + * @opensearch.internal + */ public class ClusterSearchShardsAction extends ActionType { public static final ClusterSearchShardsAction INSTANCE = new ClusterSearchShardsAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsGroup.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsGroup.java index 660c4629f6db2..6e1a99c95a878 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsGroup.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsGroup.java @@ -42,6 +42,11 @@ import java.io.IOException; +/** + * Transport action for searching shard groups + * + * @opensearch.internal + */ public class ClusterSearchShardsGroup implements Writeable, ToXContentObject { private final ShardId shardId; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java index 7ae4a70d6bd36..e79697a415f1e 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java @@ -44,6 +44,11 @@ import java.io.IOException; import java.util.Objects; +/** + * Transport request for searching shards + * + * @opensearch.internal + */ public class ClusterSearchShardsRequest extends MasterNodeReadRequest implements IndicesRequest.Replaceable { private String[] indices = Strings.EMPTY_ARRAY; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java 
b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java index ae5c22a1c57a7..26246197cbfa8 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java @@ -36,6 +36,11 @@ import org.opensearch.action.support.master.MasterNodeReadOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +/** + * Transport request builder for searching shards + * + * @opensearch.internal + */ public class ClusterSearchShardsRequestBuilder extends MasterNodeReadOperationRequestBuilder< ClusterSearchShardsRequest, ClusterSearchShardsResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java index 1381a39664a49..6ef55458e413d 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java @@ -44,6 +44,11 @@ import java.util.Arrays; import java.util.Map; +/** + * Transport response for searching shards + * + * @opensearch.internal + */ public class ClusterSearchShardsResponse extends ActionResponse implements ToXContentObject { private final ClusterSearchShardsGroup[] groups; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java index 37a556216c2c9..1d0173b2446dd 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java @@ -58,6 
+58,11 @@ import java.util.Map; import java.util.Set; +/** + * Transport action for searching shards + * + * @opensearch.internal + */ public class TransportClusterSearchShardsAction extends TransportMasterNodeReadAction< ClusterSearchShardsRequest, ClusterSearchShardsResponse> { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotAction.java index 0ecc088ff462a..c6fe102544a7e 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotAction.java @@ -35,6 +35,11 @@ import org.opensearch.action.ActionType; import org.opensearch.action.support.master.AcknowledgedResponse; +/** + * Transport action for cloning a snapshot + * + * @opensearch.internal + */ public final class CloneSnapshotAction extends ActionType { public static final CloneSnapshotAction INSTANCE = new CloneSnapshotAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequest.java index 58725c23b7fc5..7044a7412058a 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequest.java @@ -46,6 +46,11 @@ import static org.opensearch.action.ValidateActions.addValidationError; +/** + * Transport request for cloning a snapshot + * + * @opensearch.internal + */ public class CloneSnapshotRequest extends MasterNodeRequest implements IndicesRequest.Replaceable, ToXContentObject { private final String repository; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequestBuilder.java 
b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequestBuilder.java index ba6f7a61bdc8d..c2dd9b2b491f2 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequestBuilder.java @@ -39,6 +39,11 @@ import org.opensearch.client.OpenSearchClient; import org.opensearch.common.Strings; +/** + * Transport request builder for cloning a snapshot + * + * @opensearch.internal + */ public class CloneSnapshotRequestBuilder extends MasterNodeOperationRequestBuilder< CloneSnapshotRequest, AcknowledgedResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/TransportCloneSnapshotAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/TransportCloneSnapshotAction.java index 806cb4a82c3c5..a17d19bb870fa 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/TransportCloneSnapshotAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/TransportCloneSnapshotAction.java @@ -51,6 +51,8 @@ /** * Transport action for the clone snapshot operation. 
+ * + * @opensearch.internal */ public final class TransportCloneSnapshotAction extends TransportMasterNodeAction { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotAction.java index 324a28027949f..fb95e311a9783 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotAction.java @@ -36,6 +36,8 @@ /** * Create snapshot action + * + * @opensearch.internal */ public class CreateSnapshotAction extends ActionType { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java index 438d2fd006e63..b9d96ed2a752d 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java @@ -75,6 +75,8 @@ *

    8. must be lowercase
    9. *
    10. must not contain invalid file name characters {@link org.opensearch.common.Strings#INVALID_FILENAME_CHARS}
    11. * + * + * @opensearch.internal */ public class CreateSnapshotRequest extends MasterNodeRequest implements diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java index db30ce8295530..3f74e7d24bcdb 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java @@ -42,6 +42,8 @@ /** * Create snapshot request builder + * + * @opensearch.internal */ public class CreateSnapshotRequestBuilder extends MasterNodeOperationRequestBuilder< CreateSnapshotRequest, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java index 9a92971a2a791..c62b1e3f69cb5 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java @@ -50,6 +50,8 @@ /** * Create snapshot response + * + * @opensearch.internal */ public class CreateSnapshotResponse extends ActionResponse implements ToXContentObject { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java index 377fe03a9b030..f05980f2eb41f 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java @@ -50,6 +50,8 @@ /** * Transport 
action for create snapshot operation + * + * @opensearch.internal */ public class TransportCreateSnapshotAction extends TransportMasterNodeAction { private final SnapshotsService snapshotsService; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotAction.java index 4c577bd0bbce2..0b98a4b31fd53 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotAction.java @@ -37,6 +37,8 @@ /** * Delete snapshot action + * + * @opensearch.internal */ public class DeleteSnapshotAction extends ActionType { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java index fe9dc4db626f7..d446221e8e175 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java @@ -47,6 +47,8 @@ *

      * Delete snapshot request removes snapshots from the repository and cleans up all files that are associated with the snapshots. * All files that are shared with at least one other existing snapshot are left intact. + * + * @opensearch.internal */ public class DeleteSnapshotRequest extends MasterNodeRequest { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java index 951f86bafb7b1..684d96d1aa8d9 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java @@ -38,6 +38,8 @@ /** * Delete snapshot request builder + * + * @opensearch.internal */ public class DeleteSnapshotRequestBuilder extends MasterNodeOperationRequestBuilder< DeleteSnapshotRequest, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java index 0781f01e9bc61..ad71f970edcd5 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java @@ -51,6 +51,8 @@ /** * Transport action for delete snapshot operation + * + * @opensearch.internal */ public class TransportDeleteSnapshotAction extends TransportMasterNodeAction { private final SnapshotsService snapshotsService; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsAction.java index 22e3e859394c7..59d7969d04f86 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsAction.java @@ -36,6 +36,8 @@ /** * Get snapshots action + * + * @opensearch.internal */ public class GetSnapshotsAction extends ActionType { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java index 549e3cea70deb..822598bd7f78b 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java @@ -44,6 +44,8 @@ /** * Get snapshot request + * + * @opensearch.internal */ public class GetSnapshotsRequest extends MasterNodeRequest { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java index bd2ced2733169..46317a3493650 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java @@ -38,6 +38,8 @@ /** * Get snapshots request builder + * + * @opensearch.internal */ public class GetSnapshotsRequestBuilder extends MasterNodeOperationRequestBuilder< GetSnapshotsRequest, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java index 28981a6bdafe2..4d0daf6b67e45 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java +++ 
b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java @@ -51,6 +51,8 @@ /** * Get snapshots response + * + * @opensearch.internal */ public class GetSnapshotsResponse extends ActionResponse implements ToXContentObject { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java index 91cec4b268f9b..dab87c72c5dce 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java @@ -76,6 +76,8 @@ /** * Transport Action for get snapshots operation + * + * @opensearch.internal */ public class TransportGetSnapshotsAction extends TransportMasterNodeAction { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreClusterStateListener.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreClusterStateListener.java index cb6f8493551f6..7d2ca99e3dbf5 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreClusterStateListener.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreClusterStateListener.java @@ -46,6 +46,11 @@ import static org.opensearch.snapshots.RestoreService.restoreInProgress; +/** + * Transport listener for cluster state updates + * + * @opensearch.internal + */ public class RestoreClusterStateListener implements ClusterStateListener { private static final Logger logger = LogManager.getLogger(RestoreClusterStateListener.class); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotAction.java 
index e86ad12b8a231..1813e8214aeb0 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotAction.java @@ -36,6 +36,8 @@ /** * Restore snapshot action + * + * @opensearch.internal */ public class RestoreSnapshotAction extends ActionType { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java index ed45ba4f65941..ec3809fb52516 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java @@ -61,6 +61,8 @@ /** * Restore snapshot request + * + * @opensearch.internal */ public class RestoreSnapshotRequest extends MasterNodeRequest implements ToXContentObject { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java index 19fc86c9c4ace..34a6586d52917 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java @@ -43,6 +43,8 @@ /** * Restore snapshot request builder + * + * @opensearch.internal */ public class RestoreSnapshotRequestBuilder extends MasterNodeOperationRequestBuilder< RestoreSnapshotRequest, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotResponse.java index 
324e3054fb1a3..658d7139644c1 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotResponse.java @@ -52,6 +52,8 @@ /** * Contains information about restores snapshot + * + * @opensearch.internal */ public class RestoreSnapshotResponse extends ActionResponse implements ToXContentObject { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java index c07f771081902..2deed9f2dc93b 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java @@ -50,6 +50,8 @@ /** * Transport action for restore snapshot operation + * + * @opensearch.internal */ public class TransportRestoreSnapshotAction extends TransportMasterNodeAction { private final RestoreService restoreService; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStage.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStage.java index 59ead89c4a4ee..6e250962d1210 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStage.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStage.java @@ -32,6 +32,11 @@ package org.opensearch.action.admin.cluster.snapshots.status; +/** + * Stage for snapshotting an Index Shard + * + * @opensearch.internal + */ public enum SnapshotIndexShardStage { /** diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java 
b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java index 74ac12b951dc8..edbbec35fad1a 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java @@ -53,6 +53,11 @@ import static org.opensearch.common.xcontent.ConstructingObjectParser.constructorArg; import static org.opensearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; +/** + * Status for snapshotting an Index Shard + * + * @opensearch.internal + */ public class SnapshotIndexShardStatus extends BroadcastShardResponse implements ToXContentFragment { private SnapshotIndexShardStage stage = SnapshotIndexShardStage.INIT; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexStatus.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexStatus.java index 63cbc0a18ffff..06c9bca7c4b05 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexStatus.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexStatus.java @@ -53,6 +53,8 @@ /** * Represents snapshot status of all shards in the index + * + * @opensearch.internal */ public class SnapshotIndexStatus implements Iterable, ToXContentFragment { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotShardsStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotShardsStats.java index b945ba20afe80..5ff9840cba19b 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotShardsStats.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotShardsStats.java @@ -46,6 +46,8 @@ /** * Status of a snapshot shards + * + * 
@opensearch.internal */ public class SnapshotShardsStats implements ToXContentObject { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStats.java index 3f348e47c4f4a..8d7d6679f116c 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStats.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStats.java @@ -46,6 +46,11 @@ import java.io.IOException; +/** + * Stats for snapshots + * + * @opensearch.internal + */ public class SnapshotStats implements Writeable, ToXContentObject { private long startTime; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatus.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatus.java index 76764a39ff7e2..8fd1ed22a0d14 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatus.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatus.java @@ -67,6 +67,8 @@ /** * Status of a snapshot + * + * @opensearch.internal */ public class SnapshotStatus implements ToXContentObject, Writeable { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusAction.java index d4f398d543b81..c4fe06e985f6f 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusAction.java @@ -36,6 +36,8 @@ /** * Snapshots status action + * + * @opensearch.internal */ public class SnapshotsStatusAction extends ActionType { diff --git 
a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequest.java index f7e29cfef0bb4..d5c7c63b0db43 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequest.java @@ -44,6 +44,8 @@ /** * Get snapshot status request + * + * @opensearch.internal */ public class SnapshotsStatusRequest extends MasterNodeRequest { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequestBuilder.java index 67c82182e38c9..3e281ce8059d1 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequestBuilder.java @@ -38,6 +38,8 @@ /** * Snapshots status request builder + * + * @opensearch.internal */ public class SnapshotsStatusRequestBuilder extends MasterNodeOperationRequestBuilder< SnapshotsStatusRequest, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java index 8519b1f6a3379..86183d8a2d8eb 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java @@ -50,6 +50,8 @@ /** * Snapshot status response + * + * @opensearch.internal */ public class SnapshotsStatusResponse extends ActionResponse implements ToXContentObject { diff --git 
a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java index b5247141739b7..507ea3b154fc6 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java @@ -63,6 +63,8 @@ /** * Transport action that collects snapshot shard statuses from data nodes + * + * @opensearch.internal */ public class TransportNodesSnapshotsStatus extends TransportNodesAction< TransportNodesSnapshotsStatus.Request, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java index 1e29a70e1f41f..dec169a6633cf 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java @@ -84,6 +84,11 @@ import static java.util.Collections.unmodifiableMap; +/** + * Transport action for accessing snapshot status + * + * @opensearch.internal + */ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction { private static final Logger logger = LogManager.getLogger(TransportSnapshotsStatusAction.class); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateAction.java index dff45b708fb13..ba3658d68296e 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateAction.java 
@@ -34,6 +34,11 @@ import org.opensearch.action.ActionType; +/** + * Transport action for obtaining cluster state + * + * @opensearch.internal + */ public class ClusterStateAction extends ActionType { public static final ClusterStateAction INSTANCE = new ClusterStateAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateRequest.java index 91e01aa74f8a5..bf2204a9f8e15 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateRequest.java @@ -43,6 +43,11 @@ import java.io.IOException; +/** + * Transport request for obtaining cluster state + * + * @opensearch.internal + */ public class ClusterStateRequest extends MasterNodeReadRequest implements IndicesRequest.Replaceable { public static final TimeValue DEFAULT_WAIT_FOR_NODE_TIMEOUT = TimeValue.timeValueMinutes(1); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateRequestBuilder.java index cf3eabfc4167d..530d99f5db808 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateRequestBuilder.java @@ -37,6 +37,11 @@ import org.opensearch.client.OpenSearchClient; import org.opensearch.common.unit.TimeValue; +/** + * Transport request builder for obtaining cluster state + * + * @opensearch.internal + */ public class ClusterStateRequestBuilder extends MasterNodeReadOperationRequestBuilder< ClusterStateRequest, ClusterStateResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java 
b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java index 80d1f7022967d..ec2697fbd7339 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java @@ -46,6 +46,8 @@ /** * The response for getting the cluster state. + * + * @opensearch.internal */ public class ClusterStateResponse extends ActionResponse { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java index 595127d83d4bf..885769dd200cf 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java @@ -58,6 +58,11 @@ import java.io.IOException; import java.util.function.Predicate; +/** + * Transport action for obtaining cluster state + * + * @opensearch.internal + */ public class TransportClusterStateAction extends TransportMasterNodeReadAction { private final Logger logger = LogManager.getLogger(getClass()); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/AnalysisStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/AnalysisStats.java index 3ae8d684ee870..1f1920d8d675d 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/AnalysisStats.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/AnalysisStats.java @@ -59,6 +59,8 @@ /** * Statistics about analysis usage. 
+ * + * @opensearch.internal */ public final class AnalysisStats implements ToXContentFragment, Writeable { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsAction.java index baeeec91bd8ce..ef20087a667df 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsAction.java @@ -34,6 +34,11 @@ import org.opensearch.action.ActionType; +/** + * Transport action for obtaining cluster stats + * + * @opensearch.internal + */ public class ClusterStatsAction extends ActionType { public static final ClusterStatsAction INSTANCE = new ClusterStatsAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIndices.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIndices.java index dd74c2ad66a9b..0bba411e887b1 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIndices.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIndices.java @@ -47,6 +47,11 @@ import java.io.IOException; import java.util.List; +/** + * Cluster Stats per index + * + * @opensearch.internal + */ public class ClusterStatsIndices implements ToXContentFragment { private int indexCount; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java index 01d4d5ac0fb53..2d684c3333f24 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java @@ -44,6 +44,11 @@ import java.io.IOException; +/** + * Transport action for obtaining cluster stats from 
node level + * + * @opensearch.internal + */ public class ClusterStatsNodeResponse extends BaseNodeResponse { private final NodeInfo nodeInfo; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodes.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodes.java index fbca94780f827..78fe1951094db 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodes.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodes.java @@ -69,6 +69,11 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; +/** + * Per Node Cluster Stats + * + * @opensearch.internal + */ public class ClusterStatsNodes implements ToXContentFragment { private final Counts counts; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequest.java index ed658ae23999a..43b3cf11237f7 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequest.java @@ -40,6 +40,8 @@ /** * A request to get cluster level stats. 
+ * + * @opensearch.internal */ public class ClusterStatsRequest extends BaseNodesRequest { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequestBuilder.java index 33c346a493986..aaf5e3aeffeb8 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequestBuilder.java @@ -35,6 +35,11 @@ import org.opensearch.action.support.nodes.NodesOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +/** + * Transport request builder for obtaining cluster stats + * + * @opensearch.internal + */ public class ClusterStatsRequestBuilder extends NodesOperationRequestBuilder< ClusterStatsRequest, ClusterStatsResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsResponse.java index 172159a1efe5b..c6519d6669ea8 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsResponse.java @@ -49,6 +49,11 @@ import java.util.List; import java.util.Locale; +/** + * Transport response for obtaining cluster stats + * + * @opensearch.internal + */ public class ClusterStatsResponse extends BaseNodesResponse implements ToXContentFragment { final ClusterStatsNodes nodesStats; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/IndexFeatureStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/IndexFeatureStats.java index 0c428ee4fe0ee..0da99097aa4f4 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/IndexFeatureStats.java +++ 
b/server/src/main/java/org/opensearch/action/admin/cluster/stats/IndexFeatureStats.java @@ -43,6 +43,8 @@ /** * Statistics about an index feature. + * + * @opensearch.internal */ public final class IndexFeatureStats implements ToXContent, Writeable { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/MappingStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/MappingStats.java index fc4b61af4ddde..71e183e6158dc 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/MappingStats.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/MappingStats.java @@ -56,6 +56,8 @@ /** * Usage statistics about mappings usage. + * + * @opensearch.internal */ public final class MappingStats implements ToXContentFragment, Writeable { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/MappingVisitor.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/MappingVisitor.java index 4f16dbcff668e..b2a5acb235ace 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/MappingVisitor.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/MappingVisitor.java @@ -35,6 +35,11 @@ import java.util.Map; import java.util.function.Consumer; +/** + * Visitor pattern for obtaining index mappings + * + * @opensearch.internal + */ final class MappingVisitor { private MappingVisitor() {} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java index c0d7519e79862..d72c797fed248 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -64,6 +64,11 @@ import java.util.ArrayList; import java.util.List; +/** + * Transport action for obtaining 
cluster state + * + * @opensearch.internal + */ public class TransportClusterStatsAction extends TransportNodesAction< ClusterStatsRequest, ClusterStatsResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptAction.java index ab20c4052938d..3645ef21d2e12 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptAction.java @@ -35,6 +35,11 @@ import org.opensearch.action.ActionType; import org.opensearch.action.support.master.AcknowledgedResponse; +/** + * Transport action for deleting stored scripts + * + * @opensearch.internal + */ public class DeleteStoredScriptAction extends ActionType { public static final DeleteStoredScriptAction INSTANCE = new DeleteStoredScriptAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequest.java index eda9aa053854f..93d2c3ba3c452 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequest.java @@ -41,6 +41,11 @@ import static org.opensearch.action.ValidateActions.addValidationError; +/** + * Transport request for deleting stored scripts + * + * @opensearch.internal + */ public class DeleteStoredScriptRequest extends AcknowledgedRequest { private String id; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequestBuilder.java index 
d45b0b02d9d83..34e0d429f2098 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequestBuilder.java @@ -36,6 +36,11 @@ import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; +/** + * Transport request builder for deleting stored scripts + * + * @opensearch.internal + */ public class DeleteStoredScriptRequestBuilder extends AcknowledgedRequestBuilder< DeleteStoredScriptRequest, AcknowledgedResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetScriptContextAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetScriptContextAction.java index 9aebd60c8997b..df33aa8081849 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetScriptContextAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetScriptContextAction.java @@ -34,6 +34,11 @@ import org.opensearch.action.ActionType; +/** + * Transport action for getting stored scripts + * + * @opensearch.internal + */ public class GetScriptContextAction extends ActionType { public static final GetScriptContextAction INSTANCE = new GetScriptContextAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetScriptContextRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetScriptContextRequest.java index 90ec611eeae9d..385948b613199 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetScriptContextRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetScriptContextRequest.java @@ -37,6 +37,11 @@ import java.io.IOException; +/** + * Transport context for getting stored scripts + * + * @opensearch.internal + */ public class 
GetScriptContextRequest extends ActionRequest { public GetScriptContextRequest() { super(); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetScriptContextResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetScriptContextResponse.java index 7bfcface8b75c..b06e10834abfc 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetScriptContextResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetScriptContextResponse.java @@ -54,6 +54,11 @@ import java.util.function.Function; import java.util.stream.Collectors; +/** + * Transport context response for getting stored scripts + * + * @opensearch.internal + */ public class GetScriptContextResponse extends ActionResponse implements StatusToXContentObject { private static final ParseField CONTEXTS = new ParseField("contexts"); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetScriptLanguageAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetScriptLanguageAction.java index 90cc05ba983de..b2da146a7ccbe 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetScriptLanguageAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetScriptLanguageAction.java @@ -34,6 +34,11 @@ import org.opensearch.action.ActionType; +/** + * Transport action for getting script language + * + * @opensearch.internal + */ public class GetScriptLanguageAction extends ActionType { public static final GetScriptLanguageAction INSTANCE = new GetScriptLanguageAction(); public static final String NAME = "cluster:admin/script_language/get"; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetScriptLanguageRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetScriptLanguageRequest.java index 
0a68af3acd5d7..b6159e10cecb7 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetScriptLanguageRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetScriptLanguageRequest.java @@ -38,6 +38,11 @@ import java.io.IOException; +/** + * Transport request for getting script language + * + * @opensearch.internal + */ public class GetScriptLanguageRequest extends ActionRequest { public GetScriptLanguageRequest() { super(); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetScriptLanguageResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetScriptLanguageResponse.java index d9d6fa9e650de..45d69a2596b19 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetScriptLanguageResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetScriptLanguageResponse.java @@ -45,6 +45,11 @@ import java.io.IOException; import java.util.Objects; +/** + * Transport response for getting script language + * + * @opensearch.internal + */ public class GetScriptLanguageResponse extends ActionResponse implements StatusToXContentObject, Writeable { public final ScriptLanguagesInfo info; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptAction.java index 40d887987ae40..f4cb82d68456a 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptAction.java @@ -34,6 +34,11 @@ import org.opensearch.action.ActionType; +/** + * Transport action for getting stored script + * + * @opensearch.internal + */ public class GetStoredScriptAction extends ActionType { public static final GetStoredScriptAction INSTANCE = 
new GetStoredScriptAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptRequest.java index afecdc09d991d..2a51bd9ad3eef 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptRequest.java @@ -41,6 +41,11 @@ import static org.opensearch.action.ValidateActions.addValidationError; +/** + * Transport request for getting stored script + * + * @opensearch.internal + */ public class GetStoredScriptRequest extends MasterNodeReadRequest { protected String id; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptRequestBuilder.java index cbae829e76c04..3c8b74c240f29 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptRequestBuilder.java @@ -35,6 +35,11 @@ import org.opensearch.action.support.master.MasterNodeReadOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +/** + * Transport request builder for getting stored script + * + * @opensearch.internal + */ public class GetStoredScriptRequestBuilder extends MasterNodeReadOperationRequestBuilder< GetStoredScriptRequest, GetStoredScriptResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptResponse.java index 7739f87db74f9..799e308acae39 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptResponse.java 
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptResponse.java @@ -50,6 +50,11 @@ import static org.opensearch.common.xcontent.ConstructingObjectParser.constructorArg; import static org.opensearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; +/** + * Transport response for getting stored script + * + * @opensearch.internal + */ public class GetStoredScriptResponse extends ActionResponse implements StatusToXContentObject { public static final ParseField _ID_PARSE_FIELD = new ParseField("_id"); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptAction.java index 75a2dc12d81f6..2845d895a69e8 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptAction.java @@ -35,6 +35,11 @@ import org.opensearch.action.ActionType; import org.opensearch.action.support.master.AcknowledgedResponse; +/** + * Transport action for putting stored script + * + * @opensearch.internal + */ public class PutStoredScriptAction extends ActionType { public static final PutStoredScriptAction INSTANCE = new PutStoredScriptAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java index 4665354918c8f..2bddf2823f962 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java @@ -48,6 +48,11 @@ import static org.opensearch.action.ValidateActions.addValidationError; +/** + * Transport request for putting stored script + * + * 
@opensearch.internal + */ public class PutStoredScriptRequest extends AcknowledgedRequest implements ToXContentFragment { private String id; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequestBuilder.java index 414ed3d273b75..ef3c14df29627 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequestBuilder.java @@ -38,6 +38,11 @@ import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.xcontent.XContentType; +/** + * Transport request builder for putting stored script + * + * @opensearch.internal + */ public class PutStoredScriptRequestBuilder extends AcknowledgedRequestBuilder< PutStoredScriptRequest, AcknowledgedResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportDeleteStoredScriptAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportDeleteStoredScriptAction.java index 0b5f9d3040add..1550af534e5bf 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportDeleteStoredScriptAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportDeleteStoredScriptAction.java @@ -49,6 +49,11 @@ import java.io.IOException; +/** + * Transport action for deleting stored script + * + * @opensearch.internal + */ public class TransportDeleteStoredScriptAction extends TransportMasterNodeAction { private final ScriptService scriptService; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportGetScriptContextAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportGetScriptContextAction.java index 
0bcd9a71109ed..88184d59932ea 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportGetScriptContextAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportGetScriptContextAction.java @@ -42,6 +42,11 @@ import java.util.Set; +/** + * Transport action for getting script context + * + * @opensearch.internal + */ public class TransportGetScriptContextAction extends HandledTransportAction { private final ScriptService scriptService; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportGetScriptLanguageAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportGetScriptLanguageAction.java index 255889b63dbd8..0ecd6e8cf35d7 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportGetScriptLanguageAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportGetScriptLanguageAction.java @@ -40,6 +40,11 @@ import org.opensearch.tasks.Task; import org.opensearch.transport.TransportService; +/** + * Transport action for getting script language + * + * @opensearch.internal + */ public class TransportGetScriptLanguageAction extends HandledTransportAction { private final ScriptService scriptService; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportGetStoredScriptAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportGetStoredScriptAction.java index 4a87f6795da50..b3f5890de40b9 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportGetStoredScriptAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportGetStoredScriptAction.java @@ -48,6 +48,11 @@ import java.io.IOException; +/** + * Transport action for getting stored script + * + * @opensearch.internal + */ public class 
TransportGetStoredScriptAction extends TransportMasterNodeReadAction { private final ScriptService scriptService; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportPutStoredScriptAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportPutStoredScriptAction.java index a8288fc0147c2..fa0e97d4c02f1 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportPutStoredScriptAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportPutStoredScriptAction.java @@ -49,6 +49,11 @@ import java.io.IOException; +/** + * Transport action for putting stored script + * + * @opensearch.internal + */ public class TransportPutStoredScriptAction extends TransportMasterNodeAction { private final ScriptService scriptService; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksAction.java index 851b88f1e639a..9f3e8720d1f56 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksAction.java @@ -34,6 +34,11 @@ import org.opensearch.action.ActionType; +/** + * Transport action for getting pending cluster tasks + * + * @opensearch.internal + */ public class PendingClusterTasksAction extends ActionType { public static final PendingClusterTasksAction INSTANCE = new PendingClusterTasksAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksRequest.java index 463481fe5d0e5..27f5e3bc9b991 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksRequest.java +++ 
b/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksRequest.java @@ -38,6 +38,11 @@ import java.io.IOException; +/** + * Transport request for getting pending cluster tasks + * + * @opensearch.internal + */ public class PendingClusterTasksRequest extends MasterNodeReadRequest { public PendingClusterTasksRequest() {} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksRequestBuilder.java index 8f3bccdf5e55f..08afa81a8194d 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksRequestBuilder.java @@ -35,6 +35,11 @@ import org.opensearch.action.support.master.MasterNodeReadOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +/** + * Transport request builder for getting pending cluster tasks + * + * @opensearch.internal + */ public class PendingClusterTasksRequestBuilder extends MasterNodeReadOperationRequestBuilder< PendingClusterTasksRequest, PendingClusterTasksResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksResponse.java index 5b355f9f00166..89b8c46cb14d0 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksResponse.java @@ -43,6 +43,11 @@ import java.util.Iterator; import java.util.List; +/** + * Transport response for getting pending cluster tasks + * + * @opensearch.internal + */ public class PendingClusterTasksResponse extends ActionResponse implements Iterable, ToXContentObject { private final List pendingTasks; diff --git 
a/server/src/main/java/org/opensearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java index 932722eae8a80..13a805e1e49f0 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java @@ -50,6 +50,11 @@ import java.io.IOException; import java.util.List; +/** + * Transport action for getting pending cluster tasks + * + * @opensearch.internal + */ public class TransportPendingClusterTasksAction extends TransportMasterNodeReadAction< PendingClusterTasksRequest, PendingClusterTasksResponse> { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/Alias.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/Alias.java index d8853bcede3ef..f9a785d1759d8 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/Alias.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/Alias.java @@ -55,6 +55,8 @@ /** * Represents an alias, to be associated with an index + * + * @opensearch.internal */ public class Alias implements Writeable, ToXContentFragment { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesAction.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesAction.java index 33566820d9762..4d735e984c34e 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesAction.java @@ -35,6 +35,11 @@ import org.opensearch.action.ActionType; import org.opensearch.action.support.master.AcknowledgedResponse; +/** + * Transport action for listing index aliases + * + * @opensearch.internal + */ public class IndicesAliasesAction extends 
ActionType { public static final IndicesAliasesAction INSTANCE = new IndicesAliasesAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesClusterStateUpdateRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesClusterStateUpdateRequest.java index f89f8a06727c1..4a445ca92d2dd 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesClusterStateUpdateRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesClusterStateUpdateRequest.java @@ -38,6 +38,8 @@ /** * Cluster state update request that allows to add or remove aliases + * + * @opensearch.internal */ public class IndicesAliasesClusterStateUpdateRequest extends ClusterStateUpdateRequest { private final List actions; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java index 9481e6287c878..91c187ad9026a 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java @@ -72,6 +72,8 @@ /** * A request to add/remove aliases for one or more indices. 
+ * + * @opensearch.internal */ public class IndicesAliasesRequest extends AcknowledgedRequest implements ToXContentObject { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequestBuilder.java index f74363db3dcfd..13c57cc781925 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequestBuilder.java @@ -41,6 +41,8 @@ /** * Builder for request to modify many aliases at once. + * + * @opensearch.internal */ public class IndicesAliasesRequestBuilder extends AcknowledgedRequestBuilder< IndicesAliasesRequest, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/TransportIndicesAliasesAction.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/TransportIndicesAliasesAction.java index 82eb3aed7da16..2e5cf23360125 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/TransportIndicesAliasesAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/TransportIndicesAliasesAction.java @@ -72,6 +72,8 @@ /** * Add/remove aliases action + * + * @opensearch.internal */ public class TransportIndicesAliasesAction extends TransportMasterNodeAction { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/BaseAliasesRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/BaseAliasesRequestBuilder.java index 3d8fa05fb7658..82f9d9a35dd2c 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/BaseAliasesRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/BaseAliasesRequestBuilder.java @@ -39,6 +39,11 @@ import org.opensearch.client.OpenSearchClient; import org.opensearch.common.util.ArrayUtils; +/** + * Base 
request builder for listing index aliases + * + * @opensearch.internal + */ public abstract class BaseAliasesRequestBuilder< Response extends ActionResponse, Builder extends BaseAliasesRequestBuilder> extends MasterNodeReadOperationRequestBuilder< diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesAction.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesAction.java index 198fa328111b8..d45f988330010 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesAction.java @@ -34,6 +34,11 @@ import org.opensearch.action.ActionType; +/** + * Transport action for listing index aliases + * + * @opensearch.internal + */ public class GetAliasesAction extends ActionType { public static final GetAliasesAction INSTANCE = new GetAliasesAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesRequest.java index 661af82d7020d..46f2ee8765910 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesRequest.java @@ -41,6 +41,11 @@ import java.io.IOException; +/** + * Transport request for listing index aliases + * + * @opensearch.internal + */ public class GetAliasesRequest extends MasterNodeReadRequest implements AliasesRequest { private String[] indices = Strings.EMPTY_ARRAY; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesRequestBuilder.java index f7902ef73fbd5..aecbd689a647c 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesRequestBuilder.java +++ 
b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesRequestBuilder.java @@ -34,6 +34,11 @@ import org.opensearch.client.OpenSearchClient; +/** + * Transport request builder for listing index aliases + * + * @opensearch.internal + */ public class GetAliasesRequestBuilder extends BaseAliasesRequestBuilder { public GetAliasesRequestBuilder(OpenSearchClient client, GetAliasesAction action, String... aliases) { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesResponse.java index 8901a067bb020..60c0a403566d5 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesResponse.java @@ -42,6 +42,11 @@ import java.util.List; import java.util.Objects; +/** + * Transport response for listing index aliases + * + * @opensearch.internal + */ public class GetAliasesResponse extends ActionResponse { private final ImmutableOpenMap> aliases; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/TransportGetAliasesAction.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/TransportGetAliasesAction.java index fa26560e4fedf..1996b11901c3a 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/TransportGetAliasesAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/TransportGetAliasesAction.java @@ -59,6 +59,11 @@ import java.util.Set; import java.util.stream.Collectors; +/** + * Transport action for listing index aliases + * + * @opensearch.internal + */ public class TransportGetAliasesAction extends TransportMasterNodeReadAction { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(TransportGetAliasesAction.class); diff --git 
a/server/src/main/java/org/opensearch/action/admin/indices/analyze/AnalyzeAction.java b/server/src/main/java/org/opensearch/action/admin/indices/analyze/AnalyzeAction.java index 592575d9b6019..83c302ad43b7b 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/analyze/AnalyzeAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/analyze/AnalyzeAction.java @@ -59,6 +59,11 @@ import static org.opensearch.action.ValidateActions.addValidationError; +/** + * Transport action for analyzing text + * + * @opensearch.internal + */ public class AnalyzeAction extends ActionType { public static final AnalyzeAction INSTANCE = new AnalyzeAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/analyze/AnalyzeRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/analyze/AnalyzeRequestBuilder.java index 167feb7935be2..a7f21b2af16fc 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/analyze/AnalyzeRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/analyze/AnalyzeRequestBuilder.java @@ -36,6 +36,11 @@ import java.util.Map; +/** + * Transport request builder for analyzing text + * + * @opensearch.internal + */ public class AnalyzeRequestBuilder extends SingleShardOperationRequestBuilder< AnalyzeAction.Request, AnalyzeAction.Response, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/analyze/TransportAnalyzeAction.java b/server/src/main/java/org/opensearch/action/admin/indices/analyze/TransportAnalyzeAction.java index cf578af8dbacb..6ce06e086ee6d 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/analyze/TransportAnalyzeAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/analyze/TransportAnalyzeAction.java @@ -82,6 +82,8 @@ /** * Transport action used to execute analyze requests + * + * @opensearch.internal */ public class TransportAnalyzeAction extends TransportSingleShardAction { diff 
--git a/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheAction.java b/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheAction.java index b21b52f063691..f605f042a05ac 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheAction.java @@ -34,6 +34,11 @@ import org.opensearch.action.ActionType; +/** + * Transport action for clearing cache + * + * @opensearch.internal + */ public class ClearIndicesCacheAction extends ActionType { public static final ClearIndicesCacheAction INSTANCE = new ClearIndicesCacheAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheRequest.java index a9eafc7defaba..35913c2579aa9 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheRequest.java @@ -39,6 +39,11 @@ import java.io.IOException; +/** + * Transport request for clearing cache + * + * @opensearch.internal + */ public class ClearIndicesCacheRequest extends BroadcastRequest { private boolean queryCache = false; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheRequestBuilder.java index c7365a0e22e83..2a48e7d7e0eeb 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheRequestBuilder.java @@ -35,6 +35,11 @@ import 
org.opensearch.action.support.broadcast.BroadcastOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +/** + * Transport request builder for clearing cache + * + * @opensearch.internal + */ public class ClearIndicesCacheRequestBuilder extends BroadcastOperationRequestBuilder< ClearIndicesCacheRequest, ClearIndicesCacheResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheResponse.java index 58d8767697528..707a3cedf72d6 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheResponse.java @@ -44,6 +44,8 @@ /** * The response of a clear cache action. + * + * @opensearch.internal */ public class ClearIndicesCacheResponse extends BroadcastResponse { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java b/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java index baa51e023e75a..12f1c78cea0c7 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java @@ -53,6 +53,8 @@ /** * Indices clear cache action. 
+ * + * @opensearch.internal */ public class TransportClearIndicesCacheAction extends TransportBroadcastByNodeAction< ClearIndicesCacheRequest, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexAction.java index ae8347a5ce812..2dea3e415ae0d 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexAction.java @@ -34,6 +34,11 @@ import org.opensearch.action.ActionType; +/** + * Transport action for closing an index + * + * @opensearch.internal + */ public class CloseIndexAction extends ActionType { public static final CloseIndexAction INSTANCE = new CloseIndexAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexClusterStateUpdateRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexClusterStateUpdateRequest.java index b94d080a331be..4b446f9d1ad2a 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexClusterStateUpdateRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexClusterStateUpdateRequest.java @@ -36,6 +36,8 @@ /** * Cluster state update request that allows to close one or more indices + * + * @opensearch.internal */ public class CloseIndexClusterStateUpdateRequest extends IndicesClusterStateUpdateRequest { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java index 3fa1c8f066135..b16cabfda4d67 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java @@ -48,6 +48,8 @@ /** * A request to close an index. 
+ * + * @opensearch.internal */ public class CloseIndexRequest extends AcknowledgedRequest implements IndicesRequest.Replaceable { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequestBuilder.java index 66dc07e35fbaa..b3b53a0043c70 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequestBuilder.java @@ -39,6 +39,8 @@ /** * Builder for close index request + * + * @opensearch.internal */ public class CloseIndexRequestBuilder extends AcknowledgedRequestBuilder { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java index 998ffc4af7028..92e77194cf15f 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java @@ -52,6 +52,11 @@ import static java.util.Collections.emptyList; import static java.util.Collections.unmodifiableList; +/** + * Transport response for closing an index + * + * @opensearch.internal + */ public class CloseIndexResponse extends ShardsAcknowledgedResponse { private final List indices; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/close/TransportCloseIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/close/TransportCloseIndexAction.java index 13525a2a3b23e..0084977d0fdf0 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/close/TransportCloseIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/close/TransportCloseIndexAction.java @@ -61,6 +61,8 @@ /** * Close index action + * + * @opensearch.internal */ public class 
TransportCloseIndexAction extends TransportMasterNodeAction { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java b/server/src/main/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java index 8baf1f5d851ad..ef1c5640079ff 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java @@ -60,6 +60,11 @@ import java.io.IOException; import java.util.Objects; +/** + * Transport action for verifying a shard before closing an index + * + * @opensearch.internal + */ public class TransportVerifyShardBeforeCloseAction extends TransportReplicationAction< TransportVerifyShardBeforeCloseAction.ShardRequest, TransportVerifyShardBeforeCloseAction.ShardRequest, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/create/AutoCreateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/create/AutoCreateAction.java index 23cb728540ab3..191b568ada169 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/create/AutoCreateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/create/AutoCreateAction.java @@ -62,6 +62,8 @@ /** * Api that auto creates an index or data stream that originate from requests that write into an index that doesn't yet exist. 
+ * + * @opensearch.internal */ public final class AutoCreateAction extends ActionType { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexAction.java index 87bbc093fce43..220620790fa4d 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexAction.java @@ -34,6 +34,11 @@ import org.opensearch.action.ActionType; +/** + * Transport action for creating an index + * + * @opensearch.internal + */ public class CreateIndexAction extends ActionType { public static final CreateIndexAction INSTANCE = new CreateIndexAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java index 5ca6fb4226b64..aba5a60ef2804 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java @@ -46,6 +46,8 @@ /** * Cluster state update request that allows to create an index + * + * @opensearch.internal */ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequest { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java index 26ff4f1da3ba4..95837d82be7ac 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java @@ -80,6 +80,8 @@ * @see org.opensearch.client.IndicesAdminClient#create(CreateIndexRequest) * @see 
org.opensearch.client.Requests#createIndexRequest(String) * @see CreateIndexResponse + * + * @opensearch.internal */ public class CreateIndexRequest extends AcknowledgedRequest implements IndicesRequest { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequestBuilder.java index cc99f63c6a844..4c5780b87b3f2 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequestBuilder.java @@ -46,6 +46,8 @@ /** * Builder for a create index request + * + * @opensearch.internal */ public class CreateIndexRequestBuilder extends AcknowledgedRequestBuilder< CreateIndexRequest, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexResponse.java index dedc022180cda..871576d8e336a 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexResponse.java @@ -48,6 +48,8 @@ /** * A response for a create index action. + * + * @opensearch.internal */ public class CreateIndexResponse extends ShardsAcknowledgedResponse { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/create/TransportCreateIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/create/TransportCreateIndexAction.java index 2269931deafc8..859a9d6b21bd3 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/create/TransportCreateIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/create/TransportCreateIndexAction.java @@ -50,6 +50,8 @@ /** * Create index action. 
+ * + * @opensearch.internal */ public class TransportCreateIndexAction extends TransportMasterNodeAction { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/DanglingIndexInfo.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/DanglingIndexInfo.java index decca8ab4151c..cd0feae4428db 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/DanglingIndexInfo.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/DanglingIndexInfo.java @@ -41,6 +41,8 @@ /** * Contains information about a dangling index, i.e. an index that OpenSearch has found * on-disk but is not present in the cluster state. + * + * @opensearch.internal */ public class DanglingIndexInfo implements Writeable { private final String nodeId; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/DeleteDanglingIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/DeleteDanglingIndexAction.java index f151a3ea46465..6559ef4cd89bd 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/DeleteDanglingIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/DeleteDanglingIndexAction.java @@ -37,6 +37,8 @@ /** * This action causes a dangling index to be considered as deleted by the cluster. 
+ * + * @opensearch.internal */ public class DeleteDanglingIndexAction extends ActionType { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/DeleteDanglingIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/DeleteDanglingIndexRequest.java index 733fb0a24ebec..4fad5498de375 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/DeleteDanglingIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/DeleteDanglingIndexRequest.java @@ -43,6 +43,8 @@ /** * Represents a request to delete a particular dangling index, specified by its UUID. The {@link #acceptDataLoss} * flag must also be explicitly set to true, or later validation will fail. + * + * @opensearch.internal */ public class DeleteDanglingIndexRequest extends AcknowledgedRequest { private final String indexUUID; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/TransportDeleteDanglingIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/TransportDeleteDanglingIndexAction.java index baa173a0cdf88..495e8cb1fcac8 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/TransportDeleteDanglingIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/TransportDeleteDanglingIndexAction.java @@ -70,6 +70,8 @@ * Implements the deletion of a dangling index. When handling a {@link DeleteDanglingIndexAction}, * this class first checks that such a dangling index exists. It then submits a cluster state update * to add the index to the index graveyard. 
+ * + * @opensearch.internal */ public class TransportDeleteDanglingIndexAction extends TransportMasterNodeAction { private static final Logger logger = LogManager.getLogger(TransportDeleteDanglingIndexAction.class); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/FindDanglingIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/FindDanglingIndexAction.java index fa55661788d3e..f308728c7a85e 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/FindDanglingIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/FindDanglingIndexAction.java @@ -36,6 +36,8 @@ /** * Represents a request to find a particular dangling index by UUID. + * + * @opensearch.internal */ public class FindDanglingIndexAction extends ActionType { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/FindDanglingIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/FindDanglingIndexRequest.java index c5a3621bd8340..5c1e3c0a23eae 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/FindDanglingIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/FindDanglingIndexRequest.java @@ -39,6 +39,11 @@ import java.io.IOException; +/** + * Transport request for finding a dangling index + * + * @opensearch.internal + */ public class FindDanglingIndexRequest extends BaseNodesRequest { private final String indexUUID; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/FindDanglingIndexResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/FindDanglingIndexResponse.java index c1b1a2901e5e2..8c99ad8c55f10 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/FindDanglingIndexResponse.java +++ 
b/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/FindDanglingIndexResponse.java @@ -44,6 +44,8 @@ /** * Models a response to a {@link FindDanglingIndexRequest}. A find request queries every node in the * cluster looking for a dangling index with a specific UUID. + * + * @opensearch.internal */ public class FindDanglingIndexResponse extends BaseNodesResponse { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/NodeFindDanglingIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/NodeFindDanglingIndexRequest.java index ddb16ec51a080..6026dd10c607b 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/NodeFindDanglingIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/NodeFindDanglingIndexRequest.java @@ -40,6 +40,8 @@ /** * Used when querying every node in the cluster for a specific dangling index. + * + * @opensearch.internal */ public class NodeFindDanglingIndexRequest extends BaseNodeRequest { private final String indexUUID; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/NodeFindDanglingIndexResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/NodeFindDanglingIndexResponse.java index 0f21db3eb7a91..cfe71a7acf9a3 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/NodeFindDanglingIndexResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/NodeFindDanglingIndexResponse.java @@ -43,6 +43,8 @@ /** * Used when querying every node in the cluster for a specific dangling index. 
+ * + * @opensearch.internal */ public class NodeFindDanglingIndexResponse extends BaseNodeResponse { /** diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/TransportFindDanglingIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/TransportFindDanglingIndexAction.java index 0da65f79db2af..3119625c6b796 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/TransportFindDanglingIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/TransportFindDanglingIndexAction.java @@ -50,6 +50,8 @@ /** * Finds a specified dangling index by its UUID, searching across all nodes. + * + * @opensearch.internal */ public class TransportFindDanglingIndexAction extends TransportNodesAction< FindDanglingIndexRequest, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/ImportDanglingIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/ImportDanglingIndexAction.java index 1f6bbdecc7c12..5f7a096b1d749 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/ImportDanglingIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/ImportDanglingIndexAction.java @@ -37,6 +37,8 @@ /** * Represents a request to import a particular dangling index. 
+ * + * @opensearch.internal */ public class ImportDanglingIndexAction extends ActionType { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/ImportDanglingIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/ImportDanglingIndexRequest.java index b154d048a10d6..73fbad248b8b1 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/ImportDanglingIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/ImportDanglingIndexRequest.java @@ -45,6 +45,8 @@ * Represents a request to import a particular dangling index, specified * by its UUID. The {@link #acceptDataLoss} flag must also be * explicitly set to true, or later validation will fail. + * + * @opensearch.internal */ public class ImportDanglingIndexRequest extends AcknowledgedRequest { private final String indexUUID; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/TransportImportDanglingIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/TransportImportDanglingIndexAction.java index c648f9eea837a..2010515249371 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/TransportImportDanglingIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/TransportImportDanglingIndexAction.java @@ -62,6 +62,8 @@ * Implements the import of a dangling index. When handling a {@link ImportDanglingIndexAction}, * this class first checks that such a dangling index exists. It then calls {@link LocalAllocateDangledIndices} * to perform the actual allocation. 
+ * + * @opensearch.internal */ public class TransportImportDanglingIndexAction extends HandledTransportAction { private static final Logger logger = LogManager.getLogger(TransportImportDanglingIndexAction.class); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesAction.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesAction.java index 0e797ca930400..d8ace959c61d3 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesAction.java @@ -36,6 +36,8 @@ /** * Represents a request to list all dangling indices known to the cluster. + * + * @opensearch.internal */ public class ListDanglingIndicesAction extends ActionType { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesRequest.java index 6c2bd386119a2..ba5fd8fc139ec 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesRequest.java @@ -39,6 +39,11 @@ import java.io.IOException; +/** + * Transport request for listing all dangling indices + * + * @opensearch.internal + */ public class ListDanglingIndicesRequest extends BaseNodesRequest { /** * Filter the response by index UUID. Leave as null to find all indices. 
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesResponse.java index d412d3e323d68..361dfa99e893f 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesResponse.java @@ -57,6 +57,8 @@ * cluster and aggregates their responses. When the aggregated response is converted to {@link XContent}, * information for each dangling index is presented under the "dangling_indices" key. If any nodes * in the cluster failed to answer, the details are presented under the "_nodes.failures" key. + * + * @opensearch.internal */ public class ListDanglingIndicesResponse extends BaseNodesResponse implements StatusToXContentObject { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/NodeListDanglingIndicesRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/NodeListDanglingIndicesRequest.java index ad0ed102145b5..9b737fff8316e 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/NodeListDanglingIndicesRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/NodeListDanglingIndicesRequest.java @@ -40,6 +40,8 @@ /** * Used when querying every node in the cluster for dangling indices, in response to a list request. 
+ * + * @opensearch.internal */ public class NodeListDanglingIndicesRequest extends BaseNodeRequest { /** diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/NodeListDanglingIndicesResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/NodeListDanglingIndicesResponse.java index 3495a028af3c0..5f0fa78ff231a 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/NodeListDanglingIndicesResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/NodeListDanglingIndicesResponse.java @@ -43,6 +43,8 @@ /** * Used when querying every node in the cluster for dangling indices, in response to a list request. + * + * @opensearch.internal */ public class NodeListDanglingIndicesResponse extends BaseNodeResponse { private final List indexMetaData; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/TransportListDanglingIndicesAction.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/TransportListDanglingIndicesAction.java index 1274010bfb8a2..6641ff172c2f0 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/TransportListDanglingIndicesAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/TransportListDanglingIndicesAction.java @@ -52,6 +52,8 @@ /** * Implements the listing of all dangling indices. All nodes in the cluster are queried, and * their answers aggregated. Finding dangling indices is performed in {@link DanglingIndicesState}. 
+ * + * @opensearch.internal */ public class TransportListDanglingIndicesAction extends TransportNodesAction< ListDanglingIndicesRequest, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/datastream/CreateDataStreamAction.java b/server/src/main/java/org/opensearch/action/admin/indices/datastream/CreateDataStreamAction.java index 6c5c417825abf..8fd096410560d 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/datastream/CreateDataStreamAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/datastream/CreateDataStreamAction.java @@ -58,6 +58,11 @@ import java.io.IOException; import java.util.Objects; +/** + * Transport action for creating a datastream + * + * @opensearch.internal + */ public class CreateDataStreamAction extends ActionType { public static final CreateDataStreamAction INSTANCE = new CreateDataStreamAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/datastream/DataStreamsStatsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/datastream/DataStreamsStatsAction.java index cca6b83015ecc..477acd6a2255f 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/datastream/DataStreamsStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/datastream/DataStreamsStatsAction.java @@ -78,6 +78,11 @@ import java.util.SortedMap; import java.util.stream.Stream; +/** + * Transport action for retrieving datastream stats + * + * @opensearch.internal + */ public class DataStreamsStatsAction extends ActionType { public static final DataStreamsStatsAction INSTANCE = new DataStreamsStatsAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/datastream/DeleteDataStreamAction.java b/server/src/main/java/org/opensearch/action/admin/indices/datastream/DeleteDataStreamAction.java index 5d79f51cbab65..30a51e393609e 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/indices/datastream/DeleteDataStreamAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/datastream/DeleteDataStreamAction.java @@ -73,6 +73,11 @@ import static org.opensearch.action.ValidateActions.addValidationError; +/** + * Transport action for deleting a datastream + * + * @opensearch.internal + */ public class DeleteDataStreamAction extends ActionType { private static final Logger logger = LogManager.getLogger(DeleteDataStreamAction.class); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/datastream/GetDataStreamAction.java b/server/src/main/java/org/opensearch/action/admin/indices/datastream/GetDataStreamAction.java index d519c80f6d6c2..2747c68a2849a 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/datastream/GetDataStreamAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/datastream/GetDataStreamAction.java @@ -73,6 +73,11 @@ import java.util.Objects; import java.util.stream.Collectors; +/** + * Transport action for getting a datastream + * + * @opensearch.internal + */ public class GetDataStreamAction extends ActionType { public static final GetDataStreamAction INSTANCE = new GetDataStreamAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexAction.java index 6f0dd781b4cec..696c1244c7504 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexAction.java @@ -35,6 +35,11 @@ import org.opensearch.action.ActionType; import org.opensearch.action.support.master.AcknowledgedResponse; +/** + * Transport action for deleting an index + * + * @opensearch.internal + */ public class DeleteIndexAction extends ActionType { public static final DeleteIndexAction INSTANCE = new 
DeleteIndexAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexClusterStateUpdateRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexClusterStateUpdateRequest.java index a6d06833ebae9..5088d021ca9b8 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexClusterStateUpdateRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexClusterStateUpdateRequest.java @@ -35,6 +35,8 @@ /** * Cluster state update request that allows to close one or more indices + * + * @opensearch.internal */ public class DeleteIndexClusterStateUpdateRequest extends IndicesClusterStateUpdateRequest { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequest.java index c29072b135b85..7475121a910c4 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequest.java @@ -46,6 +46,8 @@ /** * A request to delete an index. Best created with {@link org.opensearch.client.Requests#deleteIndexRequest(String)}. 
+ * + * @opensearch.internal */ public class DeleteIndexRequest extends AcknowledgedRequest implements IndicesRequest.Replaceable { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java index 741f46d44d8b7..33f6342e94139 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java @@ -37,6 +37,11 @@ import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; +/** + * Transport request builder for deleting an index + * + * @opensearch.internal + */ public class DeleteIndexRequestBuilder extends AcknowledgedRequestBuilder< DeleteIndexRequest, AcknowledgedResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/delete/TransportDeleteIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/delete/TransportDeleteIndexAction.java index ec8c0fe8ed011..70cb6d8115f15 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/delete/TransportDeleteIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/delete/TransportDeleteIndexAction.java @@ -60,6 +60,8 @@ /** * Delete index action. 
+ * + * @opensearch.internal */ public class TransportDeleteIndexAction extends TransportMasterNodeAction { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsAction.java index 1eae6b99bc1ee..6bd0cddc00d07 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsAction.java @@ -34,6 +34,11 @@ import org.opensearch.action.ActionType; +/** + * Transport action for checking if an index exists + * + * @opensearch.internal + */ public class IndicesExistsAction extends ActionType { public static final IndicesExistsAction INSTANCE = new IndicesExistsAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsRequest.java index d511c18f5505e..89c237a990dc8 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsRequest.java @@ -44,6 +44,11 @@ import static org.opensearch.action.ValidateActions.addValidationError; +/** + * Transport request for checking if an index exists + * + * @opensearch.internal + */ public class IndicesExistsRequest extends MasterNodeReadRequest implements IndicesRequest.Replaceable { private String[] indices = Strings.EMPTY_ARRAY; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsRequestBuilder.java index bb38d14530588..9b83d2b29302c 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsRequestBuilder.java @@ -35,6 +35,11 @@ import org.opensearch.action.support.master.MasterNodeReadOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +/** + * Transport request builder for checking if an index exists + * + * @opensearch.internal + */ public class IndicesExistsRequestBuilder extends MasterNodeReadOperationRequestBuilder< IndicesExistsRequest, IndicesExistsResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsResponse.java index 336bb6147d07d..5bf51fe2ba653 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsResponse.java @@ -38,6 +38,11 @@ import java.io.IOException; +/** + * Transport response for checking if an index exists + * + * @opensearch.internal + */ public class IndicesExistsResponse extends ActionResponse { private boolean exists; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java index 076e2ecc2c119..49ab15dadb19a 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java @@ -51,6 +51,8 @@ /** * Indices exists action. 
+ * + * @opensearch.internal */ public class TransportIndicesExistsAction extends TransportMasterNodeReadAction { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushAction.java b/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushAction.java index ace63f2346e30..288a46977521e 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushAction.java @@ -34,6 +34,11 @@ import org.opensearch.action.ActionType; +/** + * Transport action for flushing one or more indices + * + * @opensearch.internal + */ public class FlushAction extends ActionType { public static final FlushAction INSTANCE = new FlushAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushRequest.java index 1020a0cfb33a9..0ff502dee8396 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushRequest.java @@ -51,6 +51,8 @@ * @see org.opensearch.client.Requests#flushRequest(String...) 
* @see org.opensearch.client.IndicesAdminClient#flush(FlushRequest) * @see FlushResponse + * + * @opensearch.internal */ public class FlushRequest extends BroadcastRequest { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushRequestBuilder.java index c72a55b1f8519..d0cbd1d27fba6 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushRequestBuilder.java @@ -35,6 +35,11 @@ import org.opensearch.action.support.broadcast.BroadcastOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +/** + * Transport request builder for flushing one or more indices + * + * @opensearch.internal + */ public class FlushRequestBuilder extends BroadcastOperationRequestBuilder { public FlushRequestBuilder(OpenSearchClient client, FlushAction action) { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushResponse.java index 9ab04dfe8c081..0345f42ab52f1 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushResponse.java @@ -44,6 +44,8 @@ /** * A response to flush action. 
+ * + * @opensearch.internal */ public class FlushResponse extends BroadcastResponse { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/flush/ShardFlushRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/flush/ShardFlushRequest.java index d49bf04af623b..f56a5cc6dd5e2 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/flush/ShardFlushRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/flush/ShardFlushRequest.java @@ -40,6 +40,11 @@ import java.io.IOException; +/** + * Transport request for flushing one or more indices + * + * @opensearch.internal + */ public class ShardFlushRequest extends ReplicationRequest { private final FlushRequest request; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/flush/TransportFlushAction.java b/server/src/main/java/org/opensearch/action/admin/indices/flush/TransportFlushAction.java index 552f88400a87d..28faed924de6e 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/flush/TransportFlushAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/flush/TransportFlushAction.java @@ -46,6 +46,8 @@ /** * Flush ActionType. 
+ * + * @opensearch.internal */ public class TransportFlushAction extends TransportBroadcastReplicationAction< FlushRequest, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/flush/TransportShardFlushAction.java b/server/src/main/java/org/opensearch/action/admin/indices/flush/TransportShardFlushAction.java index 53e774306e746..045bbe1d2a5ff 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/flush/TransportShardFlushAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/flush/TransportShardFlushAction.java @@ -55,6 +55,11 @@ import java.io.IOException; +/** + * Transport action for flushing one or more indices + * + * @opensearch.internal + */ public class TransportShardFlushAction extends TransportReplicationAction { public static final String NAME = FlushAction.NAME + "[s]"; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeAction.java b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeAction.java index 07b337e61323e..06b639fadc1c7 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeAction.java @@ -34,6 +34,11 @@ import org.opensearch.action.ActionType; +/** + * Transport action to request force merging the segments of one or more indices. 
+ * + * @opensearch.internal + */ public class ForceMergeAction extends ActionType { public static final ForceMergeAction INSTANCE = new ForceMergeAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequest.java index 605f39ffd1312..244f76ce8b798 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequest.java @@ -54,6 +54,8 @@ * @see org.opensearch.client.Requests#forceMergeRequest(String...) * @see org.opensearch.client.IndicesAdminClient#forceMerge(ForceMergeRequest) * @see ForceMergeResponse + * + * @opensearch.internal */ public class ForceMergeRequest extends BroadcastRequest { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java index 2d0e7d4401a01..cff05f194cac4 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java @@ -41,6 +41,8 @@ * {@link #setMaxNumSegments(int)} allows to control the number of segments to force * merge down to. By default, will cause the force merge process to merge down * to half the configured number of segments. 
+ * + * @opensearch.internal */ public class ForceMergeRequestBuilder extends BroadcastOperationRequestBuilder< ForceMergeRequest, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeResponse.java index c898dad8bcdc9..4f8cd7efb59f6 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeResponse.java @@ -44,6 +44,8 @@ /** * A response for force merge action. + * + * @opensearch.internal */ public class ForceMergeResponse extends BroadcastResponse { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/TransportForceMergeAction.java b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/TransportForceMergeAction.java index 22f4b912cbe0b..dc48e3c93176a 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/TransportForceMergeAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/TransportForceMergeAction.java @@ -54,6 +54,8 @@ /** * ForceMerge index/indices action. + * + * @opensearch.internal */ public class TransportForceMergeAction extends TransportBroadcastByNodeAction< ForceMergeRequest, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexAction.java index 6d5875a012b91..92200c71ef685 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexAction.java @@ -34,6 +34,11 @@ import org.opensearch.action.ActionType; +/** + * Transport action to get information about an index. 
+ * + * @opensearch.internal + */ public class GetIndexAction extends ActionType { public static final GetIndexAction INSTANCE = new GetIndexAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequest.java index e1e0b2c54c904..909092078b6ae 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequest.java @@ -42,6 +42,8 @@ /** * A request to retrieve information about an index. + * + * @opensearch.internal */ public class GetIndexRequest extends ClusterInfoRequest { public enum Feature { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequestBuilder.java index 6f93614fe3487..3019191e5570e 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequestBuilder.java @@ -35,6 +35,11 @@ import org.opensearch.action.support.master.info.ClusterInfoRequestBuilder; import org.opensearch.client.OpenSearchClient; +/** + * Transport request builder to get information about an index. + * + * @opensearch.internal + */ public class GetIndexRequestBuilder extends ClusterInfoRequestBuilder { public GetIndexRequestBuilder(OpenSearchClient client, GetIndexAction action, String... 
indices) { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexResponse.java index 4465dc88fe87d..e93fbe86e4ece 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexResponse.java @@ -56,6 +56,8 @@ /** * A response for a get index action. + * + * @opensearch.internal */ public class GetIndexResponse extends ActionResponse implements ToXContentObject { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/get/TransportGetIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/get/TransportGetIndexAction.java index 0cd3214307359..493d3354a1b70 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/get/TransportGetIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/get/TransportGetIndexAction.java @@ -58,6 +58,8 @@ /** * Get index action. + * + * @opensearch.internal */ public class TransportGetIndexAction extends TransportClusterInfoAction { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsAction.java index 320cfa622f11a..2ddd763be20fd 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsAction.java @@ -34,6 +34,11 @@ import org.opensearch.action.ActionType; +/** + * Transport action to get field mappings. 
+ * + * @opensearch.internal + */ public class GetFieldMappingsAction extends ActionType { public static final GetFieldMappingsAction INSTANCE = new GetFieldMappingsAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsIndexRequest.java index 961662ecdcf7e..176eca5e35cec 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsIndexRequest.java @@ -43,6 +43,11 @@ import java.io.IOException; +/** + * Transport request to get field mappings. + * + * @opensearch.internal + */ public class GetFieldMappingsIndexRequest extends SingleShardRequest { private final boolean includeDefaults; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsRequest.java index e6a2ad3187250..db00f3d5244ac 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsRequest.java @@ -49,6 +49,8 @@ * * Note: there is a new class with the same name for the Java HLRC that uses a typeless format. * Any changes done to this class should go to that client class as well. 
+ * + * @opensearch.internal */ public class GetFieldMappingsRequest extends ActionRequest implements IndicesRequest.Replaceable { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsRequestBuilder.java index 4a8c624e7e06e..ebc0c015c5140 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsRequestBuilder.java @@ -37,7 +37,11 @@ import org.opensearch.client.OpenSearchClient; import org.opensearch.common.util.ArrayUtils; -/** A helper class to build {@link GetFieldMappingsRequest} objects */ +/** + * A helper class to build {@link GetFieldMappingsRequest} objects + * + * @opensearch.internal + **/ public class GetFieldMappingsRequestBuilder extends ActionRequestBuilder { public GetFieldMappingsRequestBuilder(OpenSearchClient client, GetFieldMappingsAction action, String... indices) { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java index 12024ef455a32..c155e5d63f97b 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java @@ -64,6 +64,8 @@ * * Note: there is a new class with the same name for the Java HLRC that uses a typeless format. * Any changes done to this class should go to that client class as well. 
+ * + * @opensearch.internal */ public class GetFieldMappingsResponse extends ActionResponse implements ToXContentObject { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsAction.java index 6d9ed5ba0411a..a78625a1595ad 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsAction.java @@ -34,6 +34,11 @@ import org.opensearch.action.ActionType; +/** + * Transport action to get index mappings. + * + * @opensearch.internal + */ public class GetMappingsAction extends ActionType { public static final GetMappingsAction INSTANCE = new GetMappingsAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequest.java index ae78f5f3a0b30..2c9bec8398b66 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequest.java @@ -38,6 +38,11 @@ import java.io.IOException; +/** + * Transport request to get index mappings. 
+ * + * @opensearch.internal + */ public class GetMappingsRequest extends ClusterInfoRequest { public GetMappingsRequest() {} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java index 8401272353eaf..85bf8c2ffd9c6 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java @@ -35,6 +35,11 @@ import org.opensearch.action.support.master.info.ClusterInfoRequestBuilder; import org.opensearch.client.OpenSearchClient; +/** + * Transport request builder to get index mappings. + * + * @opensearch.internal + */ public class GetMappingsRequestBuilder extends ClusterInfoRequestBuilder< GetMappingsRequest, GetMappingsResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsResponse.java index 3be8e75be7290..643ed719b5e2a 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsResponse.java @@ -47,6 +47,11 @@ import java.io.IOException; +/** + * Transport response to get index mappings. 
+ * + * @opensearch.internal + */ public class GetMappingsResponse extends ActionResponse implements ToXContentFragment { private static final ParseField MAPPINGS = new ParseField("mappings"); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetFieldMappingsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetFieldMappingsAction.java index bdb5222a6dcba..93f76f42b2f05 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetFieldMappingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetFieldMappingsAction.java @@ -50,6 +50,11 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.unmodifiableMap; +/** + * Transport action to get field mappings. + * + * @opensearch.internal + */ public class TransportGetFieldMappingsAction extends HandledTransportAction { private final ClusterService clusterService; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java index ca07475f0deab..64f76db5e1549 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java @@ -69,6 +69,8 @@ /** * Transport action used to retrieve the mappings related to fields that belong to a specific index + * + * @opensearch.internal */ public class TransportGetFieldMappingsIndexAction extends TransportSingleShardAction< GetFieldMappingsIndexRequest, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java 
b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java index 3f6cb8ed35af9..1edbba547a917 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java @@ -50,6 +50,11 @@ import java.io.IOException; +/** + * Transport action to get index mappings. + * + * @opensearch.internal + */ public class TransportGetMappingsAction extends TransportClusterInfoAction { private static final Logger logger = LogManager.getLogger(TransportGetMappingsAction.class); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/AutoPutMappingAction.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/AutoPutMappingAction.java index 72cfcdaffca31..f2430eb54db9b 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/AutoPutMappingAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/AutoPutMappingAction.java @@ -35,6 +35,11 @@ import org.opensearch.action.ActionType; import org.opensearch.action.support.master.AcknowledgedResponse; +/** + * Transport action to automatically put field mappings. 
+ * + * @opensearch.internal + */ public class AutoPutMappingAction extends ActionType { public static final AutoPutMappingAction INSTANCE = new AutoPutMappingAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingAction.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingAction.java index 4f90e38ac4416..8bca1b59ee2e2 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingAction.java @@ -35,6 +35,11 @@ import org.opensearch.action.ActionType; import org.opensearch.action.support.master.AcknowledgedResponse; +/** + * Transport action to put field mappings. + * + * @opensearch.internal + */ public class PutMappingAction extends ActionType { public static final PutMappingAction INSTANCE = new PutMappingAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingClusterStateUpdateRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingClusterStateUpdateRequest.java index 27081048fcdae..8d51182d838cc 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingClusterStateUpdateRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingClusterStateUpdateRequest.java @@ -36,6 +36,8 @@ /** * Cluster state update request that allows to put a mapping + * + * @opensearch.internal */ public class PutMappingClusterStateUpdateRequest extends IndicesClusterStateUpdateRequest { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java index be3e676a4a1a2..85fd74f0762a5 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java +++ 
b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java @@ -74,6 +74,8 @@ * @see org.opensearch.client.Requests#putMappingRequest(String...) * @see org.opensearch.client.IndicesAdminClient#putMapping(PutMappingRequest) * @see AcknowledgedResponse + * + * @opensearch.internal */ public class PutMappingRequest extends AcknowledgedRequest implements IndicesRequest.Replaceable, ToXContentObject { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java index 3ef96254b3f9b..78115e1fab4ec 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java @@ -44,6 +44,8 @@ /** * Builder for a put mapping request + * + * @opensearch.internal */ public class PutMappingRequestBuilder extends AcknowledgedRequestBuilder< PutMappingRequest, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/TransportAutoPutMappingAction.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/TransportAutoPutMappingAction.java index a172fce831c8f..5252fd24fd2fa 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/TransportAutoPutMappingAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/TransportAutoPutMappingAction.java @@ -50,6 +50,11 @@ import java.io.IOException; +/** + * Transport action to automatically put field mappings. 
+ * + * @opensearch.internal + */ public class TransportAutoPutMappingAction extends TransportMasterNodeAction { private final MetadataMappingService metadataMappingService; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/TransportPutMappingAction.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/TransportPutMappingAction.java index f1093a15a3d26..ec5a92ada4454 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/TransportPutMappingAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/TransportPutMappingAction.java @@ -63,6 +63,8 @@ /** * Put mapping action. + * + * @opensearch.internal */ public class TransportPutMappingAction extends TransportMasterNodeAction { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexAction.java index 1b80e5cb8a347..5c4302d94e4a6 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexAction.java @@ -34,6 +34,11 @@ import org.opensearch.action.ActionType; +/** + * Transport action to open an index. 
+ * + * @opensearch.internal + */ public class OpenIndexAction extends ActionType { public static final OpenIndexAction INSTANCE = new OpenIndexAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexClusterStateUpdateRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexClusterStateUpdateRequest.java index bf299d103ba78..a36c2744f7669 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexClusterStateUpdateRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexClusterStateUpdateRequest.java @@ -36,6 +36,8 @@ /** * Cluster state update request that allows to open one or more indices + * + * @opensearch.internal */ public class OpenIndexClusterStateUpdateRequest extends IndicesClusterStateUpdateRequest { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequest.java index be0e0254edff6..c6c1c2dc8f0cb 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequest.java @@ -47,6 +47,8 @@ /** * A request to open an index. 
+ * + * @opensearch.internal */ public class OpenIndexRequest extends AcknowledgedRequest implements IndicesRequest.Replaceable { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequestBuilder.java index 5aadd81cc8838..bf09c3f173491 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequestBuilder.java @@ -39,6 +39,8 @@ /** * Builder for for open index request + * + * @opensearch.internal */ public class OpenIndexRequestBuilder extends AcknowledgedRequestBuilder { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexResponse.java index 4b811b215d717..f7bd4cf31aa17 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexResponse.java @@ -42,6 +42,8 @@ /** * A response for a open index action. 
+ * + * @opensearch.internal */ public class OpenIndexResponse extends ShardsAcknowledgedResponse { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/open/TransportOpenIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/open/TransportOpenIndexAction.java index 05b3bfd7a885e..6cd3c0682e851 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/open/TransportOpenIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/open/TransportOpenIndexAction.java @@ -57,6 +57,8 @@ /** * Open index action + * + * @opensearch.internal */ public class TransportOpenIndexAction extends TransportMasterNodeAction { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockAction.java b/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockAction.java index 482f543ae6b04..3bca633554908 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockAction.java @@ -34,6 +34,11 @@ import org.opensearch.action.ActionType; +/** + * Transport action to add an index block. 
+ * + * @opensearch.internal + */ public class AddIndexBlockAction extends ActionType { public static final AddIndexBlockAction INSTANCE = new AddIndexBlockAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockClusterStateUpdateRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockClusterStateUpdateRequest.java index c934cad44b097..4b44624ece303 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockClusterStateUpdateRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockClusterStateUpdateRequest.java @@ -36,6 +36,8 @@ /** * Cluster state update request that allows to add a block to one or more indices + * + * @opensearch.internal */ public class AddIndexBlockClusterStateUpdateRequest extends IndicesClusterStateUpdateRequest { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequest.java index c2f24d3e927b5..7d208b5e0ac77 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequest.java @@ -48,6 +48,8 @@ /** * A request to add a block to an index. 
+ * + * @opensearch.internal */ public class AddIndexBlockRequest extends AcknowledgedRequest implements IndicesRequest.Replaceable { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequestBuilder.java index 074e7fc5f6664..8322ba19f433e 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequestBuilder.java @@ -39,6 +39,8 @@ /** * Builder for add index block request + * + * @opensearch.internal */ public class AddIndexBlockRequestBuilder extends AcknowledgedRequestBuilder< AddIndexBlockRequest, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockResponse.java index 036a8ec635efc..4b0e3153258c3 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockResponse.java @@ -50,6 +50,11 @@ import static java.util.Collections.unmodifiableList; +/** + * Transport response to add an index block. 
+ * + * @opensearch.internal + */ public class AddIndexBlockResponse extends ShardsAcknowledgedResponse { private final List indices; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/readonly/TransportAddIndexBlockAction.java b/server/src/main/java/org/opensearch/action/admin/indices/readonly/TransportAddIndexBlockAction.java index a58d199287ff7..80af0a2c2dcc9 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/readonly/TransportAddIndexBlockAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/readonly/TransportAddIndexBlockAction.java @@ -62,6 +62,8 @@ * in-flight writes to an index have been completed prior to the response being returned. These actions * are done in multiple cluster state updates (at least two). See also {@link TransportVerifyShardIndexBlockAction} * for the eventual delegation for shard-level verification. + * + * @opensearch.internal */ public class TransportAddIndexBlockAction extends TransportMasterNodeAction { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/readonly/TransportVerifyShardIndexBlockAction.java b/server/src/main/java/org/opensearch/action/admin/indices/readonly/TransportVerifyShardIndexBlockAction.java index c96a94476c1fb..0751e89985189 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/readonly/TransportVerifyShardIndexBlockAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/readonly/TransportVerifyShardIndexBlockAction.java @@ -64,6 +64,8 @@ * and are no longer executing any operations in violation of that block. This action * requests all operation permits of the shard in order to wait for all write operations * to complete. 
+ * + * @opensearch.internal */ public class TransportVerifyShardIndexBlockAction extends TransportReplicationAction< TransportVerifyShardIndexBlockAction.ShardRequest, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryAction.java b/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryAction.java index ce764f8890f86..ac00e7f404b37 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryAction.java @@ -36,6 +36,8 @@ /** * Recovery information action + * + * @opensearch.internal */ public class RecoveryAction extends ActionType { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryRequest.java index 2457ca3fc5b90..8b1fe1e88da22 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryRequest.java @@ -42,6 +42,8 @@ /** * Request for recovery information + * + * @opensearch.internal */ public class RecoveryRequest extends BroadcastRequest { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryRequestBuilder.java index 12c58c22fe8c8..99a1fb430fb28 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryRequestBuilder.java @@ -37,6 +37,8 @@ /** * Recovery information request builder. 
+ * + * @opensearch.internal */ public class RecoveryRequestBuilder extends BroadcastOperationRequestBuilder { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryResponse.java index 9c2b380392b03..e95110c732d79 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryResponse.java @@ -46,6 +46,8 @@ /** * Information regarding the recovery state of indices and their associated shards. + * + * @opensearch.internal */ public class RecoveryResponse extends BroadcastResponse { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/recovery/TransportRecoveryAction.java b/server/src/main/java/org/opensearch/action/admin/indices/recovery/TransportRecoveryAction.java index 7c3666e44f093..132354ed83b6c 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/recovery/TransportRecoveryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/recovery/TransportRecoveryAction.java @@ -60,6 +60,8 @@ /** * Transport action for shard recovery operation. This transport action does not actually * perform shard recovery, it only reports on recoveries (both active and complete). 
+ * + * @opensearch.internal */ public class TransportRecoveryAction extends TransportBroadcastByNodeAction { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshAction.java b/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshAction.java index 6b7006c3b70ce..c321700ae65bd 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshAction.java @@ -34,6 +34,11 @@ import org.opensearch.action.ActionType; +/** + * Refresh information action + * + * @opensearch.internal + */ public class RefreshAction extends ActionType { public static final RefreshAction INSTANCE = new RefreshAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshRequest.java index c113527d1cc2f..4f078f8d9a23d 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshRequest.java @@ -45,6 +45,8 @@ * @see org.opensearch.client.Requests#refreshRequest(String...) * @see org.opensearch.client.IndicesAdminClient#refresh(RefreshRequest) * @see RefreshResponse + * + * @opensearch.internal */ public class RefreshRequest extends BroadcastRequest { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshRequestBuilder.java index 47a16f2fc968d..5b27ae13f24be 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshRequestBuilder.java @@ -39,6 +39,8 @@ * A refresh request making all operations performed since the last refresh available for search. 
The (near) real-time * capabilities depends on the index engine used. For example, the internal one requires refresh to be called, but by * default a refresh is scheduled periodically. + * + * @opensearch.internal */ public class RefreshRequestBuilder extends BroadcastOperationRequestBuilder { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshResponse.java index deae44b1c4676..19cd70410317e 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshResponse.java @@ -44,6 +44,8 @@ /** * The response of a refresh action. + * + * @opensearch.internal */ public class RefreshResponse extends BroadcastResponse { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/refresh/TransportRefreshAction.java b/server/src/main/java/org/opensearch/action/admin/indices/refresh/TransportRefreshAction.java index 97e8344d32664..7fb4a9908c422 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/refresh/TransportRefreshAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/refresh/TransportRefreshAction.java @@ -48,6 +48,8 @@ /** * Refresh action. 
+ * + * @opensearch.internal */ public class TransportRefreshAction extends TransportBroadcastReplicationAction< RefreshRequest, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/refresh/TransportShardRefreshAction.java b/server/src/main/java/org/opensearch/action/admin/indices/refresh/TransportShardRefreshAction.java index c58d8c520ad0b..9c955c8157459 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/refresh/TransportShardRefreshAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/refresh/TransportShardRefreshAction.java @@ -49,6 +49,11 @@ import java.io.IOException; +/** + * Transport action for shard-level refresh + * + * @opensearch.internal + */ public class TransportShardRefreshAction extends TransportReplicationAction< BasicReplicationRequest, BasicReplicationRequest, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/resolve/ResolveIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/resolve/ResolveIndexAction.java index 8a9df3f59bc4c..2eaae76f13f6b 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/resolve/ResolveIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/resolve/ResolveIndexAction.java @@ -79,6 +79,11 @@ import java.util.TreeMap; import java.util.stream.StreamSupport; +/** + * Transport action to resolve an index. 
+ * + * @opensearch.internal + */ public class ResolveIndexAction extends ActionType { public static final ResolveIndexAction INSTANCE = new ResolveIndexAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/Condition.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/Condition.java index 7b2707ed16ca2..0e7f0dd16ca03 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/Condition.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/Condition.java @@ -41,6 +41,8 @@ /** * Base class for rollover request conditions + * + * @opensearch.internal */ public abstract class Condition implements NamedWriteable, ToXContentFragment { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/MaxAgeCondition.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/MaxAgeCondition.java index 20c45d88c8d32..5c2fc43f94b3c 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/MaxAgeCondition.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/MaxAgeCondition.java @@ -43,6 +43,8 @@ /** * Condition for index maximum age. Evaluates to true * when the index is at least {@link #value} old + * + * @opensearch.internal */ public class MaxAgeCondition extends Condition { public static final String NAME = "max_age"; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/MaxDocsCondition.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/MaxDocsCondition.java index 8491b381fd9d3..6d67c61cf9758 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/MaxDocsCondition.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/MaxDocsCondition.java @@ -42,6 +42,8 @@ /** * Condition for maximum index docs. 
Evaluates to true * when the index has at least {@link #value} docs + * + * @opensearch.internal */ public class MaxDocsCondition extends Condition { public static final String NAME = "max_docs"; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/MaxSizeCondition.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/MaxSizeCondition.java index 147d81a52961d..720a81fc167bb 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/MaxSizeCondition.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/MaxSizeCondition.java @@ -44,6 +44,8 @@ /** * A size-based condition for an index size. * Evaluates to true if the index size is at least {@link #value}. + * + * @opensearch.internal */ public class MaxSizeCondition extends Condition { public static final String NAME = "max_size"; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverService.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverService.java index 19a7b8c95199b..1206971e805af 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverService.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverService.java @@ -69,6 +69,8 @@ /** * Service responsible for handling rollover requests for write aliases and data streams + * + * @opensearch.internal */ public class MetadataRolloverService { private static final Pattern INDEX_NAME_PATTERN = Pattern.compile("^.*-\\d+$"); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverAction.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverAction.java index f95cf1b83250a..e6c22cc48c1ad 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverAction.java 
@@ -34,6 +34,11 @@ import org.opensearch.action.ActionType; +/** + * Transport action to rollover an index. + * + * @opensearch.internal + */ public class RolloverAction extends ActionType { public static final RolloverAction INSTANCE = new RolloverAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverInfo.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverInfo.java index 3fd145a7c7655..801238f213b6b 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverInfo.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverInfo.java @@ -50,6 +50,8 @@ /** * Class for holding Rollover related information within an index + * + * @opensearch.internal */ public class RolloverInfo extends AbstractDiffable implements Writeable, ToXContentFragment { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequest.java index 402b3741205a2..db5dd0af6ab2a 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequest.java @@ -57,6 +57,8 @@ * * Note: there is a new class with the same name for the Java HLRC that uses a typeless format. * Any changes done to this class should also go to that client class. 
+ * + * @opensearch.internal */ public class RolloverRequest extends AcknowledgedRequest implements IndicesRequest { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequestBuilder.java index c74f71a70e09d..bec084450b389 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequestBuilder.java @@ -39,6 +39,11 @@ import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; +/** + * Transport request to rollover an index. + * + * @opensearch.internal + */ public class RolloverRequestBuilder extends MasterNodeOperationRequestBuilder { public RolloverRequestBuilder(OpenSearchClient client, RolloverAction action) { super(client, action, new RolloverRequest()); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverResponse.java index 5600d05120abb..330d258f9461f 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverResponse.java @@ -54,6 +54,8 @@ * * Note: there is a new class with the same name for the Java HLRC that uses a typeless format. * Any changes done to this class should also go to that client class. 
+ * + * @opensearch.internal */ public final class RolloverResponse extends ShardsAcknowledgedResponse implements ToXContentObject { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/TransportRolloverAction.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/TransportRolloverAction.java index 38f25422a5956..1a2f4be522e2b 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/TransportRolloverAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/TransportRolloverAction.java @@ -69,6 +69,8 @@ /** * Main class to swap the index pointed to by an alias, given some conditions + * + * @opensearch.internal */ public class TransportRolloverAction extends TransportMasterNodeAction { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndexSegments.java b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndexSegments.java index 907f352a5ed89..88973ce094d8b 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndexSegments.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndexSegments.java @@ -38,6 +38,11 @@ import java.util.List; import java.util.Map; +/** + * List of Index Segments + * + * @opensearch.internal + */ public class IndexSegments implements Iterable { private final String index; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndexShardSegments.java b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndexShardSegments.java index 470480c2ac064..e0633d3d557ae 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndexShardSegments.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndexShardSegments.java @@ -37,6 +37,11 @@ import java.util.Arrays; import java.util.Iterator; +/** + * List of Index Shard Segments + * + * @opensearch.internal + */ public class 
IndexShardSegments implements Iterable { private final ShardId shardId; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentResponse.java index 82fe438236d0f..e3d9ffb7f3fdb 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentResponse.java @@ -55,6 +55,11 @@ import java.util.Map; import java.util.Set; +/** + * Transport response for retrieving indices segment information + * + * @opensearch.internal + */ public class IndicesSegmentResponse extends BroadcastResponse { private final ShardSegments[] shards; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsAction.java index 467de4590e746..54a10ce259190 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsAction.java @@ -34,6 +34,11 @@ import org.opensearch.action.ActionType; +/** + * Transport action for retrieving indices segment information + * + * @opensearch.internal + */ public class IndicesSegmentsAction extends ActionType { public static final IndicesSegmentsAction INSTANCE = new IndicesSegmentsAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequest.java index 14f6999692731..dd8c02a97c0f7 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequest.java @@ -39,6 +39,11 @@ import 
java.io.IOException; +/** + * Transport request for retrieving indices segment information + * + * @opensearch.internal + */ public class IndicesSegmentsRequest extends BroadcastRequest { protected boolean verbose = false; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequestBuilder.java index 4352344114f85..4b758e1f4bfb1 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequestBuilder.java @@ -35,6 +35,11 @@ import org.opensearch.action.support.broadcast.BroadcastOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +/** + * Transport request builder for retrieving indices segment information + * + * @opensearch.internal + */ public class IndicesSegmentsRequestBuilder extends BroadcastOperationRequestBuilder< IndicesSegmentsRequest, IndicesSegmentResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/ShardSegments.java b/server/src/main/java/org/opensearch/action/admin/indices/segments/ShardSegments.java index a6e34d1d55823..7214d0203966c 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/segments/ShardSegments.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/ShardSegments.java @@ -42,6 +42,11 @@ import java.util.Iterator; import java.util.List; +/** + * Collection of shard segments + * + * @opensearch.internal + */ public class ShardSegments implements Writeable, Iterable { private final ShardRouting shardRouting; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java index 7ff7bb3591f1d..eb3d64188a6e7 100644 
--- a/server/src/main/java/org/opensearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java @@ -53,6 +53,11 @@ import java.io.IOException; import java.util.List; +/** + * Transport action for retrieving indices segment information + * + * @opensearch.internal + */ public class TransportIndicesSegmentsAction extends TransportBroadcastByNodeAction< IndicesSegmentsRequest, IndicesSegmentResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsAction.java index cc64009682d43..e035b4d9ef622 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsAction.java @@ -34,6 +34,11 @@ import org.opensearch.action.ActionType; +/** + * Transport action for getting index settings + * + * @opensearch.internal + */ public class GetSettingsAction extends ActionType { public static final GetSettingsAction INSTANCE = new GetSettingsAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsRequest.java index 739dfcae6287c..bf68a66d24c5a 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsRequest.java @@ -45,6 +45,11 @@ import java.util.Arrays; import java.util.Objects; +/** + * Transport request for getting index settings + * + * @opensearch.internal + */ public class GetSettingsRequest extends MasterNodeReadRequest implements IndicesRequest.Replaceable { private String[] indices = Strings.EMPTY_ARRAY; diff 
--git a/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsRequestBuilder.java index 034d5d64f1ca7..d978ffd90386a 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsRequestBuilder.java @@ -37,6 +37,11 @@ import org.opensearch.client.OpenSearchClient; import org.opensearch.common.util.ArrayUtils; +/** + * Transport request builder for getting index settings + * + * @opensearch.internal + */ public class GetSettingsRequestBuilder extends MasterNodeReadOperationRequestBuilder< GetSettingsRequest, GetSettingsResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsResponse.java index 0694f93f585ee..d16c629dd082e 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsResponse.java @@ -52,6 +52,11 @@ import java.util.Map; import java.util.Objects; +/** + * Transport response for getting index settings + * + * @opensearch.internal + */ public class GetSettingsResponse extends ActionResponse implements ToXContentObject { private final ImmutableOpenMap indexToSettings; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/get/TransportGetSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/get/TransportGetSettingsAction.java index 5e2c13b5037e2..4f55cf3c4b5ca 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/settings/get/TransportGetSettingsAction.java +++ 
b/server/src/main/java/org/opensearch/action/admin/indices/settings/get/TransportGetSettingsAction.java @@ -55,6 +55,11 @@ import java.io.IOException; +/** + * Transport action for getting index settings + * + * @opensearch.internal + */ public class TransportGetSettingsAction extends TransportMasterNodeReadAction { private final SettingsFilter settingsFilter; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java index 8b81bb8d8a821..52ce7dffbad80 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java @@ -55,6 +55,11 @@ import java.io.IOException; +/** + * Transport action for updating index settings + * + * @opensearch.internal + */ public class TransportUpdateSettingsAction extends TransportMasterNodeAction { private static final Logger logger = LogManager.getLogger(TransportUpdateSettingsAction.class); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsAction.java index 7c0182b0704de..2333a2aad6bc6 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsAction.java @@ -35,6 +35,11 @@ import org.opensearch.action.ActionType; import org.opensearch.action.support.master.AcknowledgedResponse; +/** + * Action for updating index settings + * + * @opensearch.internal + */ public class UpdateSettingsAction extends ActionType { public static final UpdateSettingsAction INSTANCE = new UpdateSettingsAction(); diff --git 
a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsClusterStateUpdateRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsClusterStateUpdateRequest.java index ce36e01ac465e..4b0dd05575309 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsClusterStateUpdateRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsClusterStateUpdateRequest.java @@ -37,6 +37,8 @@ /** * Cluster state update request that allows to update settings for some indices + * + * @opensearch.internal */ public class UpdateSettingsClusterStateUpdateRequest extends IndicesClusterStateUpdateRequest { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequest.java index 70f3dc683d599..2b0452301a5f5 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequest.java @@ -58,6 +58,8 @@ /** * Request for an update index settings action + * + * @opensearch.internal */ public class UpdateSettingsRequest extends AcknowledgedRequest implements diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java index dd3b78ce901f4..7501f0c7798de 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java @@ -43,6 +43,8 @@ /** * Builder for an update index settings request + * + * @opensearch.internal */ public class 
UpdateSettingsRequestBuilder extends AcknowledgedRequestBuilder< UpdateSettingsRequest, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoreRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoreRequestBuilder.java index 8a5b8bea75119..6eec8ccb63d20 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoreRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoreRequestBuilder.java @@ -40,6 +40,8 @@ /** * Request builder for {@link IndicesShardStoresRequest} + * + * @opensearch.internal */ public class IndicesShardStoreRequestBuilder extends MasterNodeReadOperationRequestBuilder< IndicesShardStoresRequest, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresAction.java b/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresAction.java index ec67ae051a5b1..79a6f88b1dbc4 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresAction.java @@ -40,6 +40,8 @@ * Exposes shard store information for requested indices. 
* Shard store information reports which nodes hold shard copies, how recent they are * and any exceptions on opening the shard index or from previous engine failures + * + * @opensearch.internal */ public class IndicesShardStoresAction extends ActionType { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresRequest.java index ff5b5c4cf0c2a..0b66e314731d1 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresRequest.java @@ -45,6 +45,8 @@ /** * Request for {@link IndicesShardStoresAction} + * + * @opensearch.internal */ public class IndicesShardStoresRequest extends MasterNodeReadRequest implements IndicesRequest.Replaceable { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresResponse.java index d09bf99ca7258..175d0efd2b812 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresResponse.java @@ -56,6 +56,8 @@ * * Consists of {@link StoreStatus}s for requested indices grouped by * indices and shard ids and a list of encountered node {@link Failure}s + * + * @opensearch.internal */ public class IndicesShardStoresResponse extends ActionResponse implements ToXContentFragment { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java b/server/src/main/java/org/opensearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java index 32acd9e305130..afa57d36f0419 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java @@ -78,6 +78,8 @@ /** * Transport action that reads the cluster state for shards with the requested criteria (see {@link ClusterHealthStatus}) of specific * indices and fetches store information from all the nodes using {@link TransportNodesListGatewayStartedShards} + * + * @opensearch.internal */ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAction< IndicesShardStoresRequest, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeAction.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeAction.java index 5ecdd62206177..7ea6391f1c429 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeAction.java @@ -34,6 +34,11 @@ import org.opensearch.action.ActionType; +/** + * Transport action for resizing an index + * + * @opensearch.internal + */ public class ResizeAction extends ActionType { public static final ResizeAction INSTANCE = new ResizeAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java index b74c6c8f8d195..50784e60a3f19 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java @@ -55,6 +55,8 @@ /** * Request class to shrink an index into a single shard + * + * @opensearch.internal */ public class ResizeRequest extends AcknowledgedRequest implements IndicesRequest, ToXContentObject { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequestBuilder.java 
b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequestBuilder.java index 766ed78e63497..418e83a5431ec 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequestBuilder.java @@ -38,6 +38,11 @@ import org.opensearch.client.OpenSearchClient; import org.opensearch.common.settings.Settings; +/** + * Transport request builder for resizing an index + * + * @opensearch.internal + */ public class ResizeRequestBuilder extends AcknowledgedRequestBuilder { public ResizeRequestBuilder(OpenSearchClient client, ActionType action) { super(client, action, new ResizeRequest()); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeResponse.java index 3846591d26f3a..88c38856d5025 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeResponse.java @@ -41,6 +41,8 @@ /** * A response for a resize index action, either shrink or split index. 
+ * + * @opensearch.internal */ public final class ResizeResponse extends CreateIndexResponse { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeType.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeType.java index 62b20ed9d3dcf..6403ed735ae49 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeType.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeType.java @@ -34,6 +34,8 @@ /** * The type of the resize operation + * + * @opensearch.internal */ public enum ResizeType { SHRINK, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java index b35febe60af31..24c5466c2ba0b 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java @@ -66,6 +66,8 @@ /** * Main class to initiate resizing (shrink / split) an index into a new index + * + * @opensearch.internal */ public class TransportResizeAction extends TransportMasterNodeAction { private final MetadataCreateIndexService createIndexService; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStats.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStats.java index 2949af00a30d0..472fb45f452dd 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStats.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStats.java @@ -65,6 +65,11 @@ import java.util.Objects; import java.util.stream.Stream; +/** + * Common Stats for OpenSearch + * + * @opensearch.internal + */ public class CommonStats implements Writeable, ToXContentFragment { @Nullable diff --git 
a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java index e17b497ce312a..9eec34d87c384 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java @@ -43,6 +43,11 @@ import java.util.Collections; import java.util.EnumSet; +/** + * Common Stats Flags for OpenSearch + * + * @opensearch.internal + */ public class CommonStatsFlags implements Writeable, Cloneable { public static final CommonStatsFlags ALL = new CommonStatsFlags().all(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndexShardStats.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndexShardStats.java index b8ecd6d4bfc3d..6a423cf8d15ea 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndexShardStats.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndexShardStats.java @@ -41,6 +41,11 @@ import java.util.Arrays; import java.util.Iterator; +/** + * IndexShardStats for OpenSearch + * + * @opensearch.internal + */ public class IndexShardStats implements Iterable, Writeable { private final ShardId shardId; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndexStats.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndexStats.java index f0eec4ebbd27a..c57c5f80d1b30 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndexStats.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndexStats.java @@ -38,6 +38,11 @@ import java.util.List; import java.util.Map; +/** + * Index Stats for OpenSearch + * + * @opensearch.internal + */ public class IndexStats implements Iterable { private final String index; diff --git 
a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsAction.java index c0a56c3f00536..bee33671ee291 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsAction.java @@ -34,6 +34,11 @@ import org.opensearch.action.ActionType; +/** + * Transport action for retrieving index stats + * + * @opensearch.internal + */ public class IndicesStatsAction extends ActionType { public static final IndicesStatsAction INSTANCE = new IndicesStatsAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsRequest.java index bbe69b700b876..06315377797d9 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsRequest.java @@ -45,6 +45,8 @@ *

      * All the stats to be returned can be cleared using {@link #clear()}, at which point, specific * stats can be enabled. + * + * @opensearch.internal */ public class IndicesStatsRequest extends BroadcastRequest { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java index 23c33401966b4..c211812b32c48 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java @@ -44,6 +44,8 @@ *

      * All the stats to be returned can be cleared using {@link #clear()}, at which point, specific * stats can be enabled. + * + * @opensearch.internal */ public class IndicesStatsRequestBuilder extends BroadcastOperationRequestBuilder< IndicesStatsRequest, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsResponse.java index 3614d8de6c884..8edfef7891a98 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsResponse.java @@ -50,6 +50,11 @@ import static java.util.Collections.unmodifiableMap; +/** + * Transport response for retrieving indices stats + * + * @opensearch.internal + */ public class IndicesStatsResponse extends BroadcastResponse { private ShardStats[] shards; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/ShardStats.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/ShardStats.java index c5d3fba2a5805..b15e192d20fe7 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/stats/ShardStats.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/ShardStats.java @@ -46,6 +46,11 @@ import java.io.IOException; +/** + * Shard Stats for OpenSearch + * + * @opensearch.internal + */ public class ShardStats implements Writeable, ToXContentFragment { private ShardRouting shardRouting; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/TransportIndicesStatsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/TransportIndicesStatsAction.java index 11ca1462787e8..39e0338aac5f6 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/stats/TransportIndicesStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/TransportIndicesStatsAction.java @@ 
-58,6 +58,11 @@ import java.io.IOException; import java.util.List; +/** + * Transport action for retrieving indices stats + * + * @opensearch.internal + */ public class TransportIndicesStatsAction extends TransportBroadcastByNodeAction { private final IndicesService indicesService; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteComponentTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteComponentTemplateAction.java index 1a4f0a1892ec7..388836e254f27 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteComponentTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteComponentTemplateAction.java @@ -43,6 +43,11 @@ import static org.opensearch.action.ValidateActions.addValidationError; +/** + * Transport action for deleting an index template component + * + * @opensearch.internal + */ public class DeleteComponentTemplateAction extends ActionType { public static final DeleteComponentTemplateAction INSTANCE = new DeleteComponentTemplateAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteComposableIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteComposableIndexTemplateAction.java index bc8b96f6259e8..ec37cc09007c6 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteComposableIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteComposableIndexTemplateAction.java @@ -44,6 +44,11 @@ import static org.opensearch.action.ValidateActions.addValidationError; +/** + * Transport action for deleting a composable index template + * + * @opensearch.internal + */ public class DeleteComposableIndexTemplateAction extends ActionType { public static final DeleteComposableIndexTemplateAction INSTANCE = new 
DeleteComposableIndexTemplateAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateAction.java index a91ec2850a107..789d03f8e8d8c 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateAction.java @@ -35,6 +35,11 @@ import org.opensearch.action.ActionType; import org.opensearch.action.support.master.AcknowledgedResponse; +/** + * Transport action for deleting an index template + * + * @opensearch.internal + */ public class DeleteIndexTemplateAction extends ActionType { public static final DeleteIndexTemplateAction INSTANCE = new DeleteIndexTemplateAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateRequest.java index a3762bb62fb94..e3a92107670ff 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateRequest.java @@ -42,6 +42,8 @@ /** * A request to delete an index template. 
+ * + * @opensearch.internal */ public class DeleteIndexTemplateRequest extends MasterNodeRequest { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java index b58cdd06da5e9..2fc958ba93c6b 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java @@ -35,6 +35,11 @@ import org.opensearch.action.support.master.MasterNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +/** + * Transport request builder for deleting an index template + * + * @opensearch.internal + */ public class DeleteIndexTemplateRequestBuilder extends MasterNodeOperationRequestBuilder< DeleteIndexTemplateRequest, AcknowledgedResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteComponentTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteComponentTemplateAction.java index 15ed0443525fb..491b3e520ee51 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteComponentTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteComponentTemplateAction.java @@ -51,6 +51,11 @@ import java.io.IOException; +/** + * Transport action for deleting a component template + * + * @opensearch.internal + */ public class TransportDeleteComponentTemplateAction extends TransportMasterNodeAction< DeleteComponentTemplateAction.Request, AcknowledgedResponse> { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteComposableIndexTemplateAction.java 
b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteComposableIndexTemplateAction.java index 87b3a883a3c10..4e7f3f292d810 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteComposableIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteComposableIndexTemplateAction.java @@ -51,6 +51,11 @@ import java.io.IOException; +/** + * Transport action for deleting a composable index template + * + * @opensearch.internal + */ public class TransportDeleteComposableIndexTemplateAction extends TransportMasterNodeAction< DeleteComposableIndexTemplateAction.Request, AcknowledgedResponse> { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java index e75d3aafe6a85..89668fd0ed164 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java @@ -53,6 +53,8 @@ /** * Delete index action. 
+ * + * @opensearch.internal */ public class TransportDeleteIndexTemplateAction extends TransportMasterNodeAction { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetComponentTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetComponentTemplateAction.java index 7f01ec0717332..5bc9f2e62e188 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetComponentTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetComponentTemplateAction.java @@ -50,6 +50,8 @@ /** * Action to retrieve one or more component templates + * + * @opensearch.internal */ public class GetComponentTemplateAction extends ActionType { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java index 5f7ccb0b4f90f..7014f4d0994c4 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java @@ -48,6 +48,11 @@ import java.util.Map; import java.util.Objects; +/** + * Action to retrieve one or more Composable Index templates + * + * @opensearch.internal + */ public class GetComposableIndexTemplateAction extends ActionType { public static final GetComposableIndexTemplateAction INSTANCE = new GetComposableIndexTemplateAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesAction.java index 1194abbab1cbb..f380ad6fddb55 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesAction.java +++ 
b/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesAction.java @@ -33,6 +33,11 @@ import org.opensearch.action.ActionType; +/** + * Action to retrieve one or more Index templates + * + * @opensearch.internal + */ public class GetIndexTemplatesAction extends ActionType { public static final GetIndexTemplatesAction INSTANCE = new GetIndexTemplatesAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesRequest.java index da48bc4d8bc29..806b1ee0b9162 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesRequest.java @@ -43,6 +43,8 @@ /** * Request that allows to retrieve index templates + * + * @opensearch.internal */ public class GetIndexTemplatesRequest extends MasterNodeReadRequest { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java index 348075d051616..d5e5845955dae 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java @@ -34,6 +34,11 @@ import org.opensearch.action.support.master.MasterNodeReadOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +/** + * Request builder to retrieve one or more Index templates + * + * @opensearch.internal + */ public class GetIndexTemplatesRequestBuilder extends MasterNodeReadOperationRequestBuilder< GetIndexTemplatesRequest, GetIndexTemplatesResponse, diff --git 
a/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java index e6d487e0a40b3..556d809315edc 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java @@ -46,6 +46,11 @@ import static java.util.Collections.singletonMap; +/** + * Response for retrieving one or more Index templates + * + * @opensearch.internal + */ public class GetIndexTemplatesResponse extends ActionResponse implements ToXContentObject { private final List indexTemplates; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetComponentTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetComponentTemplateAction.java index 62615465fbb4a..b7efb584ba92f 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetComponentTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetComponentTemplateAction.java @@ -52,6 +52,11 @@ import java.util.HashMap; import java.util.Map; +/** + * Action to retrieve one or more Component templates + * + * @opensearch.internal + */ public class TransportGetComponentTemplateAction extends TransportMasterNodeReadAction< GetComponentTemplateAction.Request, GetComponentTemplateAction.Response> { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetComposableIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetComposableIndexTemplateAction.java index efa6d4723ce0d..e3eb619b52569 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetComposableIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetComposableIndexTemplateAction.java @@ -52,6 +52,11 @@ import java.util.HashMap; import java.util.Map; +/** + * Transport Action to retrieve one or more Composable Index templates + * + * @opensearch.internal + */ public class TransportGetComposableIndexTemplateAction extends TransportMasterNodeReadAction< GetComposableIndexTemplateAction.Request, GetComposableIndexTemplateAction.Response> { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java index df6531d0a3862..993f40fd4f625 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java @@ -52,6 +52,11 @@ import java.util.Arrays; import java.util.List; +/** + * Transport action to retrieve one or more Index templates + * + * @opensearch.internal + */ public class TransportGetIndexTemplatesAction extends TransportMasterNodeReadAction { @Inject diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateIndexTemplateAction.java index 3e6a586638e58..66b15c8b807f8 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateIndexTemplateAction.java @@ -34,6 +34,12 @@ import org.opensearch.action.ActionType; +/** + * Transport Action for handling simulating an index template either by name (looking 
it up in the + * cluster state), or by a provided template configuration + * + * @opensearch.internal + */ public class SimulateIndexTemplateAction extends ActionType { public static final SimulateIndexTemplateAction INSTANCE = new SimulateIndexTemplateAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java index 9d6735751ac7e..3db0ec47c5df2 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java @@ -43,6 +43,12 @@ import java.io.IOException; import java.util.Objects; +/** + * Transport Request for handling simulating an index template either by name (looking it up in the + * cluster state), or by a provided template configuration + * + * @opensearch.internal + */ public class SimulateIndexTemplateRequest extends MasterNodeReadRequest { private String indexName; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java index 9116db02459ef..517d61a1e4dbc 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java @@ -49,6 +49,8 @@ /** * Contains the information on what V2 templates would match a given index. 
+ * + * @opensearch.internal */ public class SimulateIndexTemplateResponse extends ActionResponse implements ToXContentObject { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateTemplateAction.java index f53925e8abd50..f033f94dd4658 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateTemplateAction.java @@ -47,6 +47,8 @@ /** * An action for simulating the complete composed settings of the specified * index template name, or index template configuration + * + * @opensearch.internal */ public class SimulateTemplateAction extends ActionType { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java index 598b5bdbf6d3b..89d04dc3f59f5 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java @@ -75,6 +75,12 @@ import static org.opensearch.cluster.metadata.MetadataIndexTemplateService.findV2Template; import static org.opensearch.cluster.metadata.MetadataIndexTemplateService.resolveSettings; +/** + * Transport Action for handling simulating an index template either by name (looking it up in the + * cluster state), or by a provided template configuration + * + * @opensearch.internal + */ public class TransportSimulateIndexTemplateAction extends TransportMasterNodeReadAction< SimulateIndexTemplateRequest, SimulateIndexTemplateResponse> { diff --git 
a/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java index e12544a02be4c..5b14d0a00ebfe 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java @@ -64,6 +64,8 @@ /** * Handles simulating an index template either by name (looking it up in the * cluster state), or by a provided template configuration + * + * @opensearch.internal */ public class TransportSimulateTemplateAction extends TransportMasterNodeReadAction< SimulateTemplateAction.Request, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutComponentTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutComponentTemplateAction.java index 69149b953cc08..fc4d76d1f74ad 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutComponentTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutComponentTemplateAction.java @@ -48,6 +48,8 @@ /** * An action for putting a single component template into the cluster state + * + * @opensearch.internal */ public class PutComponentTemplateAction extends ActionType { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutComposableIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutComposableIndexTemplateAction.java index 059f3be745bb0..d57e645e56b46 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutComposableIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutComposableIndexTemplateAction.java @@ -51,6 +51,11 @@ import static 
org.opensearch.action.ValidateActions.addValidationError; +/** + * An action for putting a composable template into the cluster state + * + * @opensearch.internal + */ public class PutComposableIndexTemplateAction extends ActionType { public static final PutComposableIndexTemplateAction INSTANCE = new PutComposableIndexTemplateAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateAction.java index e3c072908596f..06a9f6fbba409 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateAction.java @@ -35,6 +35,11 @@ import org.opensearch.action.ActionType; import org.opensearch.action.support.master.AcknowledgedResponse; +/** + * An action for putting an index template into the cluster state + * + * @opensearch.internal + */ public class PutIndexTemplateAction extends ActionType { public static final PutIndexTemplateAction INSTANCE = new PutIndexTemplateAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequest.java index 608e3da699318..4c826477978fc 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequest.java @@ -77,6 +77,8 @@ /** * A request to create an index template. 
+ * + * @opensearch.internal */ public class PutIndexTemplateRequest extends MasterNodeRequest implements IndicesRequest, ToXContentObject { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java index e5a02acb4a6e9..df12dc5d66998 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java @@ -43,6 +43,11 @@ import java.util.List; import java.util.Map; +/** + * A request builder for putting an index template into the cluster state + * + * @opensearch.internal + */ public class PutIndexTemplateRequestBuilder extends MasterNodeOperationRequestBuilder< PutIndexTemplateRequest, AcknowledgedResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComponentTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComponentTemplateAction.java index 022ffbeb7e03a..469c20e497822 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComponentTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComponentTemplateAction.java @@ -54,6 +54,11 @@ import java.io.IOException; +/** + * An action for putting a single component template into the cluster state + * + * @opensearch.internal + */ public class TransportPutComponentTemplateAction extends TransportMasterNodeAction< PutComponentTemplateAction.Request, AcknowledgedResponse> { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComposableIndexTemplateAction.java 
b/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComposableIndexTemplateAction.java index 7739c66b4cbd4..19485afe5d706 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComposableIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComposableIndexTemplateAction.java @@ -50,6 +50,11 @@ import java.io.IOException; +/** + * An action for putting a composable index template into the cluster state + * + * @opensearch.internal + */ public class TransportPutComposableIndexTemplateAction extends TransportMasterNodeAction< PutComposableIndexTemplateAction.Request, AcknowledgedResponse> { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java index 42d932c62da55..778e0b374e2aa 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java @@ -56,6 +56,8 @@ /** * Put index template action. 
+ * + * @opensearch.internal */ public class TransportPutIndexTemplateAction extends TransportMasterNodeAction { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/IndexShardUpgradeStatus.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/IndexShardUpgradeStatus.java index 954ab28542904..df937fab349ec 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/IndexShardUpgradeStatus.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/IndexShardUpgradeStatus.java @@ -37,6 +37,11 @@ import java.util.Arrays; import java.util.Iterator; +/** + * Status for an Index Shard Upgrade + * + * @opensearch.internal + */ public class IndexShardUpgradeStatus implements Iterable { private final ShardId shardId; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/IndexUpgradeStatus.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/IndexUpgradeStatus.java index 19f97faff3c61..2cff1f04d3fd2 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/IndexUpgradeStatus.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/IndexUpgradeStatus.java @@ -38,6 +38,11 @@ import java.util.List; import java.util.Map; +/** + * Status for an Index Upgrade + * + * @opensearch.internal + */ public class IndexUpgradeStatus implements Iterable { private final String index; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/ShardUpgradeStatus.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/ShardUpgradeStatus.java index 579bd5e969eef..8ca8188899d02 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/ShardUpgradeStatus.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/ShardUpgradeStatus.java @@ -39,6 +39,11 @@ import java.io.IOException; +/** + * Status for a Shard Upgrade + * + 
* @opensearch.internal + */ public class ShardUpgradeStatus extends BroadcastShardResponse { private ShardRouting shardRouting; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/TransportUpgradeStatusAction.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/TransportUpgradeStatusAction.java index fcad2ccc6298d..b2f6cd62b1be7 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/TransportUpgradeStatusAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/TransportUpgradeStatusAction.java @@ -55,6 +55,11 @@ import java.io.IOException; import java.util.List; +/** + * Transport Action for Upgrading an Index + * + * @opensearch.internal + */ public class TransportUpgradeStatusAction extends TransportBroadcastByNodeAction< UpgradeStatusRequest, UpgradeStatusResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusAction.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusAction.java index 7612bcb4464d4..a43d7580a571f 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusAction.java @@ -34,6 +34,11 @@ import org.opensearch.action.ActionType; +/** + * Action for Upgrading an Index + * + * @opensearch.internal + */ public class UpgradeStatusAction extends ActionType { public static final UpgradeStatusAction INSTANCE = new UpgradeStatusAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusRequest.java index bc7f0f31a6197..48a19ccdf0b94 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusRequest.java +++ 
b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusRequest.java @@ -38,6 +38,11 @@ import java.io.IOException; +/** + * Transport Request for retrieving status of upgrading an Index + * + * @opensearch.internal + */ public class UpgradeStatusRequest extends BroadcastRequest { public UpgradeStatusRequest() { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusRequestBuilder.java index 6f4f601a81d8a..c698c38fe12d5 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusRequestBuilder.java @@ -35,6 +35,11 @@ import org.opensearch.action.support.broadcast.BroadcastOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +/** + * Transport Request Builder for retrieving status of upgrading an Index + * + * @opensearch.internal + */ public class UpgradeStatusRequestBuilder extends BroadcastOperationRequestBuilder< UpgradeStatusRequest, UpgradeStatusResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java index a8bb5cfc8bba1..90cc8e86a231b 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java @@ -47,6 +47,11 @@ import java.util.Map; import java.util.Set; +/** + * Transport Response for retrieving status of upgrading an Index + * + * @opensearch.internal + */ public class UpgradeStatusResponse extends BroadcastResponse { private ShardUpgradeStatus[] shards; diff --git 
a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/ShardUpgradeResult.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/ShardUpgradeResult.java index a4fcbaca55299..4eea1f01742bd 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/ShardUpgradeResult.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/ShardUpgradeResult.java @@ -41,6 +41,11 @@ import java.io.IOException; import java.text.ParseException; +/** + * Result for Upgrading a Shard + * + * @opensearch.internal + */ class ShardUpgradeResult implements Writeable { private ShardId shardId; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java index 1048de90b214e..e50c8a94d5e20 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java @@ -67,6 +67,8 @@ /** * Upgrade index/indices action. 
+ * + * @opensearch.internal */ public class TransportUpgradeAction extends TransportBroadcastByNodeAction { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java index cacd341730e12..4c352a172c040 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java @@ -53,6 +53,11 @@ import java.io.IOException; +/** + * Transport action for upgrading index settings + * + * @opensearch.internal + */ public class TransportUpgradeSettingsAction extends TransportMasterNodeAction { private static final Logger logger = LogManager.getLogger(TransportUpgradeSettingsAction.class); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeAction.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeAction.java index ee9da454ad141..369503bb54ecf 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeAction.java @@ -36,6 +36,8 @@ /** * Upgrade index/indices action. + * + * @opensearch.internal */ public class UpgradeAction extends ActionType { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeRequest.java index 94d07075a8f3e..e4446f31f8164 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeRequest.java @@ -44,6 +44,8 @@ * @see org.opensearch.client.Requests#upgradeRequest(String...) 
* @see org.opensearch.client.IndicesAdminClient#upgrade(UpgradeRequest) * @see UpgradeResponse + * + * @opensearch.internal */ public class UpgradeRequest extends BroadcastRequest { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeRequestBuilder.java index 5deff45aadc9b..8203f9d51b8e4 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeRequestBuilder.java @@ -38,6 +38,8 @@ /** * A request to upgrade one or more indices. In order to optimize on all the indices, pass an empty array or * {@code null} for the indices. + * + * @opensearch.internal */ public class UpgradeRequestBuilder extends BroadcastOperationRequestBuilder { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeResponse.java index 07f9debbbf97b..ff1fd2b5635c0 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeResponse.java @@ -47,7 +47,7 @@ /** * A response for the upgrade action. 
* - * + * @opensearch.internal */ public class UpgradeResponse extends BroadcastResponse { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeSettingsAction.java index f9ecc9ace9171..05944e781d109 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeSettingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeSettingsAction.java @@ -35,6 +35,11 @@ import org.opensearch.action.ActionType; import org.opensearch.action.support.master.AcknowledgedResponse; +/** + * Transport action for upgrading index settings + * + * @opensearch.internal + */ public class UpgradeSettingsAction extends ActionType { public static final UpgradeSettingsAction INSTANCE = new UpgradeSettingsAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeSettingsClusterStateUpdateRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeSettingsClusterStateUpdateRequest.java index cf24e20a44ee3..ee6c6161713ac 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeSettingsClusterStateUpdateRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeSettingsClusterStateUpdateRequest.java @@ -40,6 +40,8 @@ /** * Cluster state update request that allows to change minimum compatibility settings for some indices + * + * @opensearch.internal */ public class UpgradeSettingsClusterStateUpdateRequest extends ClusterStateUpdateRequest { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeSettingsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeSettingsRequest.java index dc3a60c62033b..d6b784e44befb 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeSettingsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeSettingsRequest.java @@ -46,6 +46,8 @@ /** * Request for an update index settings action + * + * @opensearch.internal */ public class UpgradeSettingsRequest extends AcknowledgedRequest { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/validate/query/QueryExplanation.java b/server/src/main/java/org/opensearch/action/admin/indices/validate/query/QueryExplanation.java index 3764cfd1e608b..087c5e4d2b1ab 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/validate/query/QueryExplanation.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/validate/query/QueryExplanation.java @@ -47,6 +47,11 @@ import static org.opensearch.common.xcontent.ConstructingObjectParser.constructorArg; import static org.opensearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; +/** + * Query Explanation + * + * @opensearch.internal + */ public class QueryExplanation implements Writeable, ToXContentFragment { public static final String INDEX_FIELD = "index"; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ShardValidateQueryRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ShardValidateQueryRequest.java index 4d6525d002381..7df5a27929778 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ShardValidateQueryRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ShardValidateQueryRequest.java @@ -45,6 +45,8 @@ /** * Internal validate request executed directly against a specific index shard. 
+ * + * @opensearch.internal */ public class ShardValidateQueryRequest extends BroadcastShardRequest { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ShardValidateQueryResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ShardValidateQueryResponse.java index 88080bd7667f3..7da73f5038e92 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ShardValidateQueryResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ShardValidateQueryResponse.java @@ -42,7 +42,7 @@ /** * Internal validate response of a shard validate request executed directly against a specific shard. * - * + * @opensearch.internal */ class ShardValidateQueryResponse extends BroadcastShardResponse { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/validate/query/TransportValidateQueryAction.java b/server/src/main/java/org/opensearch/action/admin/indices/validate/query/TransportValidateQueryAction.java index 1849b41ce707f..431c5468d1850 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/validate/query/TransportValidateQueryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/validate/query/TransportValidateQueryAction.java @@ -72,6 +72,11 @@ import java.util.concurrent.atomic.AtomicReferenceArray; import java.util.function.LongSupplier; +/** + * Transport Action to Validate a Query + * + * @opensearch.internal + */ public class TransportValidateQueryAction extends TransportBroadcastAction< ValidateQueryRequest, ValidateQueryResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryAction.java b/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryAction.java index a9b0954287c30..cdeb9e818a52f 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryAction.java +++ 
b/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryAction.java @@ -34,6 +34,11 @@ import org.opensearch.action.ActionType; +/** + * Action to Validate a Query + * + * @opensearch.internal + */ public class ValidateQueryAction extends ActionType { public static final ValidateQueryAction INSTANCE = new ValidateQueryAction(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryRequest.java index 1bb85c4e84483..75295f74582b9 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryRequest.java @@ -52,6 +52,8 @@ * A request to validate a specific query. *

      * The request requires the query to be set using {@link #query(QueryBuilder)} + * + * @opensearch.internal */ public class ValidateQueryRequest extends BroadcastRequest implements ToXContentObject { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryRequestBuilder.java index 88261e6536240..6209f41d88be2 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryRequestBuilder.java @@ -36,6 +36,11 @@ import org.opensearch.client.OpenSearchClient; import org.opensearch.index.query.QueryBuilder; +/** + * Transport Request Builder to Validate a Query + * + * @opensearch.internal + */ public class ValidateQueryRequestBuilder extends BroadcastOperationRequestBuilder< ValidateQueryRequest, ValidateQueryResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryResponse.java index 53d1463c23351..2e843369f6af0 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryResponse.java @@ -52,7 +52,7 @@ /** * The response of the validate action. 
* - * + * @opensearch.internal */ public class ValidateQueryResponse extends BroadcastResponse { diff --git a/server/src/main/java/org/opensearch/plugins/ActionPlugin.java b/server/src/main/java/org/opensearch/plugins/ActionPlugin.java index 31f5159120337..e4254fb9acbad 100644 --- a/server/src/main/java/org/opensearch/plugins/ActionPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/ActionPlugin.java @@ -150,6 +150,11 @@ default UnaryOperator getRestHandlerWrapper(ThreadContext threadCon return null; } + /** + * Class responsible for handing Transport Actions + * + * @opensearch.internal + */ final class ActionHandler { private final ActionType action; private final Class> transportAction; diff --git a/server/src/main/java/org/opensearch/plugins/ExtensiblePlugin.java b/server/src/main/java/org/opensearch/plugins/ExtensiblePlugin.java index 9cab282910599..55109178cbdff 100644 --- a/server/src/main/java/org/opensearch/plugins/ExtensiblePlugin.java +++ b/server/src/main/java/org/opensearch/plugins/ExtensiblePlugin.java @@ -42,6 +42,11 @@ */ public interface ExtensiblePlugin { + /** + * Extension point for external plugins to be extendable + * + * @opensearch.api + */ interface ExtensionLoader { /** * Load extensions of the type from all extending plugins. 
The concrete extensions must have either a no-arg constructor diff --git a/server/src/main/java/org/opensearch/plugins/PluginsService.java b/server/src/main/java/org/opensearch/plugins/PluginsService.java index 4ef2dc4617de9..bff880e5a41d7 100644 --- a/server/src/main/java/org/opensearch/plugins/PluginsService.java +++ b/server/src/main/java/org/opensearch/plugins/PluginsService.java @@ -83,6 +83,11 @@ import static org.opensearch.common.io.FileSystemUtils.isAccessibleDirectory; +/** + * Service responsible for loading plugins and modules (internal and external) + * + * @opensearch.internal + */ public class PluginsService implements ReportingService { private static final Logger logger = LogManager.getLogger(PluginsService.class); diff --git a/server/src/main/java/org/opensearch/transport/RequestHandlerRegistry.java b/server/src/main/java/org/opensearch/transport/RequestHandlerRegistry.java index dcb021531f0ac..b65b72b745a01 100644 --- a/server/src/main/java/org/opensearch/transport/RequestHandlerRegistry.java +++ b/server/src/main/java/org/opensearch/transport/RequestHandlerRegistry.java @@ -43,7 +43,12 @@ import java.io.IOException; -public class RequestHandlerRegistry { +/** + * Registry for OpenSearch RequestHandlers + * + * @opensearch.internal + */ +public final class RequestHandlerRegistry { private final String action; private final TransportRequestHandler handler; diff --git a/server/src/main/java/org/opensearch/transport/StatsTracker.java b/server/src/main/java/org/opensearch/transport/StatsTracker.java index a97a9c6a9374b..5548d2d558ae2 100644 --- a/server/src/main/java/org/opensearch/transport/StatsTracker.java +++ b/server/src/main/java/org/opensearch/transport/StatsTracker.java @@ -36,7 +36,12 @@ import java.util.concurrent.atomic.LongAdder; -public class StatsTracker { +/** + * Tracks transport statistics + * + * @opensearch.internal + */ +public final class StatsTracker { private final LongAdder bytesRead = new LongAdder(); private final LongAdder 
messagesReceived = new LongAdder(); From c76a4c901450c4ad3b34bc1030f7d5b5fa2c46f0 Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Tue, 3 May 2022 22:18:28 -0500 Subject: [PATCH 149/653] [Remove] ShrinkAction, ShardUpgradeRequest, UpgradeSettingsRequestBuilder (#3169) Removes unused ShrinkAction, ShardUpgradeRequest, and UpgradeSettingsRequestBuilder classes. Signed-off-by: Nicholas Walter Knize --- .../admin/indices/shrink/ShrinkAction.java | 46 ------------- .../upgrade/post/ShardUpgradeRequest.java | 65 ------------------- .../post/UpgradeSettingsRequestBuilder.java | 62 ------------------ 3 files changed, 173 deletions(-) delete mode 100644 server/src/main/java/org/opensearch/action/admin/indices/shrink/ShrinkAction.java delete mode 100644 server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/ShardUpgradeRequest.java delete mode 100644 server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeSettingsRequestBuilder.java diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ShrinkAction.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ShrinkAction.java deleted file mode 100644 index 1fa894b265573..0000000000000 --- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ShrinkAction.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.action.admin.indices.shrink; - -import org.opensearch.action.ActionType; - -public class ShrinkAction extends ActionType { - - public static final ShrinkAction INSTANCE = new ShrinkAction(); - public static final String NAME = "indices:admin/shrink"; - - private ShrinkAction() { - super(NAME, ResizeResponse::new); - } - -} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/ShardUpgradeRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/ShardUpgradeRequest.java deleted file mode 100644 index fcc85e1a9cb5c..0000000000000 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/ShardUpgradeRequest.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.action.admin.indices.upgrade.post; - -import org.opensearch.action.support.broadcast.BroadcastShardRequest; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; -import org.opensearch.index.shard.ShardId; - -import java.io.IOException; - -public final class ShardUpgradeRequest extends BroadcastShardRequest { - - private UpgradeRequest request; - - public ShardUpgradeRequest(StreamInput in) throws IOException { - super(in); - request = new UpgradeRequest(in); - } - - ShardUpgradeRequest(ShardId shardId, UpgradeRequest request) { - super(shardId, request); - this.request = request; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - request.writeTo(out); - } - - public UpgradeRequest upgradeRequest() { - return this.request; - } -} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeSettingsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeSettingsRequestBuilder.java deleted file mode 100644 index d3a8cc311bb8a..0000000000000 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeSettingsRequestBuilder.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or 
a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.action.admin.indices.upgrade.post; - -import org.opensearch.Version; -import org.opensearch.action.support.master.AcknowledgedRequestBuilder; -import org.opensearch.action.support.master.AcknowledgedResponse; -import org.opensearch.client.OpenSearchClient; -import org.opensearch.common.collect.Tuple; - -import java.util.Map; - -/** - * Builder for an update index settings request - */ -public class UpgradeSettingsRequestBuilder extends AcknowledgedRequestBuilder< - UpgradeSettingsRequest, - AcknowledgedResponse, - UpgradeSettingsRequestBuilder> { - - public UpgradeSettingsRequestBuilder(OpenSearchClient client, UpgradeSettingsAction action) { - super(client, action, new UpgradeSettingsRequest()); - } - - /** - * Sets the index versions to be updated - */ - public UpgradeSettingsRequestBuilder setVersions(Map> versions) { - request.versions(versions); - return this; - } -} From 40024fcf71983c297108fd5907dafc999b1368c1 Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Tue, 3 May 2022 22:18:43 -0500 Subject: [PATCH 
150/653] [Javadocs] add to o.o.cluster (#3170) Adds javadocs to classes in the o.o.cluster package. Signed-off-by: Nicholas Walter Knize --- .../main/java/org/opensearch/cluster/AbstractDiffable.java | 2 ++ .../java/org/opensearch/cluster/AbstractNamedDiffable.java | 2 ++ .../opensearch/cluster/AckedClusterStateTaskListener.java | 5 +++++ .../org/opensearch/cluster/AckedClusterStateUpdateTask.java | 2 ++ .../java/org/opensearch/cluster/ClusterChangedEvent.java | 2 ++ .../src/main/java/org/opensearch/cluster/ClusterInfo.java | 2 ++ .../java/org/opensearch/cluster/ClusterInfoService.java | 2 ++ .../src/main/java/org/opensearch/cluster/ClusterModule.java | 2 ++ .../src/main/java/org/opensearch/cluster/ClusterName.java | 5 +++++ .../src/main/java/org/opensearch/cluster/ClusterState.java | 2 ++ .../java/org/opensearch/cluster/ClusterStateApplier.java | 2 ++ .../java/org/opensearch/cluster/ClusterStateListener.java | 2 +- .../java/org/opensearch/cluster/ClusterStateObserver.java | 2 ++ .../java/org/opensearch/cluster/ClusterStateTaskConfig.java | 2 ++ .../org/opensearch/cluster/ClusterStateTaskExecutor.java | 5 +++++ .../org/opensearch/cluster/ClusterStateTaskListener.java | 5 +++++ .../java/org/opensearch/cluster/ClusterStateUpdateTask.java | 2 ++ server/src/main/java/org/opensearch/cluster/Diff.java | 2 ++ server/src/main/java/org/opensearch/cluster/Diffable.java | 2 ++ .../src/main/java/org/opensearch/cluster/DiffableUtils.java | 5 +++++ server/src/main/java/org/opensearch/cluster/DiskUsage.java | 2 ++ .../org/opensearch/cluster/EmptyClusterInfoService.java | 2 ++ .../cluster/IncompatibleClusterStateVersionException.java | 2 ++ .../org/opensearch/cluster/InternalClusterInfoService.java | 2 ++ .../java/org/opensearch/cluster/LocalClusterUpdateTask.java | 2 ++ .../org/opensearch/cluster/LocalNodeMasterListener.java | 2 ++ .../org/opensearch/cluster/MasterNodeChangePredicate.java | 5 +++++ .../java/org/opensearch/cluster/MergableCustomMetadata.java | 2 ++ 
server/src/main/java/org/opensearch/cluster/NamedDiff.java | 2 ++ .../src/main/java/org/opensearch/cluster/NamedDiffable.java | 2 ++ .../opensearch/cluster/NamedDiffableValueSerializer.java | 2 ++ .../java/org/opensearch/cluster/NodeConnectionsService.java | 2 ++ .../java/org/opensearch/cluster/NotMasterException.java | 2 ++ .../org/opensearch/cluster/RepositoryCleanupInProgress.java | 5 +++++ .../main/java/org/opensearch/cluster/RestoreInProgress.java | 2 ++ .../org/opensearch/cluster/SnapshotDeletionsInProgress.java | 2 ++ .../java/org/opensearch/cluster/SnapshotsInProgress.java | 2 ++ .../org/opensearch/cluster/TimeoutClusterStateListener.java | 2 +- .../main/java/org/opensearch/cluster/ack/AckedRequest.java | 2 ++ .../opensearch/cluster/ack/ClusterStateUpdateRequest.java | 2 ++ .../opensearch/cluster/ack/ClusterStateUpdateResponse.java | 2 ++ .../cluster/ack/CreateIndexClusterStateUpdateResponse.java | 2 ++ .../cluster/ack/IndicesClusterStateUpdateRequest.java | 2 ++ .../cluster/ack/OpenIndexClusterStateUpdateResponse.java | 2 ++ .../cluster/action/index/MappingUpdatedAction.java | 2 ++ .../cluster/action/index/NodeMappingRefreshAction.java | 5 +++++ .../opensearch/cluster/action/shard/ShardStateAction.java | 5 +++++ .../java/org/opensearch/cluster/block/ClusterBlock.java | 5 +++++ .../org/opensearch/cluster/block/ClusterBlockException.java | 5 +++++ .../org/opensearch/cluster/block/ClusterBlockLevel.java | 5 +++++ .../java/org/opensearch/cluster/block/ClusterBlocks.java | 2 ++ .../opensearch/cluster/coordination/ApplyCommitRequest.java | 2 ++ .../cluster/coordination/ClusterBootstrapService.java | 5 +++++ .../cluster/coordination/ClusterFormationFailureHelper.java | 5 +++++ .../cluster/coordination/ClusterStatePublisher.java | 5 +++++ .../cluster/coordination/CoordinationMetadata.java | 5 +++++ .../opensearch/cluster/coordination/CoordinationState.java | 2 ++ .../coordination/CoordinationStateRejectedException.java | 2 ++ 
.../org/opensearch/cluster/coordination/Coordinator.java | 5 +++++ .../cluster/coordination/DetachClusterCommand.java | 5 +++++ .../cluster/coordination/ElectionSchedulerFactory.java | 2 ++ .../opensearch/cluster/coordination/ElectionStrategy.java | 2 ++ .../coordination/FailedToCommitClusterStateException.java | 2 ++ .../opensearch/cluster/coordination/FollowersChecker.java | 2 ++ .../cluster/coordination/InMemoryPersistedState.java | 5 +++++ .../main/java/org/opensearch/cluster/coordination/Join.java | 2 ++ .../org/opensearch/cluster/coordination/JoinHelper.java | 5 +++++ .../org/opensearch/cluster/coordination/JoinRequest.java | 5 +++++ .../opensearch/cluster/coordination/JoinTaskExecutor.java | 5 +++++ .../org/opensearch/cluster/coordination/LagDetector.java | 2 ++ .../org/opensearch/cluster/coordination/LeaderChecker.java | 2 ++ .../cluster/coordination/NoMasterBlockService.java | 5 +++++ .../coordination/NodeHealthCheckFailureException.java | 2 ++ .../coordination/NodeRemovalClusterStateTaskExecutor.java | 5 +++++ .../org/opensearch/cluster/coordination/NodeToolCli.java | 6 ++++++ .../cluster/coordination/OpenSearchNodeCommand.java | 5 +++++ .../org/opensearch/cluster/coordination/PeersResponse.java | 5 +++++ .../cluster/coordination/PendingClusterStateStats.java | 2 ++ .../opensearch/cluster/coordination/PreVoteCollector.java | 5 +++++ .../org/opensearch/cluster/coordination/PreVoteRequest.java | 5 +++++ .../opensearch/cluster/coordination/PreVoteResponse.java | 5 +++++ .../org/opensearch/cluster/coordination/Publication.java | 5 +++++ .../cluster/coordination/PublicationTransportHandler.java | 5 +++++ .../cluster/coordination/PublishClusterStateStats.java | 2 ++ .../org/opensearch/cluster/coordination/PublishRequest.java | 2 ++ .../opensearch/cluster/coordination/PublishResponse.java | 2 ++ .../cluster/coordination/PublishWithJoinResponse.java | 2 ++ .../org/opensearch/cluster/coordination/Reconfigurator.java | 2 ++ 
.../cluster/coordination/RemoveCustomsCommand.java | 5 +++++ .../cluster/coordination/RemoveSettingsCommand.java | 5 +++++ .../opensearch/cluster/coordination/StartJoinRequest.java | 2 ++ .../opensearch/cluster/coordination/TermVersionRequest.java | 5 +++++ .../cluster/coordination/UnsafeBootstrapMasterCommand.java | 5 +++++ .../cluster/coordination/ValidateJoinRequest.java | 5 +++++ .../org/opensearch/cluster/health/ClusterHealthStatus.java | 5 +++++ .../org/opensearch/cluster/health/ClusterIndexHealth.java | 5 +++++ .../org/opensearch/cluster/health/ClusterShardHealth.java | 5 +++++ .../org/opensearch/cluster/health/ClusterStateHealth.java | 5 +++++ .../java/org/opensearch/cluster/metadata/AliasAction.java | 2 ++ .../java/org/opensearch/cluster/metadata/AliasMetadata.java | 5 +++++ .../org/opensearch/cluster/metadata/AliasValidator.java | 2 ++ .../org/opensearch/cluster/metadata/AutoExpandReplicas.java | 2 ++ .../cluster/metadata/ClusterNameExpressionResolver.java | 2 ++ .../org/opensearch/cluster/metadata/ComponentTemplate.java | 2 ++ .../cluster/metadata/ComponentTemplateMetadata.java | 2 ++ .../cluster/metadata/ComposableIndexTemplate.java | 2 ++ .../cluster/metadata/ComposableIndexTemplateMetadata.java | 2 ++ .../java/org/opensearch/cluster/metadata/DataStream.java | 5 +++++ .../org/opensearch/cluster/metadata/DataStreamMetadata.java | 2 ++ .../org/opensearch/cluster/metadata/DiffableStringMap.java | 2 ++ .../org/opensearch/cluster/metadata/IndexAbstraction.java | 2 ++ .../cluster/metadata/IndexAbstractionResolver.java | 5 +++++ .../org/opensearch/cluster/metadata/IndexGraveyard.java | 2 ++ .../java/org/opensearch/cluster/metadata/IndexMetadata.java | 5 +++++ .../cluster/metadata/IndexNameExpressionResolver.java | 5 +++++ .../opensearch/cluster/metadata/IndexTemplateMetadata.java | 5 +++++ .../main/java/org/opensearch/cluster/metadata/Manifest.java | 2 ++ .../org/opensearch/cluster/metadata/MappingMetadata.java | 2 ++ 
.../main/java/org/opensearch/cluster/metadata/Metadata.java | 5 +++++ .../cluster/metadata/MetadataCreateDataStreamService.java | 5 +++++ .../cluster/metadata/MetadataCreateIndexService.java | 2 ++ .../cluster/metadata/MetadataDeleteIndexService.java | 2 ++ .../cluster/metadata/MetadataIndexAliasesService.java | 2 ++ .../cluster/metadata/MetadataIndexStateService.java | 2 ++ .../cluster/metadata/MetadataIndexTemplateService.java | 2 ++ .../cluster/metadata/MetadataIndexUpgradeService.java | 2 ++ .../opensearch/cluster/metadata/MetadataMappingService.java | 2 ++ .../cluster/metadata/MetadataUpdateSettingsService.java | 2 ++ .../metadata/ProcessClusterEventTimeoutException.java | 5 +++++ .../opensearch/cluster/metadata/RepositoriesMetadata.java | 2 ++ .../org/opensearch/cluster/metadata/RepositoryMetadata.java | 2 ++ .../cluster/metadata/SystemIndexMetadataUpgradeService.java | 2 ++ .../main/java/org/opensearch/cluster/metadata/Template.java | 2 ++ .../opensearch/cluster/metadata/TemplateUpgradeService.java | 2 ++ .../java/org/opensearch/cluster/node/DiscoveryNode.java | 2 ++ .../org/opensearch/cluster/node/DiscoveryNodeFilters.java | 5 +++++ .../java/org/opensearch/cluster/node/DiscoveryNodeRole.java | 2 ++ .../java/org/opensearch/cluster/node/DiscoveryNodes.java | 2 ++ .../java/org/opensearch/cluster/routing/AllocationId.java | 2 ++ .../opensearch/cluster/routing/BatchedRerouteService.java | 2 ++ .../cluster/routing/DelayedAllocationService.java | 2 ++ .../org/opensearch/cluster/routing/GroupShardsIterator.java | 2 ++ .../cluster/routing/IllegalShardRoutingStateException.java | 2 ++ .../org/opensearch/cluster/routing/IndexRoutingTable.java | 2 ++ .../opensearch/cluster/routing/IndexShardRoutingTable.java | 2 ++ .../org/opensearch/cluster/routing/Murmur3HashFunction.java | 2 ++ .../org/opensearch/cluster/routing/OperationRouting.java | 5 +++++ .../org/opensearch/cluster/routing/PlainShardIterator.java | 2 ++ 
.../org/opensearch/cluster/routing/PlainShardsIterator.java | 2 ++ .../java/org/opensearch/cluster/routing/Preference.java | 2 ++ .../java/org/opensearch/cluster/routing/RecoverySource.java | 2 ++ .../java/org/opensearch/cluster/routing/RerouteService.java | 2 ++ .../opensearch/cluster/routing/RotationShardShuffler.java | 2 ++ .../opensearch/cluster/routing/RoutingChangesObserver.java | 2 ++ .../org/opensearch/cluster/routing/RoutingException.java | 2 ++ .../java/org/opensearch/cluster/routing/RoutingNode.java | 2 ++ .../java/org/opensearch/cluster/routing/RoutingNodes.java | 2 ++ .../java/org/opensearch/cluster/routing/RoutingTable.java | 2 ++ .../java/org/opensearch/cluster/routing/ShardIterator.java | 2 ++ .../java/org/opensearch/cluster/routing/ShardRouting.java | 2 ++ .../org/opensearch/cluster/routing/ShardRoutingState.java | 2 ++ .../java/org/opensearch/cluster/routing/ShardShuffler.java | 2 ++ .../java/org/opensearch/cluster/routing/ShardsIterator.java | 2 ++ .../java/org/opensearch/cluster/routing/UnassignedInfo.java | 2 ++ .../routing/allocation/AbstractAllocationDecision.java | 2 ++ .../routing/allocation/AllocateUnassignedDecision.java | 2 ++ .../cluster/routing/allocation/AllocationConstraints.java | 2 ++ .../cluster/routing/allocation/AllocationDecision.java | 2 ++ .../cluster/routing/allocation/AllocationService.java | 2 ++ .../cluster/routing/allocation/DiskThresholdMonitor.java | 2 ++ .../cluster/routing/allocation/DiskThresholdSettings.java | 2 ++ .../cluster/routing/allocation/ExistingShardsAllocator.java | 2 ++ .../opensearch/cluster/routing/allocation/FailedShard.java | 2 ++ .../cluster/routing/allocation/IndexMetadataUpdater.java | 2 ++ .../opensearch/cluster/routing/allocation/MoveDecision.java | 2 ++ .../cluster/routing/allocation/NodeAllocationResult.java | 2 ++ .../cluster/routing/allocation/RerouteExplanation.java | 2 ++ .../cluster/routing/allocation/RoutingAllocation.java | 2 ++ .../cluster/routing/allocation/RoutingExplanations.java | 
2 ++ .../routing/allocation/RoutingNodesChangedObserver.java | 2 ++ .../cluster/routing/allocation/ShardAllocationDecision.java | 2 ++ .../opensearch/cluster/routing/allocation/StaleShard.java | 2 ++ .../allocation/allocator/BalancedShardsAllocator.java | 2 ++ .../routing/allocation/allocator/ShardsAllocator.java | 2 ++ .../command/AbstractAllocateAllocationCommand.java | 2 ++ .../command/AllocateEmptyPrimaryAllocationCommand.java | 2 ++ .../command/AllocateReplicaAllocationCommand.java | 2 ++ .../command/AllocateStalePrimaryAllocationCommand.java | 2 ++ .../routing/allocation/command/AllocationCommand.java | 2 ++ .../routing/allocation/command/AllocationCommands.java | 2 ++ .../allocation/command/BasePrimaryAllocationCommand.java | 2 ++ .../routing/allocation/command/CancelAllocationCommand.java | 2 ++ .../routing/allocation/command/MoveAllocationCommand.java | 2 ++ .../routing/allocation/decider/AllocationDecider.java | 2 ++ .../routing/allocation/decider/AllocationDeciders.java | 2 ++ .../allocation/decider/AwarenessAllocationDecider.java | 2 ++ .../decider/ClusterRebalanceAllocationDecider.java | 2 ++ .../decider/ConcurrentRebalanceAllocationDecider.java | 2 ++ .../decider/ConcurrentRecoveriesAllocationDecider.java | 2 ++ .../cluster/routing/allocation/decider/Decision.java | 2 ++ .../routing/allocation/decider/DiskThresholdDecider.java | 2 ++ .../routing/allocation/decider/EnableAllocationDecider.java | 2 ++ .../routing/allocation/decider/FilterAllocationDecider.java | 2 ++ .../allocation/decider/MaxRetryAllocationDecider.java | 1 + .../allocation/decider/NodeLoadAwareAllocationDecider.java | 2 ++ .../allocation/decider/NodeVersionAllocationDecider.java | 2 ++ .../decider/RebalanceOnlyWhenActiveAllocationDecider.java | 2 ++ .../decider/ReplicaAfterPrimaryActiveAllocationDecider.java | 2 ++ .../routing/allocation/decider/ResizeAllocationDecider.java | 2 ++ .../decider/RestoreInProgressAllocationDecider.java | 2 ++ 
.../allocation/decider/SameShardAllocationDecider.java | 2 ++ .../allocation/decider/ShardsLimitAllocationDecider.java | 2 ++ .../decider/SnapshotInProgressAllocationDecider.java | 2 ++ .../allocation/decider/ThrottlingAllocationDecider.java | 2 ++ .../java/org/opensearch/cluster/service/ClusterApplier.java | 5 +++++ .../opensearch/cluster/service/ClusterApplierService.java | 5 +++++ .../java/org/opensearch/cluster/service/ClusterService.java | 5 +++++ .../java/org/opensearch/cluster/service/MasterService.java | 5 +++++ .../org/opensearch/cluster/service/PendingClusterTask.java | 5 +++++ .../cluster/service/SourcePrioritizedRunnable.java | 2 ++ .../java/org/opensearch/cluster/service/TaskBatcher.java | 2 ++ 221 files changed, 611 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/opensearch/cluster/AbstractDiffable.java b/server/src/main/java/org/opensearch/cluster/AbstractDiffable.java index 7b95545e58101..600f972fd9d63 100644 --- a/server/src/main/java/org/opensearch/cluster/AbstractDiffable.java +++ b/server/src/main/java/org/opensearch/cluster/AbstractDiffable.java @@ -41,6 +41,8 @@ /** * Abstract diffable object with simple diffs implementation that sends the entire object if object has changed or * nothing if object remained the same. + * + * @opensearch.internal */ public abstract class AbstractDiffable> implements Diffable { diff --git a/server/src/main/java/org/opensearch/cluster/AbstractNamedDiffable.java b/server/src/main/java/org/opensearch/cluster/AbstractNamedDiffable.java index 43ff74f30a249..8af061a0874c9 100644 --- a/server/src/main/java/org/opensearch/cluster/AbstractNamedDiffable.java +++ b/server/src/main/java/org/opensearch/cluster/AbstractNamedDiffable.java @@ -43,6 +43,8 @@ /** * Abstract diffable object with simple diffs implementation that sends the entire object if object has changed or * nothing is object remained the same. 
Comparing to AbstractDiffable, this class also works with NamedWriteables + * + * @opensearch.internal */ public abstract class AbstractNamedDiffable> implements Diffable, NamedWriteable { diff --git a/server/src/main/java/org/opensearch/cluster/AckedClusterStateTaskListener.java b/server/src/main/java/org/opensearch/cluster/AckedClusterStateTaskListener.java index 0f6531fb09be9..482087be1c8eb 100644 --- a/server/src/main/java/org/opensearch/cluster/AckedClusterStateTaskListener.java +++ b/server/src/main/java/org/opensearch/cluster/AckedClusterStateTaskListener.java @@ -35,6 +35,11 @@ import org.opensearch.common.Nullable; import org.opensearch.common.unit.TimeValue; +/** + * Listener when cluster state task is acknowledged + * + * @opensearch.internal + */ public interface AckedClusterStateTaskListener extends ClusterStateTaskListener { /** diff --git a/server/src/main/java/org/opensearch/cluster/AckedClusterStateUpdateTask.java b/server/src/main/java/org/opensearch/cluster/AckedClusterStateUpdateTask.java index 82ea05274d0d1..21c4460964067 100644 --- a/server/src/main/java/org/opensearch/cluster/AckedClusterStateUpdateTask.java +++ b/server/src/main/java/org/opensearch/cluster/AckedClusterStateUpdateTask.java @@ -41,6 +41,8 @@ /** * An extension interface to {@link ClusterStateUpdateTask} that allows to be notified when * all the nodes have acknowledged a cluster state update request + * + * @opensearch.internal */ public abstract class AckedClusterStateUpdateTask extends ClusterStateUpdateTask implements AckedClusterStateTaskListener { diff --git a/server/src/main/java/org/opensearch/cluster/ClusterChangedEvent.java b/server/src/main/java/org/opensearch/cluster/ClusterChangedEvent.java index 387a27da46820..dd4f6c59fabaa 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterChangedEvent.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterChangedEvent.java @@ -53,6 +53,8 @@ /** * An event received by the local node, signaling that the cluster 
state has changed. + * + * @opensearch.internal */ public class ClusterChangedEvent { diff --git a/server/src/main/java/org/opensearch/cluster/ClusterInfo.java b/server/src/main/java/org/opensearch/cluster/ClusterInfo.java index 19e026c570c74..8803af9a5419a 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterInfo.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterInfo.java @@ -55,6 +55,8 @@ * and a map of shard ids to shard sizes, see * InternalClusterInfoService.shardIdentifierFromRouting(String) * for the key used in the shardSizes map + * + * @opensearch.internal */ public class ClusterInfo implements ToXContentFragment, Writeable { private final ImmutableOpenMap leastAvailableSpaceUsage; diff --git a/server/src/main/java/org/opensearch/cluster/ClusterInfoService.java b/server/src/main/java/org/opensearch/cluster/ClusterInfoService.java index 03ef68fbd1c4d..50675d11003bb 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterInfoService.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterInfoService.java @@ -36,6 +36,8 @@ /** * Interface for a class used to gather information about a cluster periodically. + * + * @opensearch.internal */ @FunctionalInterface public interface ClusterInfoService { diff --git a/server/src/main/java/org/opensearch/cluster/ClusterModule.java b/server/src/main/java/org/opensearch/cluster/ClusterModule.java index c85691b80d7c3..900dceb8564c9 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterModule.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterModule.java @@ -110,6 +110,8 @@ /** * Configures classes and services that affect the entire cluster. 
+ * + * @opensearch.internal */ public class ClusterModule extends AbstractModule { diff --git a/server/src/main/java/org/opensearch/cluster/ClusterName.java b/server/src/main/java/org/opensearch/cluster/ClusterName.java index 86b182c6f6c2d..77fc21b739f1a 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterName.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterName.java @@ -42,6 +42,11 @@ import java.util.Objects; import java.util.function.Predicate; +/** + * Cluster Name + * + * @opensearch.internal + */ public class ClusterName implements Writeable { public static final Setting CLUSTER_NAME_SETTING = new Setting<>("cluster.name", "opensearch", (s) -> { diff --git a/server/src/main/java/org/opensearch/cluster/ClusterState.java b/server/src/main/java/org/opensearch/cluster/ClusterState.java index 3eaac99bad998..4010002561930 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterState.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterState.java @@ -97,6 +97,8 @@ * make sure that the correct diffs are applied. If uuids don’t match, the {@link ClusterStateDiff#apply} method * throws the {@link IncompatibleClusterStateVersionException}, which causes the publishing mechanism to send * a full version of the cluster state to the node on which this exception was thrown. + * + * @opensearch.internal */ public class ClusterState implements ToXContentFragment, Diffable { diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateApplier.java b/server/src/main/java/org/opensearch/cluster/ClusterStateApplier.java index 6ba7cba0b463d..140e6426bb801 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateApplier.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateApplier.java @@ -37,6 +37,8 @@ /** * A component that is in charge of applying an incoming cluster state to the node internal data structures. 
* The single apply method is called before the cluster state becomes visible via {@link ClusterService#state()}. + * + * @opensearch.internal */ public interface ClusterStateApplier { diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateListener.java b/server/src/main/java/org/opensearch/cluster/ClusterStateListener.java index bbd41508fd630..01a8e51a3d13e 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateListener.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateListener.java @@ -35,7 +35,7 @@ /** * A listener to be notified when a cluster state changes. * - * + * @opensearch.internal */ public interface ClusterStateListener { diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateObserver.java b/server/src/main/java/org/opensearch/cluster/ClusterStateObserver.java index 4f3372b4e9069..5e99ebe229c6a 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateObserver.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateObserver.java @@ -50,6 +50,8 @@ * A utility class which simplifies interacting with the cluster state in cases where * one tries to take action based on the current state but may want to wait for a new state * and retry upon failure. 
+ * + * @opensearch.internal */ public class ClusterStateObserver { diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskConfig.java b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskConfig.java index ec038df7a9096..8d775b92a0431 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskConfig.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskConfig.java @@ -37,6 +37,8 @@ /** * Cluster state update task configuration for timeout and priority + * + * @opensearch.internal */ public interface ClusterStateTaskConfig { /** diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java index 48d3dd7d03cb5..8d40f447abe98 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java @@ -37,6 +37,11 @@ import java.util.List; import java.util.Map; +/** + * Interface that updates the cluster state based on the task + * + * @opensearch.internal + */ public interface ClusterStateTaskExecutor { /** * Update the cluster state based on the current state and the given tasks. 
Return the *same instance* if no state diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java index d5b9eebbc3b5d..91137a7efae92 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java @@ -35,6 +35,11 @@ import java.util.List; +/** + * Interface to implement a cluster state change listener + * + * @opensearch.internal + */ public interface ClusterStateTaskListener { /** diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateUpdateTask.java b/server/src/main/java/org/opensearch/cluster/ClusterStateUpdateTask.java index 9393663b309fc..f9de49a1f7e58 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateUpdateTask.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateUpdateTask.java @@ -40,6 +40,8 @@ /** * A task that can update the cluster state. 
+ * + * @opensearch.internal */ public abstract class ClusterStateUpdateTask implements diff --git a/server/src/main/java/org/opensearch/cluster/Diff.java b/server/src/main/java/org/opensearch/cluster/Diff.java index 165fb750f3f53..9af6afed0d13b 100644 --- a/server/src/main/java/org/opensearch/cluster/Diff.java +++ b/server/src/main/java/org/opensearch/cluster/Diff.java @@ -36,6 +36,8 @@ /** * Represents difference between states of cluster state parts + * + * @opensearch.internal */ public interface Diff extends Writeable { diff --git a/server/src/main/java/org/opensearch/cluster/Diffable.java b/server/src/main/java/org/opensearch/cluster/Diffable.java index 3dcac5459d27d..23c30527befa9 100644 --- a/server/src/main/java/org/opensearch/cluster/Diffable.java +++ b/server/src/main/java/org/opensearch/cluster/Diffable.java @@ -36,6 +36,8 @@ /** * Cluster state part, changes in which can be serialized + * + * @opensearch.internal */ public interface Diffable extends Writeable { diff --git a/server/src/main/java/org/opensearch/cluster/DiffableUtils.java b/server/src/main/java/org/opensearch/cluster/DiffableUtils.java index 76da490eca1f3..88a240938c468 100644 --- a/server/src/main/java/org/opensearch/cluster/DiffableUtils.java +++ b/server/src/main/java/org/opensearch/cluster/DiffableUtils.java @@ -53,6 +53,11 @@ import java.util.Map; import java.util.Set; +/** + * Utility class for a diffable + * + * @opensearch.internal + */ public final class DiffableUtils { private DiffableUtils() {} diff --git a/server/src/main/java/org/opensearch/cluster/DiskUsage.java b/server/src/main/java/org/opensearch/cluster/DiskUsage.java index e7f04ffb749b9..69bca6ae5b2c5 100644 --- a/server/src/main/java/org/opensearch/cluster/DiskUsage.java +++ b/server/src/main/java/org/opensearch/cluster/DiskUsage.java @@ -45,6 +45,8 @@ /** * Encapsulation class used to represent the amount of disk used on a node. 
+ * + * @opensearch.internal */ public class DiskUsage implements ToXContentFragment, Writeable { final String nodeId; diff --git a/server/src/main/java/org/opensearch/cluster/EmptyClusterInfoService.java b/server/src/main/java/org/opensearch/cluster/EmptyClusterInfoService.java index cf2ce4c3f9ae0..27d1c706eb012 100644 --- a/server/src/main/java/org/opensearch/cluster/EmptyClusterInfoService.java +++ b/server/src/main/java/org/opensearch/cluster/EmptyClusterInfoService.java @@ -36,6 +36,8 @@ /** * {@link ClusterInfoService} that provides empty maps for disk usage and shard sizes + * + * @opensearch.internal */ public class EmptyClusterInfoService implements ClusterInfoService { public static final EmptyClusterInfoService INSTANCE = new EmptyClusterInfoService(); diff --git a/server/src/main/java/org/opensearch/cluster/IncompatibleClusterStateVersionException.java b/server/src/main/java/org/opensearch/cluster/IncompatibleClusterStateVersionException.java index 5ce71c651ae61..8dd988b0fba5d 100644 --- a/server/src/main/java/org/opensearch/cluster/IncompatibleClusterStateVersionException.java +++ b/server/src/main/java/org/opensearch/cluster/IncompatibleClusterStateVersionException.java @@ -39,6 +39,8 @@ /** * Thrown by {@link Diff#apply} method + * + * @opensearch.internal */ public class IncompatibleClusterStateVersionException extends OpenSearchException { public IncompatibleClusterStateVersionException(String msg) { diff --git a/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java b/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java index 5b1c026e5259b..ac70e42149086 100644 --- a/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java +++ b/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java @@ -83,6 +83,8 @@ * * Every time the timer runs, gathers information about the disk usage and * shard sizes across the cluster. 
+ * + * @opensearch.internal */ public class InternalClusterInfoService implements ClusterInfoService, ClusterStateListener { diff --git a/server/src/main/java/org/opensearch/cluster/LocalClusterUpdateTask.java b/server/src/main/java/org/opensearch/cluster/LocalClusterUpdateTask.java index ffcd63b3b57c1..dfa02b60ee9dc 100644 --- a/server/src/main/java/org/opensearch/cluster/LocalClusterUpdateTask.java +++ b/server/src/main/java/org/opensearch/cluster/LocalClusterUpdateTask.java @@ -39,6 +39,8 @@ /** * Used to apply state updates on nodes that are not necessarily cluster-manager + * + * @opensearch.internal */ public abstract class LocalClusterUpdateTask implements diff --git a/server/src/main/java/org/opensearch/cluster/LocalNodeMasterListener.java b/server/src/main/java/org/opensearch/cluster/LocalNodeMasterListener.java index d4456b379237c..612141807ab14 100644 --- a/server/src/main/java/org/opensearch/cluster/LocalNodeMasterListener.java +++ b/server/src/main/java/org/opensearch/cluster/LocalNodeMasterListener.java @@ -34,6 +34,8 @@ /** * Enables listening to cluster-manager changes events of the local node (when the local node becomes the cluster-manager, and when the local * node cease being a cluster-manager). 
+ * + * @opensearch.internal */ public interface LocalNodeMasterListener extends ClusterStateListener { diff --git a/server/src/main/java/org/opensearch/cluster/MasterNodeChangePredicate.java b/server/src/main/java/org/opensearch/cluster/MasterNodeChangePredicate.java index 8eeaedd83cb26..7ff5161c443a1 100644 --- a/server/src/main/java/org/opensearch/cluster/MasterNodeChangePredicate.java +++ b/server/src/main/java/org/opensearch/cluster/MasterNodeChangePredicate.java @@ -36,6 +36,11 @@ import java.util.function.Predicate; +/** + * Utility class to build a predicate that accepts cluster state changes + * + * @opensearch.internal + */ public final class MasterNodeChangePredicate { private MasterNodeChangePredicate() { diff --git a/server/src/main/java/org/opensearch/cluster/MergableCustomMetadata.java b/server/src/main/java/org/opensearch/cluster/MergableCustomMetadata.java index 521fbe52848f8..b9898e79e3e6f 100644 --- a/server/src/main/java/org/opensearch/cluster/MergableCustomMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/MergableCustomMetadata.java @@ -40,6 +40,8 @@ * Custom metadata can be merged using {@link #merge(Metadata.Custom)}. 
* * @param type of custom meta data + * + * @opensearch.internal */ public interface MergableCustomMetadata { diff --git a/server/src/main/java/org/opensearch/cluster/NamedDiff.java b/server/src/main/java/org/opensearch/cluster/NamedDiff.java index 7ab73b965188c..2b8dfa14f9ea6 100644 --- a/server/src/main/java/org/opensearch/cluster/NamedDiff.java +++ b/server/src/main/java/org/opensearch/cluster/NamedDiff.java @@ -37,6 +37,8 @@ /** * Diff that also support NamedWriteable interface + * + * @opensearch.internal */ public interface NamedDiff> extends Diff, NamedWriteable { /** diff --git a/server/src/main/java/org/opensearch/cluster/NamedDiffable.java b/server/src/main/java/org/opensearch/cluster/NamedDiffable.java index 7073053d1d840..b47e180cf583e 100644 --- a/server/src/main/java/org/opensearch/cluster/NamedDiffable.java +++ b/server/src/main/java/org/opensearch/cluster/NamedDiffable.java @@ -36,5 +36,7 @@ /** * Diff that also support {@link VersionedNamedWriteable} interface + * + * @opensearch.internal */ public interface NamedDiffable extends Diffable, VersionedNamedWriteable {} diff --git a/server/src/main/java/org/opensearch/cluster/NamedDiffableValueSerializer.java b/server/src/main/java/org/opensearch/cluster/NamedDiffableValueSerializer.java index d7bd39de42884..ecf42e413459c 100644 --- a/server/src/main/java/org/opensearch/cluster/NamedDiffableValueSerializer.java +++ b/server/src/main/java/org/opensearch/cluster/NamedDiffableValueSerializer.java @@ -39,6 +39,8 @@ /** * Value Serializer for named diffables + * + * @opensearch.internal */ public class NamedDiffableValueSerializer> extends DiffableUtils.DiffableValueSerializer { diff --git a/server/src/main/java/org/opensearch/cluster/NodeConnectionsService.java b/server/src/main/java/org/opensearch/cluster/NodeConnectionsService.java index 696df6278dbb6..c642d3652c47a 100644 --- a/server/src/main/java/org/opensearch/cluster/NodeConnectionsService.java +++ 
b/server/src/main/java/org/opensearch/cluster/NodeConnectionsService.java @@ -80,6 +80,8 @@ *

      * This component does not block on disconnections at all, because a disconnection might need to wait for an ongoing (background) connection * attempt to complete first. + * + * @opensearch.internal */ public class NodeConnectionsService extends AbstractLifecycleComponent { private static final Logger logger = LogManager.getLogger(NodeConnectionsService.class); diff --git a/server/src/main/java/org/opensearch/cluster/NotMasterException.java b/server/src/main/java/org/opensearch/cluster/NotMasterException.java index c8ec32ed77eb9..a855f3b665ac3 100644 --- a/server/src/main/java/org/opensearch/cluster/NotMasterException.java +++ b/server/src/main/java/org/opensearch/cluster/NotMasterException.java @@ -40,6 +40,8 @@ * Thrown when a node join request or a cluster-manager ping reaches a node which is not * currently acting as a cluster-manager or when a cluster state update task is to be executed * on a node that is no longer cluster-manager. + * + * @opensearch.internal */ public class NotMasterException extends OpenSearchException { diff --git a/server/src/main/java/org/opensearch/cluster/RepositoryCleanupInProgress.java b/server/src/main/java/org/opensearch/cluster/RepositoryCleanupInProgress.java index bae464606c0f2..0f69acf12272a 100644 --- a/server/src/main/java/org/opensearch/cluster/RepositoryCleanupInProgress.java +++ b/server/src/main/java/org/opensearch/cluster/RepositoryCleanupInProgress.java @@ -45,6 +45,11 @@ import java.util.Collections; import java.util.List; +/** + * Information passed during repository cleanup + * + * @opensearch.internal + */ public final class RepositoryCleanupInProgress extends AbstractNamedDiffable implements ClusterState.Custom { public static final RepositoryCleanupInProgress EMPTY = new RepositoryCleanupInProgress(Collections.emptyList()); diff --git a/server/src/main/java/org/opensearch/cluster/RestoreInProgress.java b/server/src/main/java/org/opensearch/cluster/RestoreInProgress.java index 205084261f2a9..45d9c8b373298 
100644 --- a/server/src/main/java/org/opensearch/cluster/RestoreInProgress.java +++ b/server/src/main/java/org/opensearch/cluster/RestoreInProgress.java @@ -54,6 +54,8 @@ /** * Meta data about restore processes that are currently executing + * + * @opensearch.internal */ public class RestoreInProgress extends AbstractNamedDiffable implements Custom, Iterable { diff --git a/server/src/main/java/org/opensearch/cluster/SnapshotDeletionsInProgress.java b/server/src/main/java/org/opensearch/cluster/SnapshotDeletionsInProgress.java index 67ba285f45130..bf2ab3f269357 100644 --- a/server/src/main/java/org/opensearch/cluster/SnapshotDeletionsInProgress.java +++ b/server/src/main/java/org/opensearch/cluster/SnapshotDeletionsInProgress.java @@ -57,6 +57,8 @@ /** * A class that represents the snapshot deletions that are in progress in the cluster. + * + * @opensearch.internal */ public class SnapshotDeletionsInProgress extends AbstractNamedDiffable implements Custom { diff --git a/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java b/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java index d0d5aea9d036b..033c0bafc971d 100644 --- a/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java +++ b/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java @@ -71,6 +71,8 @@ /** * Meta data about snapshots that are currently executing + * + * @opensearch.internal */ public class SnapshotsInProgress extends AbstractNamedDiffable implements Custom { diff --git a/server/src/main/java/org/opensearch/cluster/TimeoutClusterStateListener.java b/server/src/main/java/org/opensearch/cluster/TimeoutClusterStateListener.java index d7465fa00a50c..eb31fa2b7e69d 100644 --- a/server/src/main/java/org/opensearch/cluster/TimeoutClusterStateListener.java +++ b/server/src/main/java/org/opensearch/cluster/TimeoutClusterStateListener.java @@ -37,7 +37,7 @@ /** * An exception to cluster state listener that allows for timeouts and for post added 
notifications. * - * + * @opensearch.internal */ public interface TimeoutClusterStateListener extends ClusterStateListener { diff --git a/server/src/main/java/org/opensearch/cluster/ack/AckedRequest.java b/server/src/main/java/org/opensearch/cluster/ack/AckedRequest.java index 97d628e3231c9..a3f74cb45a880 100644 --- a/server/src/main/java/org/opensearch/cluster/ack/AckedRequest.java +++ b/server/src/main/java/org/opensearch/cluster/ack/AckedRequest.java @@ -36,6 +36,8 @@ /** * Identifies a cluster state update request with acknowledgement support + * + * @opensearch.internal */ public interface AckedRequest { diff --git a/server/src/main/java/org/opensearch/cluster/ack/ClusterStateUpdateRequest.java b/server/src/main/java/org/opensearch/cluster/ack/ClusterStateUpdateRequest.java index 0931086ab3ff0..dd5769d7c7f89 100644 --- a/server/src/main/java/org/opensearch/cluster/ack/ClusterStateUpdateRequest.java +++ b/server/src/main/java/org/opensearch/cluster/ack/ClusterStateUpdateRequest.java @@ -37,6 +37,8 @@ /** * Base class to be used when needing to update the cluster state * Contains the basic fields that are always needed + * + * @opensearch.internal */ public abstract class ClusterStateUpdateRequest> implements AckedRequest { diff --git a/server/src/main/java/org/opensearch/cluster/ack/ClusterStateUpdateResponse.java b/server/src/main/java/org/opensearch/cluster/ack/ClusterStateUpdateResponse.java index 3a79e15380165..db26496c6f263 100644 --- a/server/src/main/java/org/opensearch/cluster/ack/ClusterStateUpdateResponse.java +++ b/server/src/main/java/org/opensearch/cluster/ack/ClusterStateUpdateResponse.java @@ -34,6 +34,8 @@ /** * Base response returned after a cluster state update + * + * @opensearch.internal */ public class ClusterStateUpdateResponse { diff --git a/server/src/main/java/org/opensearch/cluster/ack/CreateIndexClusterStateUpdateResponse.java b/server/src/main/java/org/opensearch/cluster/ack/CreateIndexClusterStateUpdateResponse.java index 
9c9beb1cd986e..52b04b9961ff8 100644 --- a/server/src/main/java/org/opensearch/cluster/ack/CreateIndexClusterStateUpdateResponse.java +++ b/server/src/main/java/org/opensearch/cluster/ack/CreateIndexClusterStateUpdateResponse.java @@ -34,6 +34,8 @@ /** * A cluster state update response with specific fields for index creation. + * + * @opensearch.internal */ public class CreateIndexClusterStateUpdateResponse extends ClusterStateUpdateResponse { diff --git a/server/src/main/java/org/opensearch/cluster/ack/IndicesClusterStateUpdateRequest.java b/server/src/main/java/org/opensearch/cluster/ack/IndicesClusterStateUpdateRequest.java index 7410378760a3a..c235b132d8f71 100644 --- a/server/src/main/java/org/opensearch/cluster/ack/IndicesClusterStateUpdateRequest.java +++ b/server/src/main/java/org/opensearch/cluster/ack/IndicesClusterStateUpdateRequest.java @@ -35,6 +35,8 @@ /** * Base cluster state update request that allows to execute update against multiple indices + * + * @opensearch.internal */ public abstract class IndicesClusterStateUpdateRequest> extends ClusterStateUpdateRequest { diff --git a/server/src/main/java/org/opensearch/cluster/ack/OpenIndexClusterStateUpdateResponse.java b/server/src/main/java/org/opensearch/cluster/ack/OpenIndexClusterStateUpdateResponse.java index 70915ea162a8a..faf9e3e9bebbc 100644 --- a/server/src/main/java/org/opensearch/cluster/ack/OpenIndexClusterStateUpdateResponse.java +++ b/server/src/main/java/org/opensearch/cluster/ack/OpenIndexClusterStateUpdateResponse.java @@ -33,6 +33,8 @@ /** * A cluster state update response with specific fields for index opening. 
+ * + * @opensearch.internal */ public class OpenIndexClusterStateUpdateResponse extends ClusterStateUpdateResponse { diff --git a/server/src/main/java/org/opensearch/cluster/action/index/MappingUpdatedAction.java b/server/src/main/java/org/opensearch/cluster/action/index/MappingUpdatedAction.java index cf1f2d3141ccd..eddf873d56ac7 100644 --- a/server/src/main/java/org/opensearch/cluster/action/index/MappingUpdatedAction.java +++ b/server/src/main/java/org/opensearch/cluster/action/index/MappingUpdatedAction.java @@ -58,6 +58,8 @@ /** * Called by shards in the cluster when their mapping was dynamically updated and it needs to be updated * in the cluster state meta data (and broadcast to all members). + * + * @opensearch.internal */ public class MappingUpdatedAction { diff --git a/server/src/main/java/org/opensearch/cluster/action/index/NodeMappingRefreshAction.java b/server/src/main/java/org/opensearch/cluster/action/index/NodeMappingRefreshAction.java index b40665a1bcf1b..9cce2ff042dcd 100644 --- a/server/src/main/java/org/opensearch/cluster/action/index/NodeMappingRefreshAction.java +++ b/server/src/main/java/org/opensearch/cluster/action/index/NodeMappingRefreshAction.java @@ -53,6 +53,11 @@ import java.io.IOException; +/** + * Transport action for refreshing the Node Mapping + * + * @opensearch.internal + */ public class NodeMappingRefreshAction { private static final Logger logger = LogManager.getLogger(NodeMappingRefreshAction.class); diff --git a/server/src/main/java/org/opensearch/cluster/action/shard/ShardStateAction.java b/server/src/main/java/org/opensearch/cluster/action/shard/ShardStateAction.java index fd6a5367146a4..47ad61913a947 100644 --- a/server/src/main/java/org/opensearch/cluster/action/shard/ShardStateAction.java +++ b/server/src/main/java/org/opensearch/cluster/action/shard/ShardStateAction.java @@ -87,6 +87,11 @@ import java.util.function.Predicate; import java.util.function.Supplier; +/** + * Transport action for retrieving the shard state 
+ * + * @opensearch.internal + */ public class ShardStateAction { private static final Logger logger = LogManager.getLogger(ShardStateAction.class); diff --git a/server/src/main/java/org/opensearch/cluster/block/ClusterBlock.java b/server/src/main/java/org/opensearch/cluster/block/ClusterBlock.java index 9da2bdeaabd8e..1314596f925d6 100644 --- a/server/src/main/java/org/opensearch/cluster/block/ClusterBlock.java +++ b/server/src/main/java/org/opensearch/cluster/block/ClusterBlock.java @@ -45,6 +45,11 @@ import java.util.Locale; import java.util.Objects; +/** + * Blocks the cluster for concurrency + * + * @opensearch.internal + */ public class ClusterBlock implements Writeable, ToXContentFragment { private final int id; diff --git a/server/src/main/java/org/opensearch/cluster/block/ClusterBlockException.java b/server/src/main/java/org/opensearch/cluster/block/ClusterBlockException.java index 8a6401b985b90..c507b76168e82 100644 --- a/server/src/main/java/org/opensearch/cluster/block/ClusterBlockException.java +++ b/server/src/main/java/org/opensearch/cluster/block/ClusterBlockException.java @@ -46,6 +46,11 @@ import static java.util.Collections.unmodifiableSet; +/** + * Internal exception on obtaining a cluster block + * + * @opensearch.internal + */ public class ClusterBlockException extends OpenSearchException { private final Set blocks; diff --git a/server/src/main/java/org/opensearch/cluster/block/ClusterBlockLevel.java b/server/src/main/java/org/opensearch/cluster/block/ClusterBlockLevel.java index fa8f7f19b752a..06181f809be22 100644 --- a/server/src/main/java/org/opensearch/cluster/block/ClusterBlockLevel.java +++ b/server/src/main/java/org/opensearch/cluster/block/ClusterBlockLevel.java @@ -34,6 +34,11 @@ import java.util.EnumSet; +/** + * What level to block the cluster + * + * @opensearch.internal + */ public enum ClusterBlockLevel { READ, WRITE, diff --git a/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java 
b/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java index a9bde418e962c..b889688bdd390 100644 --- a/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java +++ b/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java @@ -59,6 +59,8 @@ /** * Represents current cluster level blocks to block dirty operations done against the cluster. + * + * @opensearch.internal */ public class ClusterBlocks extends AbstractDiffable { public static final ClusterBlocks EMPTY_CLUSTER_BLOCK = new ClusterBlocks(emptySet(), ImmutableOpenMap.of()); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/ApplyCommitRequest.java b/server/src/main/java/org/opensearch/cluster/coordination/ApplyCommitRequest.java index 2ace3e86b31de..3536a11f9e2e0 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/ApplyCommitRequest.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/ApplyCommitRequest.java @@ -41,6 +41,8 @@ * A cluster-manager node sends this request to its peers to inform them that it could commit the * cluster state with the given term and version. Peers that have accepted the given cluster * state will then consider it as committed and proceed to apply the state locally. 
+ * + * @opensearch.internal */ public class ApplyCommitRequest extends TermVersionRequest { diff --git a/server/src/main/java/org/opensearch/cluster/coordination/ClusterBootstrapService.java b/server/src/main/java/org/opensearch/cluster/coordination/ClusterBootstrapService.java index 979b36110b6a3..cdf673c00fe56 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/ClusterBootstrapService.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/ClusterBootstrapService.java @@ -69,6 +69,11 @@ import static org.opensearch.discovery.SettingsBasedSeedHostsProvider.DISCOVERY_SEED_HOSTS_SETTING; import static org.opensearch.discovery.SettingsBasedSeedHostsProvider.LEGACY_DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING; +/** + * Service for bootstrapping the OpenSearch cluster + * + * @opensearch.internal + */ public class ClusterBootstrapService { public static final Setting> INITIAL_MASTER_NODES_SETTING = Setting.listSetting( diff --git a/server/src/main/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelper.java b/server/src/main/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelper.java index 9cfd3a6fc3697..ee175b5f6fd24 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelper.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelper.java @@ -59,6 +59,11 @@ import static org.opensearch.cluster.coordination.ClusterBootstrapService.INITIAL_CLUSTER_MANAGER_NODES_SETTING; import static org.opensearch.monitor.StatusInfo.Status.UNHEALTHY; +/** + * Helper for cluster failure events + * + * @opensearch.internal + */ public class ClusterFormationFailureHelper { private static final Logger logger = LogManager.getLogger(ClusterFormationFailureHelper.class); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/ClusterStatePublisher.java b/server/src/main/java/org/opensearch/cluster/coordination/ClusterStatePublisher.java index 
ef35c6f8b3249..6e932afb34ab1 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/ClusterStatePublisher.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/ClusterStatePublisher.java @@ -37,6 +37,11 @@ import org.opensearch.common.Nullable; import org.opensearch.common.unit.TimeValue; +/** + * Publishes the cluster state + * + * @opensearch.internal + */ public interface ClusterStatePublisher { /** * Publish all the changes to the cluster from the cluster-manager (can be called just by the cluster-manager). The publish diff --git a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationMetadata.java b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationMetadata.java index 56f8d24b6a5c0..3f24f59179641 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationMetadata.java @@ -52,6 +52,11 @@ import java.util.Set; import java.util.stream.Collectors; +/** + * Metadata for cluster coordination + * + * @opensearch.internal + */ public class CoordinationMetadata implements Writeable, ToXContentFragment { public static final CoordinationMetadata EMPTY_METADATA = builder().build(); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java index 9713c841caaf7..eff55e6d88193 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java @@ -53,6 +53,8 @@ /** * The core class of the cluster state coordination algorithm, directly implementing the * formal model + * + * @opensearch.internal */ public class CoordinationState { diff --git a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationStateRejectedException.java 
b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationStateRejectedException.java index 078424a079f02..ece8f3e6d7345 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationStateRejectedException.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationStateRejectedException.java @@ -43,6 +43,8 @@ * Occurrences of this exception don't always signal failures, but can often be just caused by the * asynchronous, distributed nature of the system. They will, for example, naturally happen during * leader election, if multiple nodes are trying to become leader at the same time. + * + * @opensearch.internal */ public class CoordinationStateRejectedException extends OpenSearchException { public CoordinationStateRejectedException(String msg, Object... args) { diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java index ef578300cdbe2..52052b3c1adde 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java @@ -110,6 +110,11 @@ import static org.opensearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; import static org.opensearch.monitor.StatusInfo.Status.UNHEALTHY; +/** + * The main lifecycle resource coordinator + * + * @opensearch.internal + */ public class Coordinator extends AbstractLifecycleComponent implements Discovery { public static final long ZEN1_BWC_TERM = 0; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/DetachClusterCommand.java b/server/src/main/java/org/opensearch/cluster/coordination/DetachClusterCommand.java index afc8a68a74e9c..d17ef31f1b818 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/DetachClusterCommand.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/DetachClusterCommand.java @@ -41,6 +41,11 @@ import 
java.io.IOException; import java.nio.file.Path; +/** + * Command to detach a node from the cluster + * + * @opensearch.internal + */ public class DetachClusterCommand extends OpenSearchNodeCommand { static final String NODE_DETACHED_MSG = "Node was successfully detached from the cluster"; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/ElectionSchedulerFactory.java b/server/src/main/java/org/opensearch/cluster/coordination/ElectionSchedulerFactory.java index f0ec9681d76cc..dfaed3a598acc 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/ElectionSchedulerFactory.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/ElectionSchedulerFactory.java @@ -56,6 +56,8 @@ * randomly at reasonably high frequency and backing off (linearly) until one of them succeeds. We also place an upper bound on the backoff * so that if elections are failing due to a network partition that lasts for a long time then when the partition heals there is an election * attempt reasonably quickly. + * + * @opensearch.internal */ public class ElectionSchedulerFactory { diff --git a/server/src/main/java/org/opensearch/cluster/coordination/ElectionStrategy.java b/server/src/main/java/org/opensearch/cluster/coordination/ElectionStrategy.java index f3f7fd0c1b072..c6ea4649faaf0 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/ElectionStrategy.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/ElectionStrategy.java @@ -38,6 +38,8 @@ /** * Allows plugging in a custom election strategy, restricting the notion of an election quorum. * Custom additional quorum restrictions can be defined by implementing the {@link #satisfiesAdditionalQuorumConstraints} method. 
+ * + * @opensearch.internal */ public abstract class ElectionStrategy { diff --git a/server/src/main/java/org/opensearch/cluster/coordination/FailedToCommitClusterStateException.java b/server/src/main/java/org/opensearch/cluster/coordination/FailedToCommitClusterStateException.java index 59cd94aecdd57..db431a1a018c8 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/FailedToCommitClusterStateException.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/FailedToCommitClusterStateException.java @@ -38,6 +38,8 @@ /** * Thrown when failing to publish a cluster state. See {@link ClusterStatePublisher} for more details. + * + * @opensearch.internal */ public class FailedToCommitClusterStateException extends OpenSearchException { diff --git a/server/src/main/java/org/opensearch/cluster/coordination/FollowersChecker.java b/server/src/main/java/org/opensearch/cluster/coordination/FollowersChecker.java index c5fcee712b683..24eac6ac06a8c 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/FollowersChecker.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/FollowersChecker.java @@ -76,6 +76,8 @@ * follower has failed the leader will remove it from the cluster. We are fairly lenient, possibly allowing multiple checks to fail before * considering a follower to be faulty, to allow for a brief network partition or a long GC cycle to occur without triggering the removal of * a node and the consequent shard reallocation. 
+ * + * @opensearch.internal */ public class FollowersChecker { diff --git a/server/src/main/java/org/opensearch/cluster/coordination/InMemoryPersistedState.java b/server/src/main/java/org/opensearch/cluster/coordination/InMemoryPersistedState.java index d9c10f7fe0af9..67ef82ee7b2e9 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/InMemoryPersistedState.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/InMemoryPersistedState.java @@ -33,6 +33,11 @@ import org.opensearch.cluster.ClusterState; +/** + * Persist state in memory + * + * @opensearch.internal + */ public class InMemoryPersistedState implements CoordinationState.PersistedState { private long currentTerm; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Join.java b/server/src/main/java/org/opensearch/cluster/coordination/Join.java index 50225c70620c5..d039ccc8a1127 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Join.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/Join.java @@ -46,6 +46,8 @@ * information about the current state of the node that provided the vote, so that * the receiver of the vote can determine if it has a more up-to-date state than the * source node. 
+ * + * @opensearch.internal */ public class Join implements Writeable { private final DiscoveryNode sourceNode; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java index 693a997d318cd..54894c4e28196 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java @@ -84,6 +84,11 @@ import static org.opensearch.monitor.StatusInfo.Status.UNHEALTHY; +/** + * Helper utility class for joining the cluster + * + * @opensearch.internal + */ public class JoinHelper { private static final Logger logger = LogManager.getLogger(JoinHelper.class); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinRequest.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinRequest.java index 84adf834d85e8..d5a5228f485f3 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinRequest.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinRequest.java @@ -41,6 +41,11 @@ import java.util.Objects; import java.util.Optional; +/** + * Request for a node to join the cluster + * + * @opensearch.internal + */ public class JoinRequest extends TransportRequest { /** diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java index f0edeeb9319c5..aaaa73e891073 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java @@ -62,6 +62,11 @@ import static org.opensearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; +/** + * Main executor for Nodes joining the OpenSearch cluster + * + * @opensearch.internal + */ public class JoinTaskExecutor implements ClusterStateTaskExecutor { private final 
AllocationService allocationService; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/LagDetector.java b/server/src/main/java/org/opensearch/cluster/coordination/LagDetector.java index 70a1c4f3ec220..3aef8d11bcca5 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/LagDetector.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/LagDetector.java @@ -56,6 +56,8 @@ * A publication can succeed and complete before all nodes have applied the published state and acknowledged it; however we need every node * eventually either to apply the published state (or a later state) or be removed from the cluster. This component achieves this by * removing any lagging nodes from the cluster after a timeout. + * + * @opensearch.internal */ public class LagDetector { diff --git a/server/src/main/java/org/opensearch/cluster/coordination/LeaderChecker.java b/server/src/main/java/org/opensearch/cluster/coordination/LeaderChecker.java index fcf54aff7f478..7cb306bfd89f6 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/LeaderChecker.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/LeaderChecker.java @@ -75,6 +75,8 @@ * fairly lenient, possibly allowing multiple checks to fail before considering the leader to be faulty, to allow for the leader to * temporarily stand down on occasion, e.g. if it needs to move to a higher term. On deciding that the leader has failed a follower will * become a candidate and attempt to become a leader itself. 
+ * + * @opensearch.internal */ public class LeaderChecker { diff --git a/server/src/main/java/org/opensearch/cluster/coordination/NoMasterBlockService.java b/server/src/main/java/org/opensearch/cluster/coordination/NoMasterBlockService.java index f020ae4081f06..533a2337bd76e 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/NoMasterBlockService.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/NoMasterBlockService.java @@ -41,6 +41,11 @@ import java.util.EnumSet; +/** + * Service to block the master node + * + * @opensearch.internal + */ public class NoMasterBlockService { public static final int NO_MASTER_BLOCK_ID = 2; public static final ClusterBlock NO_MASTER_BLOCK_WRITES = new ClusterBlock( diff --git a/server/src/main/java/org/opensearch/cluster/coordination/NodeHealthCheckFailureException.java b/server/src/main/java/org/opensearch/cluster/coordination/NodeHealthCheckFailureException.java index 273a220d1eadb..d0919aa9e8686 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/NodeHealthCheckFailureException.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/NodeHealthCheckFailureException.java @@ -40,6 +40,8 @@ /** * This exception is thrown if the File system is reported unhealthy by @{@link org.opensearch.monitor.fs.FsHealthService} * and this nodes needs to be removed from the cluster + * + * @opensearch.internal */ public class NodeHealthCheckFailureException extends OpenSearchException { diff --git a/server/src/main/java/org/opensearch/cluster/coordination/NodeRemovalClusterStateTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/coordination/NodeRemovalClusterStateTaskExecutor.java index e8ab2f8d53d3f..3625366fa4de1 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/NodeRemovalClusterStateTaskExecutor.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/NodeRemovalClusterStateTaskExecutor.java @@ -43,6 +43,11 @@ import java.util.List; 
+/** + * Update cluster state when node is removed from the cluster + * + * @opensearch.internal + */ public class NodeRemovalClusterStateTaskExecutor implements ClusterStateTaskExecutor, diff --git a/server/src/main/java/org/opensearch/cluster/coordination/NodeToolCli.java b/server/src/main/java/org/opensearch/cluster/coordination/NodeToolCli.java index 72a3b3969e2e2..3db2171b4cca0 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/NodeToolCli.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/NodeToolCli.java @@ -44,6 +44,12 @@ // Even if we avoid making a static reference to Logger class, there is no nice way to avoid declaring // UNSAFE_BOOTSTRAP, which depends on ClusterService, which in turn has static Logger. // TODO execute CommandLoggingConfigurator.configureLoggingWithoutConfig() in the constructor of commands, not in beforeMain + +/** + * Command Line Interface tool for Nodes + * + * @opensearch.internal + */ public class NodeToolCli extends MultiCommand { public NodeToolCli() { diff --git a/server/src/main/java/org/opensearch/cluster/coordination/OpenSearchNodeCommand.java b/server/src/main/java/org/opensearch/cluster/coordination/OpenSearchNodeCommand.java index bc7bd3abbca86..9110f1789521e 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/OpenSearchNodeCommand.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/OpenSearchNodeCommand.java @@ -70,6 +70,11 @@ import java.util.Map; import java.util.Objects; +/** + * Main set of node commands + * + * @opensearch.internal + */ public abstract class OpenSearchNodeCommand extends EnvironmentAwareCommand { private static final Logger logger = LogManager.getLogger(OpenSearchNodeCommand.class); protected static final String DELIMITER = "------------------------------------------------------------------------\n"; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PeersResponse.java 
b/server/src/main/java/org/opensearch/cluster/coordination/PeersResponse.java index e667052ca5fdd..8e2e6fde3a485 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/PeersResponse.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/PeersResponse.java @@ -42,6 +42,11 @@ import java.util.Objects; import java.util.Optional; +/** + * Response from peer nodes + * + * @opensearch.internal + */ public class PeersResponse extends TransportResponse { private final Optional clusterManagerNode; private final List knownPeers; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PendingClusterStateStats.java b/server/src/main/java/org/opensearch/cluster/coordination/PendingClusterStateStats.java index 92dd926b2fa0a..d6f767d3e6235 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/PendingClusterStateStats.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/PendingClusterStateStats.java @@ -42,6 +42,8 @@ /** * Class encapsulating stats about the PendingClusterStatsQueue + * + * @opensearch.internal */ public class PendingClusterStateStats implements Writeable, ToXContentFragment { diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PreVoteCollector.java b/server/src/main/java/org/opensearch/cluster/coordination/PreVoteCollector.java index 1b934b5f000cd..c635dee173792 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/PreVoteCollector.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/PreVoteCollector.java @@ -58,6 +58,11 @@ import static org.opensearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap; import static org.opensearch.monitor.StatusInfo.Status.UNHEALTHY; +/** + * Collects information prior to a promotion vote + * + * @opensearch.internal + */ public class PreVoteCollector { private static final Logger logger = LogManager.getLogger(PreVoteCollector.class); diff --git 
a/server/src/main/java/org/opensearch/cluster/coordination/PreVoteRequest.java b/server/src/main/java/org/opensearch/cluster/coordination/PreVoteRequest.java index e99b2716a263f..c133a51239335 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/PreVoteRequest.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/PreVoteRequest.java @@ -40,6 +40,11 @@ import java.io.IOException; import java.util.Objects; +/** + * Requests pre vote information collection + * + * @opensearch.internal + */ public class PreVoteRequest extends TransportRequest { private final DiscoveryNode sourceNode; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PreVoteResponse.java b/server/src/main/java/org/opensearch/cluster/coordination/PreVoteResponse.java index 09259867ebf53..f18726c8af229 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/PreVoteResponse.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/PreVoteResponse.java @@ -39,6 +39,11 @@ import java.io.IOException; import java.util.Objects; +/** + * Response for a PreVoteRequest + * + * @opensearch.internal + */ public class PreVoteResponse extends TransportResponse { private final long currentTerm; private final long lastAcceptedTerm; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Publication.java b/server/src/main/java/org/opensearch/cluster/coordination/Publication.java index 5529c3248345c..3580de423ac95 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Publication.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/Publication.java @@ -52,6 +52,11 @@ import java.util.function.LongSupplier; import java.util.stream.Collectors; +/** + * Publication task + * + * @opensearch.internal + */ public abstract class Publication { protected final Logger logger = LogManager.getLogger(getClass()); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java 
b/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java index 9a1a392348660..7591a09b07740 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java @@ -72,6 +72,11 @@ import java.util.function.Consumer; import java.util.function.Function; +/** + * Transport handler for publication + * + * @opensearch.internal + */ public class PublicationTransportHandler { private static final Logger logger = LogManager.getLogger(PublicationTransportHandler.class); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PublishClusterStateStats.java b/server/src/main/java/org/opensearch/cluster/coordination/PublishClusterStateStats.java index b4adad898271e..1adb43ca68ce4 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/PublishClusterStateStats.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/PublishClusterStateStats.java @@ -42,6 +42,8 @@ /** * Class encapsulating stats about the PublishClusterStateAction + * + * @opensearch.internal */ public class PublishClusterStateStats implements Writeable, ToXContentObject { diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PublishRequest.java b/server/src/main/java/org/opensearch/cluster/coordination/PublishRequest.java index 86ae9ce8bc081..e7c3e2d2c965b 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/PublishRequest.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/PublishRequest.java @@ -38,6 +38,8 @@ /** * Request which is used by the cluster-manager node to publish cluster state changes. 
* Actual serialization of this request is done by {@link PublicationTransportHandler} + * + * @opensearch.internal */ public class PublishRequest { diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PublishResponse.java b/server/src/main/java/org/opensearch/cluster/coordination/PublishResponse.java index 9d219331c0d0e..5d809617019ac 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/PublishResponse.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/PublishResponse.java @@ -40,6 +40,8 @@ /** * Response to a {@link PublishRequest}, carrying the term and version of the request. * Typically wrapped in a {@link PublishWithJoinResponse}. + * + * @opensearch.internal */ public class PublishResponse implements Writeable { diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PublishWithJoinResponse.java b/server/src/main/java/org/opensearch/cluster/coordination/PublishWithJoinResponse.java index 430e539fd3255..f02f917e67800 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/PublishWithJoinResponse.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/PublishWithJoinResponse.java @@ -41,6 +41,8 @@ /** * Response to a {@link PublishRequest}. Encapsulates both a {@link PublishResponse} * and an optional {@link Join}. + * + * @opensearch.internal */ public class PublishWithJoinResponse extends TransportResponse { private final PublishResponse publishResponse; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Reconfigurator.java b/server/src/main/java/org/opensearch/cluster/coordination/Reconfigurator.java index 1c26dff45775f..931f8ff228d9c 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Reconfigurator.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/Reconfigurator.java @@ -47,6 +47,8 @@ /** * Computes the optimal configuration of voting nodes in the cluster. 
+ * + * @opensearch.internal */ public class Reconfigurator { diff --git a/server/src/main/java/org/opensearch/cluster/coordination/RemoveCustomsCommand.java b/server/src/main/java/org/opensearch/cluster/coordination/RemoveCustomsCommand.java index 83f3298862776..5e7614d86cecd 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/RemoveCustomsCommand.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/RemoveCustomsCommand.java @@ -48,6 +48,11 @@ import java.nio.file.Path; import java.util.List; +/** + * Removes custom metadata + * + * @opensearch.internal + */ public class RemoveCustomsCommand extends OpenSearchNodeCommand { static final String CUSTOMS_REMOVED_MSG = "Customs were successfully removed from the cluster state"; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/RemoveSettingsCommand.java b/server/src/main/java/org/opensearch/cluster/coordination/RemoveSettingsCommand.java index 298dfde7858cd..da7adec6c4f11 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/RemoveSettingsCommand.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/RemoveSettingsCommand.java @@ -48,6 +48,11 @@ import java.nio.file.Path; import java.util.List; +/** + * Removes custom settings + * + * @opensearch.internal + */ public class RemoveSettingsCommand extends OpenSearchNodeCommand { static final String SETTINGS_REMOVED_MSG = "Settings were successfully removed from the cluster state"; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/StartJoinRequest.java b/server/src/main/java/org/opensearch/cluster/coordination/StartJoinRequest.java index efdbd37a9c0b6..09ebcd7fd074e 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/StartJoinRequest.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/StartJoinRequest.java @@ -41,6 +41,8 @@ /** * Represents the action of requesting a join vote (see {@link Join}) from a node. 
* The source node represents the node that is asking for join votes. + * + * @opensearch.internal */ public class StartJoinRequest extends TransportRequest { diff --git a/server/src/main/java/org/opensearch/cluster/coordination/TermVersionRequest.java b/server/src/main/java/org/opensearch/cluster/coordination/TermVersionRequest.java index cea8c43302a23..0b210ef21f45b 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/TermVersionRequest.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/TermVersionRequest.java @@ -39,6 +39,11 @@ import java.io.IOException; +/** + * Gets the version of a term + * + * @opensearch.internal + */ abstract class TermVersionRequest extends TransportRequest implements Writeable { protected final DiscoveryNode sourceNode; protected final long term; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapMasterCommand.java b/server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapMasterCommand.java index 6b31c39d71eb3..1f17844adf4fe 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapMasterCommand.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapMasterCommand.java @@ -54,6 +54,11 @@ import java.util.Locale; import java.util.Objects; +/** + * Tool to run an unsafe bootstrap + * + * @opensearch.internal + */ public class UnsafeBootstrapMasterCommand extends OpenSearchNodeCommand { static final String CLUSTER_STATE_TERM_VERSION_MSG_FORMAT = "Current node cluster state (term, version) pair is (%s, %s)"; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/ValidateJoinRequest.java b/server/src/main/java/org/opensearch/cluster/coordination/ValidateJoinRequest.java index afdd124bddef2..3f8c00da9a76c 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/ValidateJoinRequest.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/ValidateJoinRequest.java @@
-38,6 +38,11 @@ import java.io.IOException; +/** + * Transport request to validate node join + * + * @opensearch.internal + */ public class ValidateJoinRequest extends TransportRequest { private ClusterState state; diff --git a/server/src/main/java/org/opensearch/cluster/health/ClusterHealthStatus.java b/server/src/main/java/org/opensearch/cluster/health/ClusterHealthStatus.java index 07a66a3731ff6..4daf0cb6adbb2 100644 --- a/server/src/main/java/org/opensearch/cluster/health/ClusterHealthStatus.java +++ b/server/src/main/java/org/opensearch/cluster/health/ClusterHealthStatus.java @@ -38,6 +38,11 @@ import java.io.IOException; +/** + * Cluster health status + * + * @opensearch.internal + */ public enum ClusterHealthStatus implements Writeable { GREEN((byte) 0), YELLOW((byte) 1), diff --git a/server/src/main/java/org/opensearch/cluster/health/ClusterIndexHealth.java b/server/src/main/java/org/opensearch/cluster/health/ClusterIndexHealth.java index ffe54e42355f8..b2234d23be91c 100644 --- a/server/src/main/java/org/opensearch/cluster/health/ClusterIndexHealth.java +++ b/server/src/main/java/org/opensearch/cluster/health/ClusterIndexHealth.java @@ -58,6 +58,11 @@ import static org.opensearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; import static org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken; +/** + * Cluster Index Health Information + * + * @opensearch.internal + */ public final class ClusterIndexHealth implements Iterable, Writeable, ToXContentFragment { private static final String STATUS = "status"; private static final String NUMBER_OF_SHARDS = "number_of_shards"; diff --git a/server/src/main/java/org/opensearch/cluster/health/ClusterShardHealth.java b/server/src/main/java/org/opensearch/cluster/health/ClusterShardHealth.java index fc6c6eb4f9bdc..ba6035abbb42f 100644 --- a/server/src/main/java/org/opensearch/cluster/health/ClusterShardHealth.java +++ 
b/server/src/main/java/org/opensearch/cluster/health/ClusterShardHealth.java @@ -54,6 +54,11 @@ import static org.opensearch.common.xcontent.ConstructingObjectParser.constructorArg; import static org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken; +/** + * Cluster shard health information + * + * @opensearch.internal + */ public final class ClusterShardHealth implements Writeable, ToXContentFragment { private static final String STATUS = "status"; private static final String ACTIVE_SHARDS = "active_shards"; diff --git a/server/src/main/java/org/opensearch/cluster/health/ClusterStateHealth.java b/server/src/main/java/org/opensearch/cluster/health/ClusterStateHealth.java index c6b214d31707e..4c8be0f2d73f0 100644 --- a/server/src/main/java/org/opensearch/cluster/health/ClusterStateHealth.java +++ b/server/src/main/java/org/opensearch/cluster/health/ClusterStateHealth.java @@ -49,6 +49,11 @@ import java.util.Map; import java.util.Objects; +/** + * Cluster state health information + * + * @opensearch.internal + */ public final class ClusterStateHealth implements Iterable, Writeable { private final int numberOfNodes; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/AliasAction.java b/server/src/main/java/org/opensearch/cluster/metadata/AliasAction.java index eede77917a2fd..be69090e0c33c 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/AliasAction.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/AliasAction.java @@ -38,6 +38,8 @@ /** * Individual operation to perform on the cluster state as part of an {@link IndicesAliasesRequest}. 
+ * + * @opensearch.internal */ public abstract class AliasAction { private final String index; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/AliasMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/AliasMetadata.java index e6deb22477bae..375317b32b293 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/AliasMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/AliasMetadata.java @@ -58,6 +58,11 @@ import static java.util.Collections.emptySet; +/** + * Metadata for index aliases + * + * @opensearch.internal + */ public class AliasMetadata extends AbstractDiffable implements ToXContentFragment { private final String alias; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/AliasValidator.java b/server/src/main/java/org/opensearch/cluster/metadata/AliasValidator.java index c024358c5d1d3..769342dceddc5 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/AliasValidator.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/AliasValidator.java @@ -55,6 +55,8 @@ /** * Validator for an alias, to be used before adding an alias to the index metadata * and make sure the alias is valid + * + * @opensearch.internal */ public class AliasValidator { /** diff --git a/server/src/main/java/org/opensearch/cluster/metadata/AutoExpandReplicas.java b/server/src/main/java/org/opensearch/cluster/metadata/AutoExpandReplicas.java index a7538d0a5b4f8..108c05eb78b79 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/AutoExpandReplicas.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/AutoExpandReplicas.java @@ -52,6 +52,8 @@ * This class acts as a functional wrapper around the {@code index.auto_expand_replicas} setting. * This setting or rather it's value is expanded into a min and max value which requires special handling * based on the number of datanodes in the cluster. This class handles all the parsing and streamlines the access to these values. 
+ * + * @opensearch.internal */ public final class AutoExpandReplicas { // the value we recognize in the "max" position to mean all the nodes diff --git a/server/src/main/java/org/opensearch/cluster/metadata/ClusterNameExpressionResolver.java b/server/src/main/java/org/opensearch/cluster/metadata/ClusterNameExpressionResolver.java index 72f6f142ed290..afa0ec64b72b5 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/ClusterNameExpressionResolver.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/ClusterNameExpressionResolver.java @@ -43,6 +43,8 @@ /** * Resolves cluster names from an expression. The expression must be the exact match of a cluster * name or must be a wildcard expression. + * + * @opensearch.internal */ public final class ClusterNameExpressionResolver { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/ComponentTemplate.java b/server/src/main/java/org/opensearch/cluster/metadata/ComponentTemplate.java index c3ba1a2f7a45b..dccd01eb7b95f 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/ComponentTemplate.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/ComponentTemplate.java @@ -53,6 +53,8 @@ * component template is expected to be valid on its own. For example, if a component template * contains a field "foo", it's expected to contain all the necessary settings/mappings/etc for the * "foo" field. These component templates make up the individual pieces composing an index template. 
+ * + * @opensearch.internal */ public class ComponentTemplate extends AbstractDiffable implements ToXContentObject { private static final ParseField TEMPLATE = new ParseField("template"); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/ComponentTemplateMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/ComponentTemplateMetadata.java index 0d2496b5812be..4850e212b832f 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/ComponentTemplateMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/ComponentTemplateMetadata.java @@ -54,6 +54,8 @@ /** * {@link ComponentTemplateMetadata} is a custom {@link Metadata} implementation for storing a map * of component templates and their names. + * + * @opensearch.internal */ public class ComponentTemplateMetadata implements Metadata.Custom { public static final String TYPE = "component_template"; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java b/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java index b9d71adc89c1c..5b16733e12fbb 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java @@ -64,6 +64,8 @@ * An index template is comprised of a set of index patterns, an optional template, and a list of * ids corresponding to component templates that should be composed in order when creating a new * index. 
+ * + * @opensearch.internal */ public class ComposableIndexTemplate extends AbstractDiffable implements ToXContentObject { private static final ParseField INDEX_PATTERNS = new ParseField("index_patterns"); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplateMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplateMetadata.java index 46712a3b529b2..a0e228f5d3ea5 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplateMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplateMetadata.java @@ -54,6 +54,8 @@ /** * The {@link ComposableIndexTemplateMetadata} class is a custom {@link Metadata.Custom} implementation that * stores a map of ids to {@link ComposableIndexTemplate} templates. + * + * @opensearch.internal */ public class ComposableIndexTemplateMetadata implements Metadata.Custom { public static final String TYPE = "index_template"; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/DataStream.java b/server/src/main/java/org/opensearch/cluster/metadata/DataStream.java index eaf1b35ca2ce3..dffcb11619be4 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/DataStream.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/DataStream.java @@ -51,6 +51,11 @@ import java.util.Map; import java.util.Objects; +/** + * Primary DataStream class + * + * @opensearch.internal + */ public final class DataStream extends AbstractDiffable implements ToXContentObject { public static final String BACKING_INDEX_PREFIX = ".ds-"; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/DataStreamMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/DataStreamMetadata.java index d94b6f0c8cffc..6045f745e1584 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/DataStreamMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/DataStreamMetadata.java @@ 
-53,6 +53,8 @@ /** * Custom {@link Metadata} implementation for storing a map of {@link DataStream}s and their names. + * + * @opensearch.internal */ public class DataStreamMetadata implements Metadata.Custom { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/DiffableStringMap.java b/server/src/main/java/org/opensearch/cluster/metadata/DiffableStringMap.java index 2c35fbf4ae67e..542af9c54283a 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/DiffableStringMap.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/DiffableStringMap.java @@ -49,6 +49,8 @@ /** * This is a {@code Map} that implements AbstractDiffable so it * can be used for cluster state purposes + * + * @opensearch.internal */ public class DiffableStringMap extends AbstractMap implements Diffable { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexAbstraction.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexAbstraction.java index 653755664cbc0..77c585ca875aa 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexAbstraction.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexAbstraction.java @@ -52,6 +52,8 @@ * An index abstraction is a reference to one or more concrete indices. * An index abstraction has a unique name and encapsulates all the {@link IndexMetadata} instances it is pointing to. * Also depending on type it may refer to a single or many concrete indices and may or may not have a write index. 
+ * + * @opensearch.internal */ public interface IndexAbstraction { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexAbstractionResolver.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexAbstractionResolver.java index 4045ea938e373..a83c778a4b83a 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexAbstractionResolver.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexAbstractionResolver.java @@ -43,6 +43,11 @@ import java.util.List; import java.util.Set; +/** + * Utility class to resolve index abstractions + * + * @opensearch.internal + */ public class IndexAbstractionResolver { private final IndexNameExpressionResolver indexNameExpressionResolver; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexGraveyard.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexGraveyard.java index c0b207158c9bc..de9d616d79b0b 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexGraveyard.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexGraveyard.java @@ -67,6 +67,8 @@ * nodes and a node could be removed from the cluster for a period of time, the * tombstones remain in the cluster state for a fixed period of time, after which * they are purged. 
+ * + * @opensearch.internal */ public final class IndexGraveyard implements Metadata.Custom { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index 9139cbac2b0be..0b3b2116f6cee 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -96,6 +96,11 @@ import static org.opensearch.common.settings.Settings.readSettingsFromStream; import static org.opensearch.common.settings.Settings.writeSettingsToStream; +/** + * Index metadata information + * + * @opensearch.internal + */ public class IndexMetadata implements Diffable, ToXContentFragment { public static final ClusterBlock INDEX_READ_ONLY_BLOCK = new ClusterBlock( diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexNameExpressionResolver.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexNameExpressionResolver.java index bd8535d18c9cb..4a6df1bc0a53c 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexNameExpressionResolver.java @@ -76,6 +76,11 @@ import java.util.stream.Stream; import java.util.stream.StreamSupport; +/** + * Resolves index name from an expression + * + * @opensearch.internal + */ public class IndexNameExpressionResolver { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(IndexNameExpressionResolver.class); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexTemplateMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexTemplateMetadata.java index 810365589ae1f..367a15560200f 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexTemplateMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexTemplateMetadata.java @@ -62,6 +62,11 @@ 
import java.util.Objects; import java.util.Set; +/** + * Metadata for Index Templates + * + * @opensearch.internal + */ public class IndexTemplateMetadata extends AbstractDiffable { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(IndexTemplateMetadata.class); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Manifest.java b/server/src/main/java/org/opensearch/cluster/metadata/Manifest.java index 8205886cf9df6..b14e970360dce 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/Manifest.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/Manifest.java @@ -55,6 +55,8 @@ * When new version of metadata is written it's assigned some generation long value. * Global metadata generation could be obtained by calling {@link #getGlobalGeneration()}. * Index metadata generation could be obtained by calling {@link #getIndexGenerations()}. + * + * @opensearch.internal */ public class Manifest implements ToXContentFragment { // TODO revisit missing and unknown constants once Zen2 BWC is ready diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MappingMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/MappingMetadata.java index 620542f8f1bde..35ee222541771 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MappingMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MappingMetadata.java @@ -56,6 +56,8 @@ /** * Mapping configuration for a type. 
+ * + * @opensearch.internal */ public class MappingMetadata extends AbstractDiffable { public static final MappingMetadata EMPTY_MAPPINGS = new MappingMetadata(MapperService.SINGLE_MAPPING_NAME, Collections.emptyMap()); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java index 7cf3700402b6b..bec72b696acdd 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java @@ -99,6 +99,11 @@ import static org.opensearch.common.settings.Settings.readSettingsFromStream; import static org.opensearch.common.settings.Settings.writeSettingsToStream; +/** + * Metadata information + * + * @opensearch.internal + */ public class Metadata implements Iterable, Diffable, ToXContentFragment { private static final Logger logger = LogManager.getLogger(Metadata.class); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateDataStreamService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateDataStreamService.java index 13e470eed617d..66fe3c5ce61dd 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateDataStreamService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateDataStreamService.java @@ -62,6 +62,11 @@ import java.util.Map; import java.util.concurrent.atomic.AtomicReference; +/** + * Creates a data stream of metadata + * + * @opensearch.internal + */ public class MetadataCreateDataStreamService { private static final Logger logger = LogManager.getLogger(MetadataCreateDataStreamService.class); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index 7f2be879f3637..642b0f7b8d36f 100644 --- 
a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -123,6 +123,8 @@ /** * Service responsible for submitting create index requests + * + * @opensearch.internal */ public class MetadataCreateIndexService { private static final Logger logger = LogManager.getLogger(MetadataCreateIndexService.class); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataDeleteIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataDeleteIndexService.java index 4805cb215fc09..66f5edf3da129 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataDeleteIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataDeleteIndexService.java @@ -62,6 +62,8 @@ /** * Deletes indices. + * + * @opensearch.internal */ public class MetadataDeleteIndexService { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexAliasesService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexAliasesService.java index a490bae65ca00..8d6939a57240c 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexAliasesService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexAliasesService.java @@ -64,6 +64,8 @@ /** * Service responsible for submitting add and remove aliases requests + * + * @opensearch.internal */ public class MetadataIndexAliasesService { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java index 25071a60bdfb5..d8715d07a31a3 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java @@ -112,6 +112,8 @@ /** * Service responsible for submitting 
open/close index requests as well as for adding index blocks + * + * @opensearch.internal */ public class MetadataIndexStateService { private static final Logger logger = LogManager.getLogger(MetadataIndexStateService.class); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java index 896679206aec5..c14170e358f4c 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java @@ -97,6 +97,8 @@ /** * Service responsible for submitting index templates updates + * + * @opensearch.internal */ public class MetadataIndexTemplateService { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexUpgradeService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexUpgradeService.java index eda4833a36c96..85ef4c36285d2 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexUpgradeService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexUpgradeService.java @@ -64,6 +64,8 @@ * to upgrade the existing index metadata to the latest version of the cluster. It typically * occurs during cluster upgrade, when dangling indices are imported into the cluster or indices * are restored from a repository. 
+ * + * @opensearch.internal */ public class MetadataIndexUpgradeService { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataMappingService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataMappingService.java index 3795961d39143..7f67c45fc80e5 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataMappingService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataMappingService.java @@ -69,6 +69,8 @@ /** * Service responsible for submitting mapping changes + * + * @opensearch.internal */ public class MetadataMappingService { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java index 1b7aee48e3232..1390860271577 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java @@ -74,6 +74,8 @@ /** * Service responsible for submitting update index settings requests + * + * @opensearch.internal */ public class MetadataUpdateSettingsService { private static final Logger logger = LogManager.getLogger(MetadataUpdateSettingsService.class); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/ProcessClusterEventTimeoutException.java b/server/src/main/java/org/opensearch/cluster/metadata/ProcessClusterEventTimeoutException.java index 079c4c680e42d..cda032aa4628f 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/ProcessClusterEventTimeoutException.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/ProcessClusterEventTimeoutException.java @@ -39,6 +39,11 @@ import java.io.IOException; +/** + * Exception thrown when there is a timeout processing cluster events + * + * @opensearch.internal + */ public class ProcessClusterEventTimeoutException extends OpenSearchException { public 
ProcessClusterEventTimeoutException(TimeValue timeValue, String source) { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java index 3076ede7ced14..2dc3015376a72 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java @@ -55,6 +55,8 @@ /** * Contains metadata about registered snapshot repositories + * + * @opensearch.internal */ public class RepositoriesMetadata extends AbstractNamedDiffable implements Custom { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/RepositoryMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/RepositoryMetadata.java index d839e4bafade7..5cfdfe075c74f 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/RepositoryMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/RepositoryMetadata.java @@ -44,6 +44,8 @@ /** * Metadata about registered repository + * + * @opensearch.internal */ public class RepositoryMetadata implements Writeable { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/SystemIndexMetadataUpgradeService.java b/server/src/main/java/org/opensearch/cluster/metadata/SystemIndexMetadataUpgradeService.java index f07b74575950c..85568d69639cb 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/SystemIndexMetadataUpgradeService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/SystemIndexMetadataUpgradeService.java @@ -48,6 +48,8 @@ /** * A service responsible for updating the metadata used by system indices. 
+ * + * @opensearch.internal */ public class SystemIndexMetadataUpgradeService implements ClusterStateListener { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Template.java b/server/src/main/java/org/opensearch/cluster/metadata/Template.java index 0a070ced198c6..0b134cad7b8b2 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/Template.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/Template.java @@ -58,6 +58,8 @@ * A template consists of optional settings, mappings, or alias configuration for an index, however, * it is entirely independent from an index. It's a building block forming part of a regular index * template and a {@link ComponentTemplate}. + * + * @opensearch.internal */ public class Template extends AbstractDiffable