diff --git a/docs/changelog/117939.yaml b/docs/changelog/117939.yaml deleted file mode 100644 index d41111f099f9..000000000000 --- a/docs/changelog/117939.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 117939 -summary: Adding default endpoint for Elastic Rerank -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/reference/connector/docs/connectors-box.asciidoc b/docs/reference/connector/docs/connectors-box.asciidoc index 07e4308d67c2..3e95f15d16cc 100644 --- a/docs/reference/connector/docs/connectors-box.asciidoc +++ b/docs/reference/connector/docs/connectors-box.asciidoc @@ -54,7 +54,7 @@ For additional operations, see <>. ====== Box Free Account [discrete#es-connectors-box-create-oauth-custom-app] -======= Create Box User Authentication (OAuth 2.0) Custom App +*Create Box User Authentication (OAuth 2.0) Custom App* You'll need to create an OAuth app in the Box developer console by following these steps: @@ -64,7 +64,7 @@ You'll need to create an OAuth app in the Box developer console by following the 4. Once the app is created, *Client ID* and *Client secret* values are available in the configuration tab. Keep these handy. [discrete#es-connectors-box-connector-generate-a-refresh-token] -======= Generate a refresh Token +*Generate a refresh Token* To generate a refresh token, follow these steps: @@ -97,7 +97,7 @@ Save the refresh token from the response. You'll need this for the connector con ====== Box Enterprise Account [discrete#es-connectors-box-connector-create-box-server-authentication-client-credentials-grant-custom-app] -======= Create Box Server Authentication (Client Credentials Grant) Custom App +*Create Box Server Authentication (Client Credentials Grant) Custom App* 1. Register a new app in the https://app.box.com/developers/console[Box dev console] with custom App and select Server Authentication (Client Credentials Grant). 2. Check following permissions: @@ -224,7 +224,7 @@ For additional operations, see <>. ====== Box Free Account [discrete#es-connectors-box-client-create-oauth-custom-app] -======= Create Box User Authentication (OAuth 2.0) Custom App +*Create Box User Authentication (OAuth 2.0) Custom App* You'll need to create an OAuth app in the Box developer console by following these steps: @@ -234,7 +234,7 @@ You'll need to create an OAuth app in the Box developer console by following the 4. Once the app is created, *Client ID* and *Client secret* values are available in the configuration tab. Keep these handy. [discrete#es-connectors-box-client-connector-generate-a-refresh-token] -======= Generate a refresh Token +*Generate a refresh Token* To generate a refresh token, follow these steps: @@ -267,7 +267,7 @@ Save the refresh token from the response. You'll need this for the connector con ====== Box Enterprise Account [discrete#es-connectors-box-client-connector-create-box-server-authentication-client-credentials-grant-custom-app] -======= Create Box Server Authentication (Client Credentials Grant) Custom App +*Create Box Server Authentication (Client Credentials Grant) Custom App* 1. Register a new app in the https://app.box.com/developers/console[Box dev console] with custom App and select Server Authentication (Client Credentials Grant). 2. 
Check following permissions: diff --git a/docs/reference/connector/docs/connectors-content-extraction.asciidoc b/docs/reference/connector/docs/connectors-content-extraction.asciidoc index 5d2a9550a7c3..a87d38c9bf53 100644 --- a/docs/reference/connector/docs/connectors-content-extraction.asciidoc +++ b/docs/reference/connector/docs/connectors-content-extraction.asciidoc @@ -183,7 +183,7 @@ Be aware that the self-managed connector will download files with randomized fil For that reason, we recommend using a dedicated directory for self-hosted extraction. [discrete#es-connectors-content-extraction-data-extraction-service-file-pointers-configuration-example] -======= Example +*Example* 1. For this example, we will be using `/app/files` as both our local directory and our container directory. When you run the extraction service docker container, you can mount the directory as a volume using the command-line option `-v /app/files:/app/files`. @@ -228,7 +228,7 @@ When using self-hosted extraction from a dockerized self-managed connector, ther * The self-managed connector and the extraction service will also need to share a volume. You can decide what directory inside these docker containers the volume will be mounted onto, but the directory must be the same for both docker containers. [discrete#es-connectors-content-extraction-data-extraction-service-file-pointers-configuration-dockerized-example] -======= Example +*Example* 1. First, set up a volume for the two docker containers to share. This will be where files are downloaded into and then extracted from. diff --git a/docs/reference/connector/docs/connectors-dropbox.asciidoc b/docs/reference/connector/docs/connectors-dropbox.asciidoc index 1f80a0ab4e95..295b7e293662 100644 --- a/docs/reference/connector/docs/connectors-dropbox.asciidoc +++ b/docs/reference/connector/docs/connectors-dropbox.asciidoc @@ -190,7 +190,7 @@ When both are provided, priority is given to `file_categories`. We have some examples below for illustration. [discrete#es-connectors-dropbox-sync-rules-advanced-example-1] -======= Example: Query only +*Example: Query only* [source,js] ---- @@ -206,7 +206,7 @@ We have some examples below for illustration. // NOTCONSOLE [discrete#es-connectors-dropbox-sync-rules-advanced-example-2] -======= Example: Query with file extension filter +*Example: Query with file extension filter* [source,js] ---- @@ -225,7 +225,7 @@ We have some examples below for illustration. // NOTCONSOLE [discrete#es-connectors-dropbox-sync-rules-advanced-example-3] -======= Example: Query with file category filter +*Example: Query with file category filter* [source,js] ---- @@ -248,7 +248,7 @@ We have some examples below for illustration. // NOTCONSOLE [discrete#es-connectors-dropbox-sync-rules-advanced-limitations] -======= Limitations +*Limitations* * Content extraction is not supported for Dropbox *Paper* files when advanced sync rules are enabled. @@ -474,7 +474,7 @@ When both are provided, priority is given to `file_categories`. We have some examples below for illustration. [discrete#es-connectors-dropbox-client-sync-rules-advanced-example-1] -======= Example: Query only +*Example: Query only* [source,js] ---- @@ -490,7 +490,7 @@ We have some examples below for illustration. // NOTCONSOLE [discrete#es-connectors-dropbox-client-sync-rules-advanced-example-2] -======= Example: Query with file extension filter +*Example: Query with file extension filter* [source,js] ---- @@ -509,7 +509,7 @@ We have some examples below for illustration. 
// NOTCONSOLE [discrete#es-connectors-dropbox-client-sync-rules-advanced-example-3] -======= Example: Query with file category filter +*Example: Query with file category filter* [source,js] ---- @@ -532,7 +532,7 @@ We have some examples below for illustration. // NOTCONSOLE [discrete#es-connectors-dropbox-client-sync-rules-advanced-limitations] -======= Limitations +*Limitations* * Content extraction is not supported for Dropbox *Paper* files when advanced sync rules are enabled. diff --git a/docs/reference/connector/docs/connectors-github.asciidoc b/docs/reference/connector/docs/connectors-github.asciidoc index aa683e4bb082..df577d83e812 100644 --- a/docs/reference/connector/docs/connectors-github.asciidoc +++ b/docs/reference/connector/docs/connectors-github.asciidoc @@ -210,7 +210,7 @@ Advanced sync rules are defined through a source-specific DSL JSON snippet. The following sections provide examples of advanced sync rules for this connector. [discrete#es-connectors-github-sync-rules-advanced-branch] -======= Indexing document and files based on branch name configured via branch key +*Indexing document and files based on branch name configured via branch key* [source,js] ---- @@ -226,7 +226,7 @@ The following sections provide examples of advanced sync rules for this connecto // NOTCONSOLE [discrete#es-connectors-github-sync-rules-advanced-issue-key] -======= Indexing document based on issue query related to bugs via issue key +*Indexing document based on issue query related to bugs via issue key* [source,js] ---- @@ -242,7 +242,7 @@ The following sections provide examples of advanced sync rules for this connecto // NOTCONSOLE [discrete#es-connectors-github-sync-rules-advanced-pr-key] -======= Indexing document based on PR query related to open PR's via PR key +*Indexing document based on PR query related to open PR's via PR key* [source,js] ---- @@ -258,7 +258,7 @@ The following sections provide examples of advanced sync rules for this connecto // NOTCONSOLE [discrete#es-connectors-github-sync-rules-advanced-issue-query-branch-name] -======= Indexing document and files based on queries and branch name +*Indexing document and files based on queries and branch name* [source,js] ---- @@ -283,7 +283,7 @@ Check the Elasticsearch index for the actual document count. ==== [discrete#es-connectors-github-sync-rules-advanced-overlapping] -======= Advanced rules for overlapping +*Advanced rules for overlapping* [source,js] ---- @@ -550,7 +550,7 @@ Advanced sync rules are defined through a source-specific DSL JSON snippet. The following sections provide examples of advanced sync rules for this connector. 
[discrete#es-connectors-github-client-sync-rules-advanced-branch] -======= Indexing document and files based on branch name configured via branch key +*Indexing document and files based on branch name configured via branch key* [source,js] ---- @@ -566,7 +566,7 @@ The following sections provide examples of advanced sync rules for this connecto // NOTCONSOLE [discrete#es-connectors-github-client-sync-rules-advanced-issue-key] -======= Indexing document based on issue query related to bugs via issue key +*Indexing document based on issue query related to bugs via issue key* [source,js] ---- @@ -582,7 +582,7 @@ The following sections provide examples of advanced sync rules for this connecto // NOTCONSOLE [discrete#es-connectors-github-client-sync-rules-advanced-pr-key] -======= Indexing document based on PR query related to open PR's via PR key +*Indexing document based on PR query related to open PR's via PR key* [source,js] ---- @@ -598,7 +598,7 @@ The following sections provide examples of advanced sync rules for this connecto // NOTCONSOLE [discrete#es-connectors-github-client-sync-rules-advanced-issue-query-branch-name] -======= Indexing document and files based on queries and branch name +*Indexing document and files based on queries and branch name* [source,js] ---- @@ -623,7 +623,7 @@ Check the Elasticsearch index for the actual document count. ==== [discrete#es-connectors-github-client-sync-rules-advanced-overlapping] -======= Advanced rules for overlapping +*Advanced rules for overlapping* [source,js] ---- diff --git a/docs/reference/connector/docs/connectors-ms-sql.asciidoc b/docs/reference/connector/docs/connectors-ms-sql.asciidoc index 47fb282b1687..d706af8ca804 100644 --- a/docs/reference/connector/docs/connectors-ms-sql.asciidoc +++ b/docs/reference/connector/docs/connectors-ms-sql.asciidoc @@ -196,7 +196,7 @@ Here are a few examples of advanced sync rules for this connector. ==== [discrete#es-connectors-ms-sql-sync-rules-advanced-queries] -======= Example: Two queries +*Example: Two queries* These rules fetch all records from both the `employee` and `customer` tables. The data from these tables will be synced separately to Elasticsearch. @@ -220,7 +220,7 @@ These rules fetch all records from both the `employee` and `customer` tables. Th // NOTCONSOLE [discrete#es-connectors-ms-sql-sync-rules-example-one-where] -======= Example: One WHERE query +*Example: One WHERE query* This rule fetches only the records from the `employee` table where the `emp_id` is greater than 5. Only these filtered records will be synced to Elasticsearch. @@ -236,7 +236,7 @@ This rule fetches only the records from the `employee` table where the `emp_id` // NOTCONSOLE [discrete#es-connectors-ms-sql-sync-rules-example-one-join] -======= Example: One JOIN query +*Example: One JOIN query* This rule fetches records by performing an INNER JOIN between the `employee` and `customer` tables on the condition that the `emp_id` in `employee` matches the `c_id` in `customer`. The result of this combined data will be synced to Elasticsearch. @@ -484,7 +484,7 @@ Here are a few examples of advanced sync rules for this connector. ==== [discrete#es-connectors-ms-sql-client-sync-rules-advanced-queries] -======= Example: Two queries +*Example: Two queries* These rules fetch all records from both the `employee` and `customer` tables. The data from these tables will be synced separately to Elasticsearch. @@ -508,7 +508,7 @@ These rules fetch all records from both the `employee` and `customer` tables. 
Th // NOTCONSOLE [discrete#es-connectors-ms-sql-client-sync-rules-example-one-where] -======= Example: One WHERE query +*Example: One WHERE query* This rule fetches only the records from the `employee` table where the `emp_id` is greater than 5. Only these filtered records will be synced to Elasticsearch. @@ -524,7 +524,7 @@ This rule fetches only the records from the `employee` table where the `emp_id` // NOTCONSOLE [discrete#es-connectors-ms-sql-client-sync-rules-example-one-join] -======= Example: One JOIN query +*Example: One JOIN query* This rule fetches records by performing an INNER JOIN between the `employee` and `customer` tables on the condition that the `emp_id` in `employee` matches the `c_id` in `customer`. The result of this combined data will be synced to Elasticsearch. diff --git a/docs/reference/connector/docs/connectors-network-drive.asciidoc b/docs/reference/connector/docs/connectors-network-drive.asciidoc index 91c9d3b28c38..909e3440c9f0 100644 --- a/docs/reference/connector/docs/connectors-network-drive.asciidoc +++ b/docs/reference/connector/docs/connectors-network-drive.asciidoc @@ -174,7 +174,7 @@ Advanced sync rules for this connector use *glob patterns*. The following sections provide examples of advanced sync rules for this connector. [discrete#es-connectors-network-drive-indexing-files-and-folders-recursively-within-folders] -======= Indexing files and folders recursively within folders +*Indexing files and folders recursively within folders* [source,js] ---- @@ -190,7 +190,7 @@ The following sections provide examples of advanced sync rules for this connecto // NOTCONSOLE [discrete#es-connectors-network-drive-indexing-files-and-folders-directly-inside-folder] -======= Indexing files and folders directly inside folder +*Indexing files and folders directly inside folder* [source,js] ---- @@ -203,7 +203,7 @@ The following sections provide examples of advanced sync rules for this connecto // NOTCONSOLE [discrete#es-connectors-network-drive-indexing-files-and-folders-directly-inside-a-set-of-folders] -======= Indexing files and folders directly inside a set of folders +*Indexing files and folders directly inside a set of folders* [source,js] ---- @@ -216,7 +216,7 @@ The following sections provide examples of advanced sync rules for this connecto // NOTCONSOLE [discrete#es-connectors-network-drive-excluding-files-and-folders-that-match-a-pattern] -======= Excluding files and folders that match a pattern +*Excluding files and folders that match a pattern* [source,js] ---- @@ -432,7 +432,7 @@ Advanced sync rules for this connector use *glob patterns*. The following sections provide examples of advanced sync rules for this connector. 
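In glob terms, `**` matches any depth of nested folders while `*` matches a single level, which is the distinction between the "recursively within folders" and "directly inside folder" examples below. A minimal sketch of one rule, assuming the single `pattern` field these examples use (the folder name is a placeholder):

[source,js]
----
[
  {
    "pattern": "Folder-shared/**"
  }
]
----
// NOTCONSOLE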
[discrete#es-connectors-network-drive-client-indexing-files-and-folders-recursively-within-folders] -======= Indexing files and folders recursively within folders +*Indexing files and folders recursively within folders* [source,js] ---- @@ -448,7 +448,7 @@ The following sections provide examples of advanced sync rules for this connecto // NOTCONSOLE [discrete#es-connectors-network-drive-client-indexing-files-and-folders-directly-inside-folder] -======= Indexing files and folders directly inside folder +*Indexing files and folders directly inside folder* [source,js] ---- @@ -461,7 +461,7 @@ The following sections provide examples of advanced sync rules for this connecto // NOTCONSOLE [discrete#es-connectors-network-drive-client-indexing-files-and-folders-directly-inside-a-set-of-folders] -======= Indexing files and folders directly inside a set of folders +*Indexing files and folders directly inside a set of folders* [source,js] ---- @@ -474,7 +474,7 @@ The following sections provide examples of advanced sync rules for this connecto // NOTCONSOLE [discrete#es-connectors-network-drive-client-excluding-files-and-folders-that-match-a-pattern] -======= Excluding files and folders that match a pattern +*Excluding files and folders that match a pattern* [source,js] ---- diff --git a/docs/reference/connector/docs/connectors-notion.asciidoc b/docs/reference/connector/docs/connectors-notion.asciidoc index 2d7a71bff20d..7c08c5d81e03 100644 --- a/docs/reference/connector/docs/connectors-notion.asciidoc +++ b/docs/reference/connector/docs/connectors-notion.asciidoc @@ -140,7 +140,7 @@ Advanced sync rules for Notion take the following parameters: ====== Examples [discrete] -======= Example 1 +*Example 1* Indexing every page where the title contains `Demo Page`: @@ -160,7 +160,7 @@ Indexing every page where the title contains `Demo Page`: // NOTCONSOLE [discrete] -======= Example 2 +*Example 2* Indexing every database where the title contains `Demo Database`: @@ -180,7 +180,7 @@ Indexing every database where the title contains `Demo Database`: // NOTCONSOLE [discrete] -======= Example 3 +*Example 3* Indexing every database where the title contains `Demo Database` and every page where the title contains `Demo Page`: @@ -206,7 +206,7 @@ Indexing every database where the title contains `Demo Database` and every page // NOTCONSOLE [discrete] -======= Example 4 +*Example 4* Indexing all pages in the workspace: @@ -226,7 +226,7 @@ Indexing all pages in the workspace: // NOTCONSOLE [discrete] -======= Example 5 +*Example 5* Indexing all the pages and databases connected to the workspace: @@ -243,7 +243,7 @@ Indexing all the pages and databases connected to the workspace: // NOTCONSOLE [discrete] -======= Example 6 +*Example 6* Indexing all the rows of a database where the record is `true` for the column `Task completed` and its property(datatype) is a checkbox: @@ -266,7 +266,7 @@ Indexing all the rows of a database where the record is `true` for the column `T // NOTCONSOLE [discrete] -======= Example 7 +*Example 7* Indexing all rows of a specific database: @@ -283,7 +283,7 @@ Indexing all rows of a specific database: // NOTCONSOLE [discrete] -======= Example 8 +*Example 8* Indexing all blocks defined in `searches` and `database_query_filters`: @@ -498,7 +498,7 @@ Advanced sync rules for Notion take the following parameters: ====== Examples [discrete] -======= Example 1 +*Example 1* Indexing every page where the title contains `Demo Page`: @@ -518,7 +518,7 @@ Indexing every page where the title contains `Demo 
Page`: // NOTCONSOLE [discrete] -======= Example 2 +*Example 2* Indexing every database where the title contains `Demo Database`: @@ -538,7 +538,7 @@ Indexing every database where the title contains `Demo Database`: // NOTCONSOLE [discrete] -======= Example 3 +*Example 3* Indexing every database where the title contains `Demo Database` and every page where the title contains `Demo Page`: @@ -564,7 +564,7 @@ Indexing every database where the title contains `Demo Database` and every page // NOTCONSOLE [discrete] -======= Example 4 +*Example 4* Indexing all pages in the workspace: @@ -584,7 +584,7 @@ Indexing all pages in the workspace: // NOTCONSOLE [discrete] -======= Example 5 +*Example 5* Indexing all the pages and databases connected to the workspace: @@ -601,7 +601,7 @@ Indexing all the pages and databases connected to the workspace: // NOTCONSOLE [discrete] -======= Example 6 +*Example 6* Indexing all the rows of a database where the record is `true` for the column `Task completed` and its property(datatype) is a checkbox: @@ -624,7 +624,7 @@ Indexing all the rows of a database where the record is `true` for the column `T // NOTCONSOLE [discrete] -======= Example 7 +*Example 7* Indexing all rows of a specific database: @@ -641,7 +641,7 @@ Indexing all rows of a specific database: // NOTCONSOLE [discrete] -======= Example 8 +*Example 8* Indexing all blocks defined in `searches` and `database_query_filters`: diff --git a/docs/reference/connector/docs/connectors-onedrive.asciidoc b/docs/reference/connector/docs/connectors-onedrive.asciidoc index 7d1a21aeb78d..44ac96e2ad99 100644 --- a/docs/reference/connector/docs/connectors-onedrive.asciidoc +++ b/docs/reference/connector/docs/connectors-onedrive.asciidoc @@ -160,7 +160,7 @@ A <> is required for advanced sync rul Here are a few examples of advanced sync rules for this connector. [discrete#es-connectors-onedrive-sync-rules-advanced-examples-1] -======= Example 1 +*Example 1* This rule skips indexing for files with `.xlsx` and `.docx` extensions. All other files and folders will be indexed. @@ -176,7 +176,7 @@ All other files and folders will be indexed. // NOTCONSOLE [discrete#es-connectors-onedrive-sync-rules-advanced-examples-2] -======= Example 2 +*Example 2* This rule focuses on indexing files and folders owned by `user1-domain@onmicrosoft.com` and `user2-domain@onmicrosoft.com` but excludes files with `.py` extension. @@ -192,7 +192,7 @@ This rule focuses on indexing files and folders owned by `user1-domain@onmicroso // NOTCONSOLE [discrete#es-connectors-onedrive-sync-rules-advanced-examples-3] -======= Example 3 +*Example 3* This rule indexes only the files and folders directly inside the root folder, excluding any `.md` files. @@ -208,7 +208,7 @@ This rule indexes only the files and folders directly inside the root folder, ex // NOTCONSOLE [discrete#es-connectors-onedrive-sync-rules-advanced-examples-4] -======= Example 4 +*Example 4* This rule indexes files and folders owned by `user1-domain@onmicrosoft.com` and `user3-domain@onmicrosoft.com` that are directly inside the `abc` folder, which is a subfolder of any folder under the `hello` directory in the root. Files with extensions `.pdf` and `.py` are excluded. @@ -225,7 +225,7 @@ This rule indexes files and folders owned by `user1-domain@onmicrosoft.com` and // NOTCONSOLE [discrete#es-connectors-onedrive-sync-rules-advanced-examples-5] -======= Example 5 +*Example 5* This example contains two rules. 
The first rule indexes all files and folders owned by `user1-domain@onmicrosoft.com` and `user2-domain@onmicrosoft.com`. @@ -245,7 +245,7 @@ The second rule indexes files for all other users, but skips files with a `.py` // NOTCONSOLE [discrete#es-connectors-onedrive-sync-rules-advanced-examples-6] -======= Example 6 +*Example 6* This example contains two rules. The first rule indexes all files owned by `user1-domain@onmicrosoft.com` and `user2-domain@onmicrosoft.com`, excluding `.md` files. @@ -449,7 +449,7 @@ A <> is required for advanced sync rul Here are a few examples of advanced sync rules for this connector. [discrete#es-connectors-onedrive-client-sync-rules-advanced-examples-1] -======= Example 1 +*Example 1* This rule skips indexing for files with `.xlsx` and `.docx` extensions. All other files and folders will be indexed. @@ -465,7 +465,7 @@ All other files and folders will be indexed. // NOTCONSOLE [discrete#es-connectors-onedrive-client-sync-rules-advanced-examples-2] -======= Example 2 +*Example 2* This rule focuses on indexing files and folders owned by `user1-domain@onmicrosoft.com` and `user2-domain@onmicrosoft.com` but excludes files with `.py` extension. @@ -481,7 +481,7 @@ This rule focuses on indexing files and folders owned by `user1-domain@onmicroso // NOTCONSOLE [discrete#es-connectors-onedrive-client-sync-rules-advanced-examples-3] -======= Example 3 +*Example 3* This rule indexes only the files and folders directly inside the root folder, excluding any `.md` files. @@ -497,7 +497,7 @@ This rule indexes only the files and folders directly inside the root folder, ex // NOTCONSOLE [discrete#es-connectors-onedrive-client-sync-rules-advanced-examples-4] -======= Example 4 +*Example 4* This rule indexes files and folders owned by `user1-domain@onmicrosoft.com` and `user3-domain@onmicrosoft.com` that are directly inside the `abc` folder, which is a subfolder of any folder under the `hello` directory in the root. Files with extensions `.pdf` and `.py` are excluded. @@ -514,7 +514,7 @@ This rule indexes files and folders owned by `user1-domain@onmicrosoft.com` and // NOTCONSOLE [discrete#es-connectors-onedrive-client-sync-rules-advanced-examples-5] -======= Example 5 +*Example 5* This example contains two rules. The first rule indexes all files and folders owned by `user1-domain@onmicrosoft.com` and `user2-domain@onmicrosoft.com`. @@ -534,7 +534,7 @@ The second rule indexes files for all other users, but skips files with a `.py` // NOTCONSOLE [discrete#es-connectors-onedrive-client-sync-rules-advanced-examples-6] -======= Example 6 +*Example 6* This example contains two rules. The first rule indexes all files owned by `user1-domain@onmicrosoft.com` and `user2-domain@onmicrosoft.com`, excluding `.md` files. diff --git a/docs/reference/connector/docs/connectors-postgresql.asciidoc b/docs/reference/connector/docs/connectors-postgresql.asciidoc index 1fe28f867337..aa6cb7f29e63 100644 --- a/docs/reference/connector/docs/connectors-postgresql.asciidoc +++ b/docs/reference/connector/docs/connectors-postgresql.asciidoc @@ -188,7 +188,7 @@ Advanced sync rules are defined through a source-specific DSL JSON snippet. Here is some example data that will be used in the following examples. [discrete#connectors-postgresql-sync-rules-advanced-example-data-1] -======= `employee` table +*`employee` table* [cols="3*", options="header"] |=== @@ -199,7 +199,7 @@ Here is some example data that will be used in the following examples. 
|=== [discrete#connectors-postgresql-sync-rules-advanced-example-2] -======= `customer` table +*`customer` table* [cols="3*", options="header"] |=== @@ -213,7 +213,7 @@ Here is some example data that will be used in the following examples. ====== Advanced sync rules examples [discrete#connectors-postgresql-sync-rules-advanced-examples-1] -======= Multiple table queries +*Multiple table queries* [source,js] ---- @@ -235,7 +235,7 @@ Here is some example data that will be used in the following examples. // NOTCONSOLE [discrete#connectors-postgresql-sync-rules-advanced-examples-1-id-columns] -======= Multiple table queries with `id_columns` +*Multiple table queries with `id_columns`* In 8.15.0, we added a new optional `id_columns` field in our advanced sync rules for the PostgreSQL connector. Use the `id_columns` field to ingest tables which do not have a primary key. Include the names of unique fields so that the connector can use them to generate unique IDs for documents. @@ -264,7 +264,7 @@ Use the `id_columns` field to ingest tables which do not have a primary key. Inc This example uses the `id_columns` field to specify the unique fields `emp_id` and `c_id` for the `employee` and `customer` tables, respectively. [discrete#connectors-postgresql-sync-rules-advanced-examples-2] -======= Filtering data with `WHERE` clause +*Filtering data with `WHERE` clause* [source,js] ---- @@ -278,7 +278,7 @@ This example uses the `id_columns` field to specify the unique fields `emp_id` a // NOTCONSOLE [discrete#connectors-postgresql-sync-rules-advanced-examples-3] -======= `JOIN` operations +*`JOIN` operations* [source,js] ---- @@ -494,7 +494,7 @@ Advanced sync rules are defined through a source-specific DSL JSON snippet. Here is some example data that will be used in the following examples. [discrete#es-connectors-postgresql-client-sync-rules-advanced-example-data-1] -======= `employee` table +*`employee` table* [cols="3*", options="header"] |=== @@ -505,7 +505,7 @@ Here is some example data that will be used in the following examples. |=== [discrete#es-connectors-postgresql-client-sync-rules-advanced-example-2] -======= `customer` table +*`customer` table* [cols="3*", options="header"] |=== @@ -519,7 +519,7 @@ Here is some example data that will be used in the following examples. ====== Advanced sync rules examples [discrete#es-connectors-postgresql-client-sync-rules-advanced-examples-1] -======== Multiple table queries +*Multiple table queries* [source,js] ---- @@ -541,7 +541,7 @@ Here is some example data that will be used in the following examples. // NOTCONSOLE [discrete#es-connectors-postgresql-client-sync-rules-advanced-examples-1-id-columns] -======== Multiple table queries with `id_columns` +*Multiple table queries with `id_columns`* In 8.15.0, we added a new optional `id_columns` field in our advanced sync rules for the PostgreSQL connector. Use the `id_columns` field to ingest tables which do not have a primary key. Include the names of unique fields so that the connector can use them to generate unique IDs for documents. @@ -570,7 +570,7 @@ Use the `id_columns` field to ingest tables which do not have a primary key. Inc This example uses the `id_columns` field to specify the unique fields `emp_id` and `c_id` for the `employee` and `customer` tables, respectively. 
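A minimal sketch of the shape that sentence describes (the query text is illustrative): each entry pairs its `tables` and `query` with the `id_columns` the connector should use as the document ID source in place of a primary key.

[source,js]
----
[
  {
    "tables": ["employee"],
    "query": "SELECT * FROM employee",
    "id_columns": ["emp_id"]
  },
  {
    "tables": ["customer"],
    "query": "SELECT * FROM customer",
    "id_columns": ["c_id"]
  }
]
----
// NOTCONSOLE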
[discrete#es-connectors-postgresql-client-sync-rules-advanced-examples-2] -======== Filtering data with `WHERE` clause +*Filtering data with `WHERE` clause* [source,js] ---- @@ -584,7 +584,7 @@ This example uses the `id_columns` field to specify the unique fields `emp_id` a // NOTCONSOLE [discrete#es-connectors-postgresql-client-sync-rules-advanced-examples-3] -======== `JOIN` operations +*`JOIN` operations* [source,js] ---- diff --git a/docs/reference/connector/docs/connectors-s3.asciidoc b/docs/reference/connector/docs/connectors-s3.asciidoc index b4d08d388463..90c070f7b804 100644 --- a/docs/reference/connector/docs/connectors-s3.asciidoc +++ b/docs/reference/connector/docs/connectors-s3.asciidoc @@ -118,7 +118,7 @@ The connector will fetch file and folder data that matches the string. Defaults to `""` (syncs all bucket objects). [discrete#es-connectors-s3-sync-rules-advanced-examples] -======= Advanced sync rules examples +*Advanced sync rules examples* *Fetching files and folders recursively by prefix* @@ -336,7 +336,7 @@ The connector will fetch file and folder data that matches the string. Defaults to `""` (syncs all bucket objects). [discrete#es-connectors-s3-client-sync-rules-advanced-examples] -======= Advanced sync rules examples +*Advanced sync rules examples* *Fetching files and folders recursively by prefix* diff --git a/docs/reference/connector/docs/connectors-salesforce.asciidoc b/docs/reference/connector/docs/connectors-salesforce.asciidoc index 3676f7663089..c640751de92c 100644 --- a/docs/reference/connector/docs/connectors-salesforce.asciidoc +++ b/docs/reference/connector/docs/connectors-salesforce.asciidoc @@ -227,7 +227,7 @@ They take the following parameters: Allowed values are *SOQL* and *SOSL*. [discrete#es-connectors-salesforce-sync-rules-advanced-fetch-query-language] -======= Fetch documents based on the query and language specified +*Fetch documents based on the query and language specified* **Example**: Fetch documents using SOQL query @@ -256,7 +256,7 @@ Allowed values are *SOQL* and *SOSL*. // NOTCONSOLE [discrete#es-connectors-salesforce-sync-rules-advanced-fetch-objects] -======= Fetch standard and custom objects using SOQL and SOSL queries +*Fetch standard and custom objects using SOQL and SOSL queries* **Example**: Fetch documents for standard objects via SOQL and SOSL query. @@ -293,7 +293,7 @@ Allowed values are *SOQL* and *SOSL*. // NOTCONSOLE [discrete#es-connectors-salesforce-sync-rules-advanced-fetch-standard-custom-fields] -======= Fetch documents with standard and custom fields +*Fetch documents with standard and custom fields* **Example**: Fetch documents with all standard and custom fields for Account object. @@ -626,7 +626,7 @@ They take the following parameters: Allowed values are *SOQL* and *SOSL*. [discrete#es-connectors-salesforce-client-sync-rules-advanced-fetch-query-language] -======= Fetch documents based on the query and language specified +*Fetch documents based on the query and language specified* **Example**: Fetch documents using SOQL query @@ -655,7 +655,7 @@ Allowed values are *SOQL* and *SOSL*. // NOTCONSOLE [discrete#es-connectors-salesforce-client-sync-rules-advanced-fetch-objects] -======= Fetch standard and custom objects using SOQL and SOSL queries +*Fetch standard and custom objects using SOQL and SOSL queries* **Example**: Fetch documents for standard objects via SOQL and SOSL query. @@ -692,7 +692,7 @@ Allowed values are *SOQL* and *SOSL*. 
// NOTCONSOLE [discrete#es-connectors-salesforce-client-sync-rules-advanced-fetch-standard-custom-fields] -======= Fetch documents with standard and custom fields +*Fetch documents with standard and custom fields* **Example**: Fetch documents with all standard and custom fields for Account object. diff --git a/docs/reference/connector/docs/connectors-servicenow.asciidoc b/docs/reference/connector/docs/connectors-servicenow.asciidoc index a02c418f11d7..3dc98ed9a44c 100644 --- a/docs/reference/connector/docs/connectors-servicenow.asciidoc +++ b/docs/reference/connector/docs/connectors-servicenow.asciidoc @@ -167,7 +167,7 @@ Advanced sync rules are defined through a source-specific DSL JSON snippet. The following sections provide examples of advanced sync rules for this connector. [discrete#es-connectors-servicenow-sync-rules-number-incident-service] -======= Indexing document based on incident number for Incident service +*Indexing document based on incident number for Incident service* [source,js] ---- @@ -181,7 +181,7 @@ The following sections provide examples of advanced sync rules for this connecto // NOTCONSOLE [discrete#es-connectors-servicenow-sync-rules-active-false-user-service] -======= Indexing document based on user activity state for User service +*Indexing document based on user activity state for User service* [source,js] ---- @@ -195,7 +195,7 @@ The following sections provide examples of advanced sync rules for this connecto // NOTCONSOLE [discrete#es-connectors-servicenow-sync-rules-author-administrator-knowledge-service] -======= Indexing document based on author name for Knowledge service +*Indexing document based on author name for Knowledge service* [source,js] ---- @@ -407,7 +407,7 @@ Advanced sync rules are defined through a source-specific DSL JSON snippet. The following sections provide examples of advanced sync rules for this connector. [discrete#es-connectors-servicenow-client-sync-rules-number-incident-service] -======= Indexing document based on incident number for Incident service +*Indexing document based on incident number for Incident service* [source,js] ---- @@ -421,7 +421,7 @@ The following sections provide examples of advanced sync rules for this connecto // NOTCONSOLE [discrete#es-connectors-servicenow-client-sync-rules-active-false-user-service] -======= Indexing document based on user activity state for User service +*Indexing document based on user activity state for User service* [source,js] ---- @@ -435,7 +435,7 @@ The following sections provide examples of advanced sync rules for this connecto // NOTCONSOLE [discrete#es-connectors-servicenow-client-sync-rules-author-administrator-knowledge-service] -======= Indexing document based on author name for Knowledge service +*Indexing document based on author name for Knowledge service* [source,js] ---- diff --git a/docs/reference/connector/docs/connectors-sharepoint-online.asciidoc b/docs/reference/connector/docs/connectors-sharepoint-online.asciidoc index 21d0890e436c..02f598c16f63 100644 --- a/docs/reference/connector/docs/connectors-sharepoint-online.asciidoc +++ b/docs/reference/connector/docs/connectors-sharepoint-online.asciidoc @@ -277,7 +277,7 @@ Example: This rule will not extract content of any drive items (files in document libraries) that haven't been modified for 60 days or more. 
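The rule body itself falls outside this hunk's context. As a sketch, it reduces to a single integer giving the age cutoff in days (the field name here is an assumption, not taken from this diff):

[source,js]
----
{
  "skipExtractingDriveItemsOlderThan": 60
}
----
// NOTCONSOLE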
[discrete#es-connectors-sharepoint-online-sync-rules-limitations] -======= Limitations of sync rules with incremental syncs +*Limitations of sync rules with incremental syncs* Changing sync rules after Sharepoint Online content has already been indexed can bring unexpected results, when using <>. @@ -288,7 +288,7 @@ Incremental syncs ensure _updates_ from 3rd-party system, but do not modify exis Let's take a look at several examples where incremental syncs might lead to inconsistent data on your index. [discrete#es-connectors-sharepoint-online-sync-rules-limitations-restrictive-added] -======== Example: Restrictive basic sync rule added after a full sync +*Example: Restrictive basic sync rule added after a full sync* Imagine your Sharepoint Online drive contains the following drive items: @@ -322,7 +322,7 @@ If no files were changed, incremental sync will not receive information about ch After a *full sync*, the index will be updated and files that are excluded by sync rules will be removed. [discrete#es-connectors-sharepoint-online-sync-rules-limitations-restrictive-removed] -======== Example: Restrictive basic sync rules removed after a full sync +*Example: Restrictive basic sync rules removed after a full sync* Imagine that Sharepoint Online drive has the following drive items: @@ -354,7 +354,7 @@ Afterwards, we can remove the filtering rule and run an incremental sync. If no Only a *full sync* will include the items previously ignored by the sync rule. [discrete#es-connectors-sharepoint-online-sync-rules-limitations-restrictive-changed] -======== Example: Advanced sync rules edge case +*Example: Advanced sync rules edge case* Advanced sync rules can be applied to limit which documents will have content extracted. For example, it's possible to set a rule so that documents older than 180 days won't have content extracted. @@ -763,7 +763,7 @@ Example: This rule will not extract content of any drive items (files in document libraries) that haven't been modified for 60 days or more. [discrete#es-connectors-sharepoint-online-client-sync-rules-limitations] -======= Limitations of sync rules with incremental syncs +*Limitations of sync rules with incremental syncs* Changing sync rules after Sharepoint Online content has already been indexed can bring unexpected results, when using <>. @@ -774,7 +774,7 @@ Incremental syncs ensure _updates_ from 3rd-party system, but do not modify exis Let's take a look at several examples where incremental syncs might lead to inconsistent data on your index. [discrete#es-connectors-sharepoint-online-client-sync-rules-limitations-restrictive-added] -======== Example: Restrictive basic sync rule added after a full sync +*Example: Restrictive basic sync rule added after a full sync* Imagine your Sharepoint Online drive contains the following drive items: @@ -808,7 +808,7 @@ If no files were changed, incremental sync will not receive information about ch After a *full sync*, the index will be updated and files that are excluded by sync rules will be removed. [discrete#es-connectors-sharepoint-online-client-sync-rules-limitations-restrictive-removed] -======== Example: Restrictive basic sync rules removed after a full sync +*Example: Restrictive basic sync rules removed after a full sync* Imagine that Sharepoint Online drive has the following drive items: @@ -840,7 +840,7 @@ Afterwards, we can remove the filtering rule and run an incremental sync. If no Only a *full sync* will include the items previously ignored by the sync rule. 
[discrete#es-connectors-sharepoint-online-client-sync-rules-limitations-restrictive-changed] -======== Example: Advanced sync rules edge case +*Example: Advanced sync rules edge case* Advanced sync rules can be applied to limit which documents will have content extracted. For example, it's possible to set a rule so that documents older than 180 days won't have content extracted. diff --git a/muted-tests.yml b/muted-tests.yml index 4523db7239be..dcaa415a6796 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -157,9 +157,6 @@ tests: - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=snapshot/10_basic/Create a source only snapshot and then restore it} issue: https://github.com/elastic/elasticsearch/issues/117295 -- class: org.elasticsearch.xpack.searchablesnapshots.RetrySearchIntegTests - method: testRetryPointInTime - issue: https://github.com/elastic/elasticsearch/issues/117116 - class: org.elasticsearch.xpack.inference.DefaultEndPointsIT method: testInferDeploysDefaultElser issue: https://github.com/elastic/elasticsearch/issues/114913 diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 1a1219825bbb..40a209c5f0f1 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -54,11 +54,9 @@ static TransportVersion def(int id) { public static final TransportVersion ZERO = def(0); public static final TransportVersion V_7_0_0 = def(7_00_00_99); public static final TransportVersion V_7_0_1 = def(7_00_01_99); - public static final TransportVersion V_7_1_0 = def(7_01_00_99); public static final TransportVersion V_7_2_0 = def(7_02_00_99); public static final TransportVersion V_7_2_1 = def(7_02_01_99); public static final TransportVersion V_7_3_0 = def(7_03_00_99); - public static final TransportVersion V_7_3_2 = def(7_03_02_99); public static final TransportVersion V_7_4_0 = def(7_04_00_99); public static final TransportVersion V_7_5_0 = def(7_05_00_99); public static final TransportVersion V_7_6_0 = def(7_06_00_99); diff --git a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java index 8bc81fef2157..a2bf70bf6e08 100644 --- a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -128,6 +128,7 @@ protected Collection> nodePlugins() { @After public void assertConsistentHistoryInLuceneIndex() throws Exception { + internalCluster().beforeIndexDeletion(); internalCluster().assertConsistentHistoryBetweenTranslogAndLuceneIndex(); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityFeatureSetUsage.java index 2793ddea3bd0..33f1a9a469b6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityFeatureSetUsage.java @@ -55,10 +55,8 @@ public SecurityFeatureSetUsage(StreamInput in) throws IOException { realmsUsage = in.readGenericMap(); rolesStoreUsage = in.readGenericMap(); sslUsage = in.readGenericMap(); - if 
(in.getTransportVersion().onOrAfter(TransportVersions.V_7_2_0)) { - tokenServiceUsage = in.readGenericMap(); - apiKeyServiceUsage = in.readGenericMap(); - } + tokenServiceUsage = in.readGenericMap(); + apiKeyServiceUsage = in.readGenericMap(); auditUsage = in.readGenericMap(); ipFilterUsage = in.readGenericMap(); anonymousUsage = in.readGenericMap(); @@ -125,10 +123,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeGenericMap(realmsUsage); out.writeGenericMap(rolesStoreUsage); out.writeGenericMap(sslUsage); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_2_0)) { - out.writeGenericMap(tokenServiceUsage); - out.writeGenericMap(apiKeyServiceUsage); - } + out.writeGenericMap(tokenServiceUsage); + out.writeGenericMap(apiKeyServiceUsage); out.writeGenericMap(auditUsage); out.writeGenericMap(ipFilterUsage); out.writeGenericMap(anonymousUsage); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/TokensInvalidationResult.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/TokensInvalidationResult.java index 8fe018a82546..59c16fc8a7a7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/TokensInvalidationResult.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/TokensInvalidationResult.java @@ -59,9 +59,6 @@ public TokensInvalidationResult(StreamInput in) throws IOException { this.invalidatedTokens = in.readStringCollectionAsList(); this.previouslyInvalidatedTokens = in.readStringCollectionAsList(); this.errors = in.readCollectionAsList(StreamInput::readException); - if (in.getTransportVersion().before(TransportVersions.V_7_2_0)) { - in.readVInt(); - } if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0)) { this.restStatus = RestStatus.readFrom(in); } @@ -111,9 +108,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeStringCollection(invalidatedTokens); out.writeStringCollection(previouslyInvalidatedTokens); out.writeCollection(errors, StreamOutput::writeException); - if (out.getTransportVersion().before(TransportVersions.V_7_2_0)) { - out.writeVInt(5); - } if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0)) { RestStatus.writeTo(out, restStatus); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java index cc589b53eaa1..5e19b26b8f4d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java @@ -331,6 +331,8 @@ static RoleDescriptor kibanaSystem(String name) { ".logs-endpoint.diagnostic.collection-*", "logs-apm-*", "logs-apm.*-*", + "logs-cloud_security_posture.findings-*", + "logs-cloud_security_posture.vulnerabilities-*", "metrics-apm-*", "metrics-apm.*-*", "traces-apm-*", diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java index eeffa1db5485..b69b0ece8996 100644 --- 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java @@ -1586,10 +1586,8 @@ public void testKibanaSystemRole() { final IndexAbstraction indexAbstraction = mockIndexAbstraction(cspIndex); assertThat(kibanaRole.indices().allowedIndicesMatcher("indices:foo").test(indexAbstraction), is(false)); assertThat(kibanaRole.indices().allowedIndicesMatcher("indices:bar").test(indexAbstraction), is(false)); - assertThat( - kibanaRole.indices().allowedIndicesMatcher(TransportDeleteIndexAction.TYPE.name()).test(indexAbstraction), - is(false) - ); + // Ensure privileges necessary for ILM policies in Cloud Security Posture Package + assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportDeleteIndexAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(GetIndexAction.NAME).test(indexAbstraction), is(true)); assertThat( kibanaRole.indices().allowedIndicesMatcher(TransportCreateIndexAction.TYPE.name()).test(indexAbstraction), @@ -1613,10 +1611,8 @@ public void testKibanaSystemRole() { final IndexAbstraction indexAbstraction = mockIndexAbstraction(cspIndex); assertThat(kibanaRole.indices().allowedIndicesMatcher("indices:foo").test(indexAbstraction), is(false)); assertThat(kibanaRole.indices().allowedIndicesMatcher("indices:bar").test(indexAbstraction), is(false)); - assertThat( - kibanaRole.indices().allowedIndicesMatcher(TransportDeleteIndexAction.TYPE.name()).test(indexAbstraction), - is(false) - ); + // Ensure privileges necessary for ILM policies in Cloud Security Posture Package + assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportDeleteIndexAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(GetIndexAction.NAME).test(indexAbstraction), is(true)); assertThat( kibanaRole.indices().allowedIndicesMatcher(TransportCreateIndexAction.TYPE.name()).test(indexAbstraction), @@ -1710,6 +1707,7 @@ public void testKibanaSystemRole() { kibanaRole.indices().allowedIndicesMatcher("indices:monitor/" + randomAlphaOfLengthBetween(3, 8)).test(indexAbstraction), is(true) ); + }); // cloud_defend diff --git a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/DefaultEndPointsIT.java b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/DefaultEndPointsIT.java index 068b3e1f4ce0..ba3e48e11928 100644 --- a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/DefaultEndPointsIT.java +++ b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/DefaultEndPointsIT.java @@ -57,9 +57,6 @@ public void testGet() throws IOException { var e5Model = getModel(ElasticsearchInternalService.DEFAULT_E5_ID); assertDefaultE5Config(e5Model); - - var rerankModel = getModel(ElasticsearchInternalService.DEFAULT_RERANK_ID); - assertDefaultRerankConfig(rerankModel); } @SuppressWarnings("unchecked") @@ -128,42 +125,6 @@ private static void assertDefaultE5Config(Map<String, Object> modelConfig) { assertDefaultChunkingSettings(modelConfig); } - @SuppressWarnings("unchecked") - public void testInferDeploysDefaultRerank() throws
IOException { - var model = getModel(ElasticsearchInternalService.DEFAULT_RERANK_ID); - assertDefaultRerankConfig(model); - - var inputs = List.of("Hello World", "Goodnight moon"); - var query = "but why"; - var queryParams = Map.of("timeout", "120s"); - var results = infer(ElasticsearchInternalService.DEFAULT_RERANK_ID, TaskType.RERANK, inputs, query, queryParams); - var embeddings = (List>) results.get("rerank"); - assertThat(results.toString(), embeddings, hasSize(2)); - } - - @SuppressWarnings("unchecked") - private static void assertDefaultRerankConfig(Map modelConfig) { - assertEquals(modelConfig.toString(), ElasticsearchInternalService.DEFAULT_RERANK_ID, modelConfig.get("inference_id")); - assertEquals(modelConfig.toString(), ElasticsearchInternalService.NAME, modelConfig.get("service")); - assertEquals(modelConfig.toString(), TaskType.RERANK.toString(), modelConfig.get("task_type")); - - var serviceSettings = (Map) modelConfig.get("service_settings"); - assertThat(modelConfig.toString(), serviceSettings.get("model_id"), is(".rerank-v1")); - assertEquals(modelConfig.toString(), 1, serviceSettings.get("num_threads")); - - var adaptiveAllocations = (Map) serviceSettings.get("adaptive_allocations"); - assertThat( - modelConfig.toString(), - adaptiveAllocations, - Matchers.is(Map.of("enabled", true, "min_number_of_allocations", 0, "max_number_of_allocations", 32)) - ); - - var chunkingSettings = (Map) modelConfig.get("chunking_settings"); - assertNull(chunkingSettings); - var taskSettings = (Map) modelConfig.get("task_settings"); - assertThat(modelConfig.toString(), taskSettings, Matchers.is(Map.of("return_documents", true))); - } - @SuppressWarnings("unchecked") private static void assertDefaultChunkingSettings(Map modelConfig) { var chunkingSettings = (Map) modelConfig.get("chunking_settings"); @@ -198,7 +159,6 @@ public void onFailure(Exception exception) { var request = createInferenceRequest( Strings.format("_inference/%s", ElasticsearchInternalService.DEFAULT_ELSER_ID), inputs, - null, queryParams ); client().performRequestAsync(request, listener); diff --git a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceBaseRestTest.java b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceBaseRestTest.java index 1716057cdfe4..07ce2fe00642 100644 --- a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceBaseRestTest.java +++ b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceBaseRestTest.java @@ -336,7 +336,7 @@ private List getInternalAsList(String endpoint) throws IOException { protected Map infer(String modelId, List input) throws IOException { var endpoint = Strings.format("_inference/%s", modelId); - return inferInternal(endpoint, input, null, Map.of()); + return inferInternal(endpoint, input, Map.of()); } protected Deque streamInferOnMockService(String modelId, TaskType taskType, List input) throws Exception { @@ -352,7 +352,7 @@ protected Deque unifiedCompletionInferOnMockService(String mode private Deque callAsync(String endpoint, List input) throws Exception { var request = new Request("POST", endpoint); - request.setJsonEntity(jsonBody(input, null)); + request.setJsonEntity(jsonBody(input)); return execAsyncCall(request); } @@ -394,60 +394,33 @@ private String createUnifiedJsonBody(List input, String role) throws IOE 
protected Map infer(String modelId, TaskType taskType, List input) throws IOException { var endpoint = Strings.format("_inference/%s/%s", taskType, modelId); - return inferInternal(endpoint, input, null, Map.of()); + return inferInternal(endpoint, input, Map.of()); } protected Map infer(String modelId, TaskType taskType, List input, Map queryParameters) throws IOException { var endpoint = Strings.format("_inference/%s/%s?error_trace", taskType, modelId); - return inferInternal(endpoint, input, null, queryParameters); + return inferInternal(endpoint, input, queryParameters); } - protected Map infer( - String modelId, - TaskType taskType, - List input, - String query, - Map queryParameters - ) throws IOException { - var endpoint = Strings.format("_inference/%s/%s?error_trace", taskType, modelId); - return inferInternal(endpoint, input, query, queryParameters); - } - - protected Request createInferenceRequest( - String endpoint, - List input, - @Nullable String query, - Map queryParameters - ) { + protected Request createInferenceRequest(String endpoint, List input, Map queryParameters) { var request = new Request("POST", endpoint); - request.setJsonEntity(jsonBody(input, query)); + request.setJsonEntity(jsonBody(input)); if (queryParameters.isEmpty() == false) { request.addParameters(queryParameters); } return request; } - private Map inferInternal( - String endpoint, - List input, - @Nullable String query, - Map queryParameters - ) throws IOException { - var request = createInferenceRequest(endpoint, input, query, queryParameters); + private Map inferInternal(String endpoint, List input, Map queryParameters) throws IOException { + var request = createInferenceRequest(endpoint, input, queryParameters); var response = client().performRequest(request); assertOkOrCreated(response); return entityAsMap(response); } - private String jsonBody(List input, @Nullable String query) { - final StringBuilder bodyBuilder = new StringBuilder("{"); - - if (query != null) { - bodyBuilder.append("\"query\":\"").append(query).append("\","); - } - - bodyBuilder.append("\"input\": ["); + private String jsonBody(List input) { + var bodyBuilder = new StringBuilder("{\"input\": ["); for (var in : input) { bodyBuilder.append('"').append(in).append('"').append(','); } diff --git a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java index 2099ec8287a7..1e19491aeaa6 100644 --- a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java +++ b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java @@ -49,7 +49,7 @@ public void testCRUD() throws IOException { } var getAllModels = getAllModels(); - int numModels = 12; + int numModels = 11; assertThat(getAllModels, hasSize(numModels)); var getSparseModels = getModels("_all", TaskType.SPARSE_EMBEDDING); @@ -537,7 +537,7 @@ private static String expectedResult(String input) { } public void testGetZeroModels() throws IOException { - var models = getModels("_all", TaskType.COMPLETION); + var models = getModels("_all", TaskType.RERANK); assertThat(models, empty()); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceNamedWriteablesProvider.java 
b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceNamedWriteablesProvider.java index a4187f4c4fa9..b83c098ca808 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceNamedWriteablesProvider.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceNamedWriteablesProvider.java @@ -63,12 +63,12 @@ import org.elasticsearch.xpack.inference.services.elastic.ElasticInferenceServiceSparseEmbeddingsServiceSettings; import org.elasticsearch.xpack.inference.services.elasticsearch.CustomElandInternalServiceSettings; import org.elasticsearch.xpack.inference.services.elasticsearch.CustomElandInternalTextEmbeddingServiceSettings; +import org.elasticsearch.xpack.inference.services.elasticsearch.CustomElandRerankTaskSettings; import org.elasticsearch.xpack.inference.services.elasticsearch.ElasticRerankerServiceSettings; import org.elasticsearch.xpack.inference.services.elasticsearch.ElasticsearchInternalServiceSettings; import org.elasticsearch.xpack.inference.services.elasticsearch.ElserInternalServiceSettings; import org.elasticsearch.xpack.inference.services.elasticsearch.ElserMlNodeTaskSettings; import org.elasticsearch.xpack.inference.services.elasticsearch.MultilingualE5SmallInternalServiceSettings; -import org.elasticsearch.xpack.inference.services.elasticsearch.RerankTaskSettings; import org.elasticsearch.xpack.inference.services.googleaistudio.completion.GoogleAiStudioCompletionServiceSettings; import org.elasticsearch.xpack.inference.services.googleaistudio.embeddings.GoogleAiStudioEmbeddingsServiceSettings; import org.elasticsearch.xpack.inference.services.googlevertexai.GoogleVertexAiSecretSettings; @@ -518,7 +518,9 @@ private static void addCustomElandWriteables(final List namedWriteables) { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandRerankModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandRerankModel.java index 6388bb33bb78..f620b15680c8 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandRerankModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandRerankModel.java @@ -17,7 +17,7 @@ import java.util.HashMap; import java.util.Map; -import static org.elasticsearch.xpack.inference.services.elasticsearch.RerankTaskSettings.RETURN_DOCUMENTS; +import static org.elasticsearch.xpack.inference.services.elasticsearch.CustomElandRerankTaskSettings.RETURN_DOCUMENTS; public class CustomElandRerankModel extends CustomElandModel { @@ -26,7 +26,7 @@ public CustomElandRerankModel( TaskType taskType, String service, CustomElandInternalServiceSettings serviceSettings, - RerankTaskSettings taskSettings + CustomElandRerankTaskSettings taskSettings ) { super(inferenceEntityId, taskType, service, serviceSettings, taskSettings); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/RerankTaskSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandRerankTaskSettings.java similarity index 79% rename from x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/RerankTaskSettings.java rename to 
x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandRerankTaskSettings.java index 3c25f7a6a901..a0be1661b860 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/RerankTaskSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandRerankTaskSettings.java @@ -26,14 +26,14 @@ /** * Defines the task settings for internal rerank service. */ -public class RerankTaskSettings implements TaskSettings { +public class CustomElandRerankTaskSettings implements TaskSettings { public static final String NAME = "custom_eland_rerank_task_settings"; public static final String RETURN_DOCUMENTS = "return_documents"; - static final RerankTaskSettings DEFAULT_SETTINGS = new RerankTaskSettings(Boolean.TRUE); + static final CustomElandRerankTaskSettings DEFAULT_SETTINGS = new CustomElandRerankTaskSettings(Boolean.TRUE); - public static RerankTaskSettings defaultsFromMap(Map map) { + public static CustomElandRerankTaskSettings defaultsFromMap(Map map) { ValidationException validationException = new ValidationException(); if (map == null || map.isEmpty()) { @@ -49,7 +49,7 @@ public static RerankTaskSettings defaultsFromMap(Map map) { returnDocuments = true; } - return new RerankTaskSettings(returnDocuments); + return new CustomElandRerankTaskSettings(returnDocuments); } /** @@ -57,13 +57,13 @@ public static RerankTaskSettings defaultsFromMap(Map map) { * @param map source map * @return Task settings */ - public static RerankTaskSettings fromMap(Map map) { + public static CustomElandRerankTaskSettings fromMap(Map map) { if (map == null || map.isEmpty()) { return DEFAULT_SETTINGS; } Boolean returnDocuments = extractOptionalBoolean(map, RETURN_DOCUMENTS, new ValidationException()); - return new RerankTaskSettings(returnDocuments); + return new CustomElandRerankTaskSettings(returnDocuments); } /** @@ -74,17 +74,20 @@ public static RerankTaskSettings fromMap(Map map) { * @param requestTaskSettings the settings passed in within the task_settings field of the request * @return Either {@code originalSettings} or {@code requestTaskSettings} */ - public static RerankTaskSettings of(RerankTaskSettings originalSettings, RerankTaskSettings requestTaskSettings) { + public static CustomElandRerankTaskSettings of( + CustomElandRerankTaskSettings originalSettings, + CustomElandRerankTaskSettings requestTaskSettings + ) { return requestTaskSettings.returnDocuments() != null ? 
requestTaskSettings : originalSettings; } private final Boolean returnDocuments; - public RerankTaskSettings(StreamInput in) throws IOException { + public CustomElandRerankTaskSettings(StreamInput in) throws IOException { this(in.readOptionalBoolean()); } - public RerankTaskSettings(@Nullable Boolean doReturnDocuments) { + public CustomElandRerankTaskSettings(@Nullable Boolean doReturnDocuments) { if (doReturnDocuments == null) { this.returnDocuments = true; } else { @@ -130,7 +133,7 @@ public Boolean returnDocuments() { public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - RerankTaskSettings that = (RerankTaskSettings) o; + CustomElandRerankTaskSettings that = (CustomElandRerankTaskSettings) o; return Objects.equals(returnDocuments, that.returnDocuments); } @@ -141,7 +144,7 @@ public int hashCode() { @Override public TaskSettings updatedTaskSettings(Map newSettings) { - RerankTaskSettings updatedSettings = RerankTaskSettings.fromMap(new HashMap<>(newSettings)); + CustomElandRerankTaskSettings updatedSettings = CustomElandRerankTaskSettings.fromMap(new HashMap<>(newSettings)); return of(this, updatedSettings); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticRerankerModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticRerankerModel.java index 276bce6dbe8f..115cc9f05599 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticRerankerModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticRerankerModel.java @@ -9,6 +9,7 @@ import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.inference.ChunkingSettings; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.TaskType; import org.elasticsearch.xpack.core.ml.action.CreateTrainedModelAssignmentAction; @@ -21,9 +22,9 @@ public ElasticRerankerModel( TaskType taskType, String service, ElasticRerankerServiceSettings serviceSettings, - RerankTaskSettings taskSettings + ChunkingSettings chunkingSettings ) { - super(inferenceEntityId, taskType, service, serviceSettings, taskSettings); + super(inferenceEntityId, taskType, service, serviceSettings, chunkingSettings); } @Override diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java index 5f613d6be586..8cb91782e238 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java @@ -103,7 +103,6 @@ public class ElasticsearchInternalService extends BaseElasticsearchInternalServi public static final int EMBEDDING_MAX_BATCH_SIZE = 10; public static final String DEFAULT_ELSER_ID = ".elser-2-elasticsearch"; public static final String DEFAULT_E5_ID = ".multilingual-e5-small-elasticsearch"; - public static final String DEFAULT_RERANK_ID = ".rerank-v1-elasticsearch"; private static final EnumSet supportedTaskTypes = EnumSet.of( TaskType.RERANK, @@ -228,7 +227,7 @@ public void 
parseRequestConfig( ) ); } else if (RERANKER_ID.equals(modelId)) { - rerankerCase(inferenceEntityId, taskType, config, serviceSettingsMap, taskSettingsMap, modelListener); + rerankerCase(inferenceEntityId, taskType, config, serviceSettingsMap, chunkingSettings, modelListener); } else { customElandCase(inferenceEntityId, taskType, serviceSettingsMap, taskSettingsMap, chunkingSettings, modelListener); } @@ -311,7 +310,7 @@ private static CustomElandModel createCustomElandModel( taskType, NAME, elandServiceSettings(serviceSettings, context), - RerankTaskSettings.fromMap(taskSettings) + CustomElandRerankTaskSettings.fromMap(taskSettings) ); default -> throw new ElasticsearchStatusException(TaskType.unsupportedTaskTypeErrorMsg(taskType, NAME), RestStatus.BAD_REQUEST); }; @@ -334,7 +333,7 @@ private void rerankerCase( TaskType taskType, Map config, Map serviceSettingsMap, - Map taskSettingsMap, + ChunkingSettings chunkingSettings, ActionListener modelListener ) { @@ -349,7 +348,7 @@ private void rerankerCase( taskType, NAME, new ElasticRerankerServiceSettings(esServiceSettingsBuilder.build()), - RerankTaskSettings.fromMap(taskSettingsMap) + chunkingSettings ) ); } @@ -515,14 +514,6 @@ public Model parsePersistedConfig(String inferenceEntityId, TaskType taskType, M ElserMlNodeTaskSettings.DEFAULT, chunkingSettings ); - } else if (modelId.equals(RERANKER_ID)) { - return new ElasticRerankerModel( - inferenceEntityId, - taskType, - NAME, - new ElasticRerankerServiceSettings(ElasticsearchInternalServiceSettings.fromPersistedMap(serviceSettingsMap)), - RerankTaskSettings.fromMap(taskSettingsMap) - ); } else { return createCustomElandModel( inferenceEntityId, @@ -674,23 +665,21 @@ public void inferRerank( ) { var request = buildInferenceRequest(model.mlNodeDeploymentId(), new TextSimilarityConfigUpdate(query), inputs, inputType, timeout); - var returnDocs = Boolean.TRUE; - if (model.getTaskSettings() instanceof RerankTaskSettings modelSettings) { - var requestSettings = RerankTaskSettings.fromMap(requestTaskSettings); - returnDocs = RerankTaskSettings.of(modelSettings, requestSettings).returnDocuments(); - } + var modelSettings = (CustomElandRerankTaskSettings) model.getTaskSettings(); + var requestSettings = CustomElandRerankTaskSettings.fromMap(requestTaskSettings); + Boolean returnDocs = CustomElandRerankTaskSettings.of(modelSettings, requestSettings).returnDocuments(); Function inputSupplier = returnDocs == Boolean.TRUE ? 
inputs::get : i -> null; - ActionListener mlResultsListener = listener.delegateFailureAndWrap( - (l, inferenceResult) -> l.onResponse(textSimilarityResultsToRankedDocs(inferenceResult.getInferenceResults(), inputSupplier)) - ); - - var maybeDeployListener = mlResultsListener.delegateResponse( - (l, exception) -> maybeStartDeployment(model, exception, request, mlResultsListener) + client.execute( + InferModelAction.INSTANCE, + request, + listener.delegateFailureAndWrap( + (l, inferenceResult) -> l.onResponse( + textSimilarityResultsToRankedDocs(inferenceResult.getInferenceResults(), inputSupplier) + ) + ) ); - - client.execute(InferModelAction.INSTANCE, request, maybeDeployListener); } public void chunkedInfer( @@ -834,8 +823,7 @@ private RankedDocsResults textSimilarityResultsToRankedDocs( public List defaultConfigIds() { return List.of( new DefaultConfigId(DEFAULT_ELSER_ID, TaskType.SPARSE_EMBEDDING, this), - new DefaultConfigId(DEFAULT_E5_ID, TaskType.TEXT_EMBEDDING, this), - new DefaultConfigId(DEFAULT_RERANK_ID, TaskType.RERANK, this) + new DefaultConfigId(DEFAULT_E5_ID, TaskType.TEXT_EMBEDDING, this) ); } @@ -928,19 +916,12 @@ private List defaultConfigs(boolean useLinuxOptimizedModel) { ), ChunkingSettingsBuilder.DEFAULT_SETTINGS ); - var defaultRerank = new ElasticRerankerModel( - DEFAULT_RERANK_ID, - TaskType.RERANK, - NAME, - new ElasticRerankerServiceSettings(null, 1, RERANKER_ID, new AdaptiveAllocationsSettings(Boolean.TRUE, 0, 32)), - RerankTaskSettings.DEFAULT_SETTINGS - ); - return List.of(defaultElser, defaultE5, defaultRerank); + return List.of(defaultElser, defaultE5); } @Override boolean isDefaultId(String inferenceId) { - return DEFAULT_ELSER_ID.equals(inferenceId) || DEFAULT_E5_ID.equals(inferenceId) || DEFAULT_RERANK_ID.equals(inferenceId); + return DEFAULT_ELSER_ID.equals(inferenceId) || DEFAULT_E5_ID.equals(inferenceId); } static EmbeddingRequestChunker.EmbeddingType embeddingTypeFromTaskTypeAndSettings( diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/RerankTaskSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandRerankTaskSettingsTests.java similarity index 53% rename from x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/RerankTaskSettingsTests.java rename to x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandRerankTaskSettingsTests.java index 255454a1ed62..4207896fc54f 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/RerankTaskSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandRerankTaskSettingsTests.java @@ -22,7 +22,7 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.sameInstance; -public class RerankTaskSettingsTests extends AbstractWireSerializingTestCase { +public class CustomElandRerankTaskSettingsTests extends AbstractWireSerializingTestCase { public void testIsEmpty() { var randomSettings = createRandom(); @@ -35,9 +35,9 @@ public void testUpdatedTaskSettings() { var newSettings = createRandom(); Map newSettingsMap = new HashMap<>(); if (newSettings.returnDocuments() != null) { - newSettingsMap.put(RerankTaskSettings.RETURN_DOCUMENTS, newSettings.returnDocuments()); + newSettingsMap.put(CustomElandRerankTaskSettings.RETURN_DOCUMENTS, 
newSettings.returnDocuments()); } - RerankTaskSettings updatedSettings = (RerankTaskSettings) initialSettings.updatedTaskSettings( + CustomElandRerankTaskSettings updatedSettings = (CustomElandRerankTaskSettings) initialSettings.updatedTaskSettings( Collections.unmodifiableMap(newSettingsMap) ); if (newSettings.returnDocuments() == null) { @@ -48,37 +48,37 @@ public void testUpdatedTaskSettings() { } public void testDefaultsFromMap_MapIsNull_ReturnsDefaultSettings() { - var rerankTaskSettings = RerankTaskSettings.defaultsFromMap(null); + var customElandRerankTaskSettings = CustomElandRerankTaskSettings.defaultsFromMap(null); - assertThat(rerankTaskSettings, sameInstance(RerankTaskSettings.DEFAULT_SETTINGS)); + assertThat(customElandRerankTaskSettings, sameInstance(CustomElandRerankTaskSettings.DEFAULT_SETTINGS)); } public void testDefaultsFromMap_MapIsEmpty_ReturnsDefaultSettings() { - var rerankTaskSettings = RerankTaskSettings.defaultsFromMap(new HashMap<>()); + var customElandRerankTaskSettings = CustomElandRerankTaskSettings.defaultsFromMap(new HashMap<>()); - assertThat(rerankTaskSettings, sameInstance(RerankTaskSettings.DEFAULT_SETTINGS)); + assertThat(customElandRerankTaskSettings, sameInstance(CustomElandRerankTaskSettings.DEFAULT_SETTINGS)); } public void testDefaultsFromMap_ExtractedReturnDocumentsNull_SetsReturnDocumentToTrue() { - var rerankTaskSettings = RerankTaskSettings.defaultsFromMap(new HashMap<>()); + var customElandRerankTaskSettings = CustomElandRerankTaskSettings.defaultsFromMap(new HashMap<>()); - assertThat(rerankTaskSettings.returnDocuments(), is(Boolean.TRUE)); + assertThat(customElandRerankTaskSettings.returnDocuments(), is(Boolean.TRUE)); } public void testFromMap_MapIsNull_ReturnsDefaultSettings() { - var rerankTaskSettings = RerankTaskSettings.fromMap(null); + var customElandRerankTaskSettings = CustomElandRerankTaskSettings.fromMap(null); - assertThat(rerankTaskSettings, sameInstance(RerankTaskSettings.DEFAULT_SETTINGS)); + assertThat(customElandRerankTaskSettings, sameInstance(CustomElandRerankTaskSettings.DEFAULT_SETTINGS)); } public void testFromMap_MapIsEmpty_ReturnsDefaultSettings() { - var rerankTaskSettings = RerankTaskSettings.fromMap(new HashMap<>()); + var customElandRerankTaskSettings = CustomElandRerankTaskSettings.fromMap(new HashMap<>()); - assertThat(rerankTaskSettings, sameInstance(RerankTaskSettings.DEFAULT_SETTINGS)); + assertThat(customElandRerankTaskSettings, sameInstance(CustomElandRerankTaskSettings.DEFAULT_SETTINGS)); } public void testToXContent_WritesAllValues() throws IOException { - var serviceSettings = new RerankTaskSettings(Boolean.TRUE); + var serviceSettings = new CustomElandRerankTaskSettings(Boolean.TRUE); XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); serviceSettings.toXContent(builder, null); @@ -89,30 +89,30 @@ public void testToXContent_WritesAllValues() throws IOException { } public void testOf_PrefersNonNullRequestTaskSettings() { - var originalSettings = new RerankTaskSettings(Boolean.FALSE); - var requestTaskSettings = new RerankTaskSettings(Boolean.TRUE); + var originalSettings = new CustomElandRerankTaskSettings(Boolean.FALSE); + var requestTaskSettings = new CustomElandRerankTaskSettings(Boolean.TRUE); - var taskSettings = RerankTaskSettings.of(originalSettings, requestTaskSettings); + var taskSettings = CustomElandRerankTaskSettings.of(originalSettings, requestTaskSettings); assertThat(taskSettings, sameInstance(requestTaskSettings)); } - private static RerankTaskSettings 
createRandom() { - return new RerankTaskSettings(randomOptionalBoolean()); + private static CustomElandRerankTaskSettings createRandom() { + return new CustomElandRerankTaskSettings(randomOptionalBoolean()); } @Override - protected Writeable.Reader instanceReader() { - return RerankTaskSettings::new; + protected Writeable.Reader instanceReader() { + return CustomElandRerankTaskSettings::new; } @Override - protected RerankTaskSettings createTestInstance() { + protected CustomElandRerankTaskSettings createTestInstance() { return createRandom(); } @Override - protected RerankTaskSettings mutateInstance(RerankTaskSettings instance) throws IOException { - return randomValueOtherThan(instance, RerankTaskSettingsTests::createRandom); + protected CustomElandRerankTaskSettings mutateInstance(CustomElandRerankTaskSettings instance) throws IOException { + return randomValueOtherThan(instance, CustomElandRerankTaskSettingsTests::createRandom); } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java index 17e6583f11c8..306509ea60cf 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java @@ -534,13 +534,16 @@ public void testParseRequestConfig_Rerank() { ) ); var returnDocs = randomBoolean(); - settings.put(ModelConfigurations.TASK_SETTINGS, new HashMap<>(Map.of(RerankTaskSettings.RETURN_DOCUMENTS, returnDocs))); + settings.put( + ModelConfigurations.TASK_SETTINGS, + new HashMap<>(Map.of(CustomElandRerankTaskSettings.RETURN_DOCUMENTS, returnDocs)) + ); ActionListener modelListener = ActionListener.wrap(model -> { assertThat(model, instanceOf(CustomElandRerankModel.class)); - assertThat(model.getTaskSettings(), instanceOf(RerankTaskSettings.class)); + assertThat(model.getTaskSettings(), instanceOf(CustomElandRerankTaskSettings.class)); assertThat(model.getServiceSettings(), instanceOf(CustomElandInternalServiceSettings.class)); - assertEquals(returnDocs, ((RerankTaskSettings) model.getTaskSettings()).returnDocuments()); + assertEquals(returnDocs, ((CustomElandRerankTaskSettings) model.getTaskSettings()).returnDocuments()); }, e -> { fail("Model parsing failed " + e.getMessage()); }); service.parseRequestConfig(randomInferenceEntityId, TaskType.RERANK, settings, modelListener); @@ -580,9 +583,9 @@ public void testParseRequestConfig_Rerank_DefaultTaskSettings() { ActionListener modelListener = ActionListener.wrap(model -> { assertThat(model, instanceOf(CustomElandRerankModel.class)); - assertThat(model.getTaskSettings(), instanceOf(RerankTaskSettings.class)); + assertThat(model.getTaskSettings(), instanceOf(CustomElandRerankTaskSettings.class)); assertThat(model.getServiceSettings(), instanceOf(CustomElandInternalServiceSettings.class)); - assertEquals(Boolean.TRUE, ((RerankTaskSettings) model.getTaskSettings()).returnDocuments()); + assertEquals(Boolean.TRUE, ((CustomElandRerankTaskSettings) model.getTaskSettings()).returnDocuments()); }, e -> { fail("Model parsing failed " + e.getMessage()); }); service.parseRequestConfig(randomInferenceEntityId, TaskType.RERANK, settings, modelListener); @@ -1246,11 +1249,14 @@ public void 
testParsePersistedConfig_Rerank() { ); settings.put(ElasticsearchInternalServiceSettings.MODEL_ID, "foo"); var returnDocs = randomBoolean(); - settings.put(ModelConfigurations.TASK_SETTINGS, new HashMap<>(Map.of(RerankTaskSettings.RETURN_DOCUMENTS, returnDocs))); + settings.put( + ModelConfigurations.TASK_SETTINGS, + new HashMap<>(Map.of(CustomElandRerankTaskSettings.RETURN_DOCUMENTS, returnDocs)) + ); var model = service.parsePersistedConfig(randomInferenceEntityId, TaskType.RERANK, settings); - assertThat(model.getTaskSettings(), instanceOf(RerankTaskSettings.class)); - assertEquals(returnDocs, ((RerankTaskSettings) model.getTaskSettings()).returnDocuments()); + assertThat(model.getTaskSettings(), instanceOf(CustomElandRerankTaskSettings.class)); + assertEquals(returnDocs, ((CustomElandRerankTaskSettings) model.getTaskSettings()).returnDocuments()); } // without task settings @@ -1273,8 +1279,8 @@ public void testParsePersistedConfig_Rerank() { settings.put(ElasticsearchInternalServiceSettings.MODEL_ID, "foo"); var model = service.parsePersistedConfig(randomInferenceEntityId, TaskType.RERANK, settings); - assertThat(model.getTaskSettings(), instanceOf(RerankTaskSettings.class)); - assertTrue(((RerankTaskSettings) model.getTaskSettings()).returnDocuments()); + assertThat(model.getTaskSettings(), instanceOf(CustomElandRerankTaskSettings.class)); + assertTrue(((CustomElandRerankTaskSettings) model.getTaskSettings()).returnDocuments()); } } @@ -1329,7 +1335,7 @@ private CustomElandModel getCustomElandModel(TaskType taskType) { taskType, ElasticsearchInternalService.NAME, new CustomElandInternalServiceSettings(1, 4, "custom-model", null), - RerankTaskSettings.DEFAULT_SETTINGS + CustomElandRerankTaskSettings.DEFAULT_SETTINGS ); } else if (taskType == TaskType.TEXT_EMBEDDING) { var serviceSettings = new CustomElandInternalTextEmbeddingServiceSettings(1, 4, "custom-model", null); @@ -1522,30 +1528,20 @@ public void testEmbeddingTypeFromTaskTypeAndSettings() { ) ); - var e1 = expectThrows( + var e = expectThrows( ElasticsearchStatusException.class, () -> ElasticsearchInternalService.embeddingTypeFromTaskTypeAndSettings( TaskType.COMPLETION, new ElasticsearchInternalServiceSettings(1, 1, "foo", null) ) ); - assertThat(e1.getMessage(), containsString("Chunking is not supported for task type [completion]")); - - var e2 = expectThrows( - ElasticsearchStatusException.class, - () -> ElasticsearchInternalService.embeddingTypeFromTaskTypeAndSettings( - TaskType.RERANK, - new ElasticsearchInternalServiceSettings(1, 1, "foo", null) - ) - ); - assertThat(e2.getMessage(), containsString("Chunking is not supported for task type [rerank]")); + assertThat(e.getMessage(), containsString("Chunking is not supported for task type [completion]")); } public void testIsDefaultId() { var service = createService(mock(Client.class)); assertTrue(service.isDefaultId(".elser-2-elasticsearch")); assertTrue(service.isDefaultId(".multilingual-e5-small-elasticsearch")); - assertTrue(service.isDefaultId(".rerank-v1-elasticsearch")); assertFalse(service.isDefaultId("foo")); } diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java index d4bbd4495df2..23e414c0dc1b 100644 --- 
a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java @@ -42,7 +42,6 @@ import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.NodeRoles; -import org.elasticsearch.test.junit.annotations.TestIssueLogging; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xpack.core.searchablesnapshots.MountSearchableSnapshotAction; @@ -788,11 +787,6 @@ public void testQueryPhaseIsExecutedInAnAvailableNodeWhenAllShardsCanBeSkipped() * Can match against searchable snapshots is tested via both the Search API and the SearchShards (transport-only) API. * The latter is a way to do only a can-match rather than all search phases. */ - @TestIssueLogging( - issueUrl = "https://github.com/elastic/elasticsearch/issues/97878", - value = "org.elasticsearch.snapshots:DEBUG,org.elasticsearch.indices.recovery:DEBUG,org.elasticsearch.action.search:DEBUG" - ) - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105339") public void testSearchableSnapshotShardsThatHaveMatchingDataAreNotSkippedOnTheCoordinatingNode() throws Exception { internalCluster().startMasterOnlyNode(); internalCluster().startCoordinatingOnlyNode(Settings.EMPTY); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java index fef1a98ca67e..b56ea7ae3e45 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java @@ -327,8 +327,8 @@ public void testInvalidateNotValidAccessTokens() throws Exception { ResponseException.class, () -> invalidateAccessToken( tokenService.prependVersionAndEncodeAccessToken( - TransportVersions.V_7_3_2, - tokenService.getRandomTokenBytes(TransportVersions.V_7_3_2, randomBoolean()).v1() + TransportVersions.MINIMUM_COMPATIBLE, + tokenService.getRandomTokenBytes(TransportVersions.MINIMUM_COMPATIBLE, randomBoolean()).v1() ) ) ); @@ -347,7 +347,7 @@ public void testInvalidateNotValidAccessTokens() throws Exception { byte[] longerAccessToken = new byte[randomIntBetween(17, 24)]; random().nextBytes(longerAccessToken); invalidateResponse = invalidateAccessToken( - tokenService.prependVersionAndEncodeAccessToken(TransportVersions.V_7_3_2, longerAccessToken) + tokenService.prependVersionAndEncodeAccessToken(TransportVersions.MINIMUM_COMPATIBLE, longerAccessToken) ); assertThat(invalidateResponse.invalidated(), equalTo(0)); assertThat(invalidateResponse.previouslyInvalidated(), equalTo(0)); @@ -365,7 +365,7 @@ public void testInvalidateNotValidAccessTokens() throws Exception { byte[] shorterAccessToken = new byte[randomIntBetween(12, 15)]; random().nextBytes(shorterAccessToken); invalidateResponse = invalidateAccessToken( - tokenService.prependVersionAndEncodeAccessToken(TransportVersions.V_7_3_2, shorterAccessToken) + tokenService.prependVersionAndEncodeAccessToken(TransportVersions.MINIMUM_COMPATIBLE, 
shorterAccessToken) ); assertThat(invalidateResponse.invalidated(), equalTo(0)); assertThat(invalidateResponse.previouslyInvalidated(), equalTo(0)); @@ -394,8 +394,8 @@ public void testInvalidateNotValidAccessTokens() throws Exception { invalidateResponse = invalidateAccessToken( tokenService.prependVersionAndEncodeAccessToken( - TransportVersions.V_7_3_2, - tokenService.getRandomTokenBytes(TransportVersions.V_7_3_2, randomBoolean()).v1() + TransportVersions.MINIMUM_COMPATIBLE, + tokenService.getRandomTokenBytes(TransportVersions.MINIMUM_COMPATIBLE, randomBoolean()).v1() ) ); assertThat(invalidateResponse.invalidated(), equalTo(0)); @@ -420,8 +420,8 @@ public void testInvalidateNotValidRefreshTokens() throws Exception { ResponseException.class, () -> invalidateRefreshToken( TokenService.prependVersionAndEncodeRefreshToken( - TransportVersions.V_7_3_2, - tokenService.getRandomTokenBytes(TransportVersions.V_7_3_2, true).v2() + TransportVersions.MINIMUM_COMPATIBLE, + tokenService.getRandomTokenBytes(TransportVersions.MINIMUM_COMPATIBLE, true).v2() ) ) ); @@ -441,7 +441,7 @@ public void testInvalidateNotValidRefreshTokens() throws Exception { byte[] longerRefreshToken = new byte[randomIntBetween(17, 24)]; random().nextBytes(longerRefreshToken); invalidateResponse = invalidateRefreshToken( - TokenService.prependVersionAndEncodeRefreshToken(TransportVersions.V_7_3_2, longerRefreshToken) + TokenService.prependVersionAndEncodeRefreshToken(TransportVersions.MINIMUM_COMPATIBLE, longerRefreshToken) ); assertThat(invalidateResponse.invalidated(), equalTo(0)); assertThat(invalidateResponse.previouslyInvalidated(), equalTo(0)); @@ -459,7 +459,7 @@ public void testInvalidateNotValidRefreshTokens() throws Exception { byte[] shorterRefreshToken = new byte[randomIntBetween(12, 15)]; random().nextBytes(shorterRefreshToken); invalidateResponse = invalidateRefreshToken( - TokenService.prependVersionAndEncodeRefreshToken(TransportVersions.V_7_3_2, shorterRefreshToken) + TokenService.prependVersionAndEncodeRefreshToken(TransportVersions.MINIMUM_COMPATIBLE, shorterRefreshToken) ); assertThat(invalidateResponse.invalidated(), equalTo(0)); assertThat(invalidateResponse.previouslyInvalidated(), equalTo(0)); @@ -488,8 +488,8 @@ public void testInvalidateNotValidRefreshTokens() throws Exception { invalidateResponse = invalidateRefreshToken( TokenService.prependVersionAndEncodeRefreshToken( - TransportVersions.V_7_3_2, - tokenService.getRandomTokenBytes(TransportVersions.V_7_3_2, true).v2() + TransportVersions.MINIMUM_COMPATIBLE, + tokenService.getRandomTokenBytes(TransportVersions.MINIMUM_COMPATIBLE, true).v2() ) ); assertThat(invalidateResponse.invalidated(), equalTo(0)); @@ -758,18 +758,11 @@ public void testAuthenticateWithWrongToken() throws Exception { assertAuthenticateWithToken(response.accessToken(), TEST_USER_NAME); // Now attempt to authenticate with an invalid access token string assertUnauthorizedToken(randomAlphaOfLengthBetween(0, 128)); - // Now attempt to authenticate with an invalid access token with valid structure (pre 7.2) + // Now attempt to authenticate with an invalid access token with valid structure (after 8.0 pre 8.10) assertUnauthorizedToken( tokenService.prependVersionAndEncodeAccessToken( - TransportVersions.V_7_1_0, - tokenService.getRandomTokenBytes(TransportVersions.V_7_1_0, randomBoolean()).v1() - ) - ); - // Now attempt to authenticate with an invalid access token with valid structure (after 7.2 pre 8.10) - assertUnauthorizedToken( - tokenService.prependVersionAndEncodeAccessToken( - 
TransportVersions.V_7_4_0, - tokenService.getRandomTokenBytes(TransportVersions.V_7_4_0, randomBoolean()).v1() + TransportVersions.V_8_0_0, + tokenService.getRandomTokenBytes(TransportVersions.V_8_0_0, randomBoolean()).v1() ) ); // Now attempt to authenticate with an invalid access token with valid structure (current version) diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java index 4f7ba7808b82..900436a1fd87 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java @@ -48,9 +48,7 @@ import org.elasticsearch.common.cache.CacheBuilder; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.InputStreamStreamInput; -import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -59,7 +57,6 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.Nullable; -import org.elasticsearch.core.Streams; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; @@ -93,10 +90,8 @@ import org.elasticsearch.xpack.security.support.SecurityIndexManager; import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; import java.io.Closeable; import java.io.IOException; -import java.io.OutputStream; import java.io.UncheckedIOException; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; @@ -132,7 +127,6 @@ import javax.crypto.Cipher; import javax.crypto.CipherInputStream; -import javax.crypto.CipherOutputStream; import javax.crypto.NoSuchPaddingException; import javax.crypto.SecretKey; import javax.crypto.SecretKeyFactory; @@ -201,14 +195,8 @@ public class TokenService { // UUIDs are 16 bytes encoded base64 without padding, therefore the length is (16 / 3) * 4 + ((16 % 3) * 8 + 5) / 6 chars private static final int TOKEN_LENGTH = 22; private static final String TOKEN_DOC_ID_PREFIX = TOKEN_DOC_TYPE + "_"; - static final int LEGACY_MINIMUM_BYTES = VERSION_BYTES + SALT_BYTES + IV_BYTES + 1; static final int MINIMUM_BYTES = VERSION_BYTES + TOKEN_LENGTH + 1; - static final int LEGACY_MINIMUM_BASE64_BYTES = Double.valueOf(Math.ceil((4 * LEGACY_MINIMUM_BYTES) / 3)).intValue(); public static final int MINIMUM_BASE64_BYTES = Double.valueOf(Math.ceil((4 * MINIMUM_BYTES) / 3)).intValue(); - static final TransportVersion VERSION_HASHED_TOKENS = TransportVersions.V_7_2_0; - static final TransportVersion VERSION_TOKENS_INDEX_INTRODUCED = TransportVersions.V_7_2_0; - static final TransportVersion VERSION_ACCESS_TOKENS_AS_UUIDS = TransportVersions.V_7_2_0; - static final TransportVersion VERSION_MULTIPLE_CONCURRENT_REFRESHES = TransportVersions.V_7_2_0; static final TransportVersion VERSION_CLIENT_AUTH_FOR_REFRESH = TransportVersions.V_8_2_0; static final TransportVersion VERSION_GET_TOKEN_DOC_FOR_REFRESH = TransportVersions.V_8_10_X; @@ -273,8 +261,7 @@ public TokenService( /** * Creates an access token 
and optionally a refresh token as well, based on the provided authentication and metadata with - auto-generated values. The created tokens are stored in the security index for versions up to - {@link #VERSION_TOKENS_INDEX_INTRODUCED} and to a specific security tokens index for later versions. + auto-generated values. The created tokens are stored in a specific security tokens index. */ public void createOAuth2Tokens( Authentication authentication, @@ -291,8 +278,7 @@ public void createOAuth2Tokens( /** * Creates an access token and optionally a refresh token as well from predefined values, based on the provided authentication and - metadata. The created tokens are stored in the security index for versions up to {@link #VERSION_TOKENS_INDEX_INTRODUCED} and to a - specific security tokens index for later versions. + metadata. The created tokens are stored in a specific security tokens index. */ // public for testing public void createOAuth2Tokens( @@ -314,21 +300,15 @@ public void createOAuth2Tokens( * * @param accessTokenBytes The predefined seed value for the access token. This will then be * <ul>
- * <li>Encrypted before stored for versions before {@link #VERSION_TOKENS_INDEX_INTRODUCED}</li>
- * <li>Hashed before stored for versions after {@link #VERSION_TOKENS_INDEX_INTRODUCED}</li>
- * <li>Stored in the security index for versions up to {@link #VERSION_TOKENS_INDEX_INTRODUCED}</li>
- * <li>Stored in a specific security tokens index for versions after
- * {@link #VERSION_TOKENS_INDEX_INTRODUCED}</li>
+ * <li>Hashed before stored</li>
+ * <li>Stored in a specific security tokens index</li>
+ * <li>Prepended with a version ID and Base64 encoded before returned to the caller of the APIs</li>
* </ul>
* @param refreshTokenBytes The predefined seed value for the refresh token. This will then be
* <ul>
- * <li>Hashed before stored for versions after {@link #VERSION_TOKENS_INDEX_INTRODUCED}</li>
- * <li>Stored in the security index for versions up to {@link #VERSION_TOKENS_INDEX_INTRODUCED}</li>
- * <li>Stored in a specific security tokens index for versions after
- * {@link #VERSION_TOKENS_INDEX_INTRODUCED}</li>
- * <li>Prepended with a version ID and encoded with Base64 before returned to the caller of the APIs
- * for versions after {@link #VERSION_TOKENS_INDEX_INTRODUCED}</li>
+ * <li>Hashed before stored</li>
+ * <li>Stored in a specific security tokens index</li>
+ * <li>Prepended with a version ID and Base64 encoded before returned to the caller of the APIs</li>
* </ul>
* @param tokenVersion The version of the nodes with which these tokens will be compatible. * @param authentication The authentication object representing the user for which the tokens are created @@ -384,7 +364,7 @@ private void createOAuth2Tokens( } else { refreshTokenToStore = refreshTokenToReturn = null; } - } else if (tokenVersion.onOrAfter(VERSION_HASHED_TOKENS)) { + } else { assert accessTokenBytes.length == RAW_TOKEN_BYTES_LENGTH; userTokenId = hashTokenString(Strings.BASE_64_NO_PADDING_URL_ENCODER.encodeToString(accessTokenBytes)); accessTokenToStore = null; @@ -395,18 +375,6 @@ private void createOAuth2Tokens( } else { refreshTokenToStore = refreshTokenToReturn = null; } - } else { - assert accessTokenBytes.length == RAW_TOKEN_BYTES_LENGTH; - userTokenId = Strings.BASE_64_NO_PADDING_URL_ENCODER.encodeToString(accessTokenBytes); - accessTokenToStore = null; - if (refreshTokenBytes != null) { - assert refreshTokenBytes.length == RAW_TOKEN_BYTES_LENGTH; - refreshTokenToStore = refreshTokenToReturn = Strings.BASE_64_NO_PADDING_URL_ENCODER.encodeToString( - refreshTokenBytes - ); - } else { - refreshTokenToStore = refreshTokenToReturn = null; - } } UserToken userToken = new UserToken(userTokenId, tokenVersion, tokenAuth, getExpirationTime(), metadata); tokenDocument = createTokenDocument(userToken, accessTokenToStore, refreshTokenToStore, originatingClientAuth); @@ -419,23 +387,22 @@ private void createOAuth2Tokens( final RefreshPolicy tokenCreationRefreshPolicy = tokenVersion.onOrAfter(VERSION_GET_TOKEN_DOC_FOR_REFRESH) ? RefreshPolicy.NONE : RefreshPolicy.WAIT_UNTIL; - final SecurityIndexManager tokensIndex = getTokensIndexForVersion(tokenVersion); logger.debug( () -> format( "Using refresh policy [%s] when creating token doc [%s] in the security index [%s]", tokenCreationRefreshPolicy, documentId, - tokensIndex.aliasName() + securityTokensIndex.aliasName() ) ); - final IndexRequest indexTokenRequest = client.prepareIndex(tokensIndex.aliasName()) + final IndexRequest indexTokenRequest = client.prepareIndex(securityTokensIndex.aliasName()) .setId(documentId) .setOpType(OpType.CREATE) .setSource(tokenDocument, XContentType.JSON) .setRefreshPolicy(tokenCreationRefreshPolicy) .request(); - tokensIndex.prepareIndexIfNeededThenExecute( - ex -> listener.onFailure(traceLog("prepare tokens index [" + tokensIndex.aliasName() + "]", documentId, ex)), + securityTokensIndex.prepareIndexIfNeededThenExecute( + ex -> listener.onFailure(traceLog("prepare tokens index [" + securityTokensIndex.aliasName() + "]", documentId, ex)), () -> executeAsyncWithOrigin( client, SECURITY_ORIGIN, @@ -554,17 +521,16 @@ private void getTokenDocById( @Nullable String storedRefreshToken, ActionListener listener ) { - final SecurityIndexManager tokensIndex = getTokensIndexForVersion(tokenVersion); - final SecurityIndexManager frozenTokensIndex = tokensIndex.defensiveCopy(); + final SecurityIndexManager frozenTokensIndex = securityTokensIndex.defensiveCopy(); if (frozenTokensIndex.isAvailable(PRIMARY_SHARDS) == false) { - logger.warn("failed to get access token [{}] because index [{}] is not available", tokenId, tokensIndex.aliasName()); + logger.warn("failed to get access token [{}] because index [{}] is not available", tokenId, securityTokensIndex.aliasName()); listener.onFailure(frozenTokensIndex.getUnavailableReason(PRIMARY_SHARDS)); return; } - final GetRequest getRequest = client.prepareGet(tokensIndex.aliasName(), getTokenDocumentId(tokenId)).request(); + final GetRequest getRequest = 
client.prepareGet(securityTokensIndex.aliasName(), getTokenDocumentId(tokenId)).request(); final Consumer onFailure = ex -> listener.onFailure(traceLog("get token from id", tokenId, ex)); - tokensIndex.checkIndexVersionThenExecute( - ex -> listener.onFailure(traceLog("prepare tokens index [" + tokensIndex.aliasName() + "]", tokenId, ex)), + securityTokensIndex.checkIndexVersionThenExecute( + ex -> listener.onFailure(traceLog("prepare tokens index [" + securityTokensIndex.aliasName() + "]", tokenId, ex)), () -> executeAsyncWithOrigin( client.threadPool().getThreadContext(), SECURITY_ORIGIN, @@ -610,7 +576,11 @@ private void getTokenDocById( // if the index or the shard is not there / available we assume that // the token is not valid if (isShardNotAvailableException(e)) { - logger.warn("failed to get token doc [{}] because index [{}] is not available", tokenId, tokensIndex.aliasName()); + logger.warn( + "failed to get token doc [{}] because index [{}] is not available", + tokenId, + securityTokensIndex.aliasName() + ); } else { logger.error(() -> "failed to get token doc [" + tokenId + "]", e); } @@ -650,7 +620,7 @@ void decodeToken(String token, boolean validateUserToken, ActionListener VERSION_ACCESS_TOKENS_UUIDS cluster if (in.available() < MINIMUM_BYTES) { logger.debug("invalid token, smaller than [{}] bytes", MINIMUM_BYTES); @@ -660,41 +630,6 @@ void decodeToken(String token, boolean validateUserToken, ActionListener { - if (decodeKey != null) { - try { - final Cipher cipher = getDecryptionCipher(iv, decodeKey, version, decodedSalt); - final String tokenId = decryptTokenId(encryptedTokenId, cipher, version); - getAndValidateUserToken(tokenId, version, null, validateUserToken, listener); - } catch (IOException | GeneralSecurityException e) { - // could happen with a token that is not ours - logger.warn("invalid token", e); - listener.onResponse(null); - } - } else { - // could happen with a token that is not ours - listener.onResponse(null); - } - }, listener::onFailure)); - } else { - logger.debug(() -> format("invalid key %s key: %s", passphraseHash, keyCache.cache.keySet())); - listener.onResponse(null); - } } } catch (Exception e) { // could happen with a token that is not ours @@ -852,11 +787,7 @@ private void indexInvalidation( final Set idsOfOlderTokens = new HashSet<>(); boolean anyOlderTokensBeforeRefreshViaGet = false; for (UserToken userToken : userTokens) { - if (userToken.getTransportVersion().onOrAfter(VERSION_TOKENS_INDEX_INTRODUCED)) { - idsOfRecentTokens.add(userToken.getId()); - } else { - idsOfOlderTokens.add(userToken.getId()); - } + idsOfRecentTokens.add(userToken.getId()); anyOlderTokensBeforeRefreshViaGet |= userToken.getTransportVersion().before(VERSION_GET_TOKEN_DOC_FOR_REFRESH); } final RefreshPolicy tokensInvalidationRefreshPolicy = anyOlderTokensBeforeRefreshViaGet @@ -1124,7 +1055,7 @@ private void findTokenFromRefreshToken(String refreshToken, Iterator ); getTokenDocById(userTokenId, version, null, storedRefreshToken, listener); } - } else if (version.onOrAfter(VERSION_HASHED_TOKENS)) { + } else { final String unencodedRefreshToken = in.readString(); if (unencodedRefreshToken.length() != TOKEN_LENGTH) { logger.debug("Decoded refresh token [{}] with version [{}] is invalid.", unencodedRefreshToken, version); @@ -1133,9 +1064,6 @@ private void findTokenFromRefreshToken(String refreshToken, Iterator final String hashedRefreshToken = hashTokenString(unencodedRefreshToken); findTokenFromRefreshToken(hashedRefreshToken, securityTokensIndex, backoff, listener); } 
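An aside for readers tracing the refresh-token path above: the raw token string coming off the wire is never used as a document id directly; it is hashed first, and only the hash is looked up in the tokens index, so the plaintext is never persisted. Below is a minimal, self-contained sketch of that hash-then-lookup idea, assuming SHA-256 with unpadded URL-safe Base64; the class and the exact digest/encoding are illustrative, not the literal `hashTokenString` implementation.

[source,java]
----
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Base64;

final class TokenHashSketch {

    // Stand-in for the hashTokenString idea: a deterministic, one-way mapping
    // from the client-held token string to the identifier stored in the index.
    static String hashTokenString(String rawToken) {
        try {
            MessageDigest sha256 = MessageDigest.getInstance("SHA-256");
            byte[] hash = sha256.digest(rawToken.getBytes(StandardCharsets.UTF_8));
            return Base64.getUrlEncoder().withoutPadding().encodeToString(hash);
        } catch (NoSuchAlgorithmException e) {
            throw new AssertionError("SHA-256 is mandatory on every JVM", e);
        }
    }

    public static void main(String[] args) {
        // The same plaintext always maps to the same stored id, so lookups work,
        // but a leaked index never reveals a token a client could replay.
        System.out.println(hashTokenString("2olHJhYeZ7wVZCZgcEF0Ui"));
    }
}
----

This is also why the code above can length-check the decoded string against TOKEN_LENGTH before hashing: anything of the wrong length can be rejected without ever touching the index.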
- } else { - logger.debug("Unrecognized refresh token version [{}].", version); - listener.onResponse(null); } } catch (IOException e) { logger.debug(() -> "Could not decode refresh token [" + refreshToken + "].", e); @@ -1250,7 +1178,6 @@ private void innerRefresh( return; } final RefreshTokenStatus refreshTokenStatus = checkRefreshResult.v1(); - final SecurityIndexManager refreshedTokenIndex = getTokensIndexForVersion(refreshTokenStatus.getTransportVersion()); if (refreshTokenStatus.isRefreshed()) { logger.debug( "Token document [{}] was recently refreshed, when a new token document was generated. Reusing that result.", @@ -1258,31 +1185,29 @@ private void innerRefresh( ); final Tuple parsedTokens = parseTokensFromDocument(tokenDoc.sourceAsMap(), null); Authentication authentication = parsedTokens.v1().getAuthentication(); - decryptAndReturnSupersedingTokens(refreshToken, refreshTokenStatus, refreshedTokenIndex, authentication, listener); + decryptAndReturnSupersedingTokens(refreshToken, refreshTokenStatus, securityTokensIndex, authentication, listener); } else { final TransportVersion newTokenVersion = getTokenVersionCompatibility(); final Tuple newTokenBytes = getRandomTokenBytes(newTokenVersion, true); final Map updateMap = new HashMap<>(); updateMap.put("refreshed", true); - if (newTokenVersion.onOrAfter(VERSION_MULTIPLE_CONCURRENT_REFRESHES)) { - updateMap.put("refresh_time", clock.instant().toEpochMilli()); - try { - final byte[] iv = getRandomBytes(IV_BYTES); - final byte[] salt = getRandomBytes(SALT_BYTES); - String encryptedAccessAndRefreshToken = encryptSupersedingTokens( - newTokenBytes.v1(), - newTokenBytes.v2(), - refreshToken, - iv, - salt - ); - updateMap.put("superseding.encrypted_tokens", encryptedAccessAndRefreshToken); - updateMap.put("superseding.encryption_iv", Base64.getEncoder().encodeToString(iv)); - updateMap.put("superseding.encryption_salt", Base64.getEncoder().encodeToString(salt)); - } catch (GeneralSecurityException e) { - logger.warn("could not encrypt access token and refresh token string", e); - onFailure.accept(invalidGrantException("could not refresh the requested token")); - } + updateMap.put("refresh_time", clock.instant().toEpochMilli()); + try { + final byte[] iv = getRandomBytes(IV_BYTES); + final byte[] salt = getRandomBytes(SALT_BYTES); + String encryptedAccessAndRefreshToken = encryptSupersedingTokens( + newTokenBytes.v1(), + newTokenBytes.v2(), + refreshToken, + iv, + salt + ); + updateMap.put("superseding.encrypted_tokens", encryptedAccessAndRefreshToken); + updateMap.put("superseding.encryption_iv", Base64.getEncoder().encodeToString(iv)); + updateMap.put("superseding.encryption_salt", Base64.getEncoder().encodeToString(salt)); + } catch (GeneralSecurityException e) { + logger.warn("could not encrypt access token and refresh token string", e); + onFailure.accept(invalidGrantException("could not refresh the requested token")); } assert tokenDoc.seqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO : "expected an assigned sequence number"; assert tokenDoc.primaryTerm() != SequenceNumbers.UNASSIGNED_PRIMARY_TERM : "expected an assigned primary term"; @@ -1293,17 +1218,17 @@ private void innerRefresh( "Using refresh policy [%s] when updating token doc [%s] for refresh in the security index [%s]", tokenRefreshUpdateRefreshPolicy, tokenDoc.id(), - refreshedTokenIndex.aliasName() + securityTokensIndex.aliasName() ) ); - final UpdateRequestBuilder updateRequest = client.prepareUpdate(refreshedTokenIndex.aliasName(), tokenDoc.id()) + final UpdateRequestBuilder 
updateRequest = client.prepareUpdate(securityTokensIndex.aliasName(), tokenDoc.id()) .setDoc("refresh_token", updateMap) .setFetchSource(logger.isDebugEnabled()) .setRefreshPolicy(tokenRefreshUpdateRefreshPolicy) .setIfSeqNo(tokenDoc.seqNo()) .setIfPrimaryTerm(tokenDoc.primaryTerm()); - refreshedTokenIndex.prepareIndexIfNeededThenExecute( - ex -> listener.onFailure(traceLog("prepare index [" + refreshedTokenIndex.aliasName() + "]", ex)), + securityTokensIndex.prepareIndexIfNeededThenExecute( + ex -> listener.onFailure(traceLog("prepare index [" + securityTokensIndex.aliasName() + "]", ex)), () -> executeAsyncWithOrigin( client.threadPool().getThreadContext(), SECURITY_ORIGIN, @@ -1349,7 +1274,7 @@ private void innerRefresh( if (cause instanceof VersionConflictEngineException) { // The document has been updated by another thread, get it again. logger.debug("version conflict while updating document [{}], attempting to get it again", tokenDoc.id()); - getTokenDocAsync(tokenDoc.id(), refreshedTokenIndex, true, new ActionListener<>() { + getTokenDocAsync(tokenDoc.id(), securityTokensIndex, true, new ActionListener<>() { @Override public void onResponse(GetResponse response) { if (response.isExists()) { @@ -1368,7 +1293,7 @@ public void onFailure(Exception e) { logger.info("could not get token document [{}] for refresh, retrying", tokenDoc.id()); client.threadPool() .schedule( - () -> getTokenDocAsync(tokenDoc.id(), refreshedTokenIndex, true, this), + () -> getTokenDocAsync(tokenDoc.id(), securityTokensIndex, true, this), backoff.next(), client.threadPool().generic() ); @@ -1689,17 +1614,13 @@ private static Optional checkMultipleRefreshes( RefreshTokenStatus refreshTokenStatus ) { if (refreshTokenStatus.isRefreshed()) { - if (refreshTokenStatus.getTransportVersion().onOrAfter(VERSION_MULTIPLE_CONCURRENT_REFRESHES)) { - if (refreshRequested.isAfter(refreshTokenStatus.getRefreshInstant().plus(30L, ChronoUnit.SECONDS))) { - return Optional.of(invalidGrantException("token has already been refreshed more than 30 seconds in the past")); - } - if (refreshRequested.isBefore(refreshTokenStatus.getRefreshInstant().minus(30L, ChronoUnit.SECONDS))) { - return Optional.of( - invalidGrantException("token has been refreshed more than 30 seconds in the future, clock skew too great") - ); - } - } else { - return Optional.of(invalidGrantException("token has already been refreshed")); + if (refreshRequested.isAfter(refreshTokenStatus.getRefreshInstant().plus(30L, ChronoUnit.SECONDS))) { + return Optional.of(invalidGrantException("token has already been refreshed more than 30 seconds in the past")); + } + if (refreshRequested.isBefore(refreshTokenStatus.getRefreshInstant().minus(30L, ChronoUnit.SECONDS))) { + return Optional.of( + invalidGrantException("token has been refreshed more than 30 seconds in the future, clock skew too great") + ); } } return Optional.empty(); @@ -1979,21 +1900,6 @@ private void ensureEnabled() { } } - /** - * In version {@code #VERSION_TOKENS_INDEX_INTRODUCED} security tokens were moved into a separate index, away from the other entities in - * the main security index, due to their ephemeral nature. They moved "seamlessly" - without manual user intervention. In this way, new - * tokens are created in the new index, while the existing ones were left in place - to be accessed from the old index - and due to be - * removed automatically by the {@code ExpiredTokenRemover} periodic job. Therefore, in general, when searching for a token we need to - * consider both the new and the old indices. 
- */ - private SecurityIndexManager getTokensIndexForVersion(TransportVersion version) { - if (version.onOrAfter(VERSION_TOKENS_INDEX_INTRODUCED)) { - return securityTokensIndex; - } else { - return securityMainIndex; - } - } - public TimeValue getExpirationDelay() { return expirationDelay; } @@ -2022,41 +1928,13 @@ public String prependVersionAndEncodeAccessToken(TransportVersion version, byte[ out.writeByteArray(accessTokenBytes); return Base64.getEncoder().encodeToString(out.bytes().toBytesRef().bytes); } - } else if (version.onOrAfter(VERSION_ACCESS_TOKENS_AS_UUIDS)) { + } else { try (BytesStreamOutput out = new BytesStreamOutput(MINIMUM_BASE64_BYTES)) { out.setTransportVersion(version); TransportVersion.writeVersion(version, out); out.writeString(Strings.BASE_64_NO_PADDING_URL_ENCODER.encodeToString(accessTokenBytes)); return Base64.getEncoder().encodeToString(out.bytes().toBytesRef().bytes); } - } else { - // we know that the minimum length is larger than the default of the ByteArrayOutputStream so set the size to this explicitly - try ( - ByteArrayOutputStream os = new ByteArrayOutputStream(LEGACY_MINIMUM_BASE64_BYTES); - OutputStream base64 = Base64.getEncoder().wrap(os); - StreamOutput out = new OutputStreamStreamOutput(base64) - ) { - out.setTransportVersion(version); - KeyAndCache keyAndCache = keyCache.activeKeyCache; - TransportVersion.writeVersion(version, out); - out.writeByteArray(keyAndCache.getSalt().bytes); - out.writeByteArray(keyAndCache.getKeyHash().bytes); - final byte[] initializationVector = getRandomBytes(IV_BYTES); - out.writeByteArray(initializationVector); - try ( - CipherOutputStream encryptedOutput = new CipherOutputStream( - out, - getEncryptionCipher(initializationVector, keyAndCache, version) - ); - StreamOutput encryptedStreamOutput = new OutputStreamStreamOutput(encryptedOutput) - ) { - encryptedStreamOutput.setTransportVersion(version); - encryptedStreamOutput.writeString(Strings.BASE_64_NO_PADDING_URL_ENCODER.encodeToString(accessTokenBytes)); - // StreamOutput needs to be closed explicitly because it wraps CipherOutputStream - encryptedStreamOutput.close(); - return new String(os.toByteArray(), StandardCharsets.UTF_8); - } - } } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java index 75c2507a1dc5..702af7514109 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java @@ -126,7 +126,6 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyString; @@ -148,7 +147,6 @@ public class TokenServiceTests extends ESTestCase { private SecurityIndexManager securityMainIndex; private SecurityIndexManager securityTokensIndex; private ClusterService clusterService; - private DiscoveryNode pre72OldNode; private DiscoveryNode pre8500040OldNode; private Settings tokenServiceEnabledSettings = Settings.builder() .put(XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.getKey(), true) @@ -228,31 +226,12 @@ public void setupClient() { licenseState = mock(MockLicenseState.class); 
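The test-infrastructure deletions that follow mirror the production cleanup above: once the oldest transport version a node can negotiate (MINIMUM_COMPATIBLE) is at or beyond the version that introduced a behavior, every onOrAfter guard against that version is constantly true and its else arm is dead code. A toy illustration of the idiom, using a made-up WireVersion record rather than the real TransportVersion class (the version ids are invented):

[source,java]
----
final class VersionGateSketch {

    // Made-up stand-in for TransportVersion; ids are illustrative only.
    record WireVersion(int id) {
        boolean onOrAfter(WireVersion other) {
            return id >= other.id;
        }
    }

    static final WireVersion MINIMUM_COMPATIBLE = new WireVersion(8_000_099);
    static final WireVersion VERSION_HASHED_TOKENS = new WireVersion(7_020_099);

    public static void main(String[] args) {
        // Any version a peer can negotiate is >= MINIMUM_COMPATIBLE, so once the
        // floor passes the feature's introduction this guard cannot be false and
        // the legacy branch (and the tests that exercised it) can be deleted.
        WireVersion negotiated = MINIMUM_COMPATIBLE;
        if (negotiated.onOrAfter(VERSION_HASHED_TOKENS)) {
            System.out.println("modern path: tokens are hashed before storage");
        } else {
            System.out.println("legacy path: unreachable once the floor passes 7.2");
        }
    }
}
----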
@@ -228,31 +226,12 @@ public void setupClient() {
         licenseState = mock(MockLicenseState.class);
         when(licenseState.isAllowed(Security.TOKEN_SERVICE_FEATURE)).thenReturn(true);
-        if (randomBoolean()) {
-            // version 7.2 was an "inflection" point in the Token Service development (access_tokens as UUIDS, multiple concurrent
-            // refreshes,
-            // tokens docs on a separate index)
-            pre72OldNode = addAnother7071DataNode(this.clusterService);
-        }
         if (randomBoolean()) {
             // before refresh tokens used GET, i.e. TokenService#VERSION_GET_TOKEN_DOC_FOR_REFRESH
             pre8500040OldNode = addAnotherPre8500DataNode(this.clusterService);
         }
     }
 
-    private static DiscoveryNode addAnother7071DataNode(ClusterService clusterService) {
-        Version version;
-        TransportVersion transportVersion;
-        if (randomBoolean()) {
-            version = Version.V_7_0_0;
-            transportVersion = TransportVersions.V_7_0_0;
-        } else {
-            version = Version.V_7_1_0;
-            transportVersion = TransportVersions.V_7_1_0;
-        }
-        return addAnotherDataNodeWithVersion(clusterService, version, transportVersion);
-    }
-
     private static DiscoveryNode addAnotherPre8500DataNode(ClusterService clusterService) {
         Version version;
         TransportVersion transportVersion;
@@ -301,53 +280,6 @@ public static void shutdownThreadpool() {
         threadPool = null;
     }
 
-    public void testAttachAndGetToken() throws Exception {
-        TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC());
-        // This test only makes sense in mixed clusters with pre v7.2.0 nodes where the Token Service Key is used (to encrypt tokens)
-        if (null == pre72OldNode) {
-            pre72OldNode = addAnother7071DataNode(this.clusterService);
-        }
-        Authentication authentication = AuthenticationTestHelper.builder()
-            .user(new User("joe", "admin"))
-            .realmRef(new RealmRef("native_realm", "native", "node1"))
-            .build(false);
-        PlainActionFuture<TokenService.CreateTokenResult> tokenFuture = new PlainActionFuture<>();
-        Tuple<byte[], byte[]> newTokenBytes = tokenService.getRandomTokenBytes(randomBoolean());
-        tokenService.createOAuth2Tokens(
-            newTokenBytes.v1(),
-            newTokenBytes.v2(),
-            authentication,
-            authentication,
-            Collections.emptyMap(),
-            tokenFuture
-        );
-        final String accessToken = tokenFuture.get().getAccessToken();
-        assertNotNull(accessToken);
-        mockGetTokenFromAccessTokenBytes(tokenService, newTokenBytes.v1(), authentication, false, null);
-
-        ThreadContext requestContext = new ThreadContext(Settings.EMPTY);
-        requestContext.putHeader("Authorization", randomFrom("Bearer ", "BEARER ", "bearer ") + accessToken);
-
-        try (ThreadContext.StoredContext ignore = requestContext.newStoredContextPreservingResponseHeaders()) {
-            PlainActionFuture<UserToken> future = new PlainActionFuture<>();
-            final SecureString bearerToken = Authenticator.extractBearerTokenFromHeader(requestContext);
-            tokenService.tryAuthenticateToken(bearerToken, future);
-            UserToken serialized = future.get();
-            assertAuthentication(authentication, serialized.getAuthentication());
-        }
-
-        try (ThreadContext.StoredContext ignore = requestContext.newStoredContextPreservingResponseHeaders()) {
-            // verify a second separate token service with its own salt can also verify
-            TokenService anotherService = createTokenService(tokenServiceEnabledSettings, systemUTC());
-            anotherService.refreshMetadata(tokenService.getTokenMetadata());
-            PlainActionFuture<UserToken> future = new PlainActionFuture<>();
-            final SecureString bearerToken = Authenticator.extractBearerTokenFromHeader(requestContext);
-            anotherService.tryAuthenticateToken(bearerToken, future);
-            UserToken fromOtherService = future.get();
-            assertAuthentication(authentication, fromOtherService.getAuthentication());
-        }
-    }
-
     public void testInvalidAuthorizationHeader() throws Exception {
         TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC());
         ThreadContext requestContext = new ThreadContext(Settings.EMPTY);
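The deleted `testAttachAndGetToken` exercised the `Authorization` header round trip against a mixed pre-7.2 cluster; the header parsing itself remains covered by the surviving tests. As a reminder of the behaviour those tests rely on (`randomFrom("Bearer ", "BEARER ", "bearer ")`), here is a simplified, case-insensitive scheme check; it is a stand-in for `Authenticator.extractBearerTokenFromHeader`, not the actual implementation:

```java
import java.util.Locale;

final class BearerHeader {
    /**
     * Extracts the token from an "Authorization: Bearer <token>" value,
     * accepting any capitalization of the scheme; returns null when the
     * header does not carry a bearer token.
     */
    static String extractBearerToken(String authorizationHeader) {
        final String scheme = "bearer ";
        if (authorizationHeader == null || authorizationHeader.length() <= scheme.length()) {
            return null;
        }
        String prefix = authorizationHeader.substring(0, scheme.length()).toLowerCase(Locale.ROOT);
        return prefix.equals(scheme) ? authorizationHeader.substring(scheme.length()) : null;
    }
}
```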
@@ -364,89 +296,6 @@ public void testInvalidAuthorizationHeader() throws Exception {
         }
     }
 
-    public void testPassphraseWorks() throws Exception {
-        TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC());
-        // This test only makes sense in mixed clusters with pre v7.1.0 nodes where the Key is actually used
-        if (null == pre72OldNode) {
-            pre72OldNode = addAnother7071DataNode(this.clusterService);
-        }
-        Authentication authentication = AuthenticationTestHelper.builder()
-            .user(new User("joe", "admin"))
-            .realmRef(new RealmRef("native_realm", "native", "node1"))
-            .build(false);
-        PlainActionFuture<TokenService.CreateTokenResult> tokenFuture = new PlainActionFuture<>();
-        Tuple<byte[], byte[]> newTokenBytes = tokenService.getRandomTokenBytes(randomBoolean());
-        tokenService.createOAuth2Tokens(
-            newTokenBytes.v1(),
-            newTokenBytes.v2(),
-            authentication,
-            authentication,
-            Collections.emptyMap(),
-            tokenFuture
-        );
-        final String accessToken = tokenFuture.get().getAccessToken();
-        assertNotNull(accessToken);
-        mockGetTokenFromAccessTokenBytes(tokenService, newTokenBytes.v1(), authentication, false, null);
-
-        ThreadContext requestContext = new ThreadContext(Settings.EMPTY);
-        storeTokenHeader(requestContext, accessToken);
-
-        try (ThreadContext.StoredContext ignore = requestContext.newStoredContextPreservingResponseHeaders()) {
-            PlainActionFuture<UserToken> future = new PlainActionFuture<>();
-            final SecureString bearerToken = Authenticator.extractBearerTokenFromHeader(requestContext);
-            tokenService.tryAuthenticateToken(bearerToken, future);
-            UserToken serialized = future.get();
-            assertAuthentication(authentication, serialized.getAuthentication());
-        }
-
-        try (ThreadContext.StoredContext ignore = requestContext.newStoredContextPreservingResponseHeaders()) {
-            // verify a second separate token service with its own passphrase cannot verify
-            TokenService anotherService = createTokenService(tokenServiceEnabledSettings, systemUTC());
-            PlainActionFuture<UserToken> future = new PlainActionFuture<>();
-            final SecureString bearerToken = Authenticator.extractBearerTokenFromHeader(requestContext);
-            anotherService.tryAuthenticateToken(bearerToken, future);
-            assertNull(future.get());
-        }
-    }
-
-    public void testGetTokenWhenKeyCacheHasExpired() throws Exception {
-        TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC());
-        // This test only makes sense in mixed clusters with pre v7.1.0 nodes where the Key is actually used
-        if (null == pre72OldNode) {
-            pre72OldNode = addAnother7071DataNode(this.clusterService);
-        }
-        Authentication authentication = AuthenticationTestHelper.builder()
-            .user(new User("joe", "admin"))
-            .realmRef(new RealmRef("native_realm", "native", "node1"))
-            .build(false);
-
-        PlainActionFuture<TokenService.CreateTokenResult> tokenFuture = new PlainActionFuture<>();
-        Tuple<byte[], byte[]> newTokenBytes = tokenService.getRandomTokenBytes(randomBoolean());
-        tokenService.createOAuth2Tokens(
-            newTokenBytes.v1(),
-            newTokenBytes.v2(),
-            authentication,
-            authentication,
-            Collections.emptyMap(),
-            tokenFuture
-        );
-        String accessToken = tokenFuture.get().getAccessToken();
-        assertThat(accessToken, notNullValue());
-
-        tokenService.clearActiveKeyCache();
-
-        tokenService.createOAuth2Tokens(
-            newTokenBytes.v1(),
-            newTokenBytes.v2(),
-            authentication,
-            authentication,
-            Collections.emptyMap(),
-            tokenFuture
-        );
-        accessToken = tokenFuture.get().getAccessToken();
-        assertThat(accessToken, notNullValue());
-    }
-
     public void testAuthnWithInvalidatedToken() throws Exception {
         when(securityMainIndex.indexExists()).thenReturn(true);
         TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC());
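Both tests deleted above hinged on the legacy passphrase-derived encryption key: a `TokenService` configured with a different passphrase (or whose key cache had been cleared) could not verify tokens minted elsewhere. A generic sketch of that failure mode, using the JDK's PBKDF2 rather than Elasticsearch's actual key-derivation code; the iteration count, key length, and salt are arbitrary demo values:

```java
import javax.crypto.SecretKeyFactory;
import javax.crypto.spec.PBEKeySpec;
import java.nio.charset.StandardCharsets;
import java.security.GeneralSecurityException;
import java.util.Arrays;

final class PassphraseKeys {
    /** Derives an AES-sized key from a passphrase and salt via PBKDF2. */
    static byte[] deriveKey(char[] passphrase, byte[] salt) throws GeneralSecurityException {
        PBEKeySpec spec = new PBEKeySpec(passphrase, salt, 10_000, 128);
        return SecretKeyFactory.getInstance("PBKDF2WithHmacSHA512").generateSecret(spec).getEncoded();
    }

    public static void main(String[] args) throws GeneralSecurityException {
        byte[] salt = "fixed-demo-salt".getBytes(StandardCharsets.UTF_8);
        byte[] keyA = deriveKey("passphrase-a".toCharArray(), salt);
        byte[] keyB = deriveKey("passphrase-b".toCharArray(), salt);
        // Different passphrases yield different keys, so ciphertext produced
        // under keyA is undecipherable under keyB - the deleted tests asserted
        // exactly this cross-service verification failure.
        System.out.println("keys equal: " + Arrays.equals(keyA, keyB)); // false
    }
}
```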
@@ -820,57 +669,6 @@ public void testMalformedRefreshTokens() throws Exception {
         }
     }
 
-    public void testNonExistingPre72Token() throws Exception {
-        TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC());
-        // mock another random token so that we don't find a token in TokenService#getUserTokenFromId
-        Authentication authentication = AuthenticationTestHelper.builder()
-            .user(new User("joe", "admin"))
-            .realmRef(new RealmRef("native_realm", "native", "node1"))
-            .build(false);
-        mockGetTokenFromAccessTokenBytes(tokenService, tokenService.getRandomTokenBytes(randomBoolean()).v1(), authentication, false, null);
-        ThreadContext requestContext = new ThreadContext(Settings.EMPTY);
-        storeTokenHeader(
-            requestContext,
-            tokenService.prependVersionAndEncodeAccessToken(
-                TransportVersions.V_7_1_0,
-                tokenService.getRandomTokenBytes(TransportVersions.V_7_1_0, randomBoolean()).v1()
-            )
-        );
-
-        try (ThreadContext.StoredContext ignore = requestContext.newStoredContextPreservingResponseHeaders()) {
-            PlainActionFuture<UserToken> future = new PlainActionFuture<>();
-            final SecureString bearerToken = Authenticator.extractBearerTokenFromHeader(requestContext);
-            tokenService.tryAuthenticateToken(bearerToken, future);
-            assertNull(future.get());
-        }
-    }
-
-    public void testNonExistingUUIDToken() throws Exception {
-        TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC());
-        // mock another random token so that we don't find a token in TokenService#getUserTokenFromId
-        Authentication authentication = AuthenticationTestHelper.builder()
-            .user(new User("joe", "admin"))
-            .realmRef(new RealmRef("native_realm", "native", "node1"))
-            .build(false);
-        mockGetTokenFromAccessTokenBytes(tokenService, tokenService.getRandomTokenBytes(randomBoolean()).v1(), authentication, false, null);
-        ThreadContext requestContext = new ThreadContext(Settings.EMPTY);
-        TransportVersion uuidTokenVersion = randomFrom(TransportVersions.V_7_2_0, TransportVersions.V_7_3_2);
-        storeTokenHeader(
-            requestContext,
-            tokenService.prependVersionAndEncodeAccessToken(
-                uuidTokenVersion,
-                tokenService.getRandomTokenBytes(uuidTokenVersion, randomBoolean()).v1()
-            )
-        );
-
-        try (ThreadContext.StoredContext ignore = requestContext.newStoredContextPreservingResponseHeaders()) {
-            PlainActionFuture<UserToken> future = new PlainActionFuture<>();
-            final SecureString bearerToken = Authenticator.extractBearerTokenFromHeader(requestContext);
-            tokenService.tryAuthenticateToken(bearerToken, future);
-            assertNull(future.get());
-        }
-    }
-
     public void testNonExistingLatestTokenVersion() throws Exception {
         TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC());
         // mock another random token so that we don't find a token in TokenService#getUserTokenFromId
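Both deleted tests fed the service tokens stamped with transport versions it no longer understands and asserted that authentication fails quietly (`assertNull(future.get())`) rather than with an exception. Here is a rough decode-side counterpart to the earlier encode sketch, with the same `int`-for-`TransportVersion` simplification; the cutoff constant is a hypothetical placeholder:

```java
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.util.Base64;

final class TokenDecoding {
    // Hypothetical minimum supported version id, for illustration only.
    static final int MINIMUM_SUPPORTED_VERSION_ID = 8_500_040;

    /**
     * Reads the version prefix and returns the embedded token string, or null
     * when the version is obsolete - authentication then simply fails.
     */
    static String decode(String encodedToken) throws IOException {
        byte[] frame = Base64.getDecoder().decode(encodedToken);
        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(frame))) {
            int versionId = in.readInt(); // stand-in for TransportVersion.readVersion
            if (versionId < MINIMUM_SUPPORTED_VERSION_ID) {
                return null;
            }
            return in.readUTF(); // url-safe base64 of the raw token bytes
        }
    }
}
```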
@@ -925,18 +723,11 @@ public void testIndexNotAvailable() throws Exception {
             return Void.TYPE;
         }).when(client).get(any(GetRequest.class), anyActionListener());
 
-        final SecurityIndexManager tokensIndex;
-        if (pre72OldNode != null) {
-            tokensIndex = securityMainIndex;
-            when(securityTokensIndex.isAvailable(SecurityIndexManager.Availability.PRIMARY_SHARDS)).thenReturn(false);
-            when(securityTokensIndex.indexExists()).thenReturn(false);
-            when(securityTokensIndex.defensiveCopy()).thenReturn(securityTokensIndex);
-        } else {
-            tokensIndex = securityTokensIndex;
-            when(securityMainIndex.isAvailable(SecurityIndexManager.Availability.PRIMARY_SHARDS)).thenReturn(false);
-            when(securityMainIndex.indexExists()).thenReturn(false);
-            when(securityMainIndex.defensiveCopy()).thenReturn(securityMainIndex);
-        }
+        final SecurityIndexManager tokensIndex = securityTokensIndex;
+        when(securityMainIndex.isAvailable(SecurityIndexManager.Availability.PRIMARY_SHARDS)).thenReturn(false);
+        when(securityMainIndex.indexExists()).thenReturn(false);
+        when(securityMainIndex.defensiveCopy()).thenReturn(securityMainIndex);
+
         try (ThreadContext.StoredContext ignore = requestContext.newStoredContextPreservingResponseHeaders()) {
             PlainActionFuture<UserToken> future = new PlainActionFuture<>();
             final SecureString bearerToken3 = Authenticator.extractBearerTokenFromHeader(requestContext);
@@ -988,7 +779,6 @@ public void testGetAuthenticationWorksWithExpiredUserToken() throws Exception {
     }
 
     public void testSupersedingTokenEncryption() throws Exception {
-        assumeTrue("Superseding tokens are only created in post 7.2 clusters", pre72OldNode == null);
         TokenService tokenService = createTokenService(tokenServiceEnabledSettings, Clock.systemUTC());
         Authentication authentication = AuthenticationTests.randomAuthentication(null, null);
         PlainActionFuture<TokenService.CreateTokenResult> tokenFuture = new PlainActionFuture<>();
@@ -1023,13 +813,11 @@
             authentication,
             tokenFuture
         );
-        if (version.onOrAfter(TokenService.VERSION_ACCESS_TOKENS_AS_UUIDS)) {
-            // previous versions serialized the access token encrypted and the cipher text was different each time (due to different IVs)
-            assertThat(
-                tokenService.prependVersionAndEncodeAccessToken(version, newTokenBytes.v1()),
-                equalTo(tokenFuture.get().getAccessToken())
-            );
-        }
+
+        assertThat(
+            tokenService.prependVersionAndEncodeAccessToken(version, newTokenBytes.v1()),
+            equalTo(tokenFuture.get().getAccessToken())
+        );
         assertThat(
             TokenService.prependVersionAndEncodeRefreshToken(version, newTokenBytes.v2()),
             equalTo(tokenFuture.get().getRefreshToken())
@@ -1158,10 +946,8 @@ public static String tokenDocIdFromAccessTokenBytes(byte[] accessTokenBytes, Tra
             MessageDigest userTokenIdDigest = sha256();
             userTokenIdDigest.update(accessTokenBytes, RAW_TOKEN_BYTES_LENGTH, RAW_TOKEN_DOC_ID_BYTES_LENGTH);
             return Base64.getUrlEncoder().withoutPadding().encodeToString(userTokenIdDigest.digest());
-        } else if (tokenVersion.onOrAfter(TokenService.VERSION_ACCESS_TOKENS_AS_UUIDS)) {
-            return TokenService.hashTokenString(Base64.getUrlEncoder().withoutPadding().encodeToString(accessTokenBytes));
         } else {
-            return Base64.getUrlEncoder().withoutPadding().encodeToString(accessTokenBytes);
+            return TokenService.hashTokenString(Base64.getUrlEncoder().withoutPadding().encodeToString(accessTokenBytes));
         }
     }
 
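For current token versions the document id is derived by hashing a dedicated slice of the random token bytes, so neither the raw token nor anything reversible into it is ever stored. A minimal sketch of that derivation; the two length constants are illustrative placeholders, not the real values of `RAW_TOKEN_BYTES_LENGTH` and `RAW_TOKEN_DOC_ID_BYTES_LENGTH`:

```java
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Base64;

final class TokenDocId {
    // Placeholder sizes; the real constants live in TokenService.
    static final int RAW_TOKEN_BYTES_LENGTH = 16;
    static final int RAW_TOKEN_DOC_ID_BYTES_LENGTH = 16;

    /**
     * Hashes the doc-id slice of the token bytes (which must therefore hold at
     * least RAW_TOKEN_BYTES_LENGTH + RAW_TOKEN_DOC_ID_BYTES_LENGTH bytes) into
     * a URL-safe, unpadded base64 string.
     */
    static String tokenDocId(byte[] accessTokenBytes) throws NoSuchAlgorithmException {
        MessageDigest digest = MessageDigest.getInstance("SHA-256");
        digest.update(accessTokenBytes, RAW_TOKEN_BYTES_LENGTH, RAW_TOKEN_DOC_ID_BYTES_LENGTH);
        return Base64.getUrlEncoder().withoutPadding().encodeToString(digest.digest());
    }
}
```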
@@ -1178,12 +964,9 @@ private void mockTokenForRefreshToken(
         if (userToken.getTransportVersion().onOrAfter(VERSION_GET_TOKEN_DOC_FOR_REFRESH)) {
             storedAccessToken = Base64.getUrlEncoder().withoutPadding().encodeToString(sha256().digest(accessTokenBytes));
             storedRefreshToken = Base64.getUrlEncoder().withoutPadding().encodeToString(sha256().digest(refreshTokenBytes));
-        } else if (userToken.getTransportVersion().onOrAfter(TokenService.VERSION_HASHED_TOKENS)) {
-            storedAccessToken = null;
-            storedRefreshToken = TokenService.hashTokenString(Base64.getUrlEncoder().withoutPadding().encodeToString(refreshTokenBytes));
         } else {
             storedAccessToken = null;
-            storedRefreshToken = Base64.getUrlEncoder().withoutPadding().encodeToString(refreshTokenBytes);
+            storedRefreshToken = TokenService.hashTokenString(Base64.getUrlEncoder().withoutPadding().encodeToString(refreshTokenBytes));
         }
         final RealmRef realmRef = new RealmRef(
             refreshTokenStatus == null ? randomAlphaOfLength(6) : refreshTokenStatus.getAssociatedRealm(),