From ebcddf594b6720a72fb8f15c668838d7b7242237 Mon Sep 17 00:00:00 2001 From: Anton Rubin Date: Fri, 11 Oct 2024 11:45:41 +0100 Subject: [PATCH 1/3] add lowercase tokenizer docs Signed-off-by: Anton Rubin --- _analyzers/tokenizers/index.md | 2 +- _analyzers/tokenizers/lowercase.md | 93 ++++++++++++++++++++++++++++++ 2 files changed, 94 insertions(+), 1 deletion(-) create mode 100644 _analyzers/tokenizers/lowercase.md diff --git a/_analyzers/tokenizers/index.md b/_analyzers/tokenizers/index.md index d401851f60..1abc5ee7ff 100644 --- a/_analyzers/tokenizers/index.md +++ b/_analyzers/tokenizers/index.md @@ -2,7 +2,7 @@ layout: default title: Tokenizers nav_order: 60 -has_children: false +has_children: true has_toc: false --- diff --git a/_analyzers/tokenizers/lowercase.md b/_analyzers/tokenizers/lowercase.md new file mode 100644 index 0000000000..102d6b1322 --- /dev/null +++ b/_analyzers/tokenizers/lowercase.md @@ -0,0 +1,93 @@ +--- +layout: default +title: Lowercase +parent: Tokenizers +nav_order: 70 +--- + +# Lowercase tokenizer + +The `lowercase` tokenizer breaks text into terms wherever there is whitespace and then lowercases all the terms. Functionally this is identical to configuring `letter` tokenizer with `lowercase` token filter, however using `lowercase` tokenizer is more efficient, as these actions are performed in a single step. 
+ +## Example usage + +The following example request creates a new index named `my-lowercase-index` and configures an analyzer with `lowercase` tokenizer: + +```json +PUT /my-lowercase-index +{ + "settings": { + "analysis": { + "tokenizer": { + "my_lowercase_tokenizer": { + "type": "lowercase" + } + }, + "analyzer": { + "my_lowercase_analyzer": { + "type": "custom", + "tokenizer": "my_lowercase_tokenizer" + } + } + } + } +} +``` +{% include copy-curl.html %} + +## Generated tokens + +Use the following request to examine the tokens generated using the created analyzer: + +```json +POST /my-lowercase-index/_analyze +{ + "analyzer": "my_lowercase_analyzer", + "text": "This is a Test. OpenSearch 123!" +} +``` +{% include copy-curl.html %} + +The response contains the generated tokens: + +```json +{ + "tokens": [ + { + "token": "this", + "start_offset": 0, + "end_offset": 4, + "type": "word", + "position": 0 + }, + { + "token": "is", + "start_offset": 5, + "end_offset": 7, + "type": "word", + "position": 1 + }, + { + "token": "a", + "start_offset": 8, + "end_offset": 9, + "type": "word", + "position": 2 + }, + { + "token": "test", + "start_offset": 10, + "end_offset": 14, + "type": "word", + "position": 3 + }, + { + "token": "opensearch", + "start_offset": 16, + "end_offset": 26, + "type": "word", + "position": 4 + } + ] +} +``` From 68e46ea39536b65ba02d75093f8cd3fdc07d1eda Mon Sep 17 00:00:00 2001 From: Fanit Kolchina Date: Thu, 5 Dec 2024 14:46:35 -0500 Subject: [PATCH 2/3] Doc review Signed-off-by: Fanit Kolchina --- _analyzers/tokenizers/lowercase.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/_analyzers/tokenizers/lowercase.md b/_analyzers/tokenizers/lowercase.md index 102d6b1322..4379e7b6d1 100644 --- a/_analyzers/tokenizers/lowercase.md +++ b/_analyzers/tokenizers/lowercase.md @@ -7,11 +7,11 @@ nav_order: 70 # Lowercase tokenizer -The `lowercase` tokenizer breaks text into terms wherever there is whitespace and then lowercases all the 
terms. Functionally this is identical to configuring `letter` tokenizer with `lowercase` token filter, however using `lowercase` tokenizer is more efficient, as these actions are performed in a single step. +The `lowercase` tokenizer breaks text into terms on white space and then lowercases all the terms. Functionally, this is identical to configuring a `letter` tokenizer with a `lowercase` token filter. However, using a `lowercase` tokenizer is more efficient because the tokenizer actions are performed in a single step. ## Example usage -The following example request creates a new index named `my-lowercase-index` and configures an analyzer with `lowercase` tokenizer: +The following example request creates a new index named `my-lowercase-index` and configures an analyzer with a `lowercase` tokenizer: ```json PUT /my-lowercase-index @@ -37,7 +37,7 @@ PUT /my-lowercase-index ## Generated tokens -Use the following request to examine the tokens generated using the created analyzer: +Use the following request to examine the tokens generated using the analyzer: ```json POST /my-lowercase-index/_analyze From 52a83a281798cd8e6d6fe07b57dfe4ec96fa8dc6 Mon Sep 17 00:00:00 2001 From: kolchfa-aws <105444904+kolchfa-aws@users.noreply.github.com> Date: Mon, 9 Dec 2024 13:19:46 -0500 Subject: [PATCH 3/3] Apply suggestions from code review Co-authored-by: Nathan Bower Signed-off-by: kolchfa-aws <105444904+kolchfa-aws@users.noreply.github.com> --- _analyzers/tokenizers/lowercase.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/_analyzers/tokenizers/lowercase.md b/_analyzers/tokenizers/lowercase.md index 4379e7b6d1..5542ecbf50 100644 --- a/_analyzers/tokenizers/lowercase.md +++ b/_analyzers/tokenizers/lowercase.md @@ -7,7 +7,7 @@ nav_order: 70 # Lowercase tokenizer -The `lowercase` tokenizer breaks text into terms on white space and then lowercases all the terms. Functionally, this is identical to configuring a `letter` tokenizer with a `lowercase` token filter. 
However, using a `lowercase` tokenizer is more efficient because the tokenizer actions are performed in a single step. +The `lowercase` tokenizer breaks text into terms whenever it encounters a character that is not a letter and then lowercases all the terms. Functionally, this is identical to configuring a `letter` tokenizer with a `lowercase` token filter. However, using a `lowercase` tokenizer is more efficient because the tokenizer actions are performed in a single step. ## Example usage