From 42eb609f4b19f375ab8caf8cc637fa6819b406ce Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Mon, 16 Dec 2024 06:03:55 +0000 Subject: [PATCH] Auto-generated API code --- .../0722b302b2b3275a988d858044f99d5d.asciidoc | 10 + ...0c52af573c9401a2a687e86a4beb182b.asciidoc} | 2 +- .../2a67608dadbf220a2f040f3a79d3677d.asciidoc | 35 ++ .../3f9dcf2aa42f3ecfb5ebfe48c1774103.asciidoc | 18 + ...48e142e6c69014e0509d4c9251749d77.asciidoc} | 3 +- .../49b31e23f8b9667b6a7b2734d55fb6ed.asciidoc | 17 - .../5a70db31f587b7ffed5e9bc1445430cb.asciidoc | 22 - ...6fa02c2ad485bbe91f44b321158250f3.asciidoc} | 7 + .../730045fae3743c39b612813a42c330c3.asciidoc | 24 + .../7478ff69113fb53f41ea07cdf911fa67.asciidoc | 33 ++ .../7dd0d9cc6c5982a2c003d301e90feeba.asciidoc | 37 ++ ...80135e8c644e34cc70ce8a4e7915d1a2.asciidoc} | 2 +- .../8c639d3eef5c2de29e12bd9c6a42d3d4.asciidoc | 39 ++ .../8cad5d95a0e7c103f08be53d0b172558.asciidoc | 22 - ...93c77c65f1e11382f8043d0300e87b89.asciidoc} | 2 +- .../b590241c4296299b836fbb5a95bdd2dc.asciidoc | 18 + .../b6d278737d27973e498ac61cda9e5126.asciidoc | 21 + .../bdc55256fa5f701680631a149dbb75a9.asciidoc | 22 + .../bdd28276618235487ac96bd6679bc206.asciidoc | 31 ++ ...cecfaa659af6646b3b67d7b311586fa0.asciidoc} | 2 +- ...d5242b1ab0213f25e5e0742032274ce6.asciidoc} | 2 +- ...df81b88a2192dd6f9912e0c948a44487.asciidoc} | 2 +- ...e77c2f41a7eca765b0c5f734a66d919f.asciidoc} | 2 +- ...ea8c4229afa6dd4f1321355542be9912.asciidoc} | 2 +- docs/reference.asciidoc | 500 +++++++++++++++--- src/api/api/ccr.ts | 26 +- src/api/api/features.ts | 4 +- src/api/api/ilm.ts | 22 +- src/api/api/indices.ts | 40 +- src/api/api/license.ts | 14 +- src/api/api/ml.ts | 8 +- src/api/types.ts | 197 +++++-- 32 files changed, 941 insertions(+), 245 deletions(-) create mode 100644 docs/doc_examples/0722b302b2b3275a988d858044f99d5d.asciidoc rename docs/doc_examples/{160986f49758f4e8345d183a842f6351.asciidoc => 0c52af573c9401a2a687e86a4beb182b.asciidoc} (92%) create mode 100644 
docs/doc_examples/2a67608dadbf220a2f040f3a79d3677d.asciidoc create mode 100644 docs/doc_examples/3f9dcf2aa42f3ecfb5ebfe48c1774103.asciidoc rename docs/doc_examples/{9f16fca9813304e398ee052aa857dbcd.asciidoc => 48e142e6c69014e0509d4c9251749d77.asciidoc} (85%) delete mode 100644 docs/doc_examples/49b31e23f8b9667b6a7b2734d55fb6ed.asciidoc delete mode 100644 docs/doc_examples/5a70db31f587b7ffed5e9bc1445430cb.asciidoc rename docs/doc_examples/{f9ee5d55a73f4c1fe7d507609047aefd.asciidoc => 6fa02c2ad485bbe91f44b321158250f3.asciidoc} (76%) create mode 100644 docs/doc_examples/730045fae3743c39b612813a42c330c3.asciidoc create mode 100644 docs/doc_examples/7478ff69113fb53f41ea07cdf911fa67.asciidoc create mode 100644 docs/doc_examples/7dd0d9cc6c5982a2c003d301e90feeba.asciidoc rename docs/doc_examples/{f4d0ef2e0f76babee83d999fe35127f2.asciidoc => 80135e8c644e34cc70ce8a4e7915d1a2.asciidoc} (96%) create mode 100644 docs/doc_examples/8c639d3eef5c2de29e12bd9c6a42d3d4.asciidoc delete mode 100644 docs/doc_examples/8cad5d95a0e7c103f08be53d0b172558.asciidoc rename docs/doc_examples/{8593715fcc70315a0816b435551258e0.asciidoc => 93c77c65f1e11382f8043d0300e87b89.asciidoc} (90%) create mode 100644 docs/doc_examples/b590241c4296299b836fbb5a95bdd2dc.asciidoc create mode 100644 docs/doc_examples/b6d278737d27973e498ac61cda9e5126.asciidoc create mode 100644 docs/doc_examples/bdc55256fa5f701680631a149dbb75a9.asciidoc create mode 100644 docs/doc_examples/bdd28276618235487ac96bd6679bc206.asciidoc rename docs/doc_examples/{b26b5574438e4eaf146b2428bf537c51.asciidoc => cecfaa659af6646b3b67d7b311586fa0.asciidoc} (96%) rename docs/doc_examples/{35fd9549350926f8d57dc1765e2f40d3.asciidoc => d5242b1ab0213f25e5e0742032274ce6.asciidoc} (96%) rename docs/doc_examples/{a225fc8c134cb21a85bc6025dac9368b.asciidoc => df81b88a2192dd6f9912e0c948a44487.asciidoc} (92%) rename docs/doc_examples/{5ba32ebaa7ee28a339c7693696d305ca.asciidoc => e77c2f41a7eca765b0c5f734a66d919f.asciidoc} (93%) rename 
docs/doc_examples/{bb5a1319c496acc862c670cc7224e59a.asciidoc => ea8c4229afa6dd4f1321355542be9912.asciidoc} (96%) diff --git a/docs/doc_examples/0722b302b2b3275a988d858044f99d5d.asciidoc b/docs/doc_examples/0722b302b2b3275a988d858044f99d5d.asciidoc new file mode 100644 index 000000000..84abd3971 --- /dev/null +++ b/docs/doc_examples/0722b302b2b3275a988d858044f99d5d.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getMapping({ + index: "kibana_sample_data_ecommerce", +}); +console.log(response); +---- diff --git a/docs/doc_examples/160986f49758f4e8345d183a842f6351.asciidoc b/docs/doc_examples/0c52af573c9401a2a687e86a4beb182b.asciidoc similarity index 92% rename from docs/doc_examples/160986f49758f4e8345d183a842f6351.asciidoc rename to docs/doc_examples/0c52af573c9401a2a687e86a4beb182b.asciidoc index 3aeb9d6f4..e0413d7dd 100644 --- a/docs/doc_examples/160986f49758f4e8345d183a842f6351.asciidoc +++ b/docs/doc_examples/0c52af573c9401a2a687e86a4beb182b.asciidoc @@ -10,7 +10,7 @@ const response = await client.ingest.putPipeline({ { attachment: { field: "data", - remove_binary: false, + remove_binary: true, }, }, ], diff --git a/docs/doc_examples/2a67608dadbf220a2f040f3a79d3677d.asciidoc b/docs/doc_examples/2a67608dadbf220a2f040f3a79d3677d.asciidoc new file mode 100644 index 000000000..93ccfa9d8 --- /dev/null +++ b/docs/doc_examples/2a67608dadbf220a2f040f3a79d3677d.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "attachment", + description: "Extract attachment information including original binary", + processors: [ + { + attachment: { + field: "data", + remove_binary: false, + }, + }, + ], +}); +console.log(response); + +const response1 = 
await client.index({ + index: "my-index-000001", + id: "my_id", + pipeline: "attachment", + document: { + data: "e1xydGYxXGFuc2kNCkxvcmVtIGlwc3VtIGRvbG9yIHNpdCBhbWV0DQpccGFyIH0=", + }, +}); +console.log(response1); + +const response2 = await client.get({ + index: "my-index-000001", + id: "my_id", +}); +console.log(response2); +---- diff --git a/docs/doc_examples/3f9dcf2aa42f3ecfb5ebfe48c1774103.asciidoc b/docs/doc_examples/3f9dcf2aa42f3ecfb5ebfe48c1774103.asciidoc new file mode 100644 index 000000000..7818a3f0c --- /dev/null +++ b/docs/doc_examples/3f9dcf2aa42f3ecfb5ebfe48c1774103.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "kibana_sample_data_ecommerce", + size: 0, + aggs: { + order_stats: { + stats: { + field: "taxful_total_price", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9f16fca9813304e398ee052aa857dbcd.asciidoc b/docs/doc_examples/48e142e6c69014e0509d4c9251749d77.asciidoc similarity index 85% rename from docs/doc_examples/9f16fca9813304e398ee052aa857dbcd.asciidoc rename to docs/doc_examples/48e142e6c69014e0509d4c9251749d77.asciidoc index 6be472e3b..7c4401c7d 100644 --- a/docs/doc_examples/9f16fca9813304e398ee052aa857dbcd.asciidoc +++ b/docs/doc_examples/48e142e6c69014e0509d4c9251749d77.asciidoc @@ -10,7 +10,8 @@ const response = await client.inference.put({ service: "openai", service_settings: { api_key: "", - model_id: "text-embedding-ada-002", + model_id: "text-embedding-3-small", + dimensions: 128, }, }, }); diff --git a/docs/doc_examples/49b31e23f8b9667b6a7b2734d55fb6ed.asciidoc b/docs/doc_examples/49b31e23f8b9667b6a7b2734d55fb6ed.asciidoc deleted file mode 100644 index d66581f09..000000000 --- a/docs/doc_examples/49b31e23f8b9667b6a7b2734d55fb6ed.asciidoc +++ /dev/null @@ -1,17 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node 
scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.knnSearch({ - index: "my-index", - knn: { - field: "image_vector", - query_vector: [0.3, 0.1, 1.2], - k: 10, - num_candidates: 100, - }, - _source: ["name", "file_type"], -}); -console.log(response); ----- diff --git a/docs/doc_examples/5a70db31f587b7ffed5e9bc1445430cb.asciidoc b/docs/doc_examples/5a70db31f587b7ffed5e9bc1445430cb.asciidoc deleted file mode 100644 index 255b2df23..000000000 --- a/docs/doc_examples/5a70db31f587b7ffed5e9bc1445430cb.asciidoc +++ /dev/null @@ -1,22 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: "semantic-embeddings", - mappings: { - properties: { - semantic_text: { - type: "semantic_text", - inference_id: "my-elser-endpoint", - }, - content: { - type: "text", - copy_to: "semantic_text", - }, - }, - }, -}); -console.log(response); ----- diff --git a/docs/doc_examples/f9ee5d55a73f4c1fe7d507609047aefd.asciidoc b/docs/doc_examples/6fa02c2ad485bbe91f44b321158250f3.asciidoc similarity index 76% rename from docs/doc_examples/f9ee5d55a73f4c1fe7d507609047aefd.asciidoc rename to docs/doc_examples/6fa02c2ad485bbe91f44b321158250f3.asciidoc index 0c7b48ea7..afea3d985 100644 --- a/docs/doc_examples/f9ee5d55a73f4c1fe7d507609047aefd.asciidoc +++ b/docs/doc_examples/6fa02c2ad485bbe91f44b321158250f3.asciidoc @@ -12,6 +12,13 @@ const response = await client.search({ fields: ["my_field", "my_field._2gram", "my_field._3gram"], }, }, + highlight: { + fields: { + my_field: { + matched_fields: ["my_field._index_prefix"], + }, + }, + }, }); console.log(response); ---- diff --git a/docs/doc_examples/730045fae3743c39b612813a42c330c3.asciidoc b/docs/doc_examples/730045fae3743c39b612813a42c330c3.asciidoc new file mode 100644 index 000000000..b2400e39b --- /dev/null +++ 
b/docs/doc_examples/730045fae3743c39b612813a42c330c3.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + prefix: { + full_name: { + value: "ki", + }, + }, + }, + highlight: { + fields: { + full_name: { + matched_fields: ["full_name._index_prefix"], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7478ff69113fb53f41ea07cdf911fa67.asciidoc b/docs/doc_examples/7478ff69113fb53f41ea07cdf911fa67.asciidoc new file mode 100644 index 000000000..047487632 --- /dev/null +++ b/docs/doc_examples/7478ff69113fb53f41ea07cdf911fa67.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "kibana_sample_data_ecommerce", + size: 0, + aggs: { + daily_sales: { + date_histogram: { + field: "order_date", + calendar_interval: "day", + }, + aggs: { + daily_revenue: { + sum: { + field: "taxful_total_price", + }, + }, + smoothed_revenue: { + moving_fn: { + buckets_path: "daily_revenue", + window: 3, + script: "MovingFunctions.unweightedAvg(values)", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7dd0d9cc6c5982a2c003d301e90feeba.asciidoc b/docs/doc_examples/7dd0d9cc6c5982a2c003d301e90feeba.asciidoc new file mode 100644 index 000000000..733c366ba --- /dev/null +++ b/docs/doc_examples/7dd0d9cc6c5982a2c003d301e90feeba.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "kibana_sample_data_ecommerce", + size: 0, + aggs: { + daily_sales: { + date_histogram: { + field: "order_date", + calendar_interval: 
"day", + format: "yyyy-MM-dd", + }, + aggs: { + revenue: { + sum: { + field: "taxful_total_price", + }, + }, + unique_customers: { + cardinality: { + field: "customer_id", + }, + }, + avg_basket_size: { + avg: { + field: "total_quantity", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f4d0ef2e0f76babee83d999fe35127f2.asciidoc b/docs/doc_examples/80135e8c644e34cc70ce8a4e7915d1a2.asciidoc similarity index 96% rename from docs/doc_examples/f4d0ef2e0f76babee83d999fe35127f2.asciidoc rename to docs/doc_examples/80135e8c644e34cc70ce8a4e7915d1a2.asciidoc index 0bf72b678..bb8174e35 100644 --- a/docs/doc_examples/f4d0ef2e0f76babee83d999fe35127f2.asciidoc +++ b/docs/doc_examples/80135e8c644e34cc70ce8a4e7915d1a2.asciidoc @@ -12,7 +12,7 @@ const response = await client.ingest.putPipeline({ field: "data", indexed_chars: 11, indexed_chars_field: "max_size", - remove_binary: false, + remove_binary: true, }, }, ], diff --git a/docs/doc_examples/8c639d3eef5c2de29e12bd9c6a42d3d4.asciidoc b/docs/doc_examples/8c639d3eef5c2de29e12bd9c6a42d3d4.asciidoc new file mode 100644 index 000000000..aa09492cf --- /dev/null +++ b/docs/doc_examples/8c639d3eef5c2de29e12bd9c6a42d3d4.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "kibana_sample_data_ecommerce", + size: 0, + aggs: { + categories: { + terms: { + field: "category.keyword", + size: 5, + order: { + total_revenue: "desc", + }, + }, + aggs: { + total_revenue: { + sum: { + field: "taxful_total_price", + }, + }, + avg_order_value: { + avg: { + field: "taxful_total_price", + }, + }, + total_items: { + sum: { + field: "total_quantity", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8cad5d95a0e7c103f08be53d0b172558.asciidoc b/docs/doc_examples/8cad5d95a0e7c103f08be53d0b172558.asciidoc 
deleted file mode 100644 index b5190e9a8..000000000 --- a/docs/doc_examples/8cad5d95a0e7c103f08be53d0b172558.asciidoc +++ /dev/null @@ -1,22 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.inference.put({ - task_type: "sparse_embedding", - inference_id: "my-elser-endpoint", - inference_config: { - service: "elser", - service_settings: { - adaptive_allocations: { - enabled: true, - min_number_of_allocations: 3, - max_number_of_allocations: 10, - }, - num_threads: 1, - }, - }, -}); -console.log(response); ----- diff --git a/docs/doc_examples/8593715fcc70315a0816b435551258e0.asciidoc b/docs/doc_examples/93c77c65f1e11382f8043d0300e87b89.asciidoc similarity index 90% rename from docs/doc_examples/8593715fcc70315a0816b435551258e0.asciidoc rename to docs/doc_examples/93c77c65f1e11382f8043d0300e87b89.asciidoc index aae698a6f..7a9809243 100644 --- a/docs/doc_examples/8593715fcc70315a0816b435551258e0.asciidoc +++ b/docs/doc_examples/93c77c65f1e11382f8043d0300e87b89.asciidoc @@ -9,7 +9,7 @@ const response = await client.indices.create({ properties: { infer_field: { type: "semantic_text", - inference_id: "my-elser-endpoint", + inference_id: ".elser-2-elasticsearch", }, source_field: { type: "text", diff --git a/docs/doc_examples/b590241c4296299b836fbb5a95bdd2dc.asciidoc b/docs/doc_examples/b590241c4296299b836fbb5a95bdd2dc.asciidoc new file mode 100644 index 000000000..f71aebf61 --- /dev/null +++ b/docs/doc_examples/b590241c4296299b836fbb5a95bdd2dc.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "kibana_sample_data_ecommerce", + size: 0, + aggs: { + avg_order_value: { + avg: { + field: "taxful_total_price", + }, + }, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/b6d278737d27973e498ac61cda9e5126.asciidoc b/docs/doc_examples/b6d278737d27973e498ac61cda9e5126.asciidoc new file mode 100644 index 000000000..446bba938 --- /dev/null +++ b/docs/doc_examples/b6d278737d27973e498ac61cda9e5126.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "kibana_sample_data_ecommerce", + size: 0, + aggs: { + daily_orders: { + date_histogram: { + field: "order_date", + calendar_interval: "day", + format: "yyyy-MM-dd", + min_doc_count: 0, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bdc55256fa5f701680631a149dbb75a9.asciidoc b/docs/doc_examples/bdc55256fa5f701680631a149dbb75a9.asciidoc new file mode 100644 index 000000000..4e074487d --- /dev/null +++ b/docs/doc_examples/bdc55256fa5f701680631a149dbb75a9.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "kibana_sample_data_ecommerce", + size: 0, + aggs: { + sales_by_category: { + terms: { + field: "category.keyword", + size: 5, + order: { + _count: "desc", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bdd28276618235487ac96bd6679bc206.asciidoc b/docs/doc_examples/bdd28276618235487ac96bd6679bc206.asciidoc new file mode 100644 index 000000000..b518cae85 --- /dev/null +++ b/docs/doc_examples/bdd28276618235487ac96bd6679bc206.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "kibana_sample_data_ecommerce", + size: 0, + aggs: { + daily_sales: { + date_histogram: { + field: "order_date", + calendar_interval: "day", 
+ }, + aggs: { + revenue: { + sum: { + field: "taxful_total_price", + }, + }, + cumulative_revenue: { + cumulative_sum: { + buckets_path: "revenue", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b26b5574438e4eaf146b2428bf537c51.asciidoc b/docs/doc_examples/cecfaa659af6646b3b67d7b311586fa0.asciidoc similarity index 96% rename from docs/doc_examples/b26b5574438e4eaf146b2428bf537c51.asciidoc rename to docs/doc_examples/cecfaa659af6646b3b67d7b311586fa0.asciidoc index 8945d602e..bae03c0ff 100644 --- a/docs/doc_examples/b26b5574438e4eaf146b2428bf537c51.asciidoc +++ b/docs/doc_examples/cecfaa659af6646b3b67d7b311586fa0.asciidoc @@ -14,7 +14,7 @@ const response = await client.ingest.putPipeline({ attachment: { target_field: "_ingest._value.attachment", field: "_ingest._value.data", - remove_binary: false, + remove_binary: true, }, }, }, diff --git a/docs/doc_examples/35fd9549350926f8d57dc1765e2f40d3.asciidoc b/docs/doc_examples/d5242b1ab0213f25e5e0742032274ce6.asciidoc similarity index 96% rename from docs/doc_examples/35fd9549350926f8d57dc1765e2f40d3.asciidoc rename to docs/doc_examples/d5242b1ab0213f25e5e0742032274ce6.asciidoc index 3302992dc..865d407f9 100644 --- a/docs/doc_examples/35fd9549350926f8d57dc1765e2f40d3.asciidoc +++ b/docs/doc_examples/d5242b1ab0213f25e5e0742032274ce6.asciidoc @@ -10,7 +10,7 @@ const response = await client.ingest.putPipeline({ { attachment: { field: "data", - remove_binary: false, + remove_binary: true, }, }, ], diff --git a/docs/doc_examples/a225fc8c134cb21a85bc6025dac9368b.asciidoc b/docs/doc_examples/df81b88a2192dd6f9912e0c948a44487.asciidoc similarity index 92% rename from docs/doc_examples/a225fc8c134cb21a85bc6025dac9368b.asciidoc rename to docs/doc_examples/df81b88a2192dd6f9912e0c948a44487.asciidoc index da9071e2c..d4a4521d5 100644 --- a/docs/doc_examples/a225fc8c134cb21a85bc6025dac9368b.asciidoc +++ b/docs/doc_examples/df81b88a2192dd6f9912e0c948a44487.asciidoc @@ -7,7 +7,7 @@ const response 
= await client.inference.put({ task_type: "sparse_embedding", inference_id: "elser_embeddings", inference_config: { - service: "elser", + service: "elasticsearch", service_settings: { num_allocations: 1, num_threads: 1, diff --git a/docs/doc_examples/5ba32ebaa7ee28a339c7693696d305ca.asciidoc b/docs/doc_examples/e77c2f41a7eca765b0c5f734a66d919f.asciidoc similarity index 93% rename from docs/doc_examples/5ba32ebaa7ee28a339c7693696d305ca.asciidoc rename to docs/doc_examples/e77c2f41a7eca765b0c5f734a66d919f.asciidoc index d17ba0b28..76351698d 100644 --- a/docs/doc_examples/5ba32ebaa7ee28a339c7693696d305ca.asciidoc +++ b/docs/doc_examples/e77c2f41a7eca765b0c5f734a66d919f.asciidoc @@ -11,7 +11,7 @@ const response = await client.ingest.putPipeline({ attachment: { field: "data", properties: ["content", "title"], - remove_binary: false, + remove_binary: true, }, }, ], diff --git a/docs/doc_examples/bb5a1319c496acc862c670cc7224e59a.asciidoc b/docs/doc_examples/ea8c4229afa6dd4f1321355542be9912.asciidoc similarity index 96% rename from docs/doc_examples/bb5a1319c496acc862c670cc7224e59a.asciidoc rename to docs/doc_examples/ea8c4229afa6dd4f1321355542be9912.asciidoc index d3998b385..c4744fb4e 100644 --- a/docs/doc_examples/bb5a1319c496acc862c670cc7224e59a.asciidoc +++ b/docs/doc_examples/ea8c4229afa6dd4f1321355542be9912.asciidoc @@ -12,7 +12,7 @@ const response = await client.ingest.putPipeline({ field: "data", indexed_chars: 11, indexed_chars_field: "max_size", - remove_binary: false, + remove_binary: true, }, }, ], diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 4b7dae886..e94dd8b6c 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -42,6 +42,7 @@ client.bulk({ ... }) * *Request (object):* ** *`index` (Optional, string)*: Name of the data stream, index, or index alias to perform bulk actions on. 
** *`operations` (Optional, { index, create, update, delete } | { detect_noop, doc, doc_as_upsert, script, scripted_upsert, _source, upsert } | object[])* +** *`list_executed_pipelines` (Optional, boolean)*: If `true`, the response will include the ingest pipelines that were executed for each index or create. ** *`pipeline` (Optional, string)*: ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter. @@ -55,6 +56,7 @@ Valid values: `true`, `false`, `wait_for`. ** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. Set to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). ** *`require_alias` (Optional, boolean)*: If `true`, the request’s actions must target an index alias. +** *`require_data_stream` (Optional, boolean)*: If `true`, the request's actions must target a data stream (existing or to-be-created). [discrete] === clear_scroll @@ -2418,7 +2420,8 @@ If `false`, the request returns a 404 status code when there are no matches or o === ccr [discrete] ==== delete_auto_follow_pattern -Deletes auto-follow patterns. +Delete auto-follow patterns. +Delete a collection of cross-cluster replication auto-follow patterns. {ref}/ccr-delete-auto-follow-pattern.html[Endpoint documentation] [source,ts] @@ -2434,7 +2437,9 @@ client.ccr.deleteAutoFollowPattern({ name }) [discrete] ==== follow -Creates a new follower index configured to follow the referenced leader index. +Create a follower. +Create a cross-cluster replication follower index that follows a specific leader index. 
+When the API returns, the follower index exists and cross-cluster replication starts replicating operations from the leader index to the follower index. {ref}/ccr-put-follow.html[Endpoint documentation] [source,ts] @@ -2473,7 +2478,9 @@ remote Lucene segment files to the follower index. [discrete] ==== follow_info -Retrieves information about all follower indices, including parameters and status for each follower index +Get follower information. +Get information about all cross-cluster replication follower indices. +For example, the results include follower index names, leader index names, replication options, and whether the follower indices are active or paused. {ref}/ccr-get-follow-info.html[Endpoint documentation] [source,ts] @@ -2489,7 +2496,9 @@ client.ccr.followInfo({ index }) [discrete] ==== follow_stats -Retrieves follower stats. return shard-level stats about the following tasks associated with each shard for the specified indices. +Get follower stats. +Get cross-cluster replication follower stats. +The API returns shard-level stats about the "following tasks" associated with each shard for the specified indices. {ref}/ccr-get-follow-stats.html[Endpoint documentation] [source,ts] @@ -2505,7 +2514,18 @@ client.ccr.followStats({ index }) [discrete] ==== forget_follower -Removes the follower retention leases from the leader. +Forget a follower. +Remove the cross-cluster replication follower retention leases from the leader. + +A following index takes out retention leases on its leader index. +These leases are used to increase the likelihood that the shards of the leader index retain the history of operations that the shards of the following index need to run replication. +When a follower index is converted to a regular index by the unfollow API (either by directly calling the API or by index lifecycle management tasks), these leases are removed. 
+However, removal of the leases can fail, for example when the remote cluster containing the leader index is unavailable. +While the leases will eventually expire on their own, their extended existence can cause the leader index to hold more history than necessary and prevent index lifecycle management from performing some operations on the leader index. +This API exists to enable manually removing the leases when the unfollow API is unable to do so. + +NOTE: This API does not stop replication by a following index. If you use this API with a follower index that is still actively following, the following index will add back retention leases on the leader. +The only purpose of this API is to handle the case of failure to remove the following retention leases after the unfollow API is invoked. {ref}/ccr-post-forget-follower.html[Endpoint documentation] [source,ts] @@ -2525,7 +2545,8 @@ client.ccr.forgetFollower({ index }) [discrete] ==== get_auto_follow_pattern -Gets configured auto-follow patterns. Returns the specified auto-follow pattern collection. +Get auto-follow patterns. +Get cross-cluster replication auto-follow patterns. {ref}/ccr-get-auto-follow-pattern.html[Endpoint documentation] [source,ts] @@ -2541,7 +2562,14 @@ client.ccr.getAutoFollowPattern({ ... }) [discrete] ==== pause_auto_follow_pattern -Pauses an auto-follow pattern +Pause an auto-follow pattern. +Pause a cross-cluster replication auto-follow pattern. +When the API returns, the auto-follow pattern is inactive. +New indices that are created on the remote cluster and match the auto-follow patterns are ignored. + +You can resume auto-following with the resume auto-follow pattern API. +When it resumes, the auto-follow pattern is active again and automatically configures follower indices for newly created indices on the remote cluster that match its patterns. +Remote indices that were created while the pattern was paused will also be followed, unless they have been deleted or closed in the interim. 
{ref}/ccr-pause-auto-follow-pattern.html[Endpoint documentation] [source,ts] @@ -2557,7 +2585,11 @@ client.ccr.pauseAutoFollowPattern({ name }) [discrete] ==== pause_follow -Pauses a follower index. The follower index will not fetch any additional operations from the leader index. +Pause a follower. +Pause a cross-cluster replication follower index. +The follower index will not fetch any additional operations from the leader index. +You can resume following with the resume follower API. +You can pause and resume a follower index to change the configuration of the following task. {ref}/ccr-post-pause-follow.html[Endpoint documentation] [source,ts] @@ -2573,7 +2605,13 @@ client.ccr.pauseFollow({ index }) [discrete] ==== put_auto_follow_pattern -Creates a new named collection of auto-follow patterns against a specified remote cluster. Newly created indices on the remote cluster matching any of the specified patterns will be automatically configured as follower indices. +Create or update auto-follow patterns. +Create a collection of cross-cluster replication auto-follow patterns for a remote cluster. +Newly created indices on the remote cluster that match any of the patterns are automatically configured as follower indices. +Indices on the remote cluster that were created before the auto-follow pattern was created will not be auto-followed even if they match the pattern. + +This API can also be used to update auto-follow patterns. +NOTE: Follower indices that were configured automatically before updating an auto-follow pattern will remain unchanged even if they do not match against the new patterns. {ref}/ccr-put-auto-follow-pattern.html[Endpoint documentation] [source,ts] @@ -2604,7 +2642,10 @@ client.ccr.putAutoFollowPattern({ name, remote_cluster }) [discrete] ==== resume_auto_follow_pattern -Resumes an auto-follow pattern that has been paused +Resume an auto-follow pattern. +Resume a cross-cluster replication auto-follow pattern that was paused. 
+The auto-follow pattern will resume configuring following indices for newly created indices that match its patterns on the remote cluster. +Remote indices created while the pattern was paused will also be followed unless they have been deleted or closed in the interim. {ref}/ccr-resume-auto-follow-pattern.html[Endpoint documentation] [source,ts] @@ -2620,7 +2661,11 @@ client.ccr.resumeAutoFollowPattern({ name }) [discrete] ==== resume_follow -Resumes a follower index that has been paused +Resume a follower. +Resume a cross-cluster replication follower index that was paused. +The follower index could have been paused with the pause follower API. +Alternatively it could be paused due to replication that cannot be retried due to failures during following tasks. +When this API returns, the follower index will resume fetching operations from the leader index. {ref}/ccr-post-resume-follow.html[Endpoint documentation] [source,ts] @@ -2646,7 +2691,8 @@ client.ccr.resumeFollow({ index }) [discrete] ==== stats -Gets all stats related to cross-cluster replication. +Get cross-cluster replication stats. +This API returns stats about auto-following and the same shard-level stats as the get follower stats API. {ref}/ccr-get-stats.html[Endpoint documentation] [source,ts] @@ -2657,7 +2703,12 @@ client.ccr.stats() [discrete] ==== unfollow -Stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication. +Unfollow an index. +Convert a cross-cluster replication follower index to a regular index. +The API stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication. +The follower index must be paused and closed before you call the unfollow API. + +NOTE: Currently cross-cluster replication does not support converting an existing regular index to a follower index. 
Converting a follower index to a regular index is an irreversible operation. {ref}/ccr-post-unfollow.html[Endpoint documentation] [source,ts] @@ -3956,7 +4007,16 @@ Defaults to `false`. If `true` then the response will include an extra section u === features [discrete] ==== get_features -Gets a list of features which can be included in snapshots using the feature_states field when creating a snapshot +Get the features. +Get a list of features that can be included in snapshots using the `feature_states` field when creating a snapshot. +You can use this API to determine which feature states to include when taking a snapshot. +By default, all feature states are included in a snapshot if that snapshot includes the global state, or none if it does not. + +A feature state includes one or more system indices necessary for a given feature to function. +In order to ensure data integrity, all system indices that comprise a feature state are snapshotted and restored together. + +The features listed by this API are a combination of built-in features and features defined by plugins. +In order for a feature state to be listed in this API and recognized as a valid feature state by the create snapshot API, the plugin that defines that feature must be installed on the master node. {ref}/get-features-api.html[Endpoint documentation] [source,ts] @@ -3967,7 +4027,23 @@ client.features.getFeatures() [discrete] ==== reset_features -Resets the internal state of features, usually by deleting system indices +Reset the features. +Clear all of the state information stored in system indices by Elasticsearch features, including the security and machine learning indices. + +WARNING: Intended for development and testing use only. Do not reset features on a production cluster. + +Return a cluster to the same state as a new installation by resetting the feature state for all Elasticsearch features. +This deletes all state information stored in system indices. 
+ +The response code is HTTP 200 if the state is successfully reset for all features. +It is HTTP 500 if the reset operation failed for any feature. + +Note that select features might provide a way to reset particular system indices. +Using this API resets all features, both those that are built-in and implemented as plugins. + +To list the features that will be affected, use the get features API. + +IMPORTANT: The features installed on the node you submit this request to are the features that will be reset. Run on the master node if you have any doubts about which plugins are installed on individual nodes. {ref}/modules-snapshots.html[Endpoint documentation] [source,ts] @@ -4177,7 +4253,8 @@ Defaults to no timeout. === ilm [discrete] ==== delete_lifecycle -Deletes the specified lifecycle policy definition. You cannot delete policies that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error. +Delete a lifecycle policy. +You cannot delete policies that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error. {ref}/ilm-delete-lifecycle.html[Endpoint documentation] [source,ts] @@ -4195,7 +4272,11 @@ client.ilm.deleteLifecycle({ policy }) [discrete] ==== explain_lifecycle -Retrieves information about the index’s current lifecycle state, such as the currently executing phase, action, and step. Shows when the index entered each one, the definition of the running phase, and information about any failures. +Explain the lifecycle state. +Get the current lifecycle status for one or more indices. +For data streams, the API retrieves the current lifecycle status for the stream's backing indices. + +The response indicates when the index entered each lifecycle state, provides the definition of the running phase, and information about any failures. 
{ref}/ilm-explain-lifecycle.html[Endpoint documentation] [source,ts] @@ -4216,7 +4297,7 @@ To target all data streams and indices, use `*` or `_all`. [discrete] ==== get_lifecycle -Retrieves a lifecycle policy. +Get lifecycle policies. {ref}/ilm-get-lifecycle.html[Endpoint documentation] [source,ts] @@ -4234,7 +4315,8 @@ client.ilm.getLifecycle({ ... }) [discrete] ==== get_status -Retrieves the current index lifecycle management (ILM) status. +Get the ILM status. +Get the current index lifecycle management status. {ref}/ilm-get-status.html[Endpoint documentation] [source,ts] @@ -4245,10 +4327,21 @@ client.ilm.getStatus() [discrete] ==== migrate_to_data_tiers -Switches the indices, ILM policies, and legacy, composable and component templates from using custom node attributes and -attribute-based allocation filters to using data tiers, and optionally deletes one legacy index template.+ +Migrate to data tiers routing. +Switch the indices, ILM policies, and legacy, composable, and component templates from using custom node attributes and attribute-based allocation filters to using data tiers. +Optionally, delete one legacy index template. Using node roles enables ILM to automatically move the indices between data tiers. +Migrating away from custom node attributes routing can be manually performed. +This API provides an automated way of performing three out of the four manual steps listed in the migration guide: + +1. Stop setting the custom hot attribute on new indices. +1. Remove custom allocation settings from existing ILM policies. +1. Replace custom allocation settings from existing indices with the corresponding tier preference. + +ILM must be stopped before performing the migration. +Use the stop ILM and get ILM status APIs to wait until the reported operation mode is `STOPPED`. 
+ {ref}/ilm-migrate-to-data-tiers.html[Endpoint documentation] [source,ts] ---- @@ -4266,7 +4359,20 @@ This provides a way to retrieve the indices and ILM policies that need to be mig [discrete] ==== move_to_step -Manually moves an index into the specified step and executes that step. +Move to a lifecycle step. +Manually move an index into a specific step in the lifecycle policy and run that step. + +WARNING: This operation can result in the loss of data. Manually moving an index into a specific step runs that step even if it has already been performed. This is a potentially destructive action and this should be considered an expert level API. + +You must specify both the current step and the step to be executed in the body of the request. +The request will fail if the current step does not match the step currently running for the index +This is to prevent the index from being moved from an unexpected step into the next step. + +When specifying the target (`next_step`) to which the index will be moved, either the name or both the action and name fields are optional. +If only the phase is specified, the index will move to the first step of the first action in the target phase. +If the phase and action are specified, the index will move to the first step of the specified action in the specified phase. +Only actions specified in the ILM policy are considered valid. +An index cannot move to a step that is not part of its policy. {ref}/ilm-move-to-step.html[Endpoint documentation] [source,ts] @@ -4284,7 +4390,10 @@ client.ilm.moveToStep({ index, current_step, next_step }) [discrete] ==== put_lifecycle -Creates a lifecycle policy. If the specified policy exists, the policy is replaced and the policy version is incremented. +Create or update a lifecycle policy. +If the specified policy exists, it is replaced and the policy version is incremented. + +NOTE: Only the latest version of the policy is stored, you cannot revert to previous versions. 
{ref}/ilm-put-lifecycle.html[Endpoint documentation] [source,ts] @@ -4302,7 +4411,9 @@ client.ilm.putLifecycle({ policy }) [discrete] ==== remove_policy -Removes the assigned lifecycle policy and stops managing the specified index +Remove policies from an index. +Remove the assigned lifecycle policies from an index or a data stream's backing indices. +It also stops managing the indices. {ref}/ilm-remove-policy.html[Endpoint documentation] [source,ts] @@ -4318,7 +4429,10 @@ client.ilm.removePolicy({ index }) [discrete] ==== retry -Retries executing the policy for an index that is in the ERROR step. +Retry a policy. +Retry running the lifecycle policy for an index that is in the ERROR step. +The API sets the policy back to the step where the error occurred and runs the step. +Use the explain lifecycle state API to determine whether an index is in the ERROR step. {ref}/ilm-retry-policy.html[Endpoint documentation] [source,ts] @@ -4334,7 +4448,10 @@ client.ilm.retry({ index }) [discrete] ==== start -Start the index lifecycle management (ILM) plugin. +Start the ILM plugin. +Start the index lifecycle management plugin if it is currently stopped. +ILM is started automatically when the cluster is formed. +Restarting ILM is necessary only when it has been stopped using the stop ILM API. {ref}/ilm-start.html[Endpoint documentation] [source,ts] @@ -4351,7 +4468,12 @@ client.ilm.start({ ... }) [discrete] ==== stop -Halts all lifecycle management operations and stops the index lifecycle management (ILM) plugin +Stop the ILM plugin. +Halt all lifecycle management operations and stop the index lifecycle management plugin. +This is useful when you are performing maintenance on the cluster and need to prevent ILM from performing any actions on your indices. + +The API returns as soon as the stop request has been acknowledged, but the plugin might continue to run until in-progress operations complete and the plugin can be safely stopped. 
+Use the get ILM status API to check whether ILM is running. {ref}/ilm-stop.html[Endpoint documentation] [source,ts] @@ -4425,8 +4547,9 @@ If an array of strings is provided, it is analyzed as a multi-value field. [discrete] ==== clear_cache -Clears the caches of one or more indices. -For data streams, the API clears the caches of the stream’s backing indices. +Clear the cache. +Clear the cache of one or more indices. +For data streams, the API clears the caches of the stream's backing indices. {ref}/indices-clearcache.html[Endpoint documentation] [source,ts] @@ -4456,7 +4579,29 @@ Use the `fields` parameter to clear the cache of specific fields only. [discrete] ==== clone -Clones an existing index. +Clone an index. +Clone an existing index into a new index. +Each original primary shard is cloned into a new primary shard in the new index. + +IMPORTANT: Elasticsearch does not apply index templates to the resulting index. +The API also does not copy index metadata from the original index. +Index metadata includes aliases, index lifecycle management phase definitions, and cross-cluster replication (CCR) follower information. +For example, if you clone a CCR follower index, the resulting clone will not be a follower index. + +The clone API copies most index settings from the source index to the resulting index, with the exception of `index.number_of_replicas` and `index.auto_expand_replicas`. +To set the number of replicas in the resulting index, configure these settings in the clone request. + +Cloning works as follows: + +* First, it creates a new target index with the same definition as the source index. +* Then it hard-links segments from the source index into the target index. If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time consuming process. +* Finally, it recovers the target index as though it were a closed index which had just been re-opened. 
+ +IMPORTANT: Indices can only be cloned if they meet the following requirements: + +* The target index must not exist. +* The source index must have the same number of primary shards as the target index. +* The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index. {ref}/indices-clone-index.html[Endpoint documentation] [source,ts] @@ -4481,7 +4626,24 @@ Set to `all` or any positive integer up to the total number of shards in the ind [discrete] ==== close -Closes an index. +Close an index. +A closed index is blocked for read or write operations and does not allow all operations that opened indices allow. +It is not possible to index documents or to search for documents in a closed index. +Closed indices do not have to maintain internal data structures for indexing or searching documents, which results in a smaller overhead on the cluster. + +When opening or closing an index, the master node is responsible for restarting the index shards to reflect the new state of the index. +The shards will then go through the normal recovery process. +The data of opened and closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times. + +You can open and close multiple indices. +An error is thrown if the request explicitly refers to a missing index. +This behaviour can be turned off using the `ignore_unavailable=true` parameter. + +By default, you must explicitly name the indices you are opening or closing. +To open or close indices with `_all`, `*`, or other wildcard expressions, change the` action.destructive_requires_name` setting to `false`. This setting can also be changed with the cluster update settings API. + +Closed indices consume a significant amount of disk-space which can cause problems in managed environments. +Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`. 
{ref}/indices-close.html[Endpoint documentation] [source,ts] @@ -4721,7 +4883,10 @@ If no response is received before the timeout expires, the request fails and ret [discrete] ==== disk_usage -Analyzes the disk usage of each field of an index or data stream. +Analyze the index disk usage. +Analyze the disk usage of each field of an index or data stream. +This API might not support indices created in previous Elasticsearch versions. +The result of a small index can be inaccurate as some parts of an index might not be analyzed by the API. {ref}/indices-disk-usage.html[Endpoint documentation] [source,ts] @@ -4749,7 +4914,14 @@ To use the API, this parameter must be set to `true`. [discrete] ==== downsample -Aggregates a time series (TSDS) index and stores pre-computed statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped by a configured time interval. +Downsample an index. +Aggregate a time series (TSDS) index and store pre-computed statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped by a configured time interval. +For example, a TSDS index that contains metrics sampled every 10 seconds can be downsampled to an hourly index. +All documents within an hour interval are summarized and stored as a single document in the downsample index. + +NOTE: Only indices in a time series data stream are supported. +Neither field nor document level security can be defined on the source index. +The source index must be read only (`index.blocks.write: true`). {ref}/indices-downsample-data-stream.html[Endpoint documentation] [source,ts] @@ -4861,7 +5033,7 @@ client.indices.existsTemplate({ name }) [discrete] ==== explain_data_lifecycle Get the status for a data stream lifecycle. 
-Retrieves information about an index or data stream’s current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution. +Get information about an index or data stream's current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution. {ref}/data-streams-explain-lifecycle.html[Endpoint documentation] [source,ts] @@ -4879,7 +5051,10 @@ client.indices.explainDataLifecycle({ index }) [discrete] ==== field_usage_stats -Returns field usage information for each shard and field of an index. +Get field usage stats. +Get field usage information for each shard and field of an index. +Field usage statistics are automatically captured when queries are running on a cluster. +A shard-level search request that accesses a given field, even if multiple times during that request, is counted as a single use. {ref}/field-usage-stats.html[Endpoint documentation] [source,ts] @@ -4909,7 +5084,17 @@ Set to all or any positive integer up to the total number of shards in the index [discrete] ==== flush -Flushes one or more data streams or indices. +Flush data streams or indices. +Flushing a data stream or index is the process of making sure that any data that is currently only stored in the transaction log is also permanently stored in the Lucene index. +When restarting, Elasticsearch replays any unflushed operations from the transaction log into the Lucene index to bring it back into the state that it was in before the restart. +Elasticsearch automatically triggers flushes as needed, using heuristics that trade off the size of the unflushed transaction log against the cost of performing each flush. + +After each operation has been flushed it is permanently stored in the Lucene index. 
+This may mean that there is no need to maintain an additional copy of it in the transaction log. +The transaction log is made up of multiple files, called generations, and Elasticsearch will delete any generation files when they are no longer needed, freeing up disk space. + +It is also possible to trigger a flush on one or more indices using the flush API, although it is rare for users to need to call this API directly. +If you call the flush API after indexing some documents then a successful response indicates that Elasticsearch has flushed all the documents that were indexed before the flush API was called. {ref}/indices-flush.html[Endpoint documentation] [source,ts] @@ -4937,7 +5122,19 @@ If `false`, Elasticsearch returns an error if you request a flush when another f [discrete] ==== forcemerge -Performs the force merge operation on one or more indices. +Force a merge. +Perform the force merge operation on the shards of one or more indices. +For data streams, the API forces a merge on the shards of the stream's backing indices. + +Merging reduces the number of segments in each shard by merging some of them together and also frees up the space used by deleted documents. +Merging normally happens automatically, but sometimes it is useful to trigger a merge manually. + +WARNING: We recommend force merging only a read-only index (meaning the index is no longer receiving writes). +When documents are updated or deleted, the old version is not immediately removed but instead soft-deleted and marked with a "tombstone". +These soft-deleted documents are automatically cleaned up during regular segment merges. +But force merge can cause very large (greater than 5 GB) segments to be produced, which are not eligible for regular merges. +So the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance. 
+If you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can't be backed up incrementally. {ref}/indices-forcemerge.html[Endpoint documentation] [source,ts] @@ -5289,7 +5486,17 @@ Set to `all` or any positive integer up to the total number of shards in the ind [discrete] ==== promote_data_stream -Promotes a data stream from a replicated data stream managed by CCR to a regular data stream +Promote a data stream. +Promote a data stream from a replicated data stream managed by cross-cluster replication (CCR) to a regular data stream. + +With CCR auto following, a data stream from a remote cluster can be replicated to the local cluster. +These data streams can't be rolled over in the local cluster. +These replicated data streams roll over only if the upstream data stream rolls over. +In the event that the remote cluster is no longer available, the data stream in the local cluster can be promoted to a regular data stream, which allows these data streams to be rolled over in the local cluster. + +NOTE: When promoting a data stream, ensure the local cluster has a data stream enabled index template that matches the data stream. +If this is missing, the data stream will not be able to roll over until a matching index template is created. +This will affect the lifecycle management of the data stream and interfere with the data stream size and retention. {ref}/data-streams.html[Endpoint documentation] [source,ts] @@ -5508,6 +5715,16 @@ error. ==== put_template Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. +Elasticsearch applies templates to new indices based on an index pattern that matches the index name. + +IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. 
+ +Composable templates always take precedence over legacy templates. +If no composable template matches a new index, matching legacy templates are applied according to their order. + +Index templates are only applied during index creation. +Changes to index templates do not affect existing indices. +Settings and mappings specified in create index API requests override any settings or mappings specified in an index template. {ref}/indices-templates-v1.html[Endpoint documentation] [source,ts] @@ -5539,8 +5756,27 @@ received before the timeout expires, the request fails and returns an error. [discrete] ==== recovery -Returns information about ongoing and completed shard recoveries for one or more indices. -For data streams, the API returns information for the stream’s backing indices. +Get index recovery information. +Get information about ongoing and completed shard recoveries for one or more indices. +For data streams, the API returns information for the stream's backing indices. + +Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard. +When a shard recovery completes, the recovered shard is available for search and indexing. + +Recovery automatically occurs during the following processes: + +* When creating an index for the first time. +* When a node rejoins the cluster and starts up any missing primary shard copies using the data that it holds in its data path. +* Creation of new replica shard copies from the primary. +* Relocation of a shard copy to a different node in the same cluster. +* A snapshot restore operation. +* A clone, shrink, or split operation. + +You can determine the cause of a shard recovery using the recovery or cat recovery APIs. + +The index recovery API reports information about completed recoveries only for shard copies that currently exist in the cluster. 
+It only reports the last recovery for each shard copy and does not report historical information about earlier recoveries, nor does it report information about the recoveries of shard copies that no longer exist. +This means that if a shard copy completes a recovery and then Elasticsearch relocates it onto a different node then the information about the original recovery will not be shown in the recovery API. {ref}/indices-recovery.html[Endpoint documentation] [source,ts] @@ -5587,7 +5823,20 @@ Valid values are: `all`, `open`, `closed`, `hidden`, `none`. [discrete] ==== reload_search_analyzers -Reloads an index's search analyzers and their resources. +Reload search analyzers. +Reload an index's search analyzers and their resources. +For data streams, the API reloads search analyzers and resources for the stream's backing indices. + +IMPORTANT: After reloading the search analyzers you should clear the request cache to make sure it doesn't contain responses derived from the previous versions of the analyzer. + +You can use the reload search analyzers API to pick up changes to synonym files used in the `synonym_graph` or `synonym` token filter of a search analyzer. +To be eligible, the token filter must have an `updateable` flag of `true` and only be used in search analyzers. + +NOTE: This API does not perform a reload for each shard of an index. +Instead, it performs a reload for each node containing index shards. +As a result, the total shard count returned by the API can differ from the number of index shards. +Because reloading affects every node with an index shard, it is important to update the synonym file on every data node in the cluster--including nodes that don't contain a shard replica--before using this API. +This ensures the synonym file is updated everywhere in the cluster in case shards are relocated in the future. 
{ref}/indices-reload-analyzers.html[Endpoint documentation] [source,ts] @@ -5606,10 +5855,23 @@ client.indices.reloadSearchAnalyzers({ index }) [discrete] ==== resolve_cluster -Resolves the specified index expressions to return information about each cluster, including -the local cluster, if included. +Resolve the cluster. +Resolve the specified index expressions to return information about each cluster, including the local cluster, if included. Multiple patterns and remote clusters are supported. +This endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search. + +You use the same index expression with this endpoint as you would for cross-cluster search. +Index and cluster exclusions are also supported with this endpoint. + +For each cluster in the index expression, information is returned about: + +* Whether the querying ("local") cluster is currently connected to each remote cluster in the index expression scope. +* Whether each remote cluster is configured with `skip_unavailable` as `true` or `false`. +* Whether there are any indices, aliases, or data streams on that cluster that match the index expression. +* Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index). +* Cluster version information, including the Elasticsearch server version. + {ref}/indices-resolve-cluster-api.html[Endpoint documentation] [source,ts] ---- @@ -5699,8 +5961,9 @@ Set to all or any positive integer up to the total number of shards in the index [discrete] ==== segments -Returns low-level information about the Lucene segments in index shards. -For data streams, the API returns information about the stream’s backing indices. +Get index segments. +Get low-level information about the Lucene segments in index shards. +For data streams, the API returns information about the stream's backing indices. 
{ref}/indices-segments.html[Endpoint documentation] [source,ts] @@ -5725,8 +5988,18 @@ Valid values are: `all`, `open`, `closed`, `hidden`, `none`. [discrete] ==== shard_stores -Retrieves store information about replica shards in one or more indices. -For data streams, the API retrieves store information for the stream’s backing indices. +Get index shard stores. +Get store information about replica shards in one or more indices. +For data streams, the API retrieves store information for the stream's backing indices. + +The index shard stores API returns the following information: + +* The node on which each replica shard exists. +* The allocation ID for each replica shard. +* A unique ID for each replica shard. +* Any errors encountered while opening the shard index or from an earlier failure. + +By default, the API returns store information only for primary shards that are unassigned or have one or more unassigned replica shards. {ref}/indices-shards-stores.html[Endpoint documentation] [source,ts] @@ -5749,7 +6022,38 @@ this argument determines whether wildcard expressions match hidden data streams. [discrete] ==== shrink -Shrinks an existing index into a new index with fewer primary shards. +Shrink an index. +Shrink an index into a new index with fewer primary shards. + +Before you can shrink an index: + +* The index must be read-only. +* A copy of every shard in the index must reside on the same node. +* The index must have a green health status. + +To make shard allocation easier, we recommend you also remove the index's replica shards. +You can later re-add replica shards as part of the shrink operation. + +The requested number of primary shards in the target index must be a factor of the number of shards in the source index. +For example an index with 8 primary shards can be shrunk into 4, 2 or 1 primary shards or an index with 15 primary shards can be shrunk into 5, 3 or 1. 
If the number of shards in the index is a prime number, it can only be shrunk into a single primary shard. + Before shrinking, a (primary or replica) copy of every shard in the index must be present on the same node.
{ref}/indices-shrink-index.html[Endpoint documentation] [source,ts] @@ -5839,7 +6143,30 @@ that uses deprecated components, Elasticsearch will emit a deprecation warning. [discrete] ==== split -Splits an existing index into a new index with more primary shards. +Split an index. +Split an index into a new index with more primary shards. +* Before you can split an index: + +* The index must be read-only. +* The cluster health status must be green. + +The number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` setting. +The number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing. +For instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3. + +A split operation: + +* Creates a new target index with the same definition as the source index, but with a larger number of primary shards. +* Hard-links segments from the source index into the target index. If the file system doesn't support hard-linking, all segments are copied into the new index, which is a much more time consuming process. +* Hashes all documents again, after low level files are created, to delete documents that belong to a different shard. +* Recovers the target index as though it were a closed index which had just been re-opened. + +IMPORTANT: Indices can only be split if they satisfy the following requirements: + +* The target index must not exist. +* The source index must have fewer primary shards than the target index. +* The number of primary shards in the target index must be a multiple of the number of primary shards in the source index. +* The node handling the split process must have sufficient free disk space to accommodate a second copy of the existing index. 
{ref}/indices-split-index.html[Endpoint documentation] [source,ts] @@ -5864,8 +6191,17 @@ Set to `all` or any positive integer up to the total number of shards in the ind [discrete] ==== stats -Returns statistics for one or more indices. -For data streams, the API retrieves statistics for the stream’s backing indices. +Get index statistics. +For data streams, the API retrieves statistics for the stream's backing indices. + +By default, the returned statistics are index-level with `primaries` and `total` aggregations. +`primaries` are the values for only the primary shards. +`total` are the accumulated values for both primary and replica shards. + +To get shard-level statistics, set the `level` parameter to `shards`. + +NOTE: When moving to another node, the shard-level statistics for a shard are cleared. +Although the shard is no longer part of the node, that node retains any node-level statistics to which the shard contributed. {ref}/indices-stats.html[Endpoint documentation] [source,ts] @@ -5893,7 +6229,8 @@ such as `open,hidden`. [discrete] ==== unfreeze -Unfreezes an index. +Unfreeze an index. +When a frozen index is unfrozen, the index goes through the normal recovery process and becomes writeable again. {ref}/unfreeze-index-api.html[Endpoint documentation] [source,ts] @@ -6292,7 +6629,10 @@ If you specify both this and the request path parameter, the API only uses the r === license [discrete] ==== delete -Deletes licensing information for the cluster +Delete the license. +When the license expires, your subscription level reverts to Basic. + +If the operator privileges feature is enabled, only operator users can use this API. {ref}/delete-license.html[Endpoint documentation] [source,ts] @@ -6304,8 +6644,10 @@ client.license.delete() [discrete] ==== get Get license information. -Returns information about your Elastic license, including its type, its status, when it was issued, and when it expires. 
-For more information about the different types of licenses, refer to [Elastic Stack subscriptions](https://www.elastic.co/subscriptions). +Get information about your Elastic license including its type, its status, when it was issued, and when it expires. + +NOTE: If the master node is generating a new cluster state, the get license API may return a `404 Not Found` response. +If you receive an unexpected 404 response after cluster startup, wait a short period and retry the request. {ref}/get-license.html[Endpoint documentation] [source,ts] @@ -6323,7 +6665,7 @@ This parameter is deprecated and will always be set to true in 8.x. [discrete] ==== get_basic_status -Retrieves information about the status of the basic license. +Get the basic license status. {ref}/get-basic-status.html[Endpoint documentation] [source,ts] @@ -6334,7 +6676,7 @@ client.license.getBasicStatus() [discrete] ==== get_trial_status -Retrieves information about the status of the trial license. +Get the trial status. {ref}/get-trial-status.html[Endpoint documentation] [source,ts] @@ -6345,7 +6687,14 @@ client.license.getTrialStatus() [discrete] ==== post -Updates the license for the cluster. +Update the license. +You can update your license at runtime without shutting down your nodes. +License updates take effect immediately. +If the license you are installing does not support all of the features that were available with your previous license, however, you are notified in the response. +You must then re-submit the API request with the acknowledge parameter set to true. + +NOTE: If Elasticsearch security features are enabled and you are installing a gold or higher license, you must enable TLS on the transport networking layer before you install the license. +If the operator privileges feature is enabled, only operator users can use this API. {ref}/update-license.html[Endpoint documentation] [source,ts] @@ -6363,8 +6712,15 @@ client.license.post({ ... 
}) [discrete] ==== post_start_basic -The start basic API enables you to initiate an indefinite basic license, which gives access to all the basic features. If the basic license does not support all of the features that are available with your current license, however, you are notified in the response. You must then re-submit the API request with the acknowledge parameter set to true. -To check the status of your basic license, use the following API: [Get basic status](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-basic-status.html). +Start a basic license. +Start an indefinite basic license, which gives access to all the basic features. + +NOTE: In order to start a basic license, you must not currently have a basic license. + +If the basic license does not support all of the features that are available with your current license, however, you are notified in the response. +You must then re-submit the API request with the `acknowledge` parameter set to `true`. + +To check the status of your basic license, use the get basic license API. {ref}/start-basic.html[Endpoint documentation] [source,ts] @@ -6380,7 +6736,13 @@ client.license.postStartBasic({ ... }) [discrete] ==== post_start_trial -The start trial API enables you to start a 30-day trial, which gives access to all subscription features. +Start a trial. +Start a 30-day trial, which gives access to all subscription features. + +NOTE: You are allowed to start a trial only if your cluster has not already activated a trial for the current major product version. +For example, if you have already activated a trial for v8.0, you cannot start a new trial until v9.0. You can, however, request an extended trial at https://www.elastic.co/trialextension. + +To check the status of your trial, use the get trial status API. {ref}/start-trial.html[Endpoint documentation] [source,ts] @@ -7489,6 +7851,7 @@ be retrieved and then added to another cluster. 
** *`from` (Optional, number)*: Skips the specified number of models. ** *`include` (Optional, Enum("definition" | "feature_importance_baseline" | "hyperparameters" | "total_feature_importance" | "definition_status"))*: A comma delimited string of optional fields to include in the response body. +** *`include_model_definition` (Optional, boolean)*: parameter is deprecated! Use [include=definition] instead ** *`size` (Optional, number)*: Specifies the maximum number of models to obtain. ** *`tags` (Optional, string | string[])*: A comma delimited string of tags. A trained model can have many tags, or none. When supplied, only trained models that contain all the supplied @@ -7778,6 +8141,7 @@ model the values as `0-14 = 0`, `15-24 = 1`, `25-34 = 2`, and so on. threads may decrease the time necessary to complete the analysis at the cost of using more CPU. Note that the process may use additional threads for operational functionality other than the analysis itself. +** *`_meta` (Optional, Record)* ** *`model_memory_limit` (Optional, string)*: The approximate maximum amount of memory resources that are permitted for analytical processing. If your `elasticsearch.yml` file contains an `xpack.ml.max_model_memory_limit` setting, an error occurs when you try @@ -7908,6 +8272,18 @@ client.ml.putJob({ job_id, analysis_config, data_description }) ** *`renormalization_window_days` (Optional, number)*: Advanced configuration option. The period over which adjustments to the score are applied, as new data is seen. The default value is the longer of 30 days or 100 bucket spans. ** *`results_index_name` (Optional, string)*: A text string that affects the name of the machine learning results index. By default, the job generates an index named `.ml-anomalies-shared`. ** *`results_retention_days` (Optional, number)*: Advanced configuration option. The period of time (in days) that results are retained. Age is calculated relative to the timestamp of the latest bucket result. 
If this property has a non-null value, once per day at 00:30 (server time), results that are the specified number of days older than the latest bucket result are deleted from Elasticsearch. The default value is null, which means all results are retained. Annotations generated by the system also count as results for retention purposes; they are deleted after the same number of days as results. Annotations added by users are retained forever. +** *`allow_no_indices` (Optional, boolean)*: If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the +`_all` string or when no indices are specified. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines +whether wildcard expressions match hidden data streams. Supports a list of values. Valid values are: + +* `all`: Match any data stream or index, including hidden ones. +* `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed. +* `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or both. +* `none`: Wildcard patterns are not accepted. +* `open`: Match open, non-hidden indices. Also matches any non-hidden data stream. +** *`ignore_throttled` (Optional, boolean)*: If `true`, concrete, expanded or aliased indices are ignored when frozen. +** *`ignore_unavailable` (Optional, boolean)*: If `true`, unavailable indices (missing or closed) are ignored. [discrete] ==== put_trained_model @@ -8483,7 +8859,7 @@ bucket result. If this property has a non-null value, once per day at than the latest bucket result are deleted from Elasticsearch. The default value is null, which means all results are retained. ** *`groups` (Optional, string[])*: A list of job groups. 
A job can belong to no groups or many. -** *`detectors` (Optional, { by_field_name, custom_rules, detector_description, detector_index, exclude_frequent, field_name, function, over_field_name, partition_field_name, use_null }[])*: An array of detector update objects. +** *`detectors` (Optional, { detector_index, description, custom_rules }[])*: An array of detector update objects. ** *`per_partition_categorization` (Optional, { enabled, stop_on_warn })*: Settings related to how categorization interacts with partition fields. [discrete] diff --git a/src/api/api/ccr.ts b/src/api/api/ccr.ts index 7b3d86fd8..8fafab146 100644 --- a/src/api/api/ccr.ts +++ b/src/api/api/ccr.ts @@ -44,7 +44,7 @@ export default class Ccr { } /** - * Deletes auto-follow patterns. + * Delete auto-follow patterns. Delete a collection of cross-cluster replication auto-follow patterns. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-delete-auto-follow-pattern.html | Elasticsearch API documentation} */ async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -76,7 +76,7 @@ export default class Ccr { } /** - * Creates a new follower index configured to follow the referenced leader index. + * Create a follower. Create a cross-cluster replication follower index that follows a specific leader index. When the API returns, the follower index exists and cross-cluster replication starts replicating operations from the leader index to the follower index. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-put-follow.html | Elasticsearch API documentation} */ async follow (this: That, params: T.CcrFollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -112,7 +112,7 @@ export default class Ccr { } /** - * Retrieves information about all follower indices, including parameters and status for each follower index + * Get follower information. 
Get information about all cross-cluster replication follower indices. For example, the results include follower index names, leader index names, replication options, and whether the follower indices are active or paused. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-get-follow-info.html | Elasticsearch API documentation} */ async followInfo (this: That, params: T.CcrFollowInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -144,7 +144,7 @@ export default class Ccr { } /** - * Retrieves follower stats. return shard-level stats about the following tasks associated with each shard for the specified indices. + * Get follower stats. Get cross-cluster replication follower stats. The API returns shard-level stats about the "following tasks" associated with each shard for the specified indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-get-follow-stats.html | Elasticsearch API documentation} */ async followStats (this: That, params: T.CcrFollowStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -176,7 +176,7 @@ export default class Ccr { } /** - * Removes the follower retention leases from the leader. + * Forget a follower. Remove the cross-cluster replication follower retention leases from the leader. A following index takes out retention leases on its leader index. These leases are used to increase the likelihood that the shards of the leader index retain the history of operations that the shards of the following index need to run replication. When a follower index is converted to a regular index by the unfollow API (either by directly calling the API or by index lifecycle management tasks), these leases are removed. However, removal of the leases can fail, for example when the remote cluster containing the leader index is unavailable. 
While the leases will eventually expire on their own, their extended existence can cause the leader index to hold more history than necessary and prevent index lifecycle management from performing some operations on the leader index. This API exists to enable manually removing the leases when the unfollow API is unable to do so. NOTE: This API does not stop replication by a following index. If you use this API with a follower index that is still actively following, the following index will add back retention leases on the leader. The only purpose of this API is to handle the case of failure to remove the following retention leases after the unfollow API is invoked. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-post-forget-follower.html | Elasticsearch API documentation} */ async forgetFollower (this: That, params: T.CcrForgetFollowerRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -212,7 +212,7 @@ export default class Ccr { } /** - * Gets configured auto-follow patterns. Returns the specified auto-follow pattern collection. + * Get auto-follow patterns. Get cross-cluster replication auto-follow patterns. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-get-auto-follow-pattern.html | Elasticsearch API documentation} */ async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -252,7 +252,7 @@ export default class Ccr { } /** - * Pauses an auto-follow pattern + * Pause an auto-follow pattern. Pause a cross-cluster replication auto-follow pattern. When the API returns, the auto-follow pattern is inactive. New indices that are created on the remote cluster and match the auto-follow patterns are ignored. You can resume auto-following with the resume auto-follow pattern API. 
When it resumes, the auto-follow pattern is active again and automatically configures follower indices for newly created indices on the remote cluster that match its patterns. Remote indices that were created while the pattern was paused will also be followed, unless they have been deleted or closed in the interim. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-pause-auto-follow-pattern.html | Elasticsearch API documentation} */ async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -284,7 +284,7 @@ export default class Ccr { } /** - * Pauses a follower index. The follower index will not fetch any additional operations from the leader index. + * Pause a follower. Pause a cross-cluster replication follower index. The follower index will not fetch any additional operations from the leader index. You can resume following with the resume follower API. You can pause and resume a follower index to change the configuration of the following task. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-post-pause-follow.html | Elasticsearch API documentation} */ async pauseFollow (this: That, params: T.CcrPauseFollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -316,7 +316,7 @@ export default class Ccr { } /** - * Creates a new named collection of auto-follow patterns against a specified remote cluster. Newly created indices on the remote cluster matching any of the specified patterns will be automatically configured as follower indices. + * Create or update auto-follow patterns. Create a collection of cross-cluster replication auto-follow patterns for a remote cluster. Newly created indices on the remote cluster that match any of the patterns are automatically configured as follower indices. 
Indices on the remote cluster that were created before the auto-follow pattern was created will not be auto-followed even if they match the pattern. This API can also be used to update auto-follow patterns. NOTE: Follower indices that were configured automatically before updating an auto-follow pattern will remain unchanged even if they do not match against the new patterns. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-put-auto-follow-pattern.html | Elasticsearch API documentation} */ async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -352,7 +352,7 @@ export default class Ccr { } /** - * Resumes an auto-follow pattern that has been paused + * Resume an auto-follow pattern. Resume a cross-cluster replication auto-follow pattern that was paused. The auto-follow pattern will resume configuring following indices for newly created indices that match its patterns on the remote cluster. Remote indices created while the pattern was paused will also be followed unless they have been deleted or closed in the interim. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-resume-auto-follow-pattern.html | Elasticsearch API documentation} */ async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -384,7 +384,7 @@ export default class Ccr { } /** - * Resumes a follower index that has been paused + * Resume a follower. Resume a cross-cluster replication follower index that was paused. The follower index could have been paused with the pause follower API. Alternatively it could be paused due to replication that cannot be retried due to failures during following tasks. When this API returns, the follower index will resume fetching operations from the leader index. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-post-resume-follow.html | Elasticsearch API documentation} */ async resumeFollow (this: That, params: T.CcrResumeFollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -420,7 +420,7 @@ export default class Ccr { } /** - * Gets all stats related to cross-cluster replication. + * Get cross-cluster replication stats. This API returns stats about auto-following and the same shard-level stats as the get follower stats API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-get-stats.html | Elasticsearch API documentation} */ async stats (this: That, params?: T.CcrStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -450,7 +450,7 @@ export default class Ccr { } /** - * Stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication. + * Unfollow an index. Convert a cross-cluster replication follower index to a regular index. The API stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication. The follower index must be paused and closed before you call the unfollow API. NOTE: Currently cross-cluster replication does not support converting an existing regular index to a follower index. Converting a follower index to a regular index is an irreversible operation. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ccr-post-unfollow.html | Elasticsearch API documentation} */ async unfollow (this: That, params: T.CcrUnfollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/features.ts b/src/api/api/features.ts index feab5b5c4..555be12bc 100644 --- a/src/api/api/features.ts +++ b/src/api/api/features.ts @@ -44,7 +44,7 @@ export default class Features { } /** - * Gets a list of features which can be included in snapshots using the feature_states field when creating a snapshot + * Get the features. Get a list of features that can be included in snapshots using the `feature_states` field when creating a snapshot. You can use this API to determine which feature states to include when taking a snapshot. By default, all feature states are included in a snapshot if that snapshot includes the global state, or none if it does not. A feature state includes one or more system indices necessary for a given feature to function. In order to ensure data integrity, all system indices that comprise a feature state are snapshotted and restored together. The features listed by this API are a combination of built-in features and features defined by plugins. In order for a feature state to be listed in this API and recognized as a valid feature state by the create snapshot API, the plugin that defines that feature must be installed on the master node. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-features-api.html | Elasticsearch API documentation} */ async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -74,7 +74,7 @@ export default class Features { } /** - * Resets the internal state of features, usually by deleting system indices + * Reset the features. 
Clear all of the state information stored in system indices by Elasticsearch features, including the security and machine learning indices. WARNING: Intended for development and testing use only. Do not reset features on a production cluster. Return a cluster to the same state as a new installation by resetting the feature state for all Elasticsearch features. This deletes all state information stored in system indices. The response code is HTTP 200 if the state is successfully reset for all features. It is HTTP 500 if the reset operation failed for any feature. Note that select features might provide a way to reset particular system indices. Using this API resets all features, both those that are built-in and implemented as plugins. To list the features that will be affected, use the get features API. IMPORTANT: The features installed on the node you submit this request to are the features that will be reset. Run on the master node if you have any doubts about which plugins are installed on individual nodes. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html | Elasticsearch API documentation} */ async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/ilm.ts b/src/api/api/ilm.ts index b144baac1..86748d989 100644 --- a/src/api/api/ilm.ts +++ b/src/api/api/ilm.ts @@ -44,7 +44,7 @@ export default class Ilm { } /** - * Deletes the specified lifecycle policy definition. You cannot delete policies that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error. + * Delete a lifecycle policy. You cannot delete policies that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-delete-lifecycle.html | Elasticsearch API documentation} */ async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -76,7 +76,7 @@ export default class Ilm { } /** - * Retrieves information about the index’s current lifecycle state, such as the currently executing phase, action, and step. Shows when the index entered each one, the definition of the running phase, and information about any failures. + * Explain the lifecycle state. Get the current lifecycle status for one or more indices. For data streams, the API retrieves the current lifecycle status for the stream's backing indices. The response indicates when the index entered each lifecycle state, provides the definition of the running phase, and information about any failures. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-explain-lifecycle.html | Elasticsearch API documentation} */ async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -108,7 +108,7 @@ export default class Ilm { } /** - * Retrieves a lifecycle policy. + * Get lifecycle policies. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-get-lifecycle.html | Elasticsearch API documentation} */ async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -148,7 +148,7 @@ export default class Ilm { } /** - * Retrieves the current index lifecycle management (ILM) status. + * Get the ILM status. Get the current index lifecycle management status. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-get-status.html | Elasticsearch API documentation} */ async getStatus (this: That, params?: T.IlmGetStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -178,7 +178,7 @@ export default class Ilm { } /** - * Switches the indices, ILM policies, and legacy, composable and component templates from using custom node attributes and attribute-based allocation filters to using data tiers, and optionally deletes one legacy index template.+ Using node roles enables ILM to automatically move the indices between data tiers. + * Migrate to data tiers routing. Switch the indices, ILM policies, and legacy, composable, and component templates from using custom node attributes and attribute-based allocation filters to using data tiers. Optionally, delete one legacy index template. Using node roles enables ILM to automatically move the indices between data tiers. Migrating away from custom node attributes routing can be manually performed. This API provides an automated way of performing three out of the four manual steps listed in the migration guide: 1. Stop setting the custom hot attribute on new indices. 1. Remove custom allocation settings from existing ILM policies. 1. Replace custom allocation settings from existing indices with the corresponding tier preference. ILM must be stopped before performing the migration. Use the stop ILM and get ILM status APIs to wait until the reported operation mode is `STOPPED`. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-migrate-to-data-tiers.html | Elasticsearch API documentation} */ async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -212,7 +212,7 @@ export default class Ilm { } /** - * Manually moves an index into the specified step and executes that step. + * Move to a lifecycle step. 
Manually move an index into a specific step in the lifecycle policy and run that step. WARNING: This operation can result in the loss of data. Manually moving an index into a specific step runs that step even if it has already been performed. This is a potentially destructive action and this should be considered an expert level API. You must specify both the current step and the step to be executed in the body of the request. The request will fail if the current step does not match the step currently running for the index This is to prevent the index from being moved from an unexpected step into the next step. When specifying the target (`next_step`) to which the index will be moved, either the name or both the action and name fields are optional. If only the phase is specified, the index will move to the first step of the first action in the target phase. If the phase and action are specified, the index will move to the first step of the specified action in the specified phase. Only actions specified in the ILM policy are considered valid. An index cannot move to a step that is not part of its policy. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-move-to-step.html | Elasticsearch API documentation} */ async moveToStep (this: That, params: T.IlmMoveToStepRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -248,7 +248,7 @@ export default class Ilm { } /** - * Creates a lifecycle policy. If the specified policy exists, the policy is replaced and the policy version is incremented. + * Create or update a lifecycle policy. If the specified policy exists, it is replaced and the policy version is incremented. NOTE: Only the latest version of the policy is stored, you cannot revert to previous versions. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-put-lifecycle.html | Elasticsearch API documentation} */ async putLifecycle (this: That, params: T.IlmPutLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -284,7 +284,7 @@ export default class Ilm { } /** - * Removes the assigned lifecycle policy and stops managing the specified index + * Remove policies from an index. Remove the assigned lifecycle policies from an index or a data stream's backing indices. It also stops managing the indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-remove-policy.html | Elasticsearch API documentation} */ async removePolicy (this: That, params: T.IlmRemovePolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -316,7 +316,7 @@ export default class Ilm { } /** - * Retries executing the policy for an index that is in the ERROR step. + * Retry a policy. Retry running the lifecycle policy for an index that is in the ERROR step. The API sets the policy back to the step where the error occurred and runs the step. Use the explain lifecycle state API to determine whether an index is in the ERROR step. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-retry-policy.html | Elasticsearch API documentation} */ async retry (this: That, params: T.IlmRetryRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -348,7 +348,7 @@ export default class Ilm { } /** - * Start the index lifecycle management (ILM) plugin. + * Start the ILM plugin. Start the index lifecycle management plugin if it is currently stopped. ILM is started automatically when the cluster is formed. Restarting ILM is necessary only when it has been stopped using the stop ILM API. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-start.html | Elasticsearch API documentation} */ async start (this: That, params?: T.IlmStartRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -378,7 +378,7 @@ export default class Ilm { } /** - * Halts all lifecycle management operations and stops the index lifecycle management (ILM) plugin + * Stop the ILM plugin. Halt all lifecycle management operations and stop the index lifecycle management plugin. This is useful when you are performing maintenance on the cluster and need to prevent ILM from performing any actions on your indices. The API returns as soon as the stop request has been acknowledged, but the plugin might continue to run until in-progress operations complete and the plugin can be safely stopped. Use the get ILM status API to check whether ILM is running. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-stop.html | Elasticsearch API documentation} */ async stop (this: That, params?: T.IlmStopRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts index 82e9227db..98bc01435 100644 --- a/src/api/api/indices.ts +++ b/src/api/api/indices.ts @@ -121,7 +121,7 @@ export default class Indices { } /** - * Clears the caches of one or more indices. For data streams, the API clears the caches of the stream’s backing indices. + * Clear the cache. Clear the cache of one or more indices. For data streams, the API clears the caches of the stream's backing indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-clearcache.html | Elasticsearch API documentation} */ async clearCache (this: That, params?: T.IndicesClearCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -161,7 +161,7 @@ export default class Indices { } /** - * Clones an existing index. + * Clone an index. Clone an existing index into a new index. 
Each original primary shard is cloned into a new primary shard in the new index. IMPORTANT: Elasticsearch does not apply index templates to the resulting index. The API also does not copy index metadata from the original index. Index metadata includes aliases, index lifecycle management phase definitions, and cross-cluster replication (CCR) follower information. For example, if you clone a CCR follower index, the resulting clone will not be a follower index. The clone API copies most index settings from the source index to the resulting index, with the exception of `index.number_of_replicas` and `index.auto_expand_replicas`. To set the number of replicas in the resulting index, configure these settings in the clone request. Cloning works as follows: * First, it creates a new target index with the same definition as the source index. * Then it hard-links segments from the source index into the target index. If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time consuming process. * Finally, it recovers the target index as though it were a closed index which had just been re-opened. IMPORTANT: Indices can only be cloned if they meet the following requirements: * The target index must not exist. * The source index must have the same number of primary shards as the target index. * The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-clone-index.html | Elasticsearch API documentation} */ async clone (this: That, params: T.IndicesCloneRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -198,7 +198,7 @@ export default class Indices { } /** - * Closes an index. + * Close an index. A closed index is blocked for read or write operations and does not allow all operations that opened indices allow. 
It is not possible to index documents or to search for documents in a closed index. Closed indices do not have to maintain internal data structures for indexing or searching documents, which results in a smaller overhead on the cluster. When opening or closing an index, the master node is responsible for restarting the index shards to reflect the new state of the index. The shards will then go through the normal recovery process. The data of opened and closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times. You can open and close multiple indices. An error is thrown if the request explicitly refers to a missing index. This behaviour can be turned off using the `ignore_unavailable=true` parameter. By default, you must explicitly name the indices you are opening or closing. To open or close indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. This setting can also be changed with the cluster update settings API. Closed indices consume a significant amount of disk-space which can cause problems in managed environments. Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-close.html | Elasticsearch API documentation} */ async close (this: That, params: T.IndicesCloseRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -538,7 +538,7 @@ export default class Indices { } /** - * Analyzes the disk usage of each field of an index or data stream. + * Analyze the index disk usage. Analyze the disk usage of each field of an index or data stream. This API might not support indices created in previous Elasticsearch versions. The result of a small index can be inaccurate as some parts of an index might not be analyzed by the API. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-disk-usage.html | Elasticsearch API documentation} */ async diskUsage (this: That, params: T.IndicesDiskUsageRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -570,7 +570,7 @@ export default class Indices { } /** - * Aggregates a time series (TSDS) index and stores pre-computed statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped by a configured time interval. + * Downsample an index. Aggregate a time series (TSDS) index and store pre-computed statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped by a configured time interval. For example, a TSDS index that contains metrics sampled every 10 seconds can be downsampled to an hourly index. All documents within an hour interval are summarized and stored as a single document in the downsample index. NOTE: Only indices in a time series data stream are supported. Neither field nor document level security can be defined on the source index. The source index must be read only (`index.blocks.write: true`). * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-downsample-data-stream.html | Elasticsearch API documentation} */ async downsample (this: That, params: T.IndicesDownsampleRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -743,7 +743,7 @@ export default class Indices { } /** - * Get the status for a data stream lifecycle. Retrieves information about an index or data stream’s current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution. + * Get the status for a data stream lifecycle. 
Get information about an index or data stream's current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-explain-lifecycle.html | Elasticsearch API documentation} */ async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -775,7 +775,7 @@ export default class Indices { } /** - * Returns field usage information for each shard and field of an index. + * Get field usage stats. Get field usage information for each shard and field of an index. Field usage statistics are automatically captured when queries are running on a cluster. A shard-level search request that accesses a given field, even if multiple times during that request, is counted as a single use. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/field-usage-stats.html | Elasticsearch API documentation} */ async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -807,7 +807,7 @@ export default class Indices { } /** - * Flushes one or more data streams or indices. + * Flush data streams or indices. Flushing a data stream or index is the process of making sure that any data that is currently only stored in the transaction log is also permanently stored in the Lucene index. When restarting, Elasticsearch replays any unflushed operations from the transaction log into the Lucene index to bring it back into the state that it was in before the restart. Elasticsearch automatically triggers flushes as needed, using heuristics that trade off the size of the unflushed transaction log against the cost of performing each flush. 
After each operation has been flushed it is permanently stored in the Lucene index. This may mean that there is no need to maintain an additional copy of it in the transaction log. The transaction log is made up of multiple files, called generations, and Elasticsearch will delete any generation files when they are no longer needed, freeing up disk space. It is also possible to trigger a flush on one or more indices using the flush API, although it is rare for users to need to call this API directly. If you call the flush API after indexing some documents then a successful response indicates that Elasticsearch has flushed all the documents that were indexed before the flush API was called. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-flush.html | Elasticsearch API documentation} */ async flush (this: That, params?: T.IndicesFlushRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -847,7 +847,7 @@ export default class Indices { } /** - * Performs the force merge operation on one or more indices. + * Force a merge. Perform the force merge operation on the shards of one or more indices. For data streams, the API forces a merge on the shards of the stream's backing indices. Merging reduces the number of segments in each shard by merging some of them together and also frees up the space used by deleted documents. Merging normally happens automatically, but sometimes it is useful to trigger a merge manually. WARNING: We recommend force merging only a read-only index (meaning the index is no longer receiving writes). When documents are updated or deleted, the old version is not immediately removed but instead soft-deleted and marked with a "tombstone". These soft-deleted documents are automatically cleaned up during regular segment merges. But force merge can cause very large (greater than 5 GB) segments to be produced, which are not eligible for regular merges. 
So the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance. If you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can't be backed up incrementally. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-forcemerge.html | Elasticsearch API documentation} */ async forcemerge (this: That, params?: T.IndicesForcemergeRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1342,7 +1342,7 @@ export default class Indices { } /** - * Promotes a data stream from a replicated data stream managed by CCR to a regular data stream + * Promote a data stream. Promote a data stream from a replicated data stream managed by cross-cluster replication (CCR) to a regular data stream. With CCR auto following, a data stream from a remote cluster can be replicated to the local cluster. These data streams can't be rolled over in the local cluster. These replicated data streams roll over only if the upstream data stream rolls over. In the event that the remote cluster is no longer available, the data stream in the local cluster can be promoted to a regular data stream, which allows these data streams to be rolled over in the local cluster. NOTE: When promoting a data stream, ensure the local cluster has a data stream enabled index template that matches the data stream. If this is missing, the data stream will not be able to roll over until a matching index template is created. This will affect the lifecycle management of the data stream and interfere with the data stream size and retention. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html | Elasticsearch API documentation} */ async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1569,7 +1569,7 @@ export default class Indices { } /** - * Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. + * Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. Elasticsearch applies templates to new indices based on an index pattern that matches the index name. IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. Composable templates always take precedence over legacy templates. If no composable template matches a new index, matching legacy templates are applied according to their order. Index templates are only applied during index creation. Changes to index templates do not affect existing indices. Settings and mappings specified in create index API requests override any settings or mappings specified in an index template. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates-v1.html | Elasticsearch API documentation} */ async putTemplate (this: That, params: T.IndicesPutTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1605,7 +1605,7 @@ export default class Indices { } /** - * Returns information about ongoing and completed shard recoveries for one or more indices. For data streams, the API returns information for the stream’s backing indices. + * Get index recovery information. Get information about ongoing and completed shard recoveries for one or more indices. 
For data streams, the API returns information for the stream's backing indices. Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing. Recovery automatically occurs during the following processes: * When creating an index for the first time. * When a node rejoins the cluster and starts up any missing primary shard copies using the data that it holds in its data path. * Creation of new replica shard copies from the primary. * Relocation of a shard copy to a different node in the same cluster. * A snapshot restore operation. * A clone, shrink, or split operation. You can determine the cause of a shard recovery using the recovery or cat recovery APIs. The index recovery API reports information about completed recoveries only for shard copies that currently exist in the cluster. It only reports the last recovery for each shard copy and does not report historical information about earlier recoveries, nor does it report information about the recoveries of shard copies that no longer exist. This means that if a shard copy completes a recovery and then Elasticsearch relocates it onto a different node then the information about the original recovery will not be shown in the recovery API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-recovery.html | Elasticsearch API documentation} */ async recovery (this: That, params?: T.IndicesRecoveryRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1685,7 +1685,7 @@ export default class Indices { } /** - * Reloads an index's search analyzers and their resources. + * Reload search analyzers. Reload an index's search analyzers and their resources. For data streams, the API reloads search analyzers and resources for the stream's backing indices. 
IMPORTANT: After reloading the search analyzers you should clear the request cache to make sure it doesn't contain responses derived from the previous versions of the analyzer. You can use the reload search analyzers API to pick up changes to synonym files used in the `synonym_graph` or `synonym` token filter of a search analyzer. To be eligible, the token filter must have an `updateable` flag of `true` and only be used in search analyzers. NOTE: This API does not perform a reload for each shard of an index. Instead, it performs a reload for each node containing index shards. As a result, the total shard count returned by the API can differ from the number of index shards. Because reloading affects every node with an index shard, it is important to update the synonym file on every data node in the cluster--including nodes that don't contain a shard replica--before using this API. This ensures the synonym file is updated everywhere in the cluster in case shards are relocated in the future. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-reload-analyzers.html | Elasticsearch API documentation} */ async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1717,7 +1717,7 @@ export default class Indices { } /** - * Resolves the specified index expressions to return information about each cluster, including the local cluster, if included. Multiple patterns and remote clusters are supported. + * Resolve the cluster. Resolve the specified index expressions to return information about each cluster, including the local cluster, if included. Multiple patterns and remote clusters are supported. This endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search. You use the same index expression with this endpoint as you would for cross-cluster search. 
Index and cluster exclusions are also supported with this endpoint. For each cluster in the index expression, information is returned about: * Whether the querying ("local") cluster is currently connected to each remote cluster in the index expression scope. * Whether each remote cluster is configured with `skip_unavailable` as `true` or `false`. * Whether there are any indices, aliases, or data streams on that cluster that match the index expression. * Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index). * Cluster version information, including the Elasticsearch server version. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-resolve-cluster-api.html | Elasticsearch API documentation} */ async resolveCluster (this: That, params: T.IndicesResolveClusterRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1825,7 +1825,7 @@ export default class Indices { } /** - * Returns low-level information about the Lucene segments in index shards. For data streams, the API returns information about the stream’s backing indices. + * Get index segments. Get low-level information about the Lucene segments in index shards. For data streams, the API returns information about the stream's backing indices. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-segments.html | Elasticsearch API documentation} */ async segments (this: That, params?: T.IndicesSegmentsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1865,7 +1865,7 @@ export default class Indices { } /** - * Retrieves store information about replica shards in one or more indices. For data streams, the API retrieves store information for the stream’s backing indices. + * Get index shard stores. Get store information about replica shards in one or more indices. 
For data streams, the API retrieves store information for the stream's backing indices. The index shard stores API returns the following information: * The node on which each replica shard exists. * The allocation ID for each replica shard. * A unique ID for each replica shard. * Any errors encountered while opening the shard index or from an earlier failure. By default, the API returns store information only for primary shards that are unassigned or have one or more unassigned replica shards. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-shards-stores.html | Elasticsearch API documentation} */ async shardStores (this: That, params?: T.IndicesShardStoresRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1905,7 +1905,7 @@ export default class Indices { } /** - * Shrinks an existing index into a new index with fewer primary shards. + * Shrink an index. Shrink an index into a new index with fewer primary shards. Before you can shrink an index: * The index must be read-only. * A copy of every shard in the index must reside on the same node. * The index must have a green health status. To make shard allocation easier, we recommend you also remove the index's replica shards. You can later re-add replica shards as part of the shrink operation. The requested number of primary shards in the target index must be a factor of the number of shards in the source index. For example an index with 8 primary shards can be shrunk into 4, 2 or 1 primary shards or an index with 15 primary shards can be shrunk into 5, 3 or 1. If the number of shards in the index is a prime number it can only be shrunk into a single primary shard. Before shrinking, a (primary or replica) copy of every shard in the index must be present on the same node. The current write index on a data stream cannot be shrunk. 
In order to shrink the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be shrunk. A shrink operation: * Creates a new target index with the same definition as the source index, but with a smaller number of primary shards. * Hard-links segments from the source index into the target index. If the file system does not support hard-linking, then all segments are copied into the new index, which is a much more time consuming process. Also if using multiple data paths, shards on different data paths require a full copy of segment files if they are not on the same disk since hardlinks do not work across disks. * Recovers the target index as though it were a closed index which had just been re-opened. Recovers shards to the `.routing.allocation.initial_recovery._id` index setting. IMPORTANT: Indices can only be shrunk if they satisfy the following requirements: * The target index must not exist. * The source index must have more primary shards than the target index. * The number of primary shards in the target index must be a factor of the number of primary shards in the source index. The source index must have more primary shards than the target index. * The index must not contain more than 2,147,483,519 documents in total across all shards that will be shrunk into a single shard on the target index as this is the maximum number of docs that can fit into a single shard. * The node handling the shrink process must have sufficient free disk space to accommodate a second copy of the existing index. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-shrink-index.html | Elasticsearch API documentation} */ async shrink (this: That, params: T.IndicesShrinkRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2018,7 +2018,7 @@ export default class Indices { } /** - * Splits an existing index into a new index with more primary shards. 
+ * Split an index. Split an index into a new index with more primary shards. * Before you can split an index: * The index must be read-only. * The cluster health status must be green. The number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` setting. The number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing. For instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3. A split operation: * Creates a new target index with the same definition as the source index, but with a larger number of primary shards. * Hard-links segments from the source index into the target index. If the file system doesn't support hard-linking, all segments are copied into the new index, which is a much more time consuming process. * Hashes all documents again, after low level files are created, to delete documents that belong to a different shard. * Recovers the target index as though it were a closed index which had just been re-opened. IMPORTANT: Indices can only be split if they satisfy the following requirements: * The target index must not exist. * The source index must have fewer primary shards than the target index. * The number of primary shards in the target index must be a multiple of the number of primary shards in the source index. * The node handling the split process must have sufficient free disk space to accommodate a second copy of the existing index. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-split-index.html | Elasticsearch API documentation} */ async split (this: That, params: T.IndicesSplitRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2055,7 +2055,7 @@ export default class Indices { } /** - * Returns statistics for one or more indices. 
For data streams, the API retrieves statistics for the stream’s backing indices. + * Get index statistics. For data streams, the API retrieves statistics for the stream's backing indices. By default, the returned statistics are index-level with `primaries` and `total` aggregations. `primaries` are the values for only the primary shards. `total` are the accumulated values for both primary and replica shards. To get shard-level statistics, set the `level` parameter to `shards`. NOTE: When moving to another node, the shard-level statistics for a shard are cleared. Although the shard is no longer part of the node, that node retains any node-level statistics to which the shard contributed. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-stats.html | Elasticsearch API documentation} */ async stats (this: That, params?: T.IndicesStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2102,7 +2102,7 @@ export default class Indices { } /** - * Unfreezes an index. + * Unfreeze an index. When a frozen index is unfrozen, the index goes through the normal recovery process and becomes writeable again. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/unfreeze-index-api.html | Elasticsearch API documentation} */ async unfreeze (this: That, params: T.IndicesUnfreezeRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/license.ts b/src/api/api/license.ts index cd7c5a4a9..9f2e8c627 100644 --- a/src/api/api/license.ts +++ b/src/api/api/license.ts @@ -44,7 +44,7 @@ export default class License { } /** - * Deletes licensing information for the cluster + * Delete the license. When the license expires, your subscription level reverts to Basic. If the operator privileges feature is enabled, only operator users can use this API. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-license.html | Elasticsearch API documentation} */ async delete (this: That, params?: T.LicenseDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -74,7 +74,7 @@ export default class License { } /** - * Get license information. Returns information about your Elastic license, including its type, its status, when it was issued, and when it expires. For more information about the different types of licenses, refer to [Elastic Stack subscriptions](https://www.elastic.co/subscriptions). + * Get license information. Get information about your Elastic license including its type, its status, when it was issued, and when it expires. NOTE: If the master node is generating a new cluster state, the get license API may return a `404 Not Found` response. If you receive an unexpected 404 response after cluster startup, wait a short period and retry the request. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-license.html | Elasticsearch API documentation} */ async get (this: That, params?: T.LicenseGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -104,7 +104,7 @@ export default class License { } /** - * Retrieves information about the status of the basic license. + * Get the basic license status. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-basic-status.html | Elasticsearch API documentation} */ async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -134,7 +134,7 @@ export default class License { } /** - * Retrieves information about the status of the trial license. + * Get the trial status. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-trial-status.html | Elasticsearch API documentation} */ async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -164,7 +164,7 @@ export default class License { } /** - * Updates the license for the cluster. + * Update the license. You can update your license at runtime without shutting down your nodes. License updates take effect immediately. If the license you are installing does not support all of the features that were available with your previous license, however, you are notified in the response. You must then re-submit the API request with the acknowledge parameter set to true. NOTE: If Elasticsearch security features are enabled and you are installing a gold or higher license, you must enable TLS on the transport networking layer before you install the license. If the operator privileges feature is enabled, only operator users can use this API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-license.html | Elasticsearch API documentation} */ async post (this: That, params?: T.LicensePostRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -198,7 +198,7 @@ export default class License { } /** - * The start basic API enables you to initiate an indefinite basic license, which gives access to all the basic features. If the basic license does not support all of the features that are available with your current license, however, you are notified in the response. You must then re-submit the API request with the acknowledge parameter set to true. To check the status of your basic license, use the following API: [Get basic status](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-basic-status.html). + * Start a basic license. Start an indefinite basic license, which gives access to all the basic features. 
NOTE: In order to start a basic license, you must not currently have a basic license. If the basic license does not support all of the features that are available with your current license, however, you are notified in the response. You must then re-submit the API request with the `acknowledge` parameter set to `true`. To check the status of your basic license, use the get basic license API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/start-basic.html | Elasticsearch API documentation} */ async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -228,7 +228,7 @@ export default class License { } /** - * The start trial API enables you to start a 30-day trial, which gives access to all subscription features. + * Start a trial. Start a 30-day trial, which gives access to all subscription features. NOTE: You are allowed to start a trial only if your cluster has not already activated a trial for the current major product version. For example, if you have already activated a trial for v8.0, you cannot start a new trial until v9.0. You can, however, request an extended trial at https://www.elastic.co/trialextension. To check the status of your trial, use the get trial status API. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/start-trial.html | Elasticsearch API documentation} */ async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/ml.ts b/src/api/api/ml.ts index 82419d6e1..29d5b2202 100644 --- a/src/api/api/ml.ts +++ b/src/api/api/ml.ts @@ -1791,7 +1791,7 @@ export default class Ml { async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['allow_lazy_start', 'analysis', 'analyzed_fields', 'description', 'dest', 'max_num_threads', 'model_memory_limit', 'source', 'headers', 'version'] + const acceptedBody: string[] = ['allow_lazy_start', 'analysis', 'analyzed_fields', 'description', 'dest', 'max_num_threads', '_meta', 'model_memory_limit', 'source', 'headers', 'version'] const querystring: Record = {} const body: Record = {} @@ -1827,7 +1827,7 @@ export default class Ml { async putDatafeed (this: That, params: T.MlPutDatafeedRequest, options?: TransportRequestOptions): Promise async putDatafeed (this: That, params: T.MlPutDatafeedRequest, options?: TransportRequestOptions): Promise { const acceptedPath: string[] = ['datafeed_id'] - const acceptedBody: string[] = ['aggregations', 'chunking_config', 'delayed_data_check_config', 'frequency', 'indices', 'indexes', 'indices_options', 'job_id', 'max_empty_searches', 'query', 'query_delay', 'runtime_mappings', 'script_fields', 'scroll_size', 'headers'] + const acceptedBody: string[] = ['aggregations', 'aggs', 'chunking_config', 'delayed_data_check_config', 'frequency', 'indices', 'indexes', 'indices_options', 'job_id', 'max_empty_searches', 'query', 'query_delay', 'runtime_mappings', 'script_fields', 
'scroll_size', 'headers'] const querystring: Record = {} const body: Record = {} @@ -1898,8 +1898,8 @@ export default class Ml { async putJob (this: That, params: T.MlPutJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async putJob (this: That, params: T.MlPutJobRequest, options?: TransportRequestOptions): Promise async putJob (this: That, params: T.MlPutJobRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] - const acceptedBody: string[] = ['allow_lazy_open', 'analysis_config', 'analysis_limits', 'background_persist_interval', 'custom_settings', 'daily_model_snapshot_retention_after_days', 'data_description', 'datafeed_config', 'description', 'groups', 'model_plot_config', 'model_snapshot_retention_days', 'renormalization_window_days', 'results_index_name', 'results_retention_days'] + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['allow_lazy_open', 'analysis_config', 'analysis_limits', 'background_persist_interval', 'custom_settings', 'daily_model_snapshot_retention_after_days', 'data_description', 'datafeed_config', 'description', 'job_id', 'groups', 'model_plot_config', 'model_snapshot_retention_days', 'renormalization_window_days', 'results_index_name', 'results_retention_days'] const querystring: Record = {} const body: Record = {} diff --git a/src/api/types.ts b/src/api/types.ts index fbe66a62f..db98685f1 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -58,6 +58,7 @@ export type BulkOperationType = 'index' | 'create' | 'update' | 'delete' export interface BulkRequest extends RequestBase { index?: IndexName + list_executed_pipelines?: boolean pipeline?: string refresh?: Refresh routing?: Routing @@ -67,6 +68,7 @@ export interface BulkRequest ex timeout?: Duration wait_for_active_shards?: WaitForActiveShards require_alias?: boolean + require_data_stream?: boolean operations?: (BulkOperationContainer | BulkUpdateAction | TDocument)[] } @@ -13419,6 +13421,12 @@ export 
interface MigrationPostFeatureUpgradeResponse { features: MigrationPostFeatureUpgradeMigrationFeature[] } +export interface MlAdaptiveAllocationsSettings { + enabled: boolean + min_number_of_allocations?: integer + max_number_of_allocations?: integer +} + export interface MlAnalysisConfig { bucket_span?: Duration categorization_analyzer?: MlCategorizationAnalyzer @@ -13449,7 +13457,7 @@ export interface MlAnalysisConfigRead { export interface MlAnalysisLimits { categorization_examples_limit?: long - model_memory_limit?: string + model_memory_limit?: ByteSize } export interface MlAnalysisMemoryLimit { @@ -13601,6 +13609,14 @@ export interface MlClassificationInferenceOptions { top_classes_results_field?: string } +export interface MlCommonTokenizationConfig { + do_lower_case?: boolean + max_sequence_length?: integer + span?: integer + truncate?: MlTokenizationTruncate + with_special_tokens?: boolean +} + export type MlConditionOperator = 'gt' | 'gte' | 'lt' | 'lte' export type MlCustomSettings = any @@ -13690,15 +13706,16 @@ export type MlDatafeedState = 'started' | 'stopped' | 'starting' | 'stopping' export interface MlDatafeedStats { assignment_explanation?: string datafeed_id: Id - node?: MlDiscoveryNode + node?: MlDiscoveryNodeCompact state: MlDatafeedState - timing_stats: MlDatafeedTimingStats + timing_stats?: MlDatafeedTimingStats running_state?: MlDatafeedRunningState } export interface MlDatafeedTimingStats { bucket_count: long exponential_average_search_time_per_hour_ms: DurationValue + exponential_average_calculation_context?: MlExponentialAverageCalculationContext job_id: Id search_count: long total_search_time_ms: DurationValue @@ -13890,6 +13907,7 @@ export interface MlDataframeAnalyticsSummary { model_memory_limit?: string source: MlDataframeAnalyticsSource version?: VersionString + _meta?: Metadata } export interface MlDataframeEvaluationClassification { @@ -13995,21 +14013,48 @@ export interface MlDetectorRead { use_null?: boolean } -export interface 
MlDiscoveryNode { - attributes: Record +export interface MlDetectorUpdate { + detector_index: integer + description?: string + custom_rules?: MlDetectionRule[] +} + +export type MlDiscoveryNode = Partial> + +export interface MlDiscoveryNodeCompact { + name: Name ephemeral_id: Id id: Id - name: Name transport_address: TransportAddress + attributes: Record +} + +export interface MlDiscoveryNodeContent { + name?: Name + ephemeral_id: Id + transport_address: TransportAddress + external_id: string + attributes: Record + roles: string[] + version: VersionString + min_index_version: integer + max_index_version: integer } export type MlExcludeFrequent = 'all' | 'none' | 'by' | 'over' +export interface MlExponentialAverageCalculationContext { + incremental_metric_value_ms: DurationValue + latest_timestamp?: EpochTime + previous_exponential_average_ms?: DurationValue +} + export interface MlFillMaskInferenceOptions { mask_token?: string num_top_classes?: integer tokenization?: MlTokenizationConfigContainer results_field?: string + vocabulary: MlVocabulary } export interface MlFillMaskInferenceUpdateOptions { @@ -14197,7 +14242,7 @@ export interface MlJobStats { forecasts_stats: MlJobForecastStatistics job_id: string model_size_stats: MlModelSizeStats - node?: MlDiscoveryNode + node?: MlDiscoveryNodeCompact open_time?: DateTime state: MlJobState timing_stats: MlJobTimingStats @@ -14217,6 +14262,23 @@ export interface MlJobTimingStats { export type MlMemoryStatus = 'ok' | 'soft_limit' | 'hard_limit' +export interface MlModelPackageConfig { + create_time?: EpochTime + description?: string + inference_config?: Record + metadata?: Metadata + minimum_version?: string + model_repository?: string + model_type?: string + packaged_model_id: Id + platform_architecture?: string + prefix_strings?: MlTrainedModelPrefixStrings + size?: ByteSize + sha256?: string + tags?: string[] + vocabulary_file?: string +} + export interface MlModelPlotConfig { annotations_enabled?: boolean enabled?: 
boolean @@ -14231,6 +14293,7 @@ export interface MlModelSizeStats { model_bytes: ByteSize model_bytes_exceeded?: ByteSize model_bytes_memory_limit?: ByteSize + output_memory_allocator_bytes?: ByteSize peak_model_bytes?: ByteSize assignment_memory_basis?: string result_type: string @@ -14280,20 +14343,11 @@ export interface MlNerInferenceUpdateOptions { results_field?: string } -export interface MlNlpBertTokenizationConfig { - do_lower_case?: boolean - with_special_tokens?: boolean - max_sequence_length?: integer - truncate?: MlTokenizationTruncate - span?: integer +export interface MlNlpBertTokenizationConfig extends MlCommonTokenizationConfig { } -export interface MlNlpRobertaTokenizationConfig { +export interface MlNlpRobertaTokenizationConfig extends MlCommonTokenizationConfig { add_prefix_space?: boolean - with_special_tokens?: boolean - max_sequence_length?: integer - truncate?: MlTokenizationTruncate - span?: integer } export interface MlNlpTokenizationUpdateOptions { @@ -14317,7 +14371,7 @@ export interface MlOverallBucket { overall_score: double result_type: string timestamp: EpochTime - timestamp_string: DateTime + timestamp_string?: DateTime } export interface MlOverallBucketJob { @@ -14405,6 +14459,7 @@ export interface MlTextEmbeddingInferenceOptions { embedding_size?: integer tokenization?: MlTokenizationConfigContainer results_field?: string + vocabulary: MlVocabulary } export interface MlTextEmbeddingInferenceUpdateOptions { @@ -14415,6 +14470,7 @@ export interface MlTextEmbeddingInferenceUpdateOptions { export interface MlTextExpansionInferenceOptions { tokenization?: MlTokenizationConfigContainer results_field?: string + vocabulary: MlVocabulary } export interface MlTextExpansionInferenceUpdateOptions { @@ -14429,6 +14485,7 @@ export interface MlTimingStats { export interface MlTokenizationConfigContainer { bert?: MlNlpBertTokenizationConfig + bert_ja?: MlNlpBertTokenizationConfig mpnet?: MlNlpBertTokenizationConfig roberta?: 
MlNlpRobertaTokenizationConfig } @@ -14459,27 +14516,31 @@ export interface MlTotalFeatureImportanceStatistics { } export interface MlTrainedModelAssignment { + adaptive_allocations?: MlAdaptiveAllocationsSettings | null assignment_state: MlDeploymentAssignmentState max_assigned_allocations?: integer + reason?: string routing_table: Record start_time: DateTime task_parameters: MlTrainedModelAssignmentTaskParameters } export interface MlTrainedModelAssignmentRoutingTable { - reason: string + reason?: string routing_state: MlRoutingState current_allocations: integer target_allocations: integer } export interface MlTrainedModelAssignmentTaskParameters { - model_bytes: integer + model_bytes: ByteSize model_id: Id deployment_id: Id - cache_size: ByteSize + cache_size?: ByteSize number_of_allocations: integer priority: MlTrainingPriority + per_deployment_memory_bytes: ByteSize + per_allocation_memory_bytes: ByteSize queue_capacity: integer threads_per_allocation: integer } @@ -14502,6 +14563,7 @@ export interface MlTrainedModelConfig { license_level?: string metadata?: MlTrainedModelConfigMetadata model_size_bytes?: ByteSize + model_package?: MlModelPackageConfig location?: MlTrainedModelLocation prefix_strings?: MlTrainedModelPrefixStrings } @@ -14524,36 +14586,45 @@ export interface MlTrainedModelDeploymentAllocationStatus { } export interface MlTrainedModelDeploymentNodesStats { - average_inference_time_ms: DurationValue - error_count: integer - inference_count: integer - last_access: long - node: MlDiscoveryNode - number_of_allocations: integer - number_of_pending_requests: integer - rejection_execution_count: integer + average_inference_time_ms?: DurationValue + average_inference_time_ms_last_minute?: DurationValue + average_inference_time_ms_excluding_cache_hits?: DurationValue + error_count?: integer + inference_count?: long + inference_cache_hit_count?: long + inference_cache_hit_count_last_minute?: long + last_access?: EpochTime + node?: MlDiscoveryNode + 
number_of_allocations?: integer + number_of_pending_requests?: integer + peak_throughput_per_minute: long + rejection_execution_count?: integer routing_state: MlTrainedModelAssignmentRoutingTable - start_time: EpochTime - threads_per_allocation: integer - timeout_count: integer + start_time?: EpochTime + threads_per_allocation?: integer + throughput_last_minute: integer + timeout_count?: integer } export interface MlTrainedModelDeploymentStats { - allocation_status: MlTrainedModelDeploymentAllocationStatus + adaptive_allocations?: MlAdaptiveAllocationsSettings + allocation_status?: MlTrainedModelDeploymentAllocationStatus cache_size?: ByteSize deployment_id: Id - error_count: integer - inference_count: integer + error_count?: integer + inference_count?: integer model_id: Id nodes: MlTrainedModelDeploymentNodesStats[] - number_of_allocations: integer - queue_capacity: integer - rejected_execution_count: integer - reason: string + number_of_allocations?: integer + peak_throughput_per_minute: long + priority: MlTrainingPriority + queue_capacity?: integer + rejected_execution_count?: integer + reason?: string start_time: EpochTime - state: MlDeploymentAssignmentState - threads_per_allocation: integer - timeout_count: integer + state?: MlDeploymentAssignmentState + threads_per_allocation?: integer + timeout_count?: integer } export interface MlTrainedModelEntities { @@ -15187,6 +15258,7 @@ export interface MlGetTrainedModelsRequest extends RequestBase { exclude_generated?: boolean from?: integer include?: MlInclude + include_model_definition?: boolean size?: integer tags?: string | string[] } @@ -15237,9 +15309,11 @@ export interface MlInfoDefaults { } export interface MlInfoLimits { - max_model_memory_limit?: string - effective_max_model_memory_limit: string - total_ml_memory: string + max_single_ml_node_processors?: integer + total_ml_processors?: integer + max_model_memory_limit?: ByteSize + effective_max_model_memory_limit?: ByteSize + total_ml_memory: ByteSize } 
export interface MlInfoNativeCode { @@ -15284,21 +15358,24 @@ export interface MlPostDataRequest extends RequestBase { } export interface MlPostDataResponse { - bucket_count: long - earliest_record_timestamp: long - empty_bucket_count: long + job_id: Id + processed_record_count: long + processed_field_count: long input_bytes: long input_field_count: long - input_record_count: long invalid_date_count: long - job_id: Id - last_data_time: integer - latest_record_timestamp: long missing_field_count: long out_of_order_timestamp_count: long - processed_field_count: long - processed_record_count: long + empty_bucket_count: long sparse_bucket_count: long + bucket_count: long + earliest_record_timestamp?: EpochTime + latest_record_timestamp?: EpochTime + last_data_time?: EpochTime + latest_empty_bucket_timestamp?: EpochTime + latest_sparse_bucket_timestamp?: EpochTime + input_record_count: long + log_time?: EpochTime } export interface MlPreviewDataFrameAnalyticsDataframePreviewConfig { @@ -15359,6 +15436,7 @@ export interface MlPutDataFrameAnalyticsRequest extends RequestBase { description?: string dest: MlDataframeAnalyticsDestination max_num_threads?: integer + _meta?: Metadata model_memory_limit?: string source: MlDataframeAnalyticsSource headers?: HttpHeaders @@ -15375,6 +15453,7 @@ export interface MlPutDataFrameAnalyticsResponse { dest: MlDataframeAnalyticsDestination id: Id max_num_threads: integer + _meta?: Metadata model_memory_limit: string source: MlDataframeAnalyticsSource version: VersionString @@ -15387,6 +15466,8 @@ export interface MlPutDatafeedRequest extends RequestBase { ignore_throttled?: boolean ignore_unavailable?: boolean aggregations?: Record + /** @alias aggregations */ + aggs?: Record chunking_config?: MlChunkingConfig delayed_data_check_config?: MlDelayedDataCheckConfig frequency?: Duration @@ -15436,6 +15517,10 @@ export interface MlPutFilterResponse { export interface MlPutJobRequest extends RequestBase { job_id: Id + allow_no_indices?: boolean 
+ expand_wildcards?: ExpandWildcards + ignore_throttled?: boolean + ignore_unavailable?: boolean allow_lazy_open?: boolean analysis_config: MlAnalysisConfig analysis_limits?: MlAnalysisLimits @@ -15786,7 +15871,7 @@ export interface MlUpdateJobRequest extends RequestBase { renormalization_window_days?: long results_retention_days?: long groups?: string[] - detectors?: MlDetector[] + detectors?: MlDetectorUpdate[] per_partition_categorization?: MlPerPartitionCategorization }