From 7d436a0c6cf664d154e011f7457d76d617a448a9 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Wed, 11 Dec 2024 10:09:00 -0800 Subject: [PATCH] [DOCS] Edit index operation summaries (#3268) --- output/openapi/elasticsearch-openapi.json | 134 ++++++++++------ output/schema/schema.json | 147 +++++++++++------- specification/_doc_ids/table.csv | 3 + .../IndicesIndicesClearCacheRequest.ts | 5 +- .../indices/clone/IndicesCloneRequest.ts | 25 ++- .../indices/close/CloseIndexRequest.ts | 19 ++- .../disk_usage/IndicesDiskUsageRequest.ts | 5 +- specification/indices/downsample/Request.ts | 9 +- .../IndicesFieldUsageStatsRequest.ts | 5 +- .../indices/flush/IndicesFlushRequest.ts | 12 +- .../forcemerge/IndicesForceMergeRequest.ts | 14 ++ .../IndicesPromoteDataStreamRequest.ts | 11 ++ .../put_template/IndicesPutTemplateRequest.ts | 12 ++ .../recovery/IndicesRecoveryRequest.ts | 23 ++- .../ReloadSearchAnalyzersRequest.ts | 15 ++ .../resolve_cluster/ResolveClusterRequest.ts | 18 ++- .../segments/IndicesSegmentsRequest.ts | 6 +- .../shard_stores/IndicesShardStoresRequest.ts | 15 +- .../indices/shrink/IndicesShrinkRequest.ts | 34 +++- .../indices/split/IndicesSplitRequest.ts | 26 +++- .../indices/stats/IndicesStatsRequest.ts | 13 +- .../unfreeze/IndicesUnfreezeRequest.ts | 4 +- 22 files changed, 429 insertions(+), 126 deletions(-) diff --git a/output/openapi/elasticsearch-openapi.json b/output/openapi/elasticsearch-openapi.json index bbb0f4c919..db2de5aad0 100644 --- a/output/openapi/elasticsearch-openapi.json +++ b/output/openapi/elasticsearch-openapi.json @@ -10301,8 +10301,8 @@ "tags": [ "indices" ], - "summary": "Clears the caches of one or more indices", - "description": "For data streams, the API clears the caches of the stream’s backing indices.", + "summary": "Clear the cache", + "description": "Clear the cache of one or more indices.\nFor data streams, the API clears the caches of the stream's backing indices.", "operationId": "indices-clear-cache", "parameters": [ { @@ -10339,8 +10339,8 @@ "tags": [ "indices" ], - "summary": "Clears the caches of one or more indices", - "description": "For data streams, the API clears the caches of the stream’s backing indices.", + "summary": "Clear the cache", + "description": "Clear the cache of one or more indices.\nFor data streams, the API clears the caches of the stream's backing indices.", "operationId": "indices-clear-cache-1", "parameters": [ { @@ -10380,7 +10380,8 @@ "tags": [ "indices" ], - "summary": "Clones an existing index", + "summary": "Clone an index", + "description": "Clone an existing index into a new index.\nEach original primary shard is cloned into a new primary shard in the new index.\n\nIMPORTANT: Elasticsearch does not apply index templates to the resulting index.\nThe API also does not copy index metadata from the original index.\nIndex metadata includes aliases, index lifecycle management phase definitions, and cross-cluster replication (CCR) follower information.\nFor example, if you clone a CCR follower index, the resulting clone will not be a follower index.\n\nThe clone API copies most index settings from the source index to the resulting index, with the exception of `index.number_of_replicas` and `index.auto_expand_replicas`.\nTo set the number of replicas in the resulting index, configure these settings in the clone request.\n\nCloning works as follows:\n\n* First, it creates a new target index with the same definition as the source index.\n* Then it hard-links segments from the source index into the target index. If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time-consuming process.\n* Finally, it recovers the target index as though it were a closed index which had just been re-opened.\n\nIMPORTANT: Indices can only be cloned if they meet the following requirements:\n\n* The target index must not exist.\n* The source index must have the same number of primary shards as the target index.\n* The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index.", "operationId": "indices-clone", "parameters": [ {
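For reference, a minimal sketch of driving the clone API that this hunk documents, using the @elastic/elasticsearch TypeScript client; the client calls are real, but the index names and settings are illustrative assumptions, not part of this patch:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Make the source index read-only, then clone it. Hypothetical index names;
// the target must not exist and keeps the same number of primary shards.
await client.indices.addBlock({ index: 'my-source-index', block: 'write' })
await client.indices.clone({
  index: 'my-source-index',
  target: 'my-target-index',
  settings: { 'index.number_of_replicas': 0 },
})
```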
@@ -10413,7 +10414,8 @@ "tags": [ "indices" ], - "summary": "Clones an existing index", + "summary": "Clone an index", + "description": "Clone an existing index into a new index.\nEach original primary shard is cloned into a new primary shard in the new index.\n\nIMPORTANT: Elasticsearch does not apply index templates to the resulting index.\nThe API also does not copy index metadata from the original index.\nIndex metadata includes aliases, index lifecycle management phase definitions, and cross-cluster replication (CCR) follower information.\nFor example, if you clone a CCR follower index, the resulting clone will not be a follower index.\n\nThe clone API copies most index settings from the source index to the resulting index, with the exception of `index.number_of_replicas` and `index.auto_expand_replicas`.\nTo set the number of replicas in the resulting index, configure these settings in the clone request.\n\nCloning works as follows:\n\n* First, it creates a new target index with the same definition as the source index.\n* Then it hard-links segments from the source index into the target index. If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time-consuming process.\n* Finally, it recovers the target index as though it were a closed index which had just been re-opened.\n\nIMPORTANT: Indices can only be cloned if they meet the following requirements:\n\n* The target index must not exist.\n* The source index must have the same number of primary shards as the target index.\n* The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index.", "operationId": "indices-clone-1", "parameters": [ { @@ -10448,7 +10450,8 @@ "tags": [ "indices" ], - "summary": "Closes an index", + "summary": "Close an index", + "description": "A closed index is blocked for read or write operations and does not allow all operations that opened indices allow.\nIt is not possible to index documents or to search for documents in a closed index.\nClosed indices do not have to maintain internal data structures for indexing or searching documents, which results in a smaller overhead on the cluster.\n\nWhen opening or closing an index, the master node is responsible for restarting the index shards to reflect the new state of the index.\nThe shards will then go through the normal recovery process.\nThe data of opened and closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times.\n\nYou can open and close multiple indices.\nAn error is thrown if the request explicitly refers to a missing index.\nThis behaviour can be turned off using the `ignore_unavailable=true` parameter.\n\nBy default, you must explicitly name the indices you are opening or closing.\nTo open or close indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. This setting can also be changed with the cluster update settings API.\n\nClosed indices consume a significant amount of disk space, which can cause problems in managed environments.\nClosing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`.", "operationId": "indices-close", "parameters": [ {
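A companion sketch for the close API hunk above, again with the TypeScript client and placeholder index names (assumed setup, not from this patch):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Close a single, explicitly named index; closing wildcard expressions such
// as `_all` additionally requires `action.destructive_requires_name: false`.
await client.indices.close({ index: 'my-index' })

// Reopening later sends the shards through the normal recovery process.
await client.indices.open({ index: 'my-index' })
```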
@@ -11815,7 +11818,10 @@ "indices" ], "summary": "Create or update an index template", - "description": "Index templates define settings, mappings, and aliases that can be applied automatically to new indices.", + "description": "Index templates define settings, mappings, and aliases that can be applied automatically to new indices.\nElasticsearch applies templates to new indices based on an index pattern that matches the index name.\n\nIMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.\n\nComposable templates always take precedence over legacy templates.\nIf no composable template matches a new index, matching legacy templates are applied according to their order.\n\nIndex templates are only applied during index creation.\nChanges to index templates do not affect existing indices.\nSettings and mappings specified in create index API requests override any settings or mappings specified in an index template.", + "externalDocs": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/index-templates.html" + }, "operationId": "indices-put-template", "parameters": [ { @@ -11848,7 +11854,10 @@ "indices" ], "summary": "Create or update an index template", - "description": "Index templates define settings, mappings, and aliases that can be applied automatically to new indices.", + "description": "Index templates define settings, mappings, and aliases that can be applied automatically to new indices.\nElasticsearch applies templates to new indices based on an index pattern that matches the index name.\n\nIMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.\n\nComposable templates always take precedence over legacy templates.\nIf no composable template matches a new index, matching legacy templates are applied according to their order.\n\nIndex templates are only applied during index creation.\nChanges to index templates do not affect existing indices.\nSettings and mappings specified in create index API requests override any settings or mappings specified in an index template.", + "externalDocs": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/index-templates.html" + }, "operationId": "indices-put-template-1", "parameters": [ { @@ -11993,7 +12002,8 @@ "tags": [ "indices" ], - "summary": "Analyzes the disk usage of each field of an index or data stream", + "summary": "Analyze the index disk usage", + "description": "Analyze the disk usage of each field of an index or data stream.\nThis API might not support indices created in previous Elasticsearch versions.\nThe result of a small index can be inaccurate as some parts of an index might not be analyzed by the API.", "operationId": "indices-disk-usage", "parameters": [ { @@ -12078,7 +12088,8 @@ "tags": [ "indices" ], - "summary": "Aggregates a time series (TSDS) index and stores pre-computed statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped by a configured time interval", + "summary": "Downsample an index", + "description": "Aggregate a time series (TSDS) index and store pre-computed statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped by a configured time interval.\nFor example, a TSDS index that contains metrics sampled every 10 seconds can be downsampled to an hourly index.\nAll documents within an hour interval are summarized and stored as a single document in the downsample index.\n\nNOTE: Only indices in a time series data stream are supported.\nNeither field nor document level security can be defined on the source index.\nThe source index must be read only (`index.blocks.write: true`).", "operationId": "indices-downsample", "parameters": [ {
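To make the downsample flow concrete, a hedged TypeScript-client sketch; the write block follows the documented requirement, while the index names, the hourly interval, and the exact `config` body shape are assumptions:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// The source TSDS index must be read only before it can be downsampled.
await client.indices.addBlock({ index: 'my-tsds-index', block: 'write' })

// Roll 10-second samples up into one summary document per hour.
await client.indices.downsample({
  index: 'my-tsds-index',
  target_index: 'my-tsds-index-1h',
  config: { fixed_interval: '1h' },
})
```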
@@ -12263,7 +12274,8 @@ "tags": [ "indices" ], - "summary": "Returns field usage information for each shard and field of an index", + "summary": "Get field usage stats", + "description": "Get field usage information for each shard and field of an index.\nField usage statistics are automatically captured when queries are running on a cluster.\nA shard-level search request that accesses a given field, even if multiple times during that request, is counted as a single use.", "operationId": "indices-field-usage-stats", "parameters": [ { @@ -12368,7 +12380,8 @@ "tags": [ "indices" ], - "summary": "Flushes one or more data streams or indices", + "summary": "Flush data streams or indices", + "description": "Flushing a data stream or index is the process of making sure that any data that is currently only stored in the transaction log is also permanently stored in the Lucene index.\nWhen restarting, Elasticsearch replays any unflushed operations from the transaction log into the Lucene index to bring it back into the state that it was in before the restart.\nElasticsearch automatically triggers flushes as needed, using heuristics that trade off the size of the unflushed transaction log against the cost of performing each flush.\n\nAfter each operation has been flushed, it is permanently stored in the Lucene index.\nThis may mean that there is no need to maintain an additional copy of it in the transaction log.\nThe transaction log is made up of multiple files, called generations, and Elasticsearch will delete any generation files when they are no longer needed, freeing up disk space.\n\nIt is also possible to trigger a flush on one or more indices using the flush API, although it is rare for users to need to call this API directly.\nIf you call the flush API after indexing some documents then a successful response indicates that Elasticsearch has flushed all the documents that were indexed before the flush API was called.", "operationId": "indices-flush-1", "parameters": [ {
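A minimal usage sketch for the flush API documented above (TypeScript client; the data stream name is a placeholder):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Explicitly flush one data stream; a successful response means everything
// indexed before this call is now persisted in the Lucene index.
await client.indices.flush({ index: 'my-data-stream', wait_if_ongoing: true })
```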
@@ -12397,7 +12410,8 @@ "tags": [ "indices" ], - "summary": "Flushes one or more data streams or indices", + "summary": "Flush data streams or indices", + "description": "Flushing a data stream or index is the process of making sure that any data that is currently only stored in the transaction log is also permanently stored in the Lucene index.\nWhen restarting, Elasticsearch replays any unflushed operations from the transaction log into the Lucene index to bring it back into the state that it was in before the restart.\nElasticsearch automatically triggers flushes as needed, using heuristics that trade off the size of the unflushed transaction log against the cost of performing each flush.\n\nAfter each operation has been flushed, it is permanently stored in the Lucene index.\nThis may mean that there is no need to maintain an additional copy of it in the transaction log.\nThe transaction log is made up of multiple files, called generations, and Elasticsearch will delete any generation files when they are no longer needed, freeing up disk space.\n\nIt is also possible to trigger a flush on one or more indices using the flush API, although it is rare for users to need to call this API directly.\nIf you call the flush API after indexing some documents then a successful response indicates that Elasticsearch has flushed all the documents that were indexed before the flush API was called.", "operationId": "indices-flush", "parameters": [ { @@ -12428,7 +12442,8 @@ "tags": [ "indices" ], - "summary": "Flushes one or more data streams or indices", + "summary": "Flush data streams or indices", + "description": "Flushing a data stream or index is the process of making sure that any data that is currently only stored in the transaction log is also permanently stored in the Lucene index.\nWhen restarting, Elasticsearch replays any unflushed operations from the transaction log into the Lucene index to bring it back into the state that it was in before the restart.\nElasticsearch automatically triggers flushes as needed, using heuristics that trade off the size of the unflushed transaction log against the cost of performing each flush.\n\nAfter each operation has been flushed, it is permanently stored in the Lucene index.\nThis may mean that there is no need to maintain an additional copy of it in the transaction log.\nThe transaction log is made up of multiple files, called generations, and Elasticsearch will delete any generation files when they are no longer needed, freeing up disk space.\n\nIt is also possible to trigger a flush on one or more indices using the flush API, although it is rare for users to need to call this API directly.\nIf you call the flush API after indexing some documents then a successful response indicates that Elasticsearch has flushed all the documents that were indexed before the flush API was called.", "operationId": "indices-flush-3", "parameters": [ { @@ -12460,7 +12475,8 @@ "tags": [ "indices" ], - "summary": "Flushes one or more data streams or indices", + "summary": "Flush data streams or indices", + "description": "Flushing a data stream or index is the process of making sure that any data that is currently only stored in the transaction log is also permanently stored in the Lucene index.\nWhen restarting, Elasticsearch replays any unflushed operations from the transaction log into the Lucene index to bring it back into the state that it was in before the restart.\nElasticsearch automatically triggers flushes as needed, using heuristics that trade off the size of the unflushed transaction log against the cost of performing each flush.\n\nAfter each operation has been flushed, it is permanently stored in the Lucene index.\nThis may mean that there is no need to maintain an additional copy of it in the transaction log.\nThe transaction log is made up of multiple files, called generations, and Elasticsearch will delete any generation files when they are no longer needed, freeing up disk space.\n\nIt is also possible to trigger a flush on one or more indices using the flush API, although it is rare for users to need to call this API directly.\nIf you call the flush API after indexing some documents then a successful response indicates that Elasticsearch has flushed all the documents that were indexed
before the flush API was called.", "operationId": "indices-flush-2", "parameters": [ { @@ -12494,7 +12510,11 @@ "tags": [ "indices" ], - "summary": "Performs the force merge operation on one or more indices", + "summary": "Force a merge", + "description": "Perform the force merge operation on the shards of one or more indices.\nFor data streams, the API forces a merge on the shards of the stream's backing indices.\n\nMerging reduces the number of segments in each shard by merging some of them together and also frees up the space used by deleted documents.\nMerging normally happens automatically, but sometimes it is useful to trigger a merge manually.\n\nWARNING: We recommend force merging only a read-only index (meaning the index is no longer receiving writes).\nWhen documents are updated or deleted, the old version is not immediately removed but instead soft-deleted and marked with a \"tombstone\".\nThese soft-deleted documents are automatically cleaned up during regular segment merges.\nBut force merge can cause very large (greater than 5 GB) segments to be produced, which are not eligible for regular merges.\nSo the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance.\nIf you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can't be backed up incrementally.", + "externalDocs": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-merge.html" + }, "operationId": "indices-forcemerge", "parameters": [ { @@ -12532,7 +12552,11 @@ "tags": [ "indices" ], - "summary": "Performs the force merge operation on one or more indices", + "summary": "Force a merge", + "description": "Perform the force merge operation on the shards of one or more indices.\nFor data streams, the API forces a merge on the shards of the stream's backing indices.\n\nMerging reduces the number of segments in each shard by merging some of them together and also frees up the space used by deleted documents.\nMerging normally happens automatically, but sometimes it is useful to trigger a merge manually.\n\nWARNING: We recommend force merging only a read-only index (meaning the index is no longer receiving writes).\nWhen documents are updated or deleted, the old version is not immediately removed but instead soft-deleted and marked with a \"tombstone\".\nThese soft-deleted documents are automatically cleaned up during regular segment merges.\nBut force merge can cause very large (greater than 5 GB) segments to be produced, which are not eligible for regular merges.\nSo the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance.\nIf you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can't be backed up incrementally.", + "externalDocs": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-merge.html" + }, "operationId": "indices-forcemerge-1", "parameters": [ { @@ -13391,7 +13415,8 @@ "tags": [ "indices" ], - "summary": "Promotes a data stream from a replicated data stream managed by CCR to a regular data stream", + "summary": "Promote a data stream", + "description": "Promote a data stream from a replicated data stream managed by cross-cluster replication (CCR) to a regular data stream.\n\nWith CCR auto following, a data stream from a remote cluster can be replicated to the local cluster.\nThese data streams can't be rolled over in the local cluster.\nThese replicated data streams roll over only if the upstream data stream rolls over.\nIn the event that the remote cluster is no longer available, the data stream in the local cluster can be promoted to a regular data stream, which allows these data streams to be rolled over in the local cluster.\n\nNOTE: When promoting a data stream, ensure the local cluster has a data stream enabled index template that matches the data stream.\nIf this is missing, the data stream will not be able to roll over until a matching index template is created.\nThis will affect the lifecycle management of the data stream and interfere with the data stream size and retention.", "operationId": "indices-promote-data-stream", "parameters": [ {
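A sketch of the promotion call for the hunk above, assuming a CCR-replicated data stream whose remote cluster has become unavailable (the name is hypothetical):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Promote the replicated data stream so the local cluster can roll it over.
// A matching, data stream enabled index template should already exist locally.
await client.indices.promoteDataStream({ name: 'my-replicated-data-stream' })
```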
@@ -13436,8 +13461,8 @@ "tags": [ "indices" ], - "summary": "Returns information about ongoing and completed shard recoveries for one or more indices", - "description": "For data streams, the API returns information for the stream’s backing indices.", + "summary": "Get index recovery information", + "description": "Get information about ongoing and completed shard recoveries for one or more indices.\nFor data streams, the API returns information for the stream's backing indices.\n\nShard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard.\nWhen a shard recovery completes, the recovered shard is available for search and indexing.\n\nRecovery automatically occurs during the following processes:\n\n* When creating an index for the first time.\n* When a node rejoins the cluster and starts up any missing primary shard copies using the data that it holds in its data path.\n* Creation of new replica shard copies from the primary.\n* Relocation of a shard copy to a different node in the same cluster.\n* A snapshot restore operation.\n* A clone, shrink, or split operation.\n\nYou can determine the cause of a shard recovery using the recovery or cat recovery APIs.\n\nThe index recovery API reports information about completed recoveries only for shard copies that currently exist in the cluster.\nIt only reports the last recovery for each shard copy and does not report historical information about earlier recoveries, nor does it report information about the recoveries of shard copies that no longer exist.\nThis means that if a shard copy completes a recovery and then Elasticsearch relocates it onto a different node, then the information about the original recovery will not be shown in the recovery API.", "operationId": "indices-recovery", "parameters": [ { @@ -13459,8 +13484,8 @@ "tags": [ "indices" ], - "summary": "Returns information about ongoing and completed shard recoveries for one or more indices", - "description": "For data streams, the API returns information for the stream’s backing indices.", + "summary": "Get index recovery information", + "description": "Get information about ongoing and completed shard recoveries for one or more indices.\nFor data streams, the API returns information for the stream's backing indices.\n\nShard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard.\nWhen a shard recovery completes, the recovered shard is available for search and indexing.\n\nRecovery automatically occurs during the following processes:\n\n* When creating an index for the first time.\n* When a node rejoins the cluster and starts up any missing primary shard copies using the data that it holds in its data path.\n* Creation of new replica shard copies from the primary.\n* Relocation of a shard copy to a different node in the same cluster.\n* A snapshot restore operation.\n* A clone, shrink, or split operation.\n\nYou can determine the cause of a shard recovery using the recovery or cat recovery APIs.\n\nThe index recovery API reports information about completed recoveries only for shard copies that currently exist in the cluster.\nIt only reports the last recovery for each shard copy and does not report historical information about earlier recoveries, nor does it report information about the recoveries of shard copies that no longer exist.\nThis means that if a shard copy completes a recovery and then Elasticsearch relocates it onto a different node, then the information about the original recovery will not be shown in the recovery API.", "operationId": "indices-recovery-1", "parameters": [ {
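For illustration, querying the recovery information described above via the TypeScript client (placeholder index name; `detailed: true` adds verbose per-file detail):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Report the most recent recovery for each shard copy of one index.
const recovery = await client.indices.recovery({ index: 'my-index', detailed: true })
for (const [indexName, info] of Object.entries(recovery)) {
  for (const shard of info.shards ?? []) {
    console.log(indexName, shard.id, shard.type, shard.stage)
  }
}
```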
@@ -13591,7 +13616,11 @@ "tags": [ "indices" ], - "summary": "Reloads an index's search analyzers and their resources", + "summary": "Reload search analyzers", + "description": "Reload an index's search analyzers and their resources.\nFor data streams, the API reloads search analyzers and resources for the stream's backing indices.\n\nIMPORTANT: After reloading the search analyzers you should clear the request cache to make sure it doesn't contain responses derived from the previous versions of the analyzer.\n\nYou can use the reload search analyzers API to pick up changes to synonym files used in the `synonym_graph` or `synonym` token filter of a search analyzer.\nTo be eligible, the token filter must have an `updateable` flag of `true` and only be used in search analyzers.\n\nNOTE: This API does not perform a reload for each shard of an index.\nInstead, it performs a reload for each node containing index shards.\nAs a result, the total shard count returned by the API can differ from the number of index shards.\nBecause reloading affects every node with an index shard, it is important to update the synonym file on every data node in the cluster--including nodes that don't contain a shard replica--before using this API.\nThis ensures the synonym file is updated everywhere in the cluster in case shards are relocated in the future.", + "externalDocs": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/search-analyzer.html" + }, "operationId": "indices-reload-search-analyzers", "parameters": [ { @@ -13618,7 +13647,11 @@ "tags": [ "indices" ], - "summary": "Reloads an index's search analyzers and their resources", + "summary": "Reload search analyzers", + "description": "Reload an index's search analyzers and their resources.\nFor data streams, the API reloads search analyzers and resources for the stream's backing indices.\n\nIMPORTANT: After reloading the search analyzers you should clear the request cache to make sure it doesn't contain responses derived from the previous versions of the analyzer.\n\nYou can use the reload search analyzers API to pick up changes to synonym files used in the `synonym_graph` or `synonym` token filter of a search analyzer.\nTo be eligible, the token filter must have an `updateable` flag of `true` and only be used in search analyzers.\n\nNOTE: This API does not perform a reload for each shard of an index.\nInstead, it performs a reload for each node containing index shards.\nAs a result, the total shard count returned by the API can differ from the number of index shards.\nBecause reloading affects every node with an index shard, it is important to update the synonym file on every data node in the cluster--including nodes that don't contain a shard replica--before using this API.\nThis ensures the synonym file is updated everywhere in the cluster in case shards are relocated in the future.", + "externalDocs": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/search-analyzer.html" + }, "operationId": "indices-reload-search-analyzers-1", "parameters": [ {
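The reload call itself is short; a sketch with the TypeScript client (index name assumed), mirroring the caveats above about synonym files and the request cache:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// After copying the updated synonyms file to every data node, pick it up in
// search analyzers whose updateable token filters reference it.
await client.indices.reloadSearchAnalyzers({ index: 'my-index' })

// Then clear the request cache so responses from the old analyzer go away.
await client.indices.clearCache({ index: 'my-index', request: true })
```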
@@ -13647,7 +13680,8 @@ "tags": [ "indices" ], - "summary": "Resolves the specified index expressions to return information about each cluster, including\n", - "description": "the local cluster, if included.\nMultiple patterns and remote clusters are supported.", + "summary": "Resolve the cluster", + "description": "Resolve the specified index expressions to return information about each cluster, including the local cluster, if included.\nMultiple patterns and remote clusters are supported.\n\nThis endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search.\n\nYou use the same index expression with this endpoint as you would for cross-cluster search.\nIndex and cluster exclusions are also supported with this endpoint.\n\nFor each cluster in the index expression, information is returned about:\n\n* Whether the querying (\"local\") cluster is currently connected to each remote cluster in the index expression scope.\n* Whether each remote cluster is configured with `skip_unavailable` as `true` or `false`.\n* Whether there are any indices, aliases, or data streams on that cluster that match the index expression.\n* Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index).\n* Cluster version information, including the Elasticsearch server version.", "operationId": "indices-resolve-cluster", "parameters": [ { @@ -13892,8 +13925,8 @@ "tags": [ "indices" ], - "summary": "Returns low-level information about the Lucene segments in index shards", - "description": "For data streams, the API returns information about the stream’s backing indices.", + "summary": "Get index segments", + "description": "Get low-level information about the Lucene segments in index shards.\nFor data streams, the API returns information about the stream's backing indices.", "operationId": "indices-segments", "parameters": [ { @@ -13918,8 +13951,8 @@ "tags": [ "indices" ], - "summary": "Returns low-level information about the Lucene segments in index shards", - "description": "For data streams, the API returns information about the stream’s backing indices.", + "summary": "Get index segments", + "description": "Get low-level information about the Lucene segments in index shards.\nFor data streams, the API returns information about the stream's backing indices.", "operationId": "indices-segments-1", "parameters": [ { @@ -13947,8 +13980,8 @@ "tags": [ "indices" ], - "summary": "Retrieves store information about replica shards in one or more indices", - "description": "For data streams, the API retrieves store information for the stream’s backing indices.", + "summary": "Get index shard stores", + "description": "Get store information about replica shards in one or more indices.\nFor data streams, the API retrieves store information for the stream's backing indices.\n\nThe index shard stores API returns the following information:\n\n* The node on which each replica shard exists.\n* The allocation ID for each replica shard.\n* A unique ID for each replica shard.\n* Any errors encountered while opening the shard index or from an earlier failure.\n\nBy default, the API returns store information only for primary shards that are unassigned or have one or more unassigned replica shards.", "operationId": "indices-shard-stores", "parameters": [ {
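A short sketch of the shard stores call documented above (TypeScript client, placeholder index; `status: 'all'` widens the default unassigned-only scope):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// List store information for every shard copy, not only problematic ones.
const stores = await client.indices.shardStores({ index: 'my-index', status: 'all' })
console.log(JSON.stringify(stores.indices, null, 2))
```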
@@ -13976,8 +14009,8 @@ "tags": [ "indices" ], - "summary": "Retrieves store information about replica shards in one or more indices", - "description": "For data streams, the API retrieves store information for the stream’s backing indices.", + "summary": "Get index shard stores", + "description": "Get store information about replica shards in one or more indices.\nFor data streams, the API retrieves store information for the stream's backing indices.\n\nThe index shard stores API returns the following information:\n\n* The node on which each replica shard exists.\n* The allocation ID for each replica shard.\n* A unique ID for each replica shard.\n* Any errors encountered while opening the shard index or from an earlier failure.\n\nBy default, the API returns store information only for primary shards that are unassigned or have one or more unassigned replica shards.", "operationId": "indices-shard-stores-1", "parameters": [ { @@ -14008,7 +14041,8 @@ "tags": [ "indices" ], - "summary": "Shrinks an existing index into a new index with fewer primary shards", + "summary": "Shrink an index", + "description": "Shrink an index into a new index with fewer primary shards.\n\nBefore you can shrink an index:\n\n* The index must be read-only.\n* A copy of every shard in the index must reside on the same node.\n* The index must have a green health status.\n\nTo make shard allocation easier, we recommend you also remove the index's replica shards.\nYou can later re-add replica shards as part of the shrink operation.\n\nThe requested number of primary shards in the target index must be a factor of the number of shards in the source index.\nFor example, an index with 8 primary shards can be shrunk into 4, 2, or 1 primary shards, and an index with 15 primary shards can be shrunk into 5, 3, or 1.\nIf the number of shards in the index is a prime number, it can only be shrunk into a single primary shard.\nBefore shrinking, a (primary or replica) copy of every shard in the index must be present on the same node.\n\nThe current write index on a data stream cannot be shrunk. In order to shrink the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be shrunk.\n\nA shrink operation:\n\n* Creates a new target index with the same definition as the source index, but with a smaller number of primary shards.\n* Hard-links segments from the source index into the target index. If the file system does not support hard-linking, then all segments are copied into the new index, which is a much more time-consuming process. Also if using multiple data paths, shards on different data paths require a full copy of segment files if they are not on the same disk since hardlinks do not work across disks.\n* Recovers the target index as though it were a closed index which had just been re-opened. Recovers shards to the `.routing.allocation.initial_recovery._id` index setting.\n\nIMPORTANT: Indices can only be shrunk if they satisfy the following requirements:\n\n* The target index must not exist.\n* The source index must have more primary shards than the target index.\n* The number of primary shards in the target index must be a factor of the number of primary shards in the source index.\n* The index must not contain more than 2,147,483,519 documents in total across all shards that will be shrunk into a single shard on the target index as this is the maximum number of docs that can fit into a single shard.\n* The node handling the shrink process must have sufficient free disk space to accommodate a second copy of the existing index.", "operationId": "indices-shrink", "parameters": [ {
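Putting the shrink preconditions and request together in a hedged TypeScript-client sketch (index names, shard counts, and the relocation node name are illustrative):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Prepare: drop replicas, co-locate a copy of every shard, block writes.
await client.indices.putSettings({
  index: 'my-source-index',
  settings: {
    'index.number_of_replicas': 0,
    'index.routing.allocation.require._name': 'shrink-node-1',
    'index.blocks.write': true,
  },
})

// Shrink to a factor of the source's primary shard count (here: 1).
await client.indices.shrink({
  index: 'my-source-index',
  target: 'my-shrunken-index',
  settings: { 'index.number_of_shards': 1, 'index.number_of_replicas': 1 },
})
```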
@@ -14041,7 +14075,8 @@ "tags": [ "indices" ], - "summary": "Shrinks an existing index into a new index with fewer primary shards", + "summary": "Shrink an index", + "description": "Shrink an index into a new index with fewer primary shards.\n\nBefore you can shrink an index:\n\n* The index must be read-only.\n* A copy of every shard in the index must reside on the same node.\n* The index must have a green health status.\n\nTo make shard allocation easier, we recommend you also remove the index's replica shards.\nYou can later re-add replica shards as part of the shrink operation.\n\nThe requested number of primary shards in the target index must be a factor of the number of shards in the source index.\nFor example, an index with 8 primary shards can be shrunk into 4, 2, or 1 primary shards, and an index with 15 primary shards can be shrunk into 5, 3, or 1.\nIf the number of shards in the index is a prime number, it can only be shrunk into a single primary shard.\nBefore shrinking, a (primary or replica) copy of every shard in the index must be present on the same node.\n\nThe current write index on a data stream cannot be shrunk. In order to shrink the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be shrunk.\n\nA shrink operation:\n\n* Creates a new target index with the same definition as the source index, but with a smaller number of primary shards.\n* Hard-links segments from the source index into the target index. If the file system does not support hard-linking, then all segments are copied into the new index, which is a much more time-consuming process. Also if using multiple data paths, shards on different data paths require a full copy of segment files if they are not on the same disk since hardlinks do not work across disks.\n* Recovers the target index as though it were a closed index which had just been re-opened. Recovers shards to the `.routing.allocation.initial_recovery._id` index setting.\n\nIMPORTANT: Indices can only be shrunk if they satisfy the following requirements:\n\n* The target index must not exist.\n* The source index must have more primary shards than the target index.\n* The number of primary shards in the target index must be a factor of the number of primary shards in the source index.
\n* The index must not contain more than 2,147,483,519 documents in total across all shards that will be shrunk into a single shard on the target index as this is the maximum number of docs that can fit into a single shard.\n* The node handling the shrink process must have sufficient free disk space to accommodate a second copy of the existing index.", "operationId": "indices-shrink-1", "parameters": [ { @@ -14207,7 +14242,8 @@ "tags": [ "indices" ], - "summary": "Splits an existing index into a new index with more primary shards", + "summary": "Split an index", + "description": "Split an index into a new index with more primary shards.\n\nBefore you can split an index:\n\n* The index must be read-only.\n* The cluster health status must be green.\n\nThe number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` setting.\nThe number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing.\nFor instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3.\n\nA split operation:\n\n* Creates a new target index with the same definition as the source index, but with a larger number of primary shards.\n* Hard-links segments from the source index into the target index. If the file system doesn't support hard-linking, all segments are copied into the new index, which is a much more time-consuming process.\n* Hashes all documents again, after low level files are created, to delete documents that belong to a different shard.\n* Recovers the target index as though it were a closed index which had just been re-opened.\n\nIMPORTANT: Indices can only be split if they satisfy the following requirements:\n\n* The target index must not exist.\n* The source index must have fewer primary shards than the target index.\n* The number of primary shards in the target index must be a multiple of the number of primary shards in the source index.\n* The node handling the split process must have sufficient free disk space to accommodate a second copy of the existing index.", "operationId": "indices-split", "parameters": [ { @@ -14240,7 +14276,8 @@ "tags": [ "indices" ], - "summary": "Splits an existing index into a new index with more primary shards", + "summary": "Split an index", + "description": "Split an index into a new index with more primary shards.\n\nBefore you can split an index:\n\n* The index must be read-only.\n* The cluster health status must be green.\n\nThe number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` setting.\nThe number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing.\nFor instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3.\n\nA split operation:\n\n* Creates a new target index with the same definition as the source index, but with a larger number of primary shards.\n* Hard-links segments from the source index into the target index. If the file system doesn't support hard-linking, all segments are copied into the new index, which is a much more time-consuming process.\n* Hashes all documents again, after low level files are created, to delete documents that belong to a different shard.\n* Recovers the target index as though it were a closed index which had just been re-opened.\n\nIMPORTANT: Indices can only be split if they satisfy the following requirements:\n\n* The target index must not exist.\n* The source index must have fewer primary shards than the target index.\n* The number of primary shards in the target index must be a multiple of the number of primary shards in the source index.\n* The node handling the split process must have sufficient free disk space to accommodate a second copy of the existing index.", "operationId": "indices-split-1", "parameters": [ {
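And the corresponding split sketch (TypeScript client; the names and the 1-to-2 shard factor are assumptions):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// The source index must be read-only before it can be split.
await client.indices.addBlock({ index: 'my-source-index', block: 'write' })

// The target primary shard count must be a multiple of the source's.
await client.indices.split({
  index: 'my-source-index',
  target: 'my-split-index',
  settings: { 'index.number_of_shards': 2 },
})
```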
@@ -14275,8 +14312,8 @@ "tags": [ "indices" ], - "summary": "Returns statistics for one or more indices", - "description": "For data streams, the API retrieves statistics for the stream’s backing indices.", + "summary": "Get index statistics", + "description": "For data streams, the API retrieves statistics for the stream's backing indices.\n\nBy default, the returned statistics are index-level with `primaries` and `total` aggregations.\n`primaries` are the values for only the primary shards.\n`total` are the accumulated values for both primary and replica shards.\n\nTo get shard-level statistics, set the `level` parameter to `shards`.\n\nNOTE: When moving to another node, the shard-level statistics for a shard are cleared.\nAlthough the shard is no longer part of the node, that node retains any node-level statistics to which the shard contributed.", "operationId": "indices-stats", "parameters": [ { @@ -14320,8 +14357,8 @@ "tags": [ "indices" ], - "summary": "Returns statistics for one or more indices", - "description": "For data streams, the API retrieves statistics for the stream’s backing indices.", + "summary": "Get index statistics", + "description": "For data streams, the API retrieves statistics for the stream's backing indices.\n\nBy default, the returned statistics are index-level with `primaries` and `total` aggregations.\n`primaries` are the values for only the primary shards.\n`total` are the accumulated values for both primary and replica shards.\n\nTo get shard-level statistics, set the `level` parameter to `shards`.\n\nNOTE: When moving to another node, the shard-level statistics for a shard are cleared.\nAlthough the shard is no longer part of the node, that node retains any node-level statistics to which the shard contributed.", "operationId": "indices-stats-1", "parameters": [ { @@ -14368,8 +14405,8 @@ "tags": [ "indices" ], - "summary": "Returns statistics for one or more indices", - "description": "For data streams, the API retrieves statistics for the stream’s backing indices.", + "summary": "Get index statistics", + "description": "For data streams, the API retrieves statistics for the stream's backing indices.\n\nBy default, the returned statistics are index-level with `primaries` and `total` aggregations.\n`primaries` are the values for only the primary shards.\n`total` are the accumulated values for both primary and replica shards.\n\nTo get shard-level statistics, set the `level` parameter to `shards`.\n\nNOTE: When moving to another node, the shard-level statistics for a shard are cleared.\nAlthough the shard is no longer part of the node, that node retains any node-level statistics to which the shard contributed.", "operationId": "indices-stats-2", "parameters": [ {
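Finally, a sketch of requesting the shard-level statistics mentioned in these hunks (TypeScript client, placeholder index name):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// level: 'shards' switches from the default index-level primaries/total
// aggregations to per-shard statistics.
const stats = await client.indices.stats({ index: 'my-index', level: 'shards' })
console.log(stats._all?.total?.docs, stats.indices?.['my-index']?.total?.docs)
```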
"operationId": "indices-stats-2", "parameters": [ { @@ -14416,8 +14453,8 @@ "tags": [ "indices" ], - "summary": "Returns statistics for one or more indices", - "description": "For data streams, the API retrieves statistics for the stream’s backing indices.", + "summary": "Get index statistics", + "description": "For data streams, the API retrieves statistics for the stream's backing indices.\n\nBy default, the returned statistics are index-level with `primaries` and `total` aggregations.\n`primaries` are the values for only the primary shards.\n`total` are the accumulated values for both primary and replica shards.\n\nTo get shard-level statistics, set the `level` parameter to `shards`.\n\nNOTE: When moving to another node, the shard-level statistics for a shard are cleared.\nAlthough the shard is no longer part of the node, that node retains any node-level statistics to which the shard contributed.", "operationId": "indices-stats-3", "parameters": [ { @@ -14467,7 +14504,8 @@ "tags": [ "indices" ], - "summary": "Unfreezes an index", + "summary": "Unfreeze an index", + "description": "When a frozen index is unfrozen, the index goes through the normal recovery process and becomes writeable again.", "operationId": "indices-unfreeze", "parameters": [ { diff --git a/output/schema/schema.json b/output/schema/schema.json index 9c75929421..a74fd41a1e 100644 --- a/output/schema/schema.json +++ b/output/schema/schema.json @@ -6075,7 +6075,7 @@ "stability": "stable" } }, - "description": "Clears the caches of one or more indices.\nFor data streams, the API clears the caches of the stream’s backing indices.", + "description": "Clear the cache.\nClear the cache of one or more indices.\nFor data streams, the API clears the caches of the stream's backing indices.", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-clearcache.html", "name": "indices.clear_cache", "request": { @@ -6112,7 +6112,7 @@ "stability": "stable" } }, - "description": "Clones an existing index.", + "description": "Clone an index.\nClone an existing index into a new index.\nEach original primary shard is cloned into a new primary shard in the new index.\n\nIMPORTANT: Elasticsearch does not apply index templates to the resulting index.\nThe API also does not copy index metadata from the original index.\nIndex metadata includes aliases, index lifecycle management phase definitions, and cross-cluster replication (CCR) follower information.\nFor example, if you clone a CCR follower index, the resulting clone will not be a follower index.\n\nThe clone API copies most index settings from the source index to the resulting index, with the exception of `index.number_of_replicas` and `index.auto_expand_replicas`.\nTo set the number of replicas in the resulting index, configure these settings in the clone request.\n\nCloning works as follows:\n\n* First, it creates a new target index with the same definition as the source index.\n* Then it hard-links segments from the source index into the target index. 
If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time-consuming process.\n* Finally, it recovers the target index as though it were a closed index which had just been re-opened.\n\nIMPORTANT: Indices can only be cloned if they meet the following requirements:\n\n* The target index must not exist.\n* The source index must have the same number of primary shards as the target index.\n* The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index.", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-clone-index.html", "name": "indices.clone", "request": { @@ -6150,7 +6150,7 @@ "stability": "stable" } }, - "description": "Closes an index.", + "description": "Close an index.\nA closed index is blocked for read or write operations and does not allow all operations that opened indices allow.\nIt is not possible to index documents or to search for documents in a closed index.\nClosed indices do not have to maintain internal data structures for indexing or searching documents, which results in a smaller overhead on the cluster.\n\nWhen opening or closing an index, the master node is responsible for restarting the index shards to reflect the new state of the index.\nThe shards will then go through the normal recovery process.\nThe data of opened and closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times.\n\nYou can open and close multiple indices.\nAn error is thrown if the request explicitly refers to a missing index.\nThis behaviour can be turned off using the `ignore_unavailable=true` parameter.\n\nBy default, you must explicitly name the indices you are opening or closing.\nTo open or close indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`.
This setting can also be changed with the cluster update settings API.\n\nClosed indices consume a significant amount of disk space, which can cause problems in managed environments.\nClosing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`.", "docId": "indices-close", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-close.html", "name": "indices.close", @@ -6544,7 +6544,7 @@ "stability": "experimental" } }, - "description": "Analyzes the disk usage of each field of an index or data stream.", + "description": "Analyze the index disk usage.\nAnalyze the disk usage of each field of an index or data stream.\nThis API might not support indices created in previous Elasticsearch versions.\nThe result of a small index can be inaccurate as some parts of an index might not be analyzed by the API.", "docId": "indices-disk-usage", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-disk-usage.html", "name": "indices.disk_usage", @@ -6580,7 +6580,7 @@ "stability": "experimental" } }, - "description": "Aggregates a time series (TSDS) index and stores pre-computed statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped by a configured time interval.", + "description": "Downsample an index.\nAggregate a time series (TSDS) index and store pre-computed statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped by a configured time interval.\nFor example, a TSDS index that contains metrics sampled every 10 seconds can be downsampled to an hourly index.\nAll documents within an hour interval are summarized and stored as a single document in the downsample index.\n\nNOTE: Only indices in a time series data stream are supported.\nNeither field nor document level security can be defined on the source index.\nThe source index must be read only (`index.blocks.write: true`).", "docId": "indices-downsample-data-stream", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-downsample-data-stream.html", "name": "indices.downsample", @@ -6792,7 +6792,7 @@ "stability": "experimental" } }, - "description": "Returns field usage information for each shard and field of an index.", + "description": "Get field usage stats.\nGet field usage information for each shard and field of an index.\nField usage statistics are automatically captured when queries are running on a cluster.\nA shard-level search request that accesses a given field, even if multiple times during that request, is counted as a single use.", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/field-usage-stats.html", "name": "indices.field_usage_stats", "privileges": { @@ -6831,7 +6831,7 @@ "stability": "stable" } }, - "description": "Flushes one or more data streams or indices.", + "description": "Flush data streams or indices.\nFlushing a data stream or index is the process of making sure that any data that is currently only stored in the transaction log is also permanently stored in the Lucene index.\nWhen restarting, Elasticsearch replays any unflushed operations from the transaction log into the Lucene index to bring it back into the state that it was in before the restart.\nElasticsearch automatically triggers flushes as needed, using heuristics that trade off the size of the unflushed transaction log against the cost of performing each flush.\n\nAfter each operation has been flushed, it is
permanently stored in the Lucene index.\nThis may mean that there is no need to maintain an additional copy of it in the transaction log.\nThe transaction log is made up of multiple files, called generations, and Elasticsearch will delete any generation files when they are no longer needed, freeing up disk space.\n\nIt is also possible to trigger a flush on one or more indices using the flush API, although it is rare for users to need to call this API directly.\nIf you call the flush API after indexing some documents then a successful response indicates that Elasticsearch has flushed all the documents that were indexed before the flush API was called.", "docId": "indices-flush", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-flush.html", "name": "indices.flush", @@ -6875,8 +6875,10 @@ "stability": "stable" } }, - "description": "Performs the force merge operation on one or more indices.", + "description": "Force a merge.\nPerform the force merge operation on the shards of one or more indices.\nFor data streams, the API forces a merge on the shards of the stream's backing indices.\n\nMerging reduces the number of segments in each shard by merging some of them together and also frees up the space used by deleted documents.\nMerging normally happens automatically, but sometimes it is useful to trigger a merge manually.\n\nWARNING: We recommend force merging only a read-only index (meaning the index is no longer receiving writes).\nWhen documents are updated or deleted, the old version is not immediately removed but instead soft-deleted and marked with a \"tombstone\".\nThese soft-deleted documents are automatically cleaned up during regular segment merges.\nBut force merge can cause very large (greater than 5 GB) segments to be produced, which are not eligible for regular merges.\nSo the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance.\nIf you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can't be backed up incrementally.", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-forcemerge.html", + "extDocId": "index-modules-merge", + "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/index-modules-merge.html", "name": "indices.forcemerge", "request": { "name": "Request", "namespace": "indices.forcemerge" }, @@ -7419,7 +7421,7 @@ "stability": "stable" } }, - "description": "Promotes a data stream from a replicated data stream managed by CCR to a regular data stream", + "description": "Promote a data stream.\nPromote a data stream from a replicated data stream managed by cross-cluster replication (CCR) to a regular data stream.\n\nWith CCR auto following, a data stream from a remote cluster can be replicated to the local cluster.\nThese data streams can't be rolled over in the local cluster.\nThese replicated data streams roll over only if the upstream data stream rolls over.\nIn the event that the remote cluster is no longer available, the data stream in the local cluster can be promoted to a regular data stream, which allows these data streams to be rolled over in the local cluster.\n\nNOTE: When promoting a data stream, ensure the local cluster has a data stream enabled index template that matches the data stream.\nIf this is missing, the data stream will not be able to roll over until a matching index template is created.\nThis will affect the lifecycle management of the data stream and interfere
with the data stream size and retention.", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html", "name": "indices.promote_data_stream", "request": { @@ -7657,9 +7659,17 @@ "stability": "stable" } }, - "description": "Create or update an index template.\nIndex templates define settings, mappings, and aliases that can be applied automatically to new indices.", + "description": "Create or update an index template.\nIndex templates define settings, mappings, and aliases that can be applied automatically to new indices.\nElasticsearch applies templates to new indices based on an index pattern that matches the index name.\n\nIMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.\n\nComposable templates always take precedence over legacy templates.\nIf no composable template matches a new index, matching legacy templates are applied according to their order.\n\nIndex templates are only applied during index creation.\nChanges to index templates do not affect existing indices.\nSettings and mappings specified in create index API requests override any settings or mappings specified in an index template.", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates-v1.html", + "extDocId": "index-templates", + "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/index-templates.html", "name": "indices.put_template", + "privileges": { + "cluster": [ + "manage_index_templates", + "manage" + ] + }, "request": { "name": "Request", "namespace": "indices.put_template" @@ -7695,7 +7705,7 @@ "stability": "stable" } }, - "description": "Returns information about ongoing and completed shard recoveries for one or more indices.\nFor data streams, the API returns information for the stream’s backing indices.", + "description": "Get index recovery information.\nGet information about ongoing and completed shard recoveries for one or more indices.\nFor data streams, the API returns information for the stream's backing indices.\n\nShard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard.\nWhen a shard recovery completes, the recovered shard is available for search and indexing.\n\nRecovery automatically occurs during the following processes:\n\n* When creating an index for the first time.\n* When a node rejoins the cluster and starts up any missing primary shard copies using the data that it holds in its data path.\n* Creation of new replica shard copies from the primary.\n* Relocation of a shard copy to a different node in the same cluster.\n* A snapshot restore operation.\n* A clone, shrink, or split operation.\n\nYou can determine the cause of a shard recovery using the recovery or cat recovery APIs.\n\nThe index recovery API reports information about completed recoveries only for shard copies that currently exist in the cluster.\nIt only reports the last recovery for each shard copy and does not report historical information about earlier recoveries, nor does it report information about the recoveries of shard copies that no longer exist.\nThis means that if a shard copy completes a recovery and then Elasticsearch relocates it onto a different node then the information about the original recovery will not be shown in the recovery API.", "docUrl": 
"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-recovery.html", "name": "indices.recovery", "request": { @@ -7774,8 +7784,10 @@ "stability": "stable" } }, - "description": "Reloads an index's search analyzers and their resources.", + "description": "Reload search analyzers.\nReload an index's search analyzers and their resources.\nFor data streams, the API reloads search analyzers and resources for the stream's backing indices.\n\nIMPORTANT: After reloading the search analyzers you should clear the request cache to make sure it doesn't contain responses derived from the previous versions of the analyzer.\n\nYou can use the reload search analyzers API to pick up changes to synonym files used in the `synonym_graph` or `synonym` token filter of a search analyzer.\nTo be eligible, the token filter must have an `updateable` flag of `true` and only be used in search analyzers.\n\nNOTE: This API does not perform a reload for each shard of an index.\nInstead, it performs a reload for each node containing index shards.\nAs a result, the total shard count returned by the API can differ from the number of index shards.\nBecause reloading affects every node with an index shard, it is important to update the synonym file on every data node in the cluster--including nodes that don't contain a shard replica--before using this API.\nThis ensures the synonym file is updated everywhere in the cluster in case shards are relocated in the future.", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-reload-analyzers.html", + "extDocId": "search-analyzer", + "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/search-analyzer.html", "name": "indices.reload_search_analyzers", "request": { "name": "Request", @@ -7806,7 +7818,7 @@ "stability": "stable" } }, - "description": "Resolves the specified index expressions to return information about each cluster, including\nthe local cluster, if included.\nMultiple patterns and remote clusters are supported.", + "description": "Resolve the cluster.\nResolve the specified index expressions to return information about each cluster, including the local cluster, if included.\nMultiple patterns and remote clusters are supported.\n\nThis endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search.\n\nYou use the same index expression with this endpoint as you would for cross-cluster search.\nIndex and cluster exclusions are also supported with this endpoint.\n\nFor each cluster in the index expression, information is returned about:\n\n* Whether the querying (\"local\") cluster is currently connected to each remote cluster in the index expression scope.\n* Whether each remote cluster is configured with `skip_unavailable` as `true` or `false`.\n* Whether there are any indices, aliases, or data streams on that cluster that match the index expression.\n* Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index).\n* Cluster version information, including the Elasticsearch server version.", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-resolve-cluster-api.html", "name": "indices.resolve_cluster", "request": { @@ -7920,7 +7932,7 @@ "stability": "stable" } }, - "description": "Returns low-level information about the Lucene segments in index shards.\nFor data streams, the API returns 
information about the stream’s backing indices.", + "description": "Get index segments.\nGet low-level information about the Lucene segments in index shards.\nFor data streams, the API returns information about the stream's backing indices.", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-segments.html", "name": "indices.segments", "request": { @@ -7956,9 +7968,15 @@ "stability": "stable" } }, - "description": "Retrieves store information about replica shards in one or more indices.\nFor data streams, the API retrieves store information for the stream’s backing indices.", + "description": "Get index shard stores.\nGet store information about replica shards in one or more indices.\nFor data streams, the API retrieves store information for the stream's backing indices.\n\nThe index shard stores API returns the following information:\n\n* The node on which each replica shard exists.\n* The allocation ID for each replica shard.\n* A unique ID for each replica shard.\n* Any errors encountered while opening the shard index or from an earlier failure.\n\nBy default, the API returns store information only for primary shards that are unassigned or have one or more unassigned replica shards.", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-shards-stores.html", "name": "indices.shard_stores", + "privileges": { + "index": [ + "monitor", + "manage" + ] + }, "request": { "name": "Request", "namespace": "indices.shard_stores" }, @@ -7993,10 +8011,15 @@ "stability": "stable" } }, - "description": "Shrinks an existing index into a new index with fewer primary shards.", + "description": "Shrink an index.\nShrink an index into a new index with fewer primary shards.\n\nBefore you can shrink an index:\n\n* The index must be read-only.\n* A copy of every shard in the index must reside on the same node.\n* The index must have a green health status.\n\nTo make shard allocation easier, we recommend you also remove the index's replica shards.\nYou can later re-add replica shards as part of the shrink operation.\n\nThe requested number of primary shards in the target index must be a factor of the number of shards in the source index.\nFor example, an index with 8 primary shards can be shrunk into 4, 2 or 1 primary shards or an index with 15 primary shards can be shrunk into 5, 3 or 1.\nIf the number of shards in the index is a prime number it can only be shrunk into a single primary shard.\nBefore shrinking, a (primary or replica) copy of every shard in the index must be present on the same node.\n\nThe current write index on a data stream cannot be shrunk. In order to shrink the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be shrunk.\n\nA shrink operation:\n\n* Creates a new target index with the same definition as the source index, but with a smaller number of primary shards.\n* Hard-links segments from the source index into the target index. If the file system does not support hard-linking, then all segments are copied into the new index, which is a much more time consuming process. Also if using multiple data paths, shards on different data paths require a full copy of segment files if they are not on the same disk since hardlinks do not work across disks.\n* Recovers the target index as though it were a closed index which had just been re-opened.
Recovers shards to the `index.routing.allocation.initial_recovery._id` index setting.\n\nIMPORTANT: Indices can only be shrunk if they satisfy the following requirements:\n\n* The target index must not exist.\n* The source index must have more primary shards than the target index.\n* The number of primary shards in the target index must be a factor of the number of primary shards in the source index.\n* The index must not contain more than 2,147,483,519 documents in total across all shards that will be shrunk into a single shard on the target index, as this is the maximum number of docs that can fit into a single shard.\n* The node handling the shrink process must have sufficient free disk space to accommodate a second copy of the existing index.", "docId": "indices-shrink-index", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-shrink-index.html", "name": "indices.shrink", + "privileges": { + "index": [ + "manage" + ] + }, "request": { "name": "Request", "namespace": "indices.shrink" }, @@ -8116,10 +8139,15 @@ "stability": "stable" } }, - "description": "Splits an existing index into a new index with more primary shards.", + "description": "Split an index.\nSplit an index into a new index with more primary shards.\n\nBefore you can split an index:\n\n* The index must be read-only.\n* The cluster health status must be green.\n\nThe number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` setting.\nThe number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing.\nFor instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3.\n\nA split operation:\n\n* Creates a new target index with the same definition as the source index, but with a larger number of primary shards.\n* Hard-links segments from the source index into the target index.
If the file system doesn't support hard-linking, all segments are copied into the new index, which is a much more time consuming process.\n* Hashes all documents again, after low level files are created, to delete documents that belong to a different shard.\n* Recovers the target index as though it were a closed index which had just been re-opened.\n\nIMPORTANT: Indices can only be split if they satisfy the following requirements:\n\n* The target index must not exist.\n* The source index must have fewer primary shards than the target index.\n* The number of primary shards in the target index must be a multiple of the number of primary shards in the source index.\n* The node handling the split process must have sufficient free disk space to accommodate a second copy of the existing index.", "docId": "indices-split-index", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-split-index.html", "name": "indices.split", + "privileges": { + "index": [ + "manage" + ] + }, "request": { "name": "Request", "namespace": "indices.split" @@ -8156,7 +8184,7 @@ "stability": "stable" } }, - "description": "Returns statistics for one or more indices.\nFor data streams, the API retrieves statistics for the stream’s backing indices.", + "description": "Get index statistics.\nFor data streams, the API retrieves statistics for the stream's backing indices.\n\nBy default, the returned statistics are index-level with `primaries` and `total` aggregations.\n`primaries` are the values for only the primary shards.\n`total` are the accumulated values for both primary and replica shards.\n\nTo get shard-level statistics, set the `level` parameter to `shards`.\n\nNOTE: When moving to another node, the shard-level statistics for a shard are cleared.\nAlthough the shard is no longer part of the node, that node retains any node-level statistics to which the shard contributed.", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-stats.html", "name": "indices.stats", "privileges": { @@ -8211,9 +8239,14 @@ "stability": "stable" } }, - "description": "Unfreezes an index.", + "description": "Unfreeze an index.\nWhen a frozen index is unfrozen, the index goes through the normal recovery process and becomes writeable again.", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/unfreeze-index-api.html", "name": "indices.unfreeze", + "privileges": { + "index": [ + "manage" + ] + }, "request": { "name": "Request", "namespace": "indices.unfreeze" @@ -128419,7 +128452,7 @@ "body": { "kind": "no_body" }, - "description": "Clears the caches of one or more indices.\nFor data streams, the API clears the caches of the stream’s backing indices.", + "description": "Clear the cache.\nClear the cache of one or more indices.\nFor data streams, the API clears the caches of the stream's backing indices.", "inherits": { "type": { "name": "RequestBase", @@ -128533,7 +128566,7 @@ } } ], - "specLocation": "indices/clear_cache/IndicesIndicesClearCacheRequest.ts#L23-L77" + "specLocation": "indices/clear_cache/IndicesIndicesClearCacheRequest.ts#L23-L78" }, { "kind": "response", @@ -128607,7 +128640,7 @@ } ] }, - "description": "Clones an existing index.", + "description": "Clone an index.\nClone an existing index into a new index.\nEach original primary shard is cloned into a new primary shard in the new index.\n\nIMPORTANT: Elasticsearch does not apply index templates to the resulting index.\nThe API also does not copy index metadata from the original index.\nIndex 
metadata includes aliases, index lifecycle management phase definitions, and cross-cluster replication (CCR) follower information.\nFor example, if you clone a CCR follower index, the resulting clone will not be a follower index.\n\nThe clone API copies most index settings from the source index to the resulting index, with the exception of `index.number_of_replicas` and `index.auto_expand_replicas`.\nTo set the number of replicas in the resulting index, configure these settings in the clone request.\n\nCloning works as follows:\n\n* First, it creates a new target index with the same definition as the source index.\n* Then it hard-links segments from the source index into the target index. If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time consuming process.\n* Finally, it recovers the target index as though it were a closed index which had just been re-opened.\n\nIMPORTANT: Indices can only be cloned if they meet the following requirements:\n\n* The target index must not exist.\n* The source index must have the same number of primary shards as the target index.\n* The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index.", "inherits": { "type": { "name": "RequestBase", "namespace": "_types" } }, "path": [ @@ -128687,7 +128720,7 @@ } } ], - "specLocation": "indices/clone/IndicesCloneRequest.ts#L27-L75" + "specLocation": "indices/clone/IndicesCloneRequest.ts#L27-L98" }, { "kind": "response", @@ -128810,7 +128843,7 @@ "body": { "kind": "no_body" }, - "description": "Closes an index.", + "description": "Close an index.\nA closed index is blocked for read or write operations and does not allow all operations that opened indices allow.\nIt is not possible to index documents or to search for documents in a closed index.\nClosed indices do not have to maintain internal data structures for indexing or searching documents, which results in a smaller overhead on the cluster.\n\nWhen opening or closing an index, the master node is responsible for restarting the index shards to reflect the new state of the index.\nThe shards will then go through the normal recovery process.\nThe data of opened and closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times.\n\nYou can open and close multiple indices.\nAn error is thrown if the request explicitly refers to a missing index.\nThis behaviour can be turned off using the `ignore_unavailable=true` parameter.\n\nBy default, you must explicitly name the indices you are opening or closing.\nTo open or close indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`.
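As a concrete aside to the clone description above, this is roughly what the call looks like from the TypeScript client (`@elastic/elasticsearch`); the node URL and index names are hypothetical, and the preparatory write block reflects the documented prerequisite that the source index be read-only:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Cloning requires a read-only source, so block writes first.
await client.indices.addBlock({ index: 'my-source-index', block: 'write' })

// Clone the source into a new target index. Replica-related settings are
// not copied from the source, so set them explicitly in the request.
await client.indices.clone({
  index: 'my-source-index',
  target: 'my-cloned-index',
  settings: { 'index.number_of_replicas': 1 },
})
```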
This setting can also be changed with the cluster update settings API.\n\nClosed indices consume a significant amount of disk space, which can cause problems in managed environments.\nClosing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`.", "inherits": { "type": { "name": "RequestBase", "namespace": "_types" } }, "path": [ @@ -128915,7 +128948,7 @@ } } ], - "specLocation": "indices/close/CloseIndexRequest.ts#L24-L77" + "specLocation": "indices/close/CloseIndexRequest.ts#L24-L94" }, { "kind": "response", @@ -130000,7 +130033,7 @@ "body": { "kind": "no_body" }, - "description": "Analyzes the disk usage of each field of an index or data stream.", + "description": "Analyze the index disk usage.\nAnalyze the disk usage of each field of an index or data stream.\nThis API might not support indices created in previous Elasticsearch versions.\nThe result of a small index can be inaccurate as some parts of an index might not be analyzed by the API.", "inherits": { "type": { "name": "RequestBase", "namespace": "_types" } }, "path": [ @@ -130092,7 +130125,7 @@ } } ], - "specLocation": "indices/disk_usage/IndicesDiskUsageRequest.ts#L23-L71" + "specLocation": "indices/disk_usage/IndicesDiskUsageRequest.ts#L23-L74" }, { "kind": "response", @@ -130124,7 +130157,7 @@ } } }, - "description": "Aggregates a time series (TSDS) index and stores pre-computed statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped by a configured time interval.", + "description": "Downsample an index.\nAggregate a time series (TSDS) index and store pre-computed statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped by a configured time interval.\nFor example, a TSDS index that contains metrics sampled every 10 seconds can be downsampled to an hourly index.\nAll documents within an hour interval are summarized and stored as a single document in the downsample index.\n\nNOTE: Only indices in a time series data stream are supported.\nNeither field nor document level security can be defined on the source index.\nThe source index must be read only (`index.blocks.write: true`).", "inherits": { "type": { "name": "RequestBase", "namespace": "_types" } }, "path": [ @@ -130162,7 +130195,7 @@ } ], "query": [], - "specLocation": "indices/downsample/Request.ts#L24-L44" + "specLocation": "indices/downsample/Request.ts#L24-L51" }, { "kind": "response", @@ -131029,7 +131062,7 @@ "body": { "kind": "no_body" }, - "description": "Returns field usage information for each shard and field of an index.", + "description": "Get field usage stats.\nGet field usage information for each shard and field of an index.\nField usage statistics are automatically captured when queries are running on a cluster.\nA shard-level search request that accesses a given field, even if multiple times during that request, is counted as a single use.", "inherits": { "type": { "name": "RequestBase", "namespace": "_types" } }, "path": [ @@ -131144,7 +131177,7 @@ } } ], - "specLocation": "indices/field_usage_stats/IndicesFieldUsageStatsRequest.ts#L29-L84" + "specLocation": "indices/field_usage_stats/IndicesFieldUsageStatsRequest.ts#L29-L87" }, { "kind": "response", @@ -131302,7 +131335,7 @@ "body": { "kind": "no_body" }, - "description": "Flushes one or more data streams or indices.", + "description": "Flush data streams or indices.\nFlushing a data stream or index is the process of making sure that any data that is currently only stored in the transaction log is also permanently stored in the Lucene index.\nWhen restarting, Elasticsearch replays any unflushed operations from
the transaction log into the Lucene index to bring it back into the state that it was in before the restart.\nElasticsearch automatically triggers flushes as needed, using heuristics that trade off the size of the unflushed transaction log against the cost of performing each flush.\n\nAfter each operation has been flushed it is permanently stored in the Lucene index.\nThis may mean that there is no need to maintain an additional copy of it in the transaction log.\nThe transaction log is made up of multiple files, called generations, and Elasticsearch will delete any generation files when they are no longer needed, freeing up disk space.\n\nIt is also possible to trigger a flush on one or more indices using the flush API, although it is rare for users to need to call this API directly.\nIf you call the flush API after indexing some documents then a successful response indicates that Elasticsearch has flushed all the documents that were indexed before the flush API was called.", "inherits": { "type": { "name": "RequestBase", @@ -131394,7 +131427,7 @@ } } ], - "specLocation": "indices/flush/IndicesFlushRequest.ts#L23-L71" + "specLocation": "indices/flush/IndicesFlushRequest.ts#L23-L81" }, { "kind": "response", @@ -131422,7 +131455,7 @@ "body": { "kind": "no_body" }, - "description": "Performs the force merge operation on one or more indices.", + "description": "Force a merge.\nPerform the force merge operation on the shards of one or more indices.\nFor data streams, the API forces a merge on the shards of the stream's backing indices.\n\nMerging reduces the number of segments in each shard by merging some of them together and also frees up the space used by deleted documents.\nMerging normally happens automatically, but sometimes it is useful to trigger a merge manually.\n\nWARNING: We recommend force merging only a read-only index (meaning the index is no longer receiving writes).\nWhen documents are updated or deleted, the old version is not immediately removed but instead soft-deleted and marked with a \"tombstone\".\nThese soft-deleted documents are automatically cleaned up during regular segment merges.\nBut force merge can cause very large (greater than 5 GB) segments to be produced, which are not eligible for regular merges.\nSo the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance.\nIf you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can't be backed up incrementally.", "inherits": { "type": { "name": "RequestBase", @@ -131533,7 +131566,7 @@ } } ], - "specLocation": "indices/forcemerge/IndicesForceMergeRequest.ts#L24-L42" + "specLocation": "indices/forcemerge/IndicesForceMergeRequest.ts#L24-L56" }, { "kind": "response", @@ -133396,7 +133429,7 @@ "body": { "kind": "no_body" }, - "description": "Promotes a data stream from a replicated data stream managed by CCR to a regular data stream", + "description": "Promote a data stream.\nPromote a data stream from a replicated data stream managed by cross-cluster replication (CCR) to a regular data stream.\n\nWith CCR auto following, a data stream from a remote cluster can be replicated to the local cluster.\nThese data streams can't be rolled over in the local cluster.\nThese replicated data streams roll over only if the upstream data stream rolls over.\nIn the event that the remote cluster is no longer available, the data stream in the local cluster can be promoted to a regular data stream, which allows 
these data streams to be rolled over in the local cluster.\n\nNOTE: When promoting a data stream, ensure the local cluster has a data stream enabled index template that matches the data stream.\nIf this is missing, the data stream will not be able to roll over until a matching index template is created.\nThis will affect the lifecycle management of the data stream and interfere with the data stream size and retention.", "inherits": { "type": { "name": "RequestBase", @@ -133436,7 +133469,7 @@ } } ], - "specLocation": "indices/promote_data_stream/IndicesPromoteDataStreamRequest.ts#L24-L39" + "specLocation": "indices/promote_data_stream/IndicesPromoteDataStreamRequest.ts#L24-L50" }, { "kind": "response", @@ -134592,7 +134625,7 @@ } ] }, - "description": "Create or update an index template.\nIndex templates define settings, mappings, and aliases that can be applied automatically to new indices.", + "description": "Create or update an index template.\nIndex templates define settings, mappings, and aliases that can be applied automatically to new indices.\nElasticsearch applies templates to new indices based on an index pattern that matches the index name.\n\nIMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.\n\nComposable templates always take precedence over legacy templates.\nIf no composable template matches a new index, matching legacy templates are applied according to their order.\n\nIndex templates are only applied during index creation.\nChanges to index templates do not affect existing indices.\nSettings and mappings specified in create index API requests override any settings or mappings specified in an index template.", "inherits": { "type": { "name": "RequestBase", @@ -134668,7 +134701,7 @@ } } ], - "specLocation": "indices/put_template/IndicesPutTemplateRequest.ts#L29-L94" + "specLocation": "indices/put_template/IndicesPutTemplateRequest.ts#L29-L106" }, { "kind": "response", @@ -135290,7 +135323,7 @@ "body": { "kind": "no_body" }, - "description": "Returns information about ongoing and completed shard recoveries for one or more indices.\nFor data streams, the API returns information for the stream’s backing indices.", + "description": "Get index recovery information.\nGet information about ongoing and completed shard recoveries for one or more indices.\nFor data streams, the API returns information for the stream's backing indices.\n\nShard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard.\nWhen a shard recovery completes, the recovered shard is available for search and indexing.\n\nRecovery automatically occurs during the following processes:\n\n* When creating an index for the first time.\n* When a node rejoins the cluster and starts up any missing primary shard copies using the data that it holds in its data path.\n* Creation of new replica shard copies from the primary.\n* Relocation of a shard copy to a different node in the same cluster.\n* A snapshot restore operation.\n* A clone, shrink, or split operation.\n\nYou can determine the cause of a shard recovery using the recovery or cat recovery APIs.\n\nThe index recovery API reports information about completed recoveries only for shard copies that currently exist in the cluster.\nIt only reports the last recovery for each shard copy and does not report historical information about earlier recoveries, nor does 
it report information about the recoveries of shard copies that no longer exist.\nThis means that if a shard copy completes a recovery and then Elasticsearch relocates it onto a different node then the information about the original recovery will not be shown in the recovery API.", "inherits": { "type": { "name": "RequestBase", @@ -135343,7 +135376,7 @@ } } ], - "specLocation": "indices/recovery/IndicesRecoveryRequest.ts#L23-L51" + "specLocation": "indices/recovery/IndicesRecoveryRequest.ts#L23-L70" }, { "kind": "response", @@ -135930,7 +135963,7 @@ "body": { "kind": "no_body" }, - "description": "Reloads an index's search analyzers and their resources.", + "description": "Reload search analyzers.\nReload an index's search analyzers and their resources.\nFor data streams, the API reloads search analyzers and resources for the stream's backing indices.\n\nIMPORTANT: After reloading the search analyzers you should clear the request cache to make sure it doesn't contain responses derived from the previous versions of the analyzer.\n\nYou can use the reload search analyzers API to pick up changes to synonym files used in the `synonym_graph` or `synonym` token filter of a search analyzer.\nTo be eligible, the token filter must have an `updateable` flag of `true` and only be used in search analyzers.\n\nNOTE: This API does not perform a reload for each shard of an index.\nInstead, it performs a reload for each node containing index shards.\nAs a result, the total shard count returned by the API can differ from the number of index shards.\nBecause reloading affects every node with an index shard, it is important to update the synonym file on every data node in the cluster--including nodes that don't contain a shard replica--before using this API.\nThis ensures the synonym file is updated everywhere in the cluster in case shards are relocated in the future.", "inherits": { "type": { "name": "RequestBase", @@ -135993,7 +136026,7 @@ } } ], - "specLocation": "indices/reload_search_analyzers/ReloadSearchAnalyzersRequest.ts#L23-L36" + "specLocation": "indices/reload_search_analyzers/ReloadSearchAnalyzersRequest.ts#L23-L51" }, { "kind": "response", @@ -136021,7 +136054,7 @@ "body": { "kind": "no_body" }, - "description": "Resolves the specified index expressions to return information about each cluster, including\nthe local cluster, if included.\nMultiple patterns and remote clusters are supported.", + "description": "Resolve the cluster.\nResolve the specified index expressions to return information about each cluster, including the local cluster, if included.\nMultiple patterns and remote clusters are supported.\n\nThis endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search.\n\nYou use the same index expression with this endpoint as you would for cross-cluster search.\nIndex and cluster exclusions are also supported with this endpoint.\n\nFor each cluster in the index expression, information is returned about:\n\n* Whether the querying (\"local\") cluster is currently connected to each remote cluster in the index expression scope.\n* Whether each remote cluster is configured with `skip_unavailable` as `true` or `false`.\n* Whether there are any indices, aliases, or data streams on that cluster that match the index expression.\n* Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index).\n* Cluster version 
information, including the Elasticsearch server version.", "inherits": { "type": { "name": "RequestBase", "namespace": "_types" } }, "path": [ @@ -136097,7 +136130,7 @@ } } ], - "specLocation": "indices/resolve_cluster/ResolveClusterRequest.ts#L23-L62" + "specLocation": "indices/resolve_cluster/ResolveClusterRequest.ts#L23-L76" }, { "kind": "interface", @@ -136974,7 +137007,7 @@ "body": { "kind": "no_body" }, - "description": "Returns low-level information about the Lucene segments in index shards.\nFor data streams, the API returns information about the stream’s backing indices.", + "description": "Get index segments.\nGet low-level information about the Lucene segments in index shards.\nFor data streams, the API returns information about the stream's backing indices.", "inherits": { "type": { "name": "RequestBase", "namespace": "_types" } }, "path": [ @@ -137040,7 +137073,7 @@ } } ], - "specLocation": "indices/segments/IndicesSegmentsRequest.ts#L23-L59" + "specLocation": "indices/segments/IndicesSegmentsRequest.ts#L23-L61" }, { "kind": "response", @@ -137356,7 +137389,7 @@ "body": { "kind": "no_body" }, - "description": "Retrieves store information about replica shards in one or more indices.\nFor data streams, the API retrieves store information for the stream’s backing indices.", + "description": "Get index shard stores.\nGet store information about replica shards in one or more indices.\nFor data streams, the API retrieves store information for the stream's backing indices.\n\nThe index shard stores API returns the following information:\n\n* The node on which each replica shard exists.\n* The allocation ID for each replica shard.\n* A unique ID for each replica shard.\n* Any errors encountered while opening the shard index or from an earlier failure.\n\nBy default, the API returns store information only for primary shards that are unassigned or have one or more unassigned replica shards.", "inherits": { "type": { "name": "RequestBase", "namespace": "_types" } }, "path": [ @@ -137448,7 +137481,7 @@ } } ], - "specLocation": "indices/shard_stores/IndicesShardStoresRequest.ts#L24-L60" + "specLocation": "indices/shard_stores/IndicesShardStoresRequest.ts#L24-L71" }, { "kind": "response", @@ -137802,7 +137835,7 @@ } ] }, - "description": "Shrinks an existing index into a new index with fewer primary shards.", + "description": "Shrink an index.\nShrink an index into a new index with fewer primary shards.\n\nBefore you can shrink an index:\n\n* The index must be read-only.\n* A copy of every shard in the index must reside on the same node.\n* The index must have a green health status.\n\nTo make shard allocation easier, we recommend you also remove the index's replica shards.\nYou can later re-add replica shards as part of the shrink operation.\n\nThe requested number of primary shards in the target index must be a factor of the number of shards in the source index.\nFor example, an index with 8 primary shards can be shrunk into 4, 2 or 1 primary shards or an index with 15 primary shards can be shrunk into 5, 3 or 1.\nIf the number of shards in the index is a prime number it can only be shrunk into a single primary shard.\nBefore shrinking, a (primary or replica) copy of every shard in the index must be present on the same node.\n\nThe current write index on a data stream cannot be shrunk.
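To make the shrink workflow described in this hunk concrete, here is a minimal sketch with the TypeScript client; the node name and index names are hypothetical, and the preparatory settings mirror the prerequisites listed above (read-only, no replicas, all shards on one node). The analogous split call is shown at the end.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Prepare the source: drop replicas, gather a copy of every shard onto a
// single node, and block writes so the index is read-only.
await client.indices.putSettings({
  index: 'my-source-index',
  settings: {
    'index.number_of_replicas': 0,
    'index.routing.allocation.require._name': 'shrink-node-1',
    'index.blocks.write': true,
  },
})

// Shrink, for example, 8 primary shards down to 2 (a factor of 8).
await client.indices.shrink({
  index: 'my-source-index',
  target: 'my-shrunk-index',
  settings: {
    'index.number_of_shards': 2,
    'index.number_of_replicas': 1,
    // Clear the allocation requirement copied over from the source.
    'index.routing.allocation.require._name': null,
  },
})

// Split is the mirror image: the target must have a multiple of the
// source's primary shard count, subject to index.number_of_routing_shards.
await client.indices.split({
  index: 'my-other-read-only-index',
  target: 'my-split-index',
  settings: { 'index.number_of_shards': 4 },
})
```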
In order to shrink the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be shrunk.\n\nA shrink operation:\n\n* Creates a new target index with the same definition as the source index, but with a smaller number of primary shards.\n* Hard-links segments from the source index into the target index. If the file system does not support hard-linking, then all segments are copied into the new index, which is a much more time consuming process. Also if using multiple data paths, shards on different data paths require a full copy of segment files if they are not on the same disk since hardlinks do not work across disks.\n* Recovers the target index as though it were a closed index which had just been re-opened. Recovers shards to the `index.routing.allocation.initial_recovery._id` index setting.\n\nIMPORTANT: Indices can only be shrunk if they satisfy the following requirements:\n\n* The target index must not exist.\n* The source index must have more primary shards than the target index.\n* The number of primary shards in the target index must be a factor of the number of primary shards in the source index.\n* The index must not contain more than 2,147,483,519 documents in total across all shards that will be shrunk into a single shard on the target index, as this is the maximum number of docs that can fit into a single shard.\n* The node handling the shrink process must have sufficient free disk space to accommodate a second copy of the existing index.", "inherits": { "type": { "name": "RequestBase", "namespace": "_types" } }, "path": [ @@ -137880,7 +137913,7 @@ } } ], - "specLocation": "indices/shrink/IndicesShrinkRequest.ts#L27-L75" + "specLocation": "indices/shrink/IndicesShrinkRequest.ts#L27-L107" }, { "kind": "response", @@ -138434,7 +138467,7 @@ } ] }, - "description": "Splits an existing index into a new index with more primary shards.", + "description": "Split an index.\nSplit an index into a new index with more primary shards.\n\nBefore you can split an index:\n\n* The index must be read-only.\n* The cluster health status must be green.\n\nThe number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` setting.\nThe number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing.\nFor instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3.\n\nA split operation:\n\n* Creates a new target index with the same definition as the source index, but with a larger number of primary shards.\n* Hard-links segments from the source index into the target index.
If the file system doesn't support hard-linking, all segments are copied into the new index, which is a much more time consuming process.\n* Hashes all documents again, after low level files are created, to delete documents that belong to a different shard.\n* Recovers the target index as though it were a closed index which had just been re-opened.\n\nIMPORTANT: Indices can only be split if they satisfy the following requirements:\n\n* The target index must not exist.\n* The source index must have fewer primary shards than the target index.\n* The number of primary shards in the target index must be a multiple of the number of primary shards in the source index.\n* The node handling the split process must have sufficient free disk space to accommodate a second copy of the existing index.", "inherits": { "type": { "name": "RequestBase", @@ -138512,7 +138545,7 @@ } } ], - "specLocation": "indices/split/IndicesSplitRequest.ts#L27-L74" + "specLocation": "indices/split/IndicesSplitRequest.ts#L27-L98" }, { "kind": "response", @@ -138971,7 +139004,7 @@ "body": { "kind": "no_body" }, - "description": "Returns statistics for one or more indices.\nFor data streams, the API retrieves statistics for the stream’s backing indices.", + "description": "Get index statistics.\nFor data streams, the API retrieves statistics for the stream's backing indices.\n\nBy default, the returned statistics are index-level with `primaries` and `total` aggregations.\n`primaries` are the values for only the primary shards.\n`total` are the accumulated values for both primary and replica shards.\n\nTo get shard-level statistics, set the `level` parameter to `shards`.\n\nNOTE: When moving to another node, the shard-level statistics for a shard are cleared.\nAlthough the shard is no longer part of the node, that node retains any node-level statistics to which the shard contributed.", "inherits": { "type": { "name": "RequestBase", @@ -139136,7 +139169,7 @@ } } ], - "specLocation": "indices/stats/IndicesStatsRequest.ts#L29-L85" + "specLocation": "indices/stats/IndicesStatsRequest.ts#L29-L94" }, { "kind": "response", @@ -140035,7 +140068,7 @@ "body": { "kind": "no_body" }, - "description": "Unfreezes an index.", + "description": "Unfreeze an index.\nWhen a frozen index is unfrozen, the index goes through the normal recovery process and becomes writeable again.", "inherits": { "type": { "name": "RequestBase", @@ -140140,7 +140173,7 @@ } } ], - "specLocation": "indices/unfreeze/IndicesUnfreezeRequest.ts#L24-L75" + "specLocation": "indices/unfreeze/IndicesUnfreezeRequest.ts#L24-L77" }, { "kind": "response", diff --git a/specification/_doc_ids/table.csv b/specification/_doc_ids/table.csv index e2f4e2bba2..7cf100deae 100644 --- a/specification/_doc_ids/table.csv +++ b/specification/_doc_ids/table.csv @@ -212,6 +212,8 @@ index-modules-slowlog-slowlog,https://www.elastic.co/guide/en/elasticsearch/refe index-modules,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/index-modules.html index,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/index.html indexing-buffer,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indexing-buffer.html +index-modules-merge,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/index-modules-merge.html +index-templates,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/index-templates.html indices-aliases,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-aliases.html 
indices-analyze,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-analyze.html indices-clearcache,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-clearcache.html @@ -524,6 +526,7 @@ search-aggregations-metrics-top-metrics,https://www.elastic.co/guide/en/elastics search-aggregations-metrics-valuecount-aggregation,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/search-aggregations-metrics-valuecount-aggregation.html search-aggregations-metrics-weight-avg-aggregation,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/search-aggregations-metrics-weight-avg-aggregation.html search-aggregations-bucket-variablewidthhistogram-aggregation,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/search-aggregations-bucket-variablewidthhistogram-aggregation.html +search-analyzer,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/search-analyzer.html search-count,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/search-count.html search-explain,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/search-explain.html search-field-caps,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/search-field-caps.html diff --git a/specification/indices/clear_cache/IndicesIndicesClearCacheRequest.ts b/specification/indices/clear_cache/IndicesIndicesClearCacheRequest.ts index 6a9fed2bcb..395bdebce4 100644 --- a/specification/indices/clear_cache/IndicesIndicesClearCacheRequest.ts +++ b/specification/indices/clear_cache/IndicesIndicesClearCacheRequest.ts @@ -21,8 +21,9 @@ import { RequestBase } from '@_types/Base' import { ExpandWildcards, Fields, Indices } from '@_types/common' /** - * Clears the caches of one or more indices. - * For data streams, the API clears the caches of the stream’s backing indices. + * Clear the cache. + * Clear the cache of one or more indices. + * For data streams, the API clears the caches of the stream's backing indices. * @rest_spec_name indices.clear_cache * @availability stack stability=stable * @availability serverless stability=stable visibility=private diff --git a/specification/indices/clone/IndicesCloneRequest.ts b/specification/indices/clone/IndicesCloneRequest.ts index b15955635b..7623074936 100644 --- a/specification/indices/clone/IndicesCloneRequest.ts +++ b/specification/indices/clone/IndicesCloneRequest.ts @@ -25,7 +25,30 @@ import { IndexName, Name, WaitForActiveShards } from '@_types/common' import { Duration } from '@_types/Time' /** - * Clones an existing index. + * Clone an index. + * Clone an existing index into a new index. + * Each original primary shard is cloned into a new primary shard in the new index. + * + * IMPORTANT: Elasticsearch does not apply index templates to the resulting index. + * The API also does not copy index metadata from the original index. + * Index metadata includes aliases, index lifecycle management phase definitions, and cross-cluster replication (CCR) follower information. + * For example, if you clone a CCR follower index, the resulting clone will not be a follower index. + * + * The clone API copies most index settings from the source index to the resulting index, with the exception of `index.number_of_replicas` and `index.auto_expand_replicas`. + * To set the number of replicas in the resulting index, configure these settings in the clone request. + * + * Cloning works as follows: + * + * * First, it creates a new target index with the same definition as the source index. 
+ * * Then it hard-links segments from the source index into the target index. If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time consuming process. + * * Finally, it recovers the target index as though it were a closed index which had just been re-opened. + * + * IMPORTANT: Indices can only be cloned if they meet the following requirements: + * + * * The target index must not exist. + * * The source index must have the same number of primary shards as the target index. + * * The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index. + * * @rest_spec_name indices.clone * @availability stack since=7.4.0 stability=stable */ diff --git a/specification/indices/close/CloseIndexRequest.ts b/specification/indices/close/CloseIndexRequest.ts index f290a2de24..46ee95332d 100644 --- a/specification/indices/close/CloseIndexRequest.ts +++ b/specification/indices/close/CloseIndexRequest.ts @@ -22,7 +22,24 @@ import { ExpandWildcards, Indices, WaitForActiveShards } from '@_types/common' import { Duration } from '@_types/Time' /** - * Closes an index. + * Close an index. + * A closed index is blocked for read or write operations and does not allow all operations that opened indices allow. + * It is not possible to index documents or to search for documents in a closed index. + * Closed indices do not have to maintain internal data structures for indexing or searching documents, which results in a smaller overhead on the cluster. + * + * When opening or closing an index, the master node is responsible for restarting the index shards to reflect the new state of the index. + * The shards will then go through the normal recovery process. + * The data of opened and closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times. + * + * You can open and close multiple indices. + * An error is thrown if the request explicitly refers to a missing index. + * This behaviour can be turned off using the `ignore_unavailable=true` parameter. + * + * By default, you must explicitly name the indices you are opening or closing. + * To open or close indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. This setting can also be changed with the cluster update settings API. + * + * Closed indices consume a significant amount of disk space, which can cause problems in managed environments. + * Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`. * @doc_id indices-close * @rest_spec_name indices.close * @availability stack stability=stable diff --git a/specification/indices/disk_usage/IndicesDiskUsageRequest.ts b/specification/indices/disk_usage/IndicesDiskUsageRequest.ts index 30b4847f59..b8703b5c0d 100644 --- a/specification/indices/disk_usage/IndicesDiskUsageRequest.ts +++ b/specification/indices/disk_usage/IndicesDiskUsageRequest.ts @@ -21,7 +21,10 @@ import { RequestBase } from '@_types/Base' import { ExpandWildcards, Indices } from '@_types/common' /** - * Analyzes the disk usage of each field of an index or data stream. + * Analyze the index disk usage. + * Analyze the disk usage of each field of an index or data stream. + * This API might not support indices created in previous Elasticsearch versions.
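A short sketch of the close API described in this file, from the TypeScript client, with a reopen to show the round trip; the index names are hypothetical:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Close the index: it stops serving reads and writes and releases the
// in-memory structures used for indexing and search.
await client.indices.close({ index: 'my-index' })

// Reopen it later; the shards go through the normal recovery process.
await client.indices.open({ index: 'my-index' })

// A request that names a missing index normally fails; this relaxes it.
await client.indices.close({
  index: 'my-index,missing-index',
  ignore_unavailable: true,
})
```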
+ * The result of a small index can be inaccurate as some parts of an index might not be analyzed by the API. * @doc_id indices-disk-usage * @rest_spec_name indices.disk_usage * @availability stack since=7.15.0 stability=experimental diff --git a/specification/indices/downsample/Request.ts b/specification/indices/downsample/Request.ts index b603bf1d60..6bc6c1c4f7 100644 --- a/specification/indices/downsample/Request.ts +++ b/specification/indices/downsample/Request.ts @@ -22,7 +22,14 @@ import { RequestBase } from '@_types/Base' import { IndexName } from '@_types/common' /** - * Aggregates a time series (TSDS) index and stores pre-computed statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped by a configured time interval. + * Downsample an index. + * Aggregate a time series (TSDS) index and store pre-computed statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped by a configured time interval. + * For example, a TSDS index that contains metrics sampled every 10 seconds can be downsampled to an hourly index. + * All documents within an hour interval are summarized and stored as a single document in the downsample index. + * + * NOTE: Only indices in a time series data stream are supported. + * Neither field nor document level security can be defined on the source index. + * The source index must be read only (`index.blocks.write: true`). * @doc_id indices-downsample-data-stream * @rest_spec_name indices.downsample * @availability stack since=8.5.0 stability=experimental diff --git a/specification/indices/field_usage_stats/IndicesFieldUsageStatsRequest.ts b/specification/indices/field_usage_stats/IndicesFieldUsageStatsRequest.ts index b39ca352ca..703f8a386b 100644 --- a/specification/indices/field_usage_stats/IndicesFieldUsageStatsRequest.ts +++ b/specification/indices/field_usage_stats/IndicesFieldUsageStatsRequest.ts @@ -27,7 +27,10 @@ import { import { Duration } from '@_types/Time' /** - * Returns field usage information for each shard and field of an index. + * Get field usage stats. + * Get field usage information for each shard and field of an index. + * Field usage statistics are automatically captured when queries are running on a cluster. + * A shard-level search request that accesses a given field, even if multiple times during that request, is counted as a single use. * @rest_spec_name indices.field_usage_stats * @availability stack since=7.15.0 stability=experimental * @availability serverless stability=experimental visibility=private diff --git a/specification/indices/flush/IndicesFlushRequest.ts b/specification/indices/flush/IndicesFlushRequest.ts index cac3bd07f0..d111e574bd 100644 --- a/specification/indices/flush/IndicesFlushRequest.ts +++ b/specification/indices/flush/IndicesFlushRequest.ts @@ -21,7 +21,17 @@ import { RequestBase } from '@_types/Base' import { ExpandWildcards, Indices } from '@_types/common' /** - * Flushes one or more data streams or indices. + * Flush data streams or indices. + * Flushing a data stream or index is the process of making sure that any data that is currently only stored in the transaction log is also permanently stored in the Lucene index. + * When restarting, Elasticsearch replays any unflushed operations from the transaction log into the Lucene index to bring it back into the state that it was in before the restart. 
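Tying the downsample description above to code, a hedged sketch with the TypeScript client; the backing-index and target names are made up, and the `config` wrapper for the request body is an assumption based on the current client code generation, so adjust it if your client version differs:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// The source must be a TSDS backing index and must be read-only first.
await client.indices.addBlock({
  index: '.ds-my-metrics-2024.01.01-000001',
  block: 'write',
})

// Roll 10-second samples up into one summary document per hour.
await client.indices.downsample({
  index: '.ds-my-metrics-2024.01.01-000001',
  target_index: 'my-metrics-downsampled-1h',
  config: { fixed_interval: '1h' }, // hypothetical interval
})
```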
+ * Elasticsearch automatically triggers flushes as needed, using heuristics that trade off the size of the unflushed transaction log against the cost of performing each flush. + * + * After each operation has been flushed it is permanently stored in the Lucene index. + * This may mean that there is no need to maintain an additional copy of it in the transaction log. + * The transaction log is made up of multiple files, called generations, and Elasticsearch will delete any generation files when they are no longer needed, freeing up disk space. + * + * It is also possible to trigger a flush on one or more indices using the flush API, although it is rare for users to need to call this API directly. + * If you call the flush API after indexing some documents then a successful response indicates that Elasticsearch has flushed all the documents that were indexed before the flush API was called. * @doc_id indices-flush * @rest_spec_name indices.flush * @availability stack stability=stable diff --git a/specification/indices/forcemerge/IndicesForceMergeRequest.ts b/specification/indices/forcemerge/IndicesForceMergeRequest.ts index 6fc604109a..4d3234e2d1 100644 --- a/specification/indices/forcemerge/IndicesForceMergeRequest.ts +++ b/specification/indices/forcemerge/IndicesForceMergeRequest.ts @@ -22,9 +22,23 @@ import { ExpandWildcards, Indices } from '@_types/common' import { long } from '@_types/Numeric' /** + * Force a merge. + * Perform the force merge operation on the shards of one or more indices. + * For data streams, the API forces a merge on the shards of the stream's backing indices. + * + * Merging reduces the number of segments in each shard by merging some of them together and also frees up the space used by deleted documents. + * Merging normally happens automatically, but sometimes it is useful to trigger a merge manually. + * + * WARNING: We recommend force merging only a read-only index (meaning the index is no longer receiving writes). + * When documents are updated or deleted, the old version is not immediately removed but instead soft-deleted and marked with a "tombstone". + * These soft-deleted documents are automatically cleaned up during regular segment merges. + * But force merge can cause very large (greater than 5 GB) segments to be produced, which are not eligible for regular merges. + * So the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance. + * If you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can't be backed up incrementally. * @rest_spec_name indices.forcemerge * @availability stack since=2.1.0 stability=stable * @availability serverless stability=stable visibility=private + * @ext_doc_id index-modules-merge */ export interface Request extends RequestBase { path_parts: { diff --git a/specification/indices/promote_data_stream/IndicesPromoteDataStreamRequest.ts b/specification/indices/promote_data_stream/IndicesPromoteDataStreamRequest.ts index a956954ed3..340ddee352 100644 --- a/specification/indices/promote_data_stream/IndicesPromoteDataStreamRequest.ts +++ b/specification/indices/promote_data_stream/IndicesPromoteDataStreamRequest.ts @@ -22,6 +22,17 @@ import { IndexName } from '@_types/common' import { Duration } from '@_types/Time' /** + * Promote a data stream. + * Promote a data stream from a replicated data stream managed by cross-cluster replication (CCR) to a regular data stream. 
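Since the flush and force merge descriptions above sit side by side, one sketch covers both; the index names are hypothetical, and the force merge targets an index that no longer receives writes, per the warning:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Rarely needed by hand: a successful flush confirms that everything
// indexed before the call is persisted in Lucene, not only the translog.
await client.indices.flush({ index: 'my-index' })

// On a read-only index, merge down to a single segment to reclaim the
// space held by soft-deleted documents.
await client.indices.forcemerge({ index: 'my-old-index', max_num_segments: 1 })
```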
+ * + * With CCR auto following, a data stream from a remote cluster can be replicated to the local cluster. + * These data streams can't be rolled over in the local cluster. + * These replicated data streams roll over only if the upstream data stream rolls over. + * In the event that the remote cluster is no longer available, the data stream in the local cluster can be promoted to a regular data stream, which allows these data streams to be rolled over in the local cluster. + * + * NOTE: When promoting a data stream, ensure the local cluster has a data stream enabled index template that matches the data stream. + * If this is missing, the data stream will not be able to roll over until a matching index template is created. + * This will affect the lifecycle management of the data stream and interfere with the data stream size and retention. * @rest_spec_name indices.promote_data_stream * @availability stack since=7.9.0 stability=stable */ diff --git a/specification/indices/put_template/IndicesPutTemplateRequest.ts b/specification/indices/put_template/IndicesPutTemplateRequest.ts index 895fee8b1c..8fbb2ae16a 100644 --- a/specification/indices/put_template/IndicesPutTemplateRequest.ts +++ b/specification/indices/put_template/IndicesPutTemplateRequest.ts @@ -29,8 +29,20 @@ import { Duration } from '@_types/Time' /** * Create or update an index template. * Index templates define settings, mappings, and aliases that can be applied automatically to new indices. + * Elasticsearch applies templates to new indices based on an index pattern that matches the index name. + * + * IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. + * + * Composable templates always take precedence over legacy templates. + * If no composable template matches a new index, matching legacy templates are applied according to their order. + * + * Index templates are only applied during index creation. + * Changes to index templates do not affect existing indices. + * Settings and mappings specified in create index API requests override any settings or mappings specified in an index template. * @rest_spec_name indices.put_template * @availability stack stability=stable + * @cluster_privileges manage_index_templates, manage + * @ext_doc_id index-templates */ export interface Request extends RequestBase { path_parts: { diff --git a/specification/indices/recovery/IndicesRecoveryRequest.ts b/specification/indices/recovery/IndicesRecoveryRequest.ts index 9c31096e57..0c4212ae46 100644 --- a/specification/indices/recovery/IndicesRecoveryRequest.ts +++ b/specification/indices/recovery/IndicesRecoveryRequest.ts @@ -21,8 +21,27 @@ import { RequestBase } from '@_types/Base' import { Indices } from '@_types/common' /** - * Returns information about ongoing and completed shard recoveries for one or more indices. - * For data streams, the API returns information for the stream’s backing indices. + * Get index recovery information. + * Get information about ongoing and completed shard recoveries for one or more indices. + * For data streams, the API returns information for the stream's backing indices. + * + * Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard. + * When a shard recovery completes, the recovered shard is available for search and indexing. 
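A sketch of the promote flow described above, using the TypeScript client; the template and stream names are invented, and the template step reflects the NOTE about needing a matching data stream enabled index template before rollover can succeed:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Make sure a data stream enabled template matches the stream first,
// otherwise the promoted stream cannot roll over.
await client.indices.putIndexTemplate({
  name: 'my-logs-template',
  index_patterns: ['my-logs*'],
  data_stream: {},
})

// Promote the replicated stream once the remote cluster is unavailable.
await client.indices.promoteDataStream({ name: 'my-logs' })
```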
 * @rest_spec_name indices.recovery
 * @availability stack stability=stable
 * @availability serverless stability=stable visibility=private
diff --git a/specification/indices/reload_search_analyzers/ReloadSearchAnalyzersRequest.ts b/specification/indices/reload_search_analyzers/ReloadSearchAnalyzersRequest.ts
index 3c891ee676..c3b7e11439 100644
--- a/specification/indices/reload_search_analyzers/ReloadSearchAnalyzersRequest.ts
+++ b/specification/indices/reload_search_analyzers/ReloadSearchAnalyzersRequest.ts
@@ -21,8 +21,23 @@ import { RequestBase } from '@_types/Base'
 import { ExpandWildcards, Indices } from '@_types/common'

 /**
+ * Reload search analyzers.
+ * Reload an index's search analyzers and their resources.
+ * For data streams, the API reloads search analyzers and resources for the stream's backing indices.
+ *
+ * IMPORTANT: After reloading the search analyzers, you should clear the request cache to make sure it doesn't contain responses derived from the previous versions of the analyzer.
+ *
+ * You can use the reload search analyzers API to pick up changes to synonym files used in the `synonym_graph` or `synonym` token filter of a search analyzer.
+ * To be eligible, the token filter must have an `updateable` flag of `true` and only be used in search analyzers.
+ *
+ * NOTE: This API does not perform a reload for each shard of an index.
+ * Instead, it performs a reload for each node containing index shards.
+ * As a result, the total shard count returned by the API can differ from the number of index shards.
+ * Because reloading affects every node with an index shard, it is important to update the synonym file on every data node in the cluster (including nodes that don't contain a shard replica) before using this API.
+ * This ensures the synonym file is updated everywhere in the cluster in case shards are relocated in the future.
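+ *
+ * As an illustration, a minimal usage sketch assuming the official `@elastic/elasticsearch` Node.js client; the index name `my-index` is hypothetical and its search analyzer is presumed to use an updateable synonym filter:
+ *
+ * ```ts
+ * import { Client } from '@elastic/elasticsearch'
+ *
+ * // Assumes a locally reachable cluster; adjust the node URL as needed.
+ * const client = new Client({ node: 'http://localhost:9200' })
+ *
+ * // Reload the search analyzers after updating the synonym file on every data node.
+ * const resp = await client.indices.reloadSearchAnalyzers({ index: 'my-index' })
+ * console.log(resp)
+ * ```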
 * @rest_spec_name indices.reload_search_analyzers
 * @availability stack since=7.3.0 stability=stable
+ * @ext_doc_id search-analyzer
 */
export interface Request extends RequestBase {
  path_parts: {
diff --git a/specification/indices/resolve_cluster/ResolveClusterRequest.ts b/specification/indices/resolve_cluster/ResolveClusterRequest.ts
index 92564cfb86..0609869730 100644
--- a/specification/indices/resolve_cluster/ResolveClusterRequest.ts
+++ b/specification/indices/resolve_cluster/ResolveClusterRequest.ts
@@ -21,9 +21,23 @@ import { RequestBase } from '@_types/Base'
 import { ExpandWildcards, Names } from '@_types/common'

 /**
- * Resolves the specified index expressions to return information about each cluster, including
- * the local cluster, if included.
+ * Resolve the cluster.
+ * Resolve the specified index expressions to return information about each cluster, including the local cluster, if included.
 * Multiple patterns and remote clusters are supported.
+ *
+ * This endpoint is useful before doing a cross-cluster search to determine which remote clusters should be included in the search.
+ *
+ * You use the same index expression with this endpoint as you would for cross-cluster search.
+ * Index and cluster exclusions are also supported with this endpoint.
+ *
+ * For each cluster in the index expression, information is returned about:
+ *
+ * * Whether the querying ("local") cluster is currently connected to each remote cluster in the index expression scope.
+ * * Whether each remote cluster is configured with `skip_unavailable` as `true` or `false`.
+ * * Whether there are any indices, aliases, or data streams on that cluster that match the index expression.
+ * * Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index).
+ * * Cluster version information, including the Elasticsearch server version.
+ *
 * @rest_spec_name indices.resolve_cluster
 * @availability stack since=8.13.0 stability=stable
 */
diff --git a/specification/indices/segments/IndicesSegmentsRequest.ts b/specification/indices/segments/IndicesSegmentsRequest.ts
index 7d3fa06b75..9eeff54d02 100644
--- a/specification/indices/segments/IndicesSegmentsRequest.ts
+++ b/specification/indices/segments/IndicesSegmentsRequest.ts
@@ -20,8 +20,10 @@ import { RequestBase } from '@_types/Base'
 import { ExpandWildcards, Indices } from '@_types/common'

-/** Returns low-level information about the Lucene segments in index shards.
- * For data streams, the API returns information about the stream’s backing indices.
+/**
+ * Get index segments.
+ * Get low-level information about the Lucene segments in index shards.
+ * For data streams, the API returns information about the stream's backing indices.
 * @rest_spec_name indices.segments
 * @availability stack stability=stable
 * @availability serverless stability=stable visibility=private
diff --git a/specification/indices/shard_stores/IndicesShardStoresRequest.ts b/specification/indices/shard_stores/IndicesShardStoresRequest.ts
index 04dd15b62b..f54c55af84 100644
--- a/specification/indices/shard_stores/IndicesShardStoresRequest.ts
+++ b/specification/indices/shard_stores/IndicesShardStoresRequest.ts
@@ -22,10 +22,21 @@ import { ExpandWildcards, Indices } from '@_types/common'
 import { ShardStoreStatus } from './types'

 /**
- * Retrieves store information about replica shards in one or more indices.
- * For data streams, the API retrieves store information for the stream’s backing indices.
+ * Get index shard stores.
+ * Get store information about replica shards in one or more indices.
+ * For data streams, the API retrieves store information for the stream's backing indices.
+ *
+ * The index shard stores API returns the following information:
+ *
+ * * The node on which each replica shard exists.
+ * * The allocation ID for each replica shard.
+ * * A unique ID for each replica shard.
+ * * Any errors encountered while opening the shard index or from an earlier failure.
+ *
+ * By default, the API returns store information only for primary shards that are unassigned or have one or more unassigned replica shards.
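+ *
+ * As an illustration, a minimal usage sketch assuming the official `@elastic/elasticsearch` Node.js client; the index name `my-index` and the node URL are hypothetical:
+ *
+ * ```ts
+ * import { Client } from '@elastic/elasticsearch'
+ *
+ * // Assumes a locally reachable cluster; adjust the node URL as needed.
+ * const client = new Client({ node: 'http://localhost:9200' })
+ *
+ * // List store information for all shards, not only those with unassigned copies.
+ * const resp = await client.indices.shardStores({ index: 'my-index', status: 'all' })
+ * console.log(resp)
+ * ```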
 * @rest_spec_name indices.shard_stores
 * @availability stack stability=stable
+ * @index_privileges monitor,manage
 */
export interface Request extends RequestBase {
  path_parts: {
diff --git a/specification/indices/shrink/IndicesShrinkRequest.ts b/specification/indices/shrink/IndicesShrinkRequest.ts
index 22a6573669..188dd991cf 100644
--- a/specification/indices/shrink/IndicesShrinkRequest.ts
+++ b/specification/indices/shrink/IndicesShrinkRequest.ts
@@ -25,10 +25,42 @@ import { IndexName, WaitForActiveShards } from '@_types/common'
 import { Duration } from '@_types/Time'

 /**
- * Shrinks an existing index into a new index with fewer primary shards.
+ * Shrink an index.
+ * Shrink an index into a new index with fewer primary shards.
+ *
+ * Before you can shrink an index:
+ *
+ * * The index must be read-only.
+ * * A copy of every shard in the index must reside on the same node.
+ * * The index must have a green health status.
+ *
+ * To make shard allocation easier, we recommend you also remove the index's replica shards.
+ * You can later re-add replica shards as part of the shrink operation.
+ *
+ * The requested number of primary shards in the target index must be a factor of the number of shards in the source index.
+ * For example, an index with 8 primary shards can be shrunk into 4, 2, or 1 primary shards, and an index with 15 primary shards can be shrunk into 5, 3, or 1.
+ * If the number of shards in the index is a prime number, it can only be shrunk into a single primary shard.
+ * Before shrinking, a (primary or replica) copy of every shard in the index must be present on the same node.
+ *
+ * The current write index on a data stream cannot be shrunk. In order to shrink the current write index, the data stream must first be rolled over so that a new write index is created; the previous write index can then be shrunk.
+ *
+ * A shrink operation:
+ *
+ * * Creates a new target index with the same definition as the source index, but with a smaller number of primary shards.
+ * * Hard-links segments from the source index into the target index. If the file system does not support hard-linking, then all segments are copied into the new index, which is a much more time-consuming process. Also, if you use multiple data paths, shards on different data paths require a full copy of segment files if they are not on the same disk, since hard links do not work across disks.
+ * * Recovers the target index as though it were a closed index which had just been re-opened. Recovers shards to the `index.routing.allocation.initial_recovery._id` index setting.
+ *
+ * IMPORTANT: Indices can only be shrunk if they satisfy the following requirements:
+ *
+ * * The target index must not exist.
+ * * The source index must have more primary shards than the target index.
+ * * The number of primary shards in the target index must be a factor of the number of primary shards in the source index.
+ * * The index must not contain more than 2,147,483,519 documents in total across all shards that will be shrunk into a single shard on the target index, as this is the maximum number of documents that can fit into a single shard.
+ * * The node handling the shrink process must have sufficient free disk space to accommodate a second copy of the existing index.
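+ *
+ * As an illustration, a minimal usage sketch assuming the official `@elastic/elasticsearch` Node.js client; the source and target index names and the node URL are hypothetical:
+ *
+ * ```ts
+ * import { Client } from '@elastic/elasticsearch'
+ *
+ * // Assumes a locally reachable cluster; adjust the node URL as needed.
+ * const client = new Client({ node: 'http://localhost:9200' })
+ *
+ * // Shrink a read-only source index into a single-shard target index.
+ * const resp = await client.indices.shrink({
+ *   index: 'my-source-index',
+ *   target: 'my-target-index',
+ *   settings: { 'index.number_of_shards': 1, 'index.number_of_replicas': 0 }
+ * })
+ * console.log(resp)
+ * ```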
 * @doc_id indices-shrink-index
 * @rest_spec_name indices.shrink
 * @availability stack since=5.0.0 stability=stable
+ * @index_privileges manage
 */
export interface Request extends RequestBase {
  path_parts: {
diff --git a/specification/indices/split/IndicesSplitRequest.ts b/specification/indices/split/IndicesSplitRequest.ts
index 7bb87417a7..36ed65a25c 100644
--- a/specification/indices/split/IndicesSplitRequest.ts
+++ b/specification/indices/split/IndicesSplitRequest.ts
@@ -25,10 +25,34 @@ import { IndexName, WaitForActiveShards } from '@_types/common'
 import { Duration } from '@_types/Time'

 /**
- * Splits an existing index into a new index with more primary shards.
+ * Split an index.
+ * Split an index into a new index with more primary shards.
+ *
+ * Before you can split an index:
+ *
+ * * The index must be read-only.
+ * * The cluster health status must be green.
+ *
+ * The number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` setting.
+ * The number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing.
+ * For instance, a 5-shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3.
+ *
+ * A split operation:
+ *
+ * * Creates a new target index with the same definition as the source index, but with a larger number of primary shards.
+ * * Hard-links segments from the source index into the target index. If the file system doesn't support hard-linking, all segments are copied into the new index, which is a much more time-consuming process.
+ * * Hashes all documents again, after low-level files are created, to delete documents that belong to a different shard.
+ * * Recovers the target index as though it were a closed index which had just been re-opened.
+ *
+ * IMPORTANT: Indices can only be split if they satisfy the following requirements:
+ *
+ * * The target index must not exist.
+ * * The source index must have fewer primary shards than the target index.
+ * * The number of primary shards in the target index must be a multiple of the number of primary shards in the source index.
+ * * The node handling the split process must have sufficient free disk space to accommodate a second copy of the existing index.
 * @doc_id indices-split-index
 * @rest_spec_name indices.split
 * @availability stack since=6.1.0 stability=stable
+ * @index_privileges manage
 */
export interface Request extends RequestBase {
  path_parts: {
diff --git a/specification/indices/stats/IndicesStatsRequest.ts b/specification/indices/stats/IndicesStatsRequest.ts
index 9ccceaf4ee..6e10d8ce5b 100644
--- a/specification/indices/stats/IndicesStatsRequest.ts
+++ b/specification/indices/stats/IndicesStatsRequest.ts
@@ -27,8 +27,17 @@ import {
 } from '@_types/common'

 /**
- * Returns statistics for one or more indices.
- * For data streams, the API retrieves statistics for the stream’s backing indices.
+ * Get index statistics.
+ * Get statistics for one or more indices.
+ * For data streams, the API retrieves statistics for the stream's backing indices.
+ *
+ * By default, the returned statistics are index-level with `primaries` and `total` aggregations.
+ * `primaries` are the values for only the primary shards.
+ * `total` are the accumulated values for both primary and replica shards.
+ *
+ * To get shard-level statistics, set the `level` parameter to `shards`.
+ *
+ * NOTE: When a shard moves to another node, the shard-level statistics for that shard are cleared.
+ * Although the shard is no longer part of the node, that node retains any node-level statistics to which the shard contributed.
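+ *
+ * As an illustration, a minimal usage sketch assuming the official `@elastic/elasticsearch` Node.js client; the index name `my-index` and the node URL are hypothetical:
+ *
+ * ```ts
+ * import { Client } from '@elastic/elasticsearch'
+ *
+ * // Assumes a locally reachable cluster; adjust the node URL as needed.
+ * const client = new Client({ node: 'http://localhost:9200' })
+ *
+ * // Retrieve shard-level statistics instead of the default index-level view.
+ * const resp = await client.indices.stats({ index: 'my-index', level: 'shards' })
+ * console.log(resp)
+ * ```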
 * @rest_spec_name indices.stats
 * @availability stack since=1.3.0 stability=stable
 * @availability serverless stability=stable visibility=private
diff --git a/specification/indices/unfreeze/IndicesUnfreezeRequest.ts b/specification/indices/unfreeze/IndicesUnfreezeRequest.ts
index 81dfc394a3..05be87ef0a 100644
--- a/specification/indices/unfreeze/IndicesUnfreezeRequest.ts
+++ b/specification/indices/unfreeze/IndicesUnfreezeRequest.ts
@@ -22,9 +22,11 @@ import { ExpandWildcards, IndexName } from '@_types/common'
 import { Duration } from '@_types/Time'

 /**
- * Unfreezes an index.
+ * Unfreeze an index.
+ * When a frozen index is unfrozen, the index goes through the normal recovery process and becomes writeable again.
 * @rest_spec_name indices.unfreeze
 * @availability stack since=6.6.0 stability=stable
+ * @index_privileges manage
 */
export interface Request extends RequestBase {
  path_parts: {