From f60b79f8049e733cfd193c824907adb03f5b8be3 Mon Sep 17 00:00:00 2001 From: Laura Trotta Date: Tue, 17 Dec 2024 12:24:45 +0100 Subject: [PATCH] [codegen] update to latest spec --- .../ElasticsearchAsyncClient.java | 89 +- .../elasticsearch/ElasticsearchClient.java | 89 +- .../elasticsearch/ccr/CcrStatsRequest.java | 3 +- .../ccr/DeleteAutoFollowPatternRequest.java | 3 +- .../ccr/ElasticsearchCcrAsyncClient.java | 208 +++- .../ccr/ElasticsearchCcrClient.java | 208 +++- .../elasticsearch/ccr/FollowInfoRequest.java | 6 +- .../elasticsearch/ccr/FollowRequest.java | 6 +- .../elasticsearch/ccr/FollowStatsRequest.java | 5 +- .../ccr/ForgetFollowerRequest.java | 22 +- .../ccr/GetAutoFollowPatternRequest.java | 3 +- .../ccr/PauseAutoFollowPatternRequest.java | 12 +- .../elasticsearch/ccr/PauseFollowRequest.java | 7 +- .../ccr/PutAutoFollowPatternRequest.java | 14 +- .../ccr/ResumeAutoFollowPatternRequest.java | 6 +- .../ccr/ResumeFollowRequest.java | 6 +- .../elasticsearch/ccr/UnfollowRequest.java | 11 +- .../cluster/AllocationExplainRequest.java | 8 +- .../cluster/ClusterStatsRequest.java | 7 +- .../DeleteVotingConfigExclusionsRequest.java | 3 +- .../ElasticsearchClusterAsyncClient.java | 558 ++++++++-- .../cluster/ElasticsearchClusterClient.java | 558 ++++++++-- .../cluster/GetClusterSettingsRequest.java | 2 +- .../elasticsearch/cluster/HealthRequest.java | 22 +- .../cluster/PendingTasksRequest.java | 18 +- .../PostVotingConfigExclusionsRequest.java | 37 +- .../cluster/PutClusterSettingsRequest.java | 28 +- .../cluster/RemoteInfoRequest.java | 6 +- .../elasticsearch/cluster/RerouteRequest.java | 44 +- .../elasticsearch/cluster/StateRequest.java | 31 +- .../elasticsearch/core/BulkRequest.java | 64 ++ .../core/HealthReportRequest.java | 29 +- .../elasticsearch/core/PingRequest.java | 2 +- .../elasticsearch/doc-files/api-spec.html | 442 ++++---- .../ElasticsearchFeaturesAsyncClient.java | 40 +- .../features/ElasticsearchFeaturesClient.java | 40 +- 
.../features/GetFeaturesRequest.java | 16 +- .../features/ResetFeaturesRequest.java | 24 +- .../ilm/DeleteLifecycleRequest.java | 6 +- .../ilm/ElasticsearchIlmAsyncClient.java | 228 ++++- .../ilm/ElasticsearchIlmClient.java | 228 ++++- .../ilm/ExplainLifecycleRequest.java | 10 +- .../ilm/GetIlmStatusRequest.java | 2 +- .../ilm/GetLifecycleRequest.java | 2 +- .../ilm/MigrateToDataTiersRequest.java | 24 +- .../elasticsearch/ilm/MoveToStepRequest.java | 21 +- .../ilm/PutLifecycleRequest.java | 5 +- .../ilm/RemovePolicyRequest.java | 3 +- .../elasticsearch/ilm/RetryRequest.java | 5 +- .../elasticsearch/ilm/StartIlmRequest.java | 5 +- .../elasticsearch/ilm/StopIlmRequest.java | 11 +- .../indices/ClearCacheRequest.java | 4 +- .../indices/CloneIndexRequest.java | 38 +- .../indices/CloseIndexRequest.java | 28 +- .../indices/DiskUsageRequest.java | 5 +- .../indices/DownsampleRequest.java | 15 +- .../ElasticsearchIndicesAsyncClient.java | 961 ++++++++++++++++-- .../indices/ElasticsearchIndicesClient.java | 961 ++++++++++++++++-- .../indices/ExplainDataLifecycleRequest.java | 8 +- .../indices/FieldUsageStatsRequest.java | 6 +- .../elasticsearch/indices/FlushRequest.java | 21 +- .../indices/ForcemergeRequest.java | 20 +- .../indices/IndicesStatsRequest.java | 17 +- .../indices/PromoteDataStreamRequest.java | 18 +- .../indices/PutTemplateRequest.java | 15 + .../indices/RecoveryRequest.java | 34 +- .../indices/ReloadSearchAnalyzersRequest.java | 23 +- .../indices/ResolveClusterRequest.java | 31 +- .../indices/SegmentsRequest.java | 5 +- .../indices/ShardStoresRequest.java | 18 +- .../elasticsearch/indices/ShrinkRequest.java | 58 +- .../elasticsearch/indices/SplitRequest.java | 54 +- .../indices/UnfreezeRequest.java | 3 +- .../ElasticsearchInferenceAsyncClient.java | 40 +- .../ElasticsearchInferenceClient.java | 40 +- .../elasticsearch/inference/PutRequest.java | 20 +- .../license/DeleteLicenseRequest.java | 6 +- .../ElasticsearchLicenseAsyncClient.java | 175 +++- 
.../license/ElasticsearchLicenseClient.java | 175 +++- .../license/GetBasicStatusRequest.java | 2 +- .../license/GetLicenseRequest.java | 12 +- .../license/GetTrialStatusRequest.java | 2 +- .../elasticsearch/license/PostRequest.java | 11 +- .../license/PostStartBasicRequest.java | 20 +- .../license/PostStartTrialRequest.java | 12 +- .../logstash/DeletePipelineRequest.java | 8 +- .../ElasticsearchLogstashAsyncClient.java | 30 +- .../logstash/ElasticsearchLogstashClient.java | 30 +- .../logstash/GetPipelineRequest.java | 10 +- .../elasticsearch/logstash/Pipeline.java | 130 +-- .../logstash/PutPipelineRequest.java | 9 +- .../migration/DeprecationsRequest.java | 9 +- .../ElasticsearchMigrationAsyncClient.java | 45 +- .../ElasticsearchMigrationClient.java | 45 +- .../GetFeatureUpgradeStatusRequest.java | 8 +- .../migration/PostFeatureUpgradeRequest.java | 10 +- .../ml/AdaptiveAllocationsSettings.java | 217 ++++ .../ml/CommonTokenizationConfig.java | 270 +++++ .../elasticsearch/ml/DatafeedStats.java | 39 +- .../elasticsearch/ml/DatafeedTimingStats.java | 40 + .../ml/DataframeAnalyticsSummary.java | 47 + .../elasticsearch/ml/DetectorUpdate.java | 270 +++++ ...eryNode.java => DiscoveryNodeCompact.java} | 137 +-- .../ml/DiscoveryNodeContent.java | 422 ++++++++ .../ml/ElasticsearchMlAsyncClient.java | 8 +- .../ml/ElasticsearchMlClient.java | 8 +- .../ExponentialAverageCalculationContext.java | 222 ++++ .../ml/FillMaskInferenceOptions.java | 31 + .../ml/GetTrainedModelsRequest.java | 36 + .../clients/elasticsearch/ml/JobStats.java | 14 +- .../elasticsearch/ml/MlInfoRequest.java | 2 +- .../elasticsearch/ml/ModelPackageConfig.java | 593 +++++++++++ .../elasticsearch/ml/ModelSizeStats.java | 30 + .../ml/ModelSnapshotUpgrade.java | 14 +- .../ml/NlpBertTokenizationConfig.java | 202 +--- .../ml/NlpRobertaTokenizationConfig.java | 161 +-- .../elasticsearch/ml/OverallBucket.java | 19 +- .../elasticsearch/ml/PostDataResponse.java | 396 +++++--- .../ml/PutDataFrameAnalyticsRequest.java 
| 46 + .../ml/PutDataFrameAnalyticsResponse.java | 47 + .../elasticsearch/ml/PutDatafeedRequest.java | 2 +- .../elasticsearch/ml/PutJobRequest.java | 190 +++- .../ml/TextEmbeddingInferenceOptions.java | 31 + .../ml/TextExpansionInferenceOptions.java | 31 + .../elasticsearch/ml/TokenizationConfig.java | 31 + .../ml/TokenizationConfigBuilders.java | 19 + .../ml/TrainedModelAssignment.java | 66 ++ .../TrainedModelAssignmentRoutingTable.java | 21 +- .../TrainedModelAssignmentTaskParameters.java | 79 +- .../elasticsearch/ml/TrainedModelConfig.java | 36 + .../ml/TrainedModelDeploymentNodesStats.java | 436 ++++++-- .../ml/TrainedModelDeploymentStats.java | 307 ++++-- .../elasticsearch/ml/UpdateJobRequest.java | 18 +- .../ml/ValidateDetectorRequest.java | 2 +- .../clients/elasticsearch/ml/info/Limits.java | 78 +- .../elasticsearch/monitoring/BulkRequest.java | 3 +- .../ElasticsearchMonitoringAsyncClient.java | 6 +- .../ElasticsearchMonitoringClient.java | 6 +- ...earRepositoriesMeteringArchiveRequest.java | 4 +- .../nodes/ElasticsearchNodesAsyncClient.java | 125 ++- .../nodes/ElasticsearchNodesClient.java | 125 ++- .../GetRepositoriesMeteringInfoRequest.java | 12 +- .../nodes/HotThreadsRequest.java | 6 +- .../elasticsearch/nodes/NodesInfoRequest.java | 3 +- .../nodes/NodesStatsRequest.java | 3 +- .../nodes/NodesUsageRequest.java | 2 +- .../nodes/ReloadSecureSettingsRequest.java | 17 +- .../rollup/DeleteJobRequest.java | 30 +- .../ElasticsearchRollupAsyncClient.java | 240 ++++- .../rollup/ElasticsearchRollupClient.java | 240 ++++- .../elasticsearch/rollup/GetJobsRequest.java | 8 +- .../rollup/GetRollupCapsRequest.java | 18 +- .../rollup/GetRollupIndexCapsRequest.java | 14 +- .../elasticsearch/rollup/PutJobRequest.java | 18 +- .../rollup/RollupSearchRequest.java | 6 +- .../elasticsearch/rollup/StartJobRequest.java | 4 +- .../elasticsearch/rollup/StopJobRequest.java | 3 +- .../CacheStatsRequest.java | 3 +- .../ClearCacheRequest.java | 3 +- 
...csearchSearchableSnapshotsAsyncClient.java | 32 +- ...lasticsearchSearchableSnapshotsClient.java | 32 +- .../searchable_snapshots/MountRequest.java | 4 +- .../SearchableSnapshotsStatsRequest.java | 2 +- .../shutdown/DeleteNodeRequest.java | 13 +- .../ElasticsearchShutdownAsyncClient.java | 109 +- .../shutdown/ElasticsearchShutdownClient.java | 109 +- .../shutdown/GetNodeRequest.java | 13 +- .../shutdown/PutNodeRequest.java | 22 +- .../slm/DeleteLifecycleRequest.java | 4 +- .../slm/ElasticsearchSlmAsyncClient.java | 68 +- .../slm/ElasticsearchSlmClient.java | 68 +- .../slm/ExecuteLifecycleRequest.java | 6 +- .../slm/ExecuteRetentionRequest.java | 6 +- .../slm/GetLifecycleRequest.java | 4 +- .../slm/GetSlmStatusRequest.java | 2 +- .../elasticsearch/slm/GetStatsRequest.java | 4 +- .../slm/PutLifecycleRequest.java | 10 +- .../elasticsearch/slm/StartSlmRequest.java | 4 +- .../elasticsearch/slm/StopSlmRequest.java | 12 +- .../snapshot/CleanupRepositoryRequest.java | 5 +- .../snapshot/CloneSnapshotRequest.java | 4 +- .../snapshot/CreateRepositoryRequest.java | 8 +- .../snapshot/CreateSnapshotRequest.java | 3 +- .../snapshot/DeleteRepositoryRequest.java | 5 +- .../snapshot/DeleteSnapshotRequest.java | 2 +- .../ElasticsearchSnapshotAsyncClient.java | 274 ++++- .../snapshot/ElasticsearchSnapshotClient.java | 274 ++++- .../snapshot/GetRepositoryRequest.java | 2 +- .../snapshot/GetSnapshotRequest.java | 2 +- .../RepositoryVerifyIntegrityRequest.java | 51 +- .../snapshot/RestoreRequest.java | 27 +- .../snapshot/SnapshotStatusRequest.java | 16 +- .../snapshot/VerifyRepositoryRequest.java | 3 +- 193 files changed, 10786 insertions(+), 2350 deletions(-) create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/ml/AdaptiveAllocationsSettings.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/ml/CommonTokenizationConfig.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/ml/DetectorUpdate.java 
rename java-client/src/main/java/co/elastic/clients/elasticsearch/ml/{DiscoveryNode.java => DiscoveryNodeCompact.java} (87%) create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/ml/DiscoveryNodeContent.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/ml/ExponentialAverageCalculationContext.java create mode 100644 java-client/src/main/java/co/elastic/clients/elasticsearch/ml/ModelPackageConfig.java diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ElasticsearchAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ElasticsearchAsyncClient.java index 5ab973dfa..251d0b5a1 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ElasticsearchAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ElasticsearchAsyncClient.java @@ -1151,7 +1151,34 @@ public final CompletableFuture> getSour // ----- Endpoint: health_report /** - * Returns the health of the cluster. + * Get the cluster health. Get a report with the health status of an + * Elasticsearch cluster. The report contains a list of indicators that compose + * Elasticsearch functionality. + *

+ * Each indicator has a health status of: green, unknown, yellow or red. The + * indicator will provide an explanation and metadata describing the reason for + * its current health status. + *

+ * The cluster’s status is controlled by the worst indicator status. + *

+ * In the event that an indicator’s status is non-green, a list of impacts may + * be present in the indicator result which detail the functionalities that are + * negatively affected by the health issue. Each impact carries with it a + * severity level, an area of the system that is affected, and a simple + * description of the impact on the system. + *

+ * Some health indicators can determine the root cause of a health problem and + * prescribe a set of steps that can be performed in order to improve the health + * of the system. The root cause and remediation steps are encapsulated in a + * diagnosis. A diagnosis contains a cause detailing a root cause analysis, an + * action containing a brief description of the steps to take to fix the + * problem, the list of affected resources (if applicable), and a detailed + * step-by-step troubleshooting guide to fix the diagnosed problem. + *

+ * NOTE: The health indicators perform root cause analysis of non-green health + * statuses. This can be computationally expensive when called frequently. When + * setting up automated polling of the API for health status, set verbose to + * false to disable the more expensive analysis logic. * * @see Documentation @@ -1166,7 +1193,34 @@ public CompletableFuture healthReport(HealthReportRequest } /** - * Returns the health of the cluster. + * Get the cluster health. Get a report with the health status of an + * Elasticsearch cluster. The report contains a list of indicators that compose + * Elasticsearch functionality. + *

+ * Each indicator has a health status of: green, unknown, yellow or red. The + * indicator will provide an explanation and metadata describing the reason for + * its current health status. + *

+ * The cluster’s status is controlled by the worst indicator status. + *

+ * In the event that an indicator’s status is non-green, a list of impacts may + * be present in the indicator result which detail the functionalities that are + * negatively affected by the health issue. Each impact carries with it a + * severity level, an area of the system that is affected, and a simple + * description of the impact on the system. + *

+ * Some health indicators can determine the root cause of a health problem and + * prescribe a set of steps that can be performed in order to improve the health + * of the system. The root cause and remediation steps are encapsulated in a + * diagnosis. A diagnosis contains a cause detailing a root cause analysis, an + * action containing a brief description of the steps to take to fix the + * problem, the list of affected resources (if applicable), and a detailed + * step-by-step troubleshooting guide to fix the diagnosed problem. + *

+ * NOTE: The health indicators perform root cause analysis of non-green health + * statuses. This can be computationally expensive when called frequently. When + * setting up automated polling of the API for health status, set verbose to + * false to disable the more expensive analysis logic. * * @param fn * a function that initializes a builder to create the @@ -1182,7 +1236,34 @@ public final CompletableFuture healthReport( } /** - * Returns the health of the cluster. + * Get the cluster health. Get a report with the health status of an + * Elasticsearch cluster. The report contains a list of indicators that compose + * Elasticsearch functionality. + *

+ * Each indicator has a health status of: green, unknown, yellow or red. The + * indicator will provide an explanation and metadata describing the reason for + * its current health status. + *

+ * The cluster’s status is controlled by the worst indicator status. + *

+ * In the event that an indicator’s status is non-green, a list of impacts may + * be present in the indicator result which detail the functionalities that are + * negatively affected by the health issue. Each impact carries with it a + * severity level, an area of the system that is affected, and a simple + * description of the impact on the system. + *

+ * Some health indicators can determine the root cause of a health problem and + * prescribe a set of steps that can be performed in order to improve the health + * of the system. The root cause and remediation steps are encapsulated in a + * diagnosis. A diagnosis contains a cause detailing a root cause analysis, an + * action containing a brief description of the steps to take to fix the + * problem, the list of affected resources (if applicable), and a detailed + * step-by-step troubleshooting guide to fix the diagnosed problem. + *

+ * NOTE: The health indicators perform root cause analysis of non-green health + * statuses. This can be computationally expensive when called frequently. When + * setting up automated polling of the API for health status, set verbose to + * false to disable the more expensive analysis logic. * * @see Documentation @@ -1814,7 +1895,7 @@ public final CompletableFuture openPointInTime( // ----- Endpoint: ping /** - * Ping the cluster. Returns whether the cluster is running. + * Ping the cluster. Get information about whether the cluster is running. * * @see Documentation diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ElasticsearchClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ElasticsearchClient.java index d1bbae89a..16dddaa1d 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ElasticsearchClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ElasticsearchClient.java @@ -1168,7 +1168,34 @@ public final GetSourceResponse getSource( // ----- Endpoint: health_report /** - * Returns the health of the cluster. + * Get the cluster health. Get a report with the health status of an + * Elasticsearch cluster. The report contains a list of indicators that compose + * Elasticsearch functionality. + *

+ * Each indicator has a health status of: green, unknown, yellow or red. The + * indicator will provide an explanation and metadata describing the reason for + * its current health status. + *

+ * The cluster’s status is controlled by the worst indicator status. + *

+ * In the event that an indicator’s status is non-green, a list of impacts may + * be present in the indicator result which detail the functionalities that are + * negatively affected by the health issue. Each impact carries with it a + * severity level, an area of the system that is affected, and a simple + * description of the impact on the system. + *

+ * Some health indicators can determine the root cause of a health problem and + * prescribe a set of steps that can be performed in order to improve the health + * of the system. The root cause and remediation steps are encapsulated in a + * diagnosis. A diagnosis contains a cause detailing a root cause analysis, an + * action containing a brief description of the steps to take to fix the + * problem, the list of affected resources (if applicable), and a detailed + * step-by-step troubleshooting guide to fix the diagnosed problem. + *

+ * NOTE: The health indicators perform root cause analysis of non-green health + * statuses. This can be computationally expensive when called frequently. When + * setting up automated polling of the API for health status, set verbose to + * false to disable the more expensive analysis logic. * * @see Documentation @@ -1183,7 +1210,34 @@ public HealthReportResponse healthReport(HealthReportRequest request) throws IOE } /** - * Returns the health of the cluster. + * Get the cluster health. Get a report with the health status of an + * Elasticsearch cluster. The report contains a list of indicators that compose + * Elasticsearch functionality. + *

+ * Each indicator has a health status of: green, unknown, yellow or red. The + * indicator will provide an explanation and metadata describing the reason for + * its current health status. + *

+ * The cluster’s status is controlled by the worst indicator status. + *

+ * In the event that an indicator’s status is non-green, a list of impacts may + * be present in the indicator result which detail the functionalities that are + * negatively affected by the health issue. Each impact carries with it a + * severity level, an area of the system that is affected, and a simple + * description of the impact on the system. + *

+ * Some health indicators can determine the root cause of a health problem and + * prescribe a set of steps that can be performed in order to improve the health + * of the system. The root cause and remediation steps are encapsulated in a + * diagnosis. A diagnosis contains a cause detailing a root cause analysis, an + * action containing a brief description of the steps to take to fix the + * problem, the list of affected resources (if applicable), and a detailed + * step-by-step troubleshooting guide to fix the diagnosed problem. + *

+ * NOTE: The health indicators perform root cause analysis of non-green health + * statuses. This can be computationally expensive when called frequently. When + * setting up automated polling of the API for health status, set verbose to + * false to disable the more expensive analysis logic. * * @param fn * a function that initializes a builder to create the @@ -1200,7 +1254,34 @@ public final HealthReportResponse healthReport( } /** - * Returns the health of the cluster. + * Get the cluster health. Get a report with the health status of an + * Elasticsearch cluster. The report contains a list of indicators that compose + * Elasticsearch functionality. + *

+ * Each indicator has a health status of: green, unknown, yellow or red. The + * indicator will provide an explanation and metadata describing the reason for + * its current health status. + *

+ * The cluster’s status is controlled by the worst indicator status. + *

+ * In the event that an indicator’s status is non-green, a list of impacts may + * be present in the indicator result which detail the functionalities that are + * negatively affected by the health issue. Each impact carries with it a + * severity level, an area of the system that is affected, and a simple + * description of the impact on the system. + *

+ * Some health indicators can determine the root cause of a health problem and + * prescribe a set of steps that can be performed in order to improve the health + * of the system. The root cause and remediation steps are encapsulated in a + * diagnosis. A diagnosis contains a cause detailing a root cause analysis, an + * action containing a brief description of the steps to take to fix the + * problem, the list of affected resources (if applicable), and a detailed + * step-by-step troubleshooting guide to fix the diagnosed problem. + *

+ * NOTE: The health indicators perform root cause analysis of non-green health + * statuses. This can be computationally expensive when called frequently. When + * setting up automated polling of the API for health status, set verbose to + * false to disable the more expensive analysis logic. * * @see Documentation @@ -1842,7 +1923,7 @@ public final OpenPointInTimeResponse openPointInTime( // ----- Endpoint: ping /** - * Ping the cluster. Returns whether the cluster is running. + * Ping the cluster. Get information about whether the cluster is running. * * @see Documentation diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/CcrStatsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/CcrStatsRequest.java index 76bca7dc1..683602c92 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/CcrStatsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/CcrStatsRequest.java @@ -50,7 +50,8 @@ // typedef: ccr.stats.Request /** - * Gets all stats related to cross-cluster replication. + * Get cross-cluster replication stats. This API returns stats about + * auto-following and the same shard-level stats as the get follower stats API. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/DeleteAutoFollowPatternRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/DeleteAutoFollowPatternRequest.java index 342fbf5a7..61bb19eeb 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/DeleteAutoFollowPatternRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/DeleteAutoFollowPatternRequest.java @@ -56,7 +56,8 @@ // typedef: ccr.delete_auto_follow_pattern.Request /** - * Deletes auto-follow patterns. + * Delete auto-follow patterns. Delete a collection of cross-cluster replication + * auto-follow patterns. 
* * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/ElasticsearchCcrAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/ElasticsearchCcrAsyncClient.java index f5d41aaeb..5779ff630 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/ElasticsearchCcrAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/ElasticsearchCcrAsyncClient.java @@ -67,7 +67,8 @@ public ElasticsearchCcrAsyncClient withTransportOptions(@Nullable TransportOptio // ----- Endpoint: ccr.delete_auto_follow_pattern /** - * Deletes auto-follow patterns. + * Delete auto-follow patterns. Delete a collection of cross-cluster replication + * auto-follow patterns. * * @see Documentation @@ -83,7 +84,8 @@ public CompletableFuture deleteAutoFollowPatter } /** - * Deletes auto-follow patterns. + * Delete auto-follow patterns. Delete a collection of cross-cluster replication + * auto-follow patterns. * * @param fn * a function that initializes a builder to create the @@ -101,8 +103,10 @@ public final CompletableFuture deleteAutoFollow // ----- Endpoint: ccr.follow /** - * Creates a new follower index configured to follow the referenced leader - * index. + * Create a follower. Create a cross-cluster replication follower index that + * follows a specific leader index. When the API returns, the follower index + * exists and cross-cluster replication starts replicating operations from the + * leader index to the follower index. * * @see Documentation @@ -117,8 +121,10 @@ public CompletableFuture follow(FollowRequest request) { } /** - * Creates a new follower index configured to follow the referenced leader - * index. + * Create a follower. Create a cross-cluster replication follower index that + * follows a specific leader index. When the API returns, the follower index + * exists and cross-cluster replication starts replicating operations from the + * leader index to the follower index. 
* * @param fn * a function that initializes a builder to create the @@ -136,8 +142,10 @@ public final CompletableFuture follow( // ----- Endpoint: ccr.follow_info /** - * Retrieves information about all follower indices, including parameters and - * status for each follower index + * Get follower information. Get information about all cross-cluster replication + * follower indices. For example, the results include follower index names, + * leader index names, replication options, and whether the follower indices are + * active or paused. * * @see Documentation @@ -152,8 +160,10 @@ public CompletableFuture followInfo(FollowInfoRequest reques } /** - * Retrieves information about all follower indices, including parameters and - * status for each follower index + * Get follower information. Get information about all cross-cluster replication + * follower indices. For example, the results include follower index names, + * leader index names, replication options, and whether the follower indices are + * active or paused. * * @param fn * a function that initializes a builder to create the @@ -171,8 +181,9 @@ public final CompletableFuture followInfo( // ----- Endpoint: ccr.follow_stats /** - * Retrieves follower stats. return shard-level stats about the following tasks - * associated with each shard for the specified indices. + * Get follower stats. Get cross-cluster replication follower stats. The API + * returns shard-level stats about the "following tasks" associated + * with each shard for the specified indices. * * @see Documentation @@ -187,8 +198,9 @@ public CompletableFuture followStats(FollowStatsRequest req } /** - * Retrieves follower stats. return shard-level stats about the following tasks - * associated with each shard for the specified indices. + * Get follower stats. Get cross-cluster replication follower stats. The API + * returns shard-level stats about the "following tasks" associated + * with each shard for the specified indices. 
* * @param fn * a function that initializes a builder to create the @@ -206,7 +218,27 @@ public final CompletableFuture followStats( // ----- Endpoint: ccr.forget_follower /** - * Removes the follower retention leases from the leader. + * Forget a follower. Remove the cross-cluster replication follower retention + * leases from the leader. + *

+ * A following index takes out retention leases on its leader index. These + * leases are used to increase the likelihood that the shards of the leader + * index retain the history of operations that the shards of the following index + * need to run replication. When a follower index is converted to a regular + * index by the unfollow API (either by directly calling the API or by index + * lifecycle management tasks), these leases are removed. However, removal of + * the leases can fail, for example when the remote cluster containing the + * leader index is unavailable. While the leases will eventually expire on their + * own, their extended existence can cause the leader index to hold more history + * than necessary and prevent index lifecycle management from performing some + * operations on the leader index. This API exists to enable manually removing + * the leases when the unfollow API is unable to do so. + *

+ * NOTE: This API does not stop replication by a following index. If you use + * this API with a follower index that is still actively following, the + * following index will add back retention leases on the leader. The only + * purpose of this API is to handle the case of failure to remove the following + * retention leases after the unfollow API is invoked. * * @see Documentation @@ -221,7 +253,27 @@ public CompletableFuture forgetFollower(ForgetFollowerRe } /** - * Removes the follower retention leases from the leader. + * Forget a follower. Remove the cross-cluster replication follower retention + * leases from the leader. + *

+ * A following index takes out retention leases on its leader index. These + * leases are used to increase the likelihood that the shards of the leader + * index retain the history of operations that the shards of the following index + * need to run replication. When a follower index is converted to a regular + * index by the unfollow API (either by directly calling the API or by index + * lifecycle management tasks), these leases are removed. However, removal of + * the leases can fail, for example when the remote cluster containing the + * leader index is unavailable. While the leases will eventually expire on their + * own, their extended existence can cause the leader index to hold more history + * than necessary and prevent index lifecycle management from performing some + * operations on the leader index. This API exists to enable manually removing + * the leases when the unfollow API is unable to do so. + *

+ * NOTE: This API does not stop replication by a following index. If you use + * this API with a follower index that is still actively following, the + * following index will add back retention leases on the leader. The only + * purpose of this API is to handle the case of failure to remove the following + * retention leases after the unfollow API is invoked. * * @param fn * a function that initializes a builder to create the @@ -239,8 +291,7 @@ public final CompletableFuture forgetFollower( // ----- Endpoint: ccr.get_auto_follow_pattern /** - * Gets configured auto-follow patterns. Returns the specified auto-follow - * pattern collection. + * Get auto-follow patterns. Get cross-cluster replication auto-follow patterns. * * @see Documentation @@ -255,8 +306,7 @@ public CompletableFuture getAutoFollowPattern(GetA } /** - * Gets configured auto-follow patterns. Returns the specified auto-follow - * pattern collection. + * Get auto-follow patterns. Get cross-cluster replication auto-follow patterns. * * @param fn * a function that initializes a builder to create the @@ -272,8 +322,7 @@ public final CompletableFuture getAutoFollowPatter } /** - * Gets configured auto-follow patterns. Returns the specified auto-follow - * pattern collection. + * Get auto-follow patterns. Get cross-cluster replication auto-follow patterns. * * @see Documentation @@ -288,7 +337,17 @@ public CompletableFuture getAutoFollowPattern() { // ----- Endpoint: ccr.pause_auto_follow_pattern /** - * Pauses an auto-follow pattern + * Pause an auto-follow pattern. Pause a cross-cluster replication auto-follow + * pattern. When the API returns, the auto-follow pattern is inactive. New + * indices that are created on the remote cluster and match the auto-follow + * patterns are ignored. + *

+ * You can resume auto-following with the resume auto-follow pattern API. When + * it resumes, the auto-follow pattern is active again and automatically + * configures follower indices for newly created indices on the remote cluster + * that match its patterns. Remote indices that were created while the pattern + * was paused will also be followed, unless they have been deleted or closed in + * the interim. * * @see Documentation @@ -304,7 +363,17 @@ public CompletableFuture pauseAutoFollowPattern( } /** - * Pauses an auto-follow pattern + * Pause an auto-follow pattern. Pause a cross-cluster replication auto-follow + * pattern. When the API returns, the auto-follow pattern is inactive. New + * indices that are created on the remote cluster and match the auto-follow + * patterns are ignored. + *

+ * You can resume auto-following with the resume auto-follow pattern API. When + * it resumes, the auto-follow pattern is active again and automatically + * configures follower indices for newly created indices on the remote cluster + * that match its patterns. Remote indices that were created while the pattern + * was paused will also be followed, unless they have been deleted or closed in + * the interim. * * @param fn * a function that initializes a builder to create the @@ -322,8 +391,11 @@ public final CompletableFuture pauseAutoFollowPa // ----- Endpoint: ccr.pause_follow /** - * Pauses a follower index. The follower index will not fetch any additional - * operations from the leader index. + * Pause a follower. Pause a cross-cluster replication follower index. The + * follower index will not fetch any additional operations from the leader + * index. You can resume following with the resume follower API. You can pause + * and resume a follower index to change the configuration of the following + * task. * * @see Documentation @@ -338,8 +410,11 @@ public CompletableFuture pauseFollow(PauseFollowRequest req } /** - * Pauses a follower index. The follower index will not fetch any additional - * operations from the leader index. + * Pause a follower. Pause a cross-cluster replication follower index. The + * follower index will not fetch any additional operations from the leader + * index. You can resume following with the resume follower API. You can pause + * and resume a follower index to change the configuration of the following + * task. * * @param fn * a function that initializes a builder to create the @@ -357,9 +432,17 @@ public final CompletableFuture pauseFollow( // ----- Endpoint: ccr.put_auto_follow_pattern /** - * Creates a new named collection of auto-follow patterns against a specified - * remote cluster. Newly created indices on the remote cluster matching any of - * the specified patterns will be automatically configured as follower indices. 
+ * Create or update auto-follow patterns. Create a collection of cross-cluster + * replication auto-follow patterns for a remote cluster. Newly created indices + * on the remote cluster that match any of the patterns are automatically + * configured as follower indices. Indices on the remote cluster that were + * created before the auto-follow pattern was created will not be auto-followed + * even if they match the pattern. + *

+ * This API can also be used to update auto-follow patterns. NOTE: Follower + * indices that were configured automatically before updating an auto-follow + * pattern will remain unchanged even if they do not match against the new + * patterns. * * @see Documentation @@ -374,9 +457,17 @@ public CompletableFuture putAutoFollowPattern(PutA } /** - * Creates a new named collection of auto-follow patterns against a specified - * remote cluster. Newly created indices on the remote cluster matching any of - * the specified patterns will be automatically configured as follower indices. + * Create or update auto-follow patterns. Create a collection of cross-cluster + * replication auto-follow patterns for a remote cluster. Newly created indices + * on the remote cluster that match any of the patterns are automatically + * configured as follower indices. Indices on the remote cluster that were + * created before the auto-follow pattern was created will not be auto-followed + * even if they match the pattern. + *

+ * This API can also be used to update auto-follow patterns. NOTE: Follower + * indices that were configured automatically before updating an auto-follow + * pattern will remain unchanged even if they do not match against the new + * patterns. * * @param fn * a function that initializes a builder to create the @@ -394,7 +485,11 @@ public final CompletableFuture putAutoFollowPatter // ----- Endpoint: ccr.resume_auto_follow_pattern /** - * Resumes an auto-follow pattern that has been paused + * Resume an auto-follow pattern. Resume a cross-cluster replication auto-follow + * pattern that was paused. The auto-follow pattern will resume configuring + * following indices for newly created indices that match its patterns on the + * remote cluster. Remote indices created while the pattern was paused will also + * be followed unless they have been deleted or closed in the interim. * * @see Documentation @@ -410,7 +505,11 @@ public CompletableFuture resumeAutoFollowPatter } /** - * Resumes an auto-follow pattern that has been paused + * Resume an auto-follow pattern. Resume a cross-cluster replication auto-follow + * pattern that was paused. The auto-follow pattern will resume configuring + * following indices for newly created indices that match its patterns on the + * remote cluster. Remote indices created while the pattern was paused will also + * be followed unless they have been deleted or closed in the interim. * * @param fn * a function that initializes a builder to create the @@ -428,7 +527,11 @@ public final CompletableFuture resumeAutoFollow // ----- Endpoint: ccr.resume_follow /** - * Resumes a follower index that has been paused + * Resume a follower. Resume a cross-cluster replication follower index that was + * paused. The follower index could have been paused with the pause follower + * API. Alternatively it could be paused due to replication that cannot be + * retried due to failures during following tasks. 
When this API returns, the + * follower index will resume fetching operations from the leader index. * * @see Documentation @@ -443,7 +546,11 @@ public CompletableFuture resumeFollow(ResumeFollowRequest } /** - * Resumes a follower index that has been paused + * Resume a follower. Resume a cross-cluster replication follower index that was + * paused. The follower index could have been paused with the pause follower + * API. Alternatively it could be paused due to replication that cannot be + * retried due to failures during following tasks. When this API returns, the + * follower index will resume fetching operations from the leader index. * * @param fn * a function that initializes a builder to create the @@ -461,7 +568,8 @@ public final CompletableFuture resumeFollow( // ----- Endpoint: ccr.stats /** - * Gets all stats related to cross-cluster replication. + * Get cross-cluster replication stats. This API returns stats about + * auto-following and the same shard-level stats as the get follower stats API. * * @see Documentation @@ -475,8 +583,15 @@ public CompletableFuture stats() { // ----- Endpoint: ccr.unfollow /** - * Stops the following task associated with a follower index and removes index - * metadata and settings associated with cross-cluster replication. + * Unfollow an index. Convert a cross-cluster replication follower index to a + * regular index. The API stops the following task associated with a follower + * index and removes index metadata and settings associated with cross-cluster + * replication. The follower index must be paused and closed before you call the + * unfollow API. + *

+ * NOTE: Currently cross-cluster replication does not support converting an + * existing regular index to a follower index. Converting a follower index to a + * regular index is an irreversible operation. * * @see Documentation @@ -491,8 +606,15 @@ public CompletableFuture unfollow(UnfollowRequest request) { } /** - * Stops the following task associated with a follower index and removes index - * metadata and settings associated with cross-cluster replication. + * Unfollow an index. Convert a cross-cluster replication follower index to a + * regular index. The API stops the following task associated with a follower + * index and removes index metadata and settings associated with cross-cluster + * replication. The follower index must be paused and closed before you call the + * unfollow API. + *

+ * NOTE: Currently cross-cluster replication does not support converting an + * existing regular index to a follower index. Converting a follower index to a + * regular index is an irreversible operation. * * @param fn * a function that initializes a builder to create the diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/ElasticsearchCcrClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/ElasticsearchCcrClient.java index d12d23e60..122b3bc81 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/ElasticsearchCcrClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/ElasticsearchCcrClient.java @@ -68,7 +68,8 @@ public ElasticsearchCcrClient withTransportOptions(@Nullable TransportOptions tr // ----- Endpoint: ccr.delete_auto_follow_pattern /** - * Deletes auto-follow patterns. + * Delete auto-follow patterns. Delete a collection of cross-cluster replication + * auto-follow patterns. * * @see Documentation @@ -84,7 +85,8 @@ public DeleteAutoFollowPatternResponse deleteAutoFollowPattern(DeleteAutoFollowP } /** - * Deletes auto-follow patterns. + * Delete auto-follow patterns. Delete a collection of cross-cluster replication + * auto-follow patterns. * * @param fn * a function that initializes a builder to create the @@ -103,8 +105,10 @@ public final DeleteAutoFollowPatternResponse deleteAutoFollowPattern( // ----- Endpoint: ccr.follow /** - * Creates a new follower index configured to follow the referenced leader - * index. + * Create a follower. Create a cross-cluster replication follower index that + * follows a specific leader index. When the API returns, the follower index + * exists and cross-cluster replication starts replicating operations from the + * leader index to the follower index. 
* * @see Documentation @@ -119,8 +123,10 @@ public FollowResponse follow(FollowRequest request) throws IOException, Elastics } /** - * Creates a new follower index configured to follow the referenced leader - * index. + * Create a follower. Create a cross-cluster replication follower index that + * follows a specific leader index. When the API returns, the follower index + * exists and cross-cluster replication starts replicating operations from the + * leader index to the follower index. * * @param fn * a function that initializes a builder to create the @@ -138,8 +144,10 @@ public final FollowResponse follow(FunctionDocumentation @@ -154,8 +162,10 @@ public FollowInfoResponse followInfo(FollowInfoRequest request) throws IOExcepti } /** - * Retrieves information about all follower indices, including parameters and - * status for each follower index + * Get follower information. Get information about all cross-cluster replication + * follower indices. For example, the results include follower index names, + * leader index names, replication options, and whether the follower indices are + * active or paused. * * @param fn * a function that initializes a builder to create the @@ -173,8 +183,9 @@ public final FollowInfoResponse followInfo(FunctionDocumentation @@ -189,8 +200,9 @@ public FollowStatsResponse followStats(FollowStatsRequest request) throws IOExce } /** - * Retrieves follower stats. return shard-level stats about the following tasks - * associated with each shard for the specified indices. + * Get follower stats. Get cross-cluster replication follower stats. The API + * returns shard-level stats about the "following tasks" associated + * with each shard for the specified indices. * * @param fn * a function that initializes a builder to create the @@ -209,7 +221,27 @@ public final FollowStatsResponse followStats( // ----- Endpoint: ccr.forget_follower /** - * Removes the follower retention leases from the leader. + * Forget a follower. 
Remove the cross-cluster replication follower retention + * leases from the leader. + *

+ * A following index takes out retention leases on its leader index. These + * leases are used to increase the likelihood that the shards of the leader + * index retain the history of operations that the shards of the following index + * need to run replication. When a follower index is converted to a regular + * index by the unfollow API (either by directly calling the API or by index + * lifecycle management tasks), these leases are removed. However, removal of + * the leases can fail, for example when the remote cluster containing the + * leader index is unavailable. While the leases will eventually expire on their + * own, their extended existence can cause the leader index to hold more history + * than necessary and prevent index lifecycle management from performing some + * operations on the leader index. This API exists to enable manually removing + * the leases when the unfollow API is unable to do so. + *

+ * NOTE: This API does not stop replication by a following index. If you use + * this API with a follower index that is still actively following, the + * following index will add back retention leases on the leader. The only + * purpose of this API is to handle the case of failure to remove the following + * retention leases after the unfollow API is invoked. * * @see Documentation @@ -225,7 +257,27 @@ public ForgetFollowerResponse forgetFollower(ForgetFollowerRequest request) } /** - * Removes the follower retention leases from the leader. + * Forget a follower. Remove the cross-cluster replication follower retention + * leases from the leader. + *

+ * A following index takes out retention leases on its leader index. These + * leases are used to increase the likelihood that the shards of the leader + * index retain the history of operations that the shards of the following index + * need to run replication. When a follower index is converted to a regular + * index by the unfollow API (either by directly calling the API or by index + * lifecycle management tasks), these leases are removed. However, removal of + * the leases can fail, for example when the remote cluster containing the + * leader index is unavailable. While the leases will eventually expire on their + * own, their extended existence can cause the leader index to hold more history + * than necessary and prevent index lifecycle management from performing some + * operations on the leader index. This API exists to enable manually removing + * the leases when the unfollow API is unable to do so. + *

+ * NOTE: This API does not stop replication by a following index. If you use + * this API with a follower index that is still actively following, the + * following index will add back retention leases on the leader. The only + * purpose of this API is to handle the case of failure to remove the following + * retention leases after the unfollow API is invoked. * * @param fn * a function that initializes a builder to create the @@ -244,8 +296,7 @@ public final ForgetFollowerResponse forgetFollower( // ----- Endpoint: ccr.get_auto_follow_pattern /** - * Gets configured auto-follow patterns. Returns the specified auto-follow - * pattern collection. + * Get auto-follow patterns. Get cross-cluster replication auto-follow patterns. * * @see Documentation @@ -261,8 +312,7 @@ public GetAutoFollowPatternResponse getAutoFollowPattern(GetAutoFollowPatternReq } /** - * Gets configured auto-follow patterns. Returns the specified auto-follow - * pattern collection. + * Get auto-follow patterns. Get cross-cluster replication auto-follow patterns. * * @param fn * a function that initializes a builder to create the @@ -279,8 +329,7 @@ public final GetAutoFollowPatternResponse getAutoFollowPattern( } /** - * Gets configured auto-follow patterns. Returns the specified auto-follow - * pattern collection. + * Get auto-follow patterns. Get cross-cluster replication auto-follow patterns. * * @see Documentation @@ -295,7 +344,17 @@ public GetAutoFollowPatternResponse getAutoFollowPattern() throws IOException, E // ----- Endpoint: ccr.pause_auto_follow_pattern /** - * Pauses an auto-follow pattern + * Pause an auto-follow pattern. Pause a cross-cluster replication auto-follow + * pattern. When the API returns, the auto-follow pattern is inactive. New + * indices that are created on the remote cluster and match the auto-follow + * patterns are ignored. + *

+ * You can resume auto-following with the resume auto-follow pattern API. When + * it resumes, the auto-follow pattern is active again and automatically + * configures follower indices for newly created indices on the remote cluster + * that match its patterns. Remote indices that were created while the pattern + * was paused will also be followed, unless they have been deleted or closed in + * the interim. * * @see Documentation @@ -311,7 +370,17 @@ public PauseAutoFollowPatternResponse pauseAutoFollowPattern(PauseAutoFollowPatt } /** - * Pauses an auto-follow pattern + * Pause an auto-follow pattern. Pause a cross-cluster replication auto-follow + * pattern. When the API returns, the auto-follow pattern is inactive. New + * indices that are created on the remote cluster and match the auto-follow + * patterns are ignored. + *

+ * You can resume auto-following with the resume auto-follow pattern API. When + * it resumes, the auto-follow pattern is active again and automatically + * configures follower indices for newly created indices on the remote cluster + * that match its patterns. Remote indices that were created while the pattern + * was paused will also be followed, unless they have been deleted or closed in + * the interim. * * @param fn * a function that initializes a builder to create the @@ -330,8 +399,11 @@ public final PauseAutoFollowPatternResponse pauseAutoFollowPattern( // ----- Endpoint: ccr.pause_follow /** - * Pauses a follower index. The follower index will not fetch any additional - * operations from the leader index. + * Pause a follower. Pause a cross-cluster replication follower index. The + * follower index will not fetch any additional operations from the leader + * index. You can resume following with the resume follower API. You can pause + * and resume a follower index to change the configuration of the following + * task. * * @see Documentation @@ -346,8 +418,11 @@ public PauseFollowResponse pauseFollow(PauseFollowRequest request) throws IOExce } /** - * Pauses a follower index. The follower index will not fetch any additional - * operations from the leader index. + * Pause a follower. Pause a cross-cluster replication follower index. The + * follower index will not fetch any additional operations from the leader + * index. You can resume following with the resume follower API. You can pause + * and resume a follower index to change the configuration of the following + * task. * * @param fn * a function that initializes a builder to create the @@ -366,9 +441,17 @@ public final PauseFollowResponse pauseFollow( // ----- Endpoint: ccr.put_auto_follow_pattern /** - * Creates a new named collection of auto-follow patterns against a specified - * remote cluster. 
Newly created indices on the remote cluster matching any of - * the specified patterns will be automatically configured as follower indices. + * Create or update auto-follow patterns. Create a collection of cross-cluster + * replication auto-follow patterns for a remote cluster. Newly created indices + * on the remote cluster that match any of the patterns are automatically + * configured as follower indices. Indices on the remote cluster that were + * created before the auto-follow pattern was created will not be auto-followed + * even if they match the pattern. + *

+ * This API can also be used to update auto-follow patterns. NOTE: Follower + * indices that were configured automatically before updating an auto-follow + * pattern will remain unchanged even if they do not match against the new + * patterns. * * @see Documentation @@ -384,9 +467,17 @@ public PutAutoFollowPatternResponse putAutoFollowPattern(PutAutoFollowPatternReq } /** - * Creates a new named collection of auto-follow patterns against a specified - * remote cluster. Newly created indices on the remote cluster matching any of - * the specified patterns will be automatically configured as follower indices. + * Create or update auto-follow patterns. Create a collection of cross-cluster + * replication auto-follow patterns for a remote cluster. Newly created indices + * on the remote cluster that match any of the patterns are automatically + * configured as follower indices. Indices on the remote cluster that were + * created before the auto-follow pattern was created will not be auto-followed + * even if they match the pattern. + *

+ * This API can also be used to update auto-follow patterns. NOTE: Follower + * indices that were configured automatically before updating an auto-follow + * pattern will remain unchanged even if they do not match against the new + * patterns. * * @param fn * a function that initializes a builder to create the @@ -405,7 +496,11 @@ public final PutAutoFollowPatternResponse putAutoFollowPattern( // ----- Endpoint: ccr.resume_auto_follow_pattern /** - * Resumes an auto-follow pattern that has been paused + * Resume an auto-follow pattern. Resume a cross-cluster replication auto-follow + * pattern that was paused. The auto-follow pattern will resume configuring + * following indices for newly created indices that match its patterns on the + * remote cluster. Remote indices created while the pattern was paused will also + * be followed unless they have been deleted or closed in the interim. * * @see Documentation @@ -421,7 +516,11 @@ public ResumeAutoFollowPatternResponse resumeAutoFollowPattern(ResumeAutoFollowP } /** - * Resumes an auto-follow pattern that has been paused + * Resume an auto-follow pattern. Resume a cross-cluster replication auto-follow + * pattern that was paused. The auto-follow pattern will resume configuring + * following indices for newly created indices that match its patterns on the + * remote cluster. Remote indices created while the pattern was paused will also + * be followed unless they have been deleted or closed in the interim. * * @param fn * a function that initializes a builder to create the @@ -440,7 +539,11 @@ public final ResumeAutoFollowPatternResponse resumeAutoFollowPattern( // ----- Endpoint: ccr.resume_follow /** - * Resumes a follower index that has been paused + * Resume a follower. Resume a cross-cluster replication follower index that was + * paused. The follower index could have been paused with the pause follower + * API. 
Alternatively it could be paused due to replication that cannot be + * retried due to failures during following tasks. When this API returns, the + * follower index will resume fetching operations from the leader index. * * @see Documentation @@ -455,7 +558,11 @@ public ResumeFollowResponse resumeFollow(ResumeFollowRequest request) throws IOE } /** - * Resumes a follower index that has been paused + * Resume a follower. Resume a cross-cluster replication follower index that was + * paused. The follower index could have been paused with the pause follower + * API. Alternatively it could be paused due to replication that cannot be + * retried due to failures during following tasks. When this API returns, the + * follower index will resume fetching operations from the leader index. * * @param fn * a function that initializes a builder to create the @@ -474,7 +581,8 @@ public final ResumeFollowResponse resumeFollow( // ----- Endpoint: ccr.stats /** - * Gets all stats related to cross-cluster replication. + * Get cross-cluster replication stats. This API returns stats about + * auto-following and the same shard-level stats as the get follower stats API. * * @see Documentation @@ -488,8 +596,15 @@ public CcrStatsResponse stats() throws IOException, ElasticsearchException { // ----- Endpoint: ccr.unfollow /** - * Stops the following task associated with a follower index and removes index - * metadata and settings associated with cross-cluster replication. + * Unfollow an index. Convert a cross-cluster replication follower index to a + * regular index. The API stops the following task associated with a follower + * index and removes index metadata and settings associated with cross-cluster + * replication. The follower index must be paused and closed before you call the + * unfollow API. + *

+ * NOTE: Currently cross-cluster replication does not support converting an + * existing regular index to a follower index. Converting a follower index to a + * regular index is an irreversible operation. * * @see Documentation @@ -504,8 +619,15 @@ public UnfollowResponse unfollow(UnfollowRequest request) throws IOException, El } /** - * Stops the following task associated with a follower index and removes index - * metadata and settings associated with cross-cluster replication. + * Unfollow an index. Convert a cross-cluster replication follower index to a + * regular index. The API stops the following task associated with a follower + * index and removes index metadata and settings associated with cross-cluster + * replication. The follower index must be paused and closed before you call the + * unfollow API. + *

+ * NOTE: Currently cross-cluster replication does not support converting an + * existing regular index to a follower index. Converting a follower index to a + * regular index is an irreversible operation. * * @param fn * a function that initializes a builder to create the diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/FollowInfoRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/FollowInfoRequest.java index 7fcf2db5b..78bc16ea7 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/FollowInfoRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/FollowInfoRequest.java @@ -58,8 +58,10 @@ // typedef: ccr.follow_info.Request /** - * Retrieves information about all follower indices, including parameters and - * status for each follower index + * Get follower information. Get information about all cross-cluster replication + * follower indices. For example, the results include follower index names, + * leader index names, replication options, and whether the follower indices are + * active or paused. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/FollowRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/FollowRequest.java index 22b6c7788..7bd6ab476 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/FollowRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/FollowRequest.java @@ -62,8 +62,10 @@ // typedef: ccr.follow.Request /** - * Creates a new follower index configured to follow the referenced leader - * index. + * Create a follower. Create a cross-cluster replication follower index that + * follows a specific leader index. When the API returns, the follower index + * exists and cross-cluster replication starts replicating operations from the + * leader index to the follower index. 
* * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/FollowStatsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/FollowStatsRequest.java index 9311a92fb..9048f3fdc 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/FollowStatsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/FollowStatsRequest.java @@ -58,8 +58,9 @@ // typedef: ccr.follow_stats.Request /** - * Retrieves follower stats. return shard-level stats about the following tasks - * associated with each shard for the specified indices. + * Get follower stats. Get cross-cluster replication follower stats. The API + * returns shard-level stats about the "following tasks" associated + * with each shard for the specified indices. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/ForgetFollowerRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/ForgetFollowerRequest.java index 4f61622ed..7bb407dfd 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/ForgetFollowerRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/ForgetFollowerRequest.java @@ -58,7 +58,27 @@ // typedef: ccr.forget_follower.Request /** - * Removes the follower retention leases from the leader. + * Forget a follower. Remove the cross-cluster replication follower retention + * leases from the leader. + *

+ * A following index takes out retention leases on its leader index. These + * leases are used to increase the likelihood that the shards of the leader + * index retain the history of operations that the shards of the following index + * need to run replication. When a follower index is converted to a regular + * index by the unfollow API (either by directly calling the API or by index + * lifecycle management tasks), these leases are removed. However, removal of + * the leases can fail, for example when the remote cluster containing the + * leader index is unavailable. While the leases will eventually expire on their + * own, their extended existence can cause the leader index to hold more history + * than necessary and prevent index lifecycle management from performing some + * operations on the leader index. This API exists to enable manually removing + * the leases when the unfollow API is unable to do so. + *

+ * NOTE: This API does not stop replication by a following index. If you use + * this API with a follower index that is still actively following, the + * following index will add back retention leases on the leader. The only + * purpose of this API is to handle the case of failure to remove the following + * retention leases after the unfollow API is invoked. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/GetAutoFollowPatternRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/GetAutoFollowPatternRequest.java index 8703d5b66..133675182 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/GetAutoFollowPatternRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/GetAutoFollowPatternRequest.java @@ -55,8 +55,7 @@ // typedef: ccr.get_auto_follow_pattern.Request /** - * Gets configured auto-follow patterns. Returns the specified auto-follow - * pattern collection. + * Get auto-follow patterns. Get cross-cluster replication auto-follow patterns. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/PauseAutoFollowPatternRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/PauseAutoFollowPatternRequest.java index 1773672eb..941ff1f2b 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/PauseAutoFollowPatternRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/PauseAutoFollowPatternRequest.java @@ -56,7 +56,17 @@ // typedef: ccr.pause_auto_follow_pattern.Request /** - * Pauses an auto-follow pattern + * Pause an auto-follow pattern. Pause a cross-cluster replication auto-follow + * pattern. When the API returns, the auto-follow pattern is inactive. New + * indices that are created on the remote cluster and match the auto-follow + * patterns are ignored. + *

+ * You can resume auto-following with the resume auto-follow pattern API. When + * it resumes, the auto-follow pattern is active again and automatically + * configures follower indices for newly created indices on the remote cluster + * that match its patterns. Remote indices that were created while the pattern + * was paused will also be followed, unless they have been deleted or closed in + * the interim. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/PauseFollowRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/PauseFollowRequest.java index 6b288baf1..3d5e70251 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/PauseFollowRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/PauseFollowRequest.java @@ -56,8 +56,11 @@ // typedef: ccr.pause_follow.Request /** - * Pauses a follower index. The follower index will not fetch any additional - * operations from the leader index. + * Pause a follower. Pause a cross-cluster replication follower index. The + * follower index will not fetch any additional operations from the leader + * index. You can resume following with the resume follower API. You can pause + * and resume a follower index to change the configuration of the following + * task. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/PutAutoFollowPatternRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/PutAutoFollowPatternRequest.java index 69e13c60f..456b1d4db 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/PutAutoFollowPatternRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/PutAutoFollowPatternRequest.java @@ -62,9 +62,17 @@ // typedef: ccr.put_auto_follow_pattern.Request /** - * Creates a new named collection of auto-follow patterns against a specified - * remote cluster. 
Newly created indices on the remote cluster matching any of - * the specified patterns will be automatically configured as follower indices. + * Create or update auto-follow patterns. Create a collection of cross-cluster + * replication auto-follow patterns for a remote cluster. Newly created indices + * on the remote cluster that match any of the patterns are automatically + * configured as follower indices. Indices on the remote cluster that were + * created before the auto-follow pattern was created will not be auto-followed + * even if they match the pattern. + *

+ * This API can also be used to update auto-follow patterns. NOTE: Follower + * indices that were configured automatically before updating an auto-follow + * pattern will remain unchanged even if they do not match against the new + * patterns. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/ResumeAutoFollowPatternRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/ResumeAutoFollowPatternRequest.java index 3ebcb2bef..c0e833da4 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/ResumeAutoFollowPatternRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/ResumeAutoFollowPatternRequest.java @@ -56,7 +56,11 @@ // typedef: ccr.resume_auto_follow_pattern.Request /** - * Resumes an auto-follow pattern that has been paused + * Resume an auto-follow pattern. Resume a cross-cluster replication auto-follow + * pattern that was paused. The auto-follow pattern will resume configuring + * following indices for newly created indices that match its patterns on the + * remote cluster. Remote indices created while the pattern was paused will also + * be followed unless they have been deleted or closed in the interim. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/ResumeFollowRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/ResumeFollowRequest.java index 44594ad40..d4f2d6ef7 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/ResumeFollowRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/ResumeFollowRequest.java @@ -60,7 +60,11 @@ // typedef: ccr.resume_follow.Request /** - * Resumes a follower index that has been paused + * Resume a follower. Resume a cross-cluster replication follower index that was + * paused. The follower index could have been paused with the pause follower + * API. 
Alternatively it could be paused due to replication that cannot be + * retried due to failures during following tasks. When this API returns, the + * follower index will resume fetching operations from the leader index. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/UnfollowRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/UnfollowRequest.java index 7ce510c12..50398cc41 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/UnfollowRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ccr/UnfollowRequest.java @@ -56,8 +56,15 @@ // typedef: ccr.unfollow.Request /** - * Stops the following task associated with a follower index and removes index - * metadata and settings associated with cross-cluster replication. + * Unfollow an index. Convert a cross-cluster replication follower index to a + * regular index. The API stops the following task associated with a follower + * index and removes index metadata and settings associated with cross-cluster + * replication. The follower index must be paused and closed before you call the + * unfollow API. + *

+ * NOTE: Currently cross-cluster replication does not support converting an + * existing regular index to a follower index. Converting a follower index to a + * regular index is an irreversible operation. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/AllocationExplainRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/AllocationExplainRequest.java index c8aff6a12..9233d9bde 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/AllocationExplainRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/AllocationExplainRequest.java @@ -59,7 +59,13 @@ // typedef: cluster.allocation_explain.Request /** - * Provides explanations for shard allocations in the cluster. + * Explain the shard allocations. Get explanations for shard allocations in the + * cluster. For unassigned shards, it provides an explanation for why the shard + * is unassigned. For assigned shards, it provides an explanation for why the + * shard is remaining on its current node and has not moved or rebalanced to + * another node. This API can be very useful when attempting to diagnose why a + * shard is unassigned or why a shard continues to remain on its current node + * when you might expect otherwise. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/ClusterStatsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/ClusterStatsRequest.java index 9f47b9971..0d4e38fc4 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/ClusterStatsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/ClusterStatsRequest.java @@ -59,10 +59,9 @@ // typedef: cluster.stats.Request /** - * Returns cluster statistics. 
It returns basic index metrics (shard numbers, - * store size, memory usage) and information about the current nodes that form - * the cluster (number, roles, os, jvm versions, memory usage, cpu and installed - * plugins). + * Get cluster statistics. Get basic index metrics (shard numbers, store size, + * memory usage) and information about the current nodes that form the cluster + * (number, roles, os, jvm versions, memory usage, cpu and installed plugins). * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/DeleteVotingConfigExclusionsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/DeleteVotingConfigExclusionsRequest.java index 37a889091..96c39da2c 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/DeleteVotingConfigExclusionsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/DeleteVotingConfigExclusionsRequest.java @@ -57,7 +57,8 @@ // typedef: cluster.delete_voting_config_exclusions.Request /** - * Clears cluster voting config exclusions. + * Clear cluster voting config exclusions. Remove master-eligible nodes from the + * voting configuration exclusion list. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/ElasticsearchClusterAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/ElasticsearchClusterAsyncClient.java index 00ded6056..819edcc01 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/ElasticsearchClusterAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/ElasticsearchClusterAsyncClient.java @@ -71,7 +71,13 @@ public ElasticsearchClusterAsyncClient withTransportOptions(@Nullable TransportO // ----- Endpoint: cluster.allocation_explain /** - * Provides explanations for shard allocations in the cluster. + * Explain the shard allocations. 
Get explanations for shard allocations in the + * cluster. For unassigned shards, it provides an explanation for why the shard + * is unassigned. For assigned shards, it provides an explanation for why the + * shard is remaining on its current node and has not moved or rebalanced to + * another node. This API can be very useful when attempting to diagnose why a + * shard is unassigned or why a shard continues to remain on its current node + * when you might expect otherwise. * * @see Documentation @@ -86,7 +92,13 @@ public CompletableFuture allocationExplain(Allocation } /** - * Provides explanations for shard allocations in the cluster. + * Explain the shard allocations. Get explanations for shard allocations in the + * cluster. For unassigned shards, it provides an explanation for why the shard + * is unassigned. For assigned shards, it provides an explanation for why the + * shard is remaining on its current node and has not moved or rebalanced to + * another node. This API can be very useful when attempting to diagnose why a + * shard is unassigned or why a shard continues to remain on its current node + * when you might expect otherwise. * * @param fn * a function that initializes a builder to create the @@ -102,7 +114,13 @@ public final CompletableFuture allocationExplain( } /** - * Provides explanations for shard allocations in the cluster. + * Explain the shard allocations. Get explanations for shard allocations in the + * cluster. For unassigned shards, it provides an explanation for why the shard + * is unassigned. For assigned shards, it provides an explanation for why the + * shard is remaining on its current node and has not moved or rebalanced to + * another node. This API can be very useful when attempting to diagnose why a + * shard is unassigned or why a shard continues to remain on its current node + * when you might expect otherwise. 
* * @see Documentation @@ -155,7 +173,8 @@ public final CompletableFuture deleteComponentT // ----- Endpoint: cluster.delete_voting_config_exclusions /** - * Clears cluster voting config exclusions. + * Clear cluster voting config exclusions. Remove master-eligible nodes from the + * voting configuration exclusion list. * * @see Documentation @@ -171,7 +190,8 @@ public CompletableFuture deleteVotingConfigExclusions( } /** - * Clears cluster voting config exclusions. + * Clear cluster voting config exclusions. Remove master-eligible nodes from the + * voting configuration exclusion list. * * @param fn * a function that initializes a builder to create the @@ -187,7 +207,8 @@ public final CompletableFuture deleteVotingConfigExclusions( } /** - * Clears cluster voting config exclusions. + * Clear cluster voting config exclusions. Remove master-eligible nodes from the + * voting configuration exclusion list. * * @see Documentation @@ -283,7 +304,7 @@ public CompletableFuture getComponentTemplate() { // ----- Endpoint: cluster.get_settings /** - * Returns cluster-wide settings. By default, it returns only settings that have + * Get cluster-wide settings. By default, it returns only settings that have * been explicitly defined. * * @see getSettings(GetClusterSetti } /** - * Returns cluster-wide settings. By default, it returns only settings that have + * Get cluster-wide settings. By default, it returns only settings that have * been explicitly defined. * * @param fn @@ -316,7 +337,7 @@ public final CompletableFuture getSettings( } /** - * Returns cluster-wide settings. By default, it returns only settings that have + * Get cluster-wide settings. By default, it returns only settings that have * been explicitly defined. * * @see getSettings() { // ----- Endpoint: cluster.health /** - * The cluster health API returns a simple status on the health of the cluster. - * You can also use the API to get the health status of only specified data - * streams and indices. 
For data streams, the API retrieves the health status of - * the stream’s backing indices. The cluster health status is: green, yellow or - * red. On the shard level, a red status indicates that the specific shard is - * not allocated in the cluster, yellow means that the primary shard is - * allocated but replicas are not, and green means that all shards are - * allocated. The index level status is controlled by the worst shard status. - * The cluster status is controlled by the worst index status. + * Get the cluster health status. You can also use the API to get the health + * status of only specified data streams and indices. For data streams, the API + * retrieves the health status of the stream’s backing indices. + *

+ * The cluster health status is: green, yellow or red. On the shard level, a red + * status indicates that the specific shard is not allocated in the cluster. + * Yellow means that the primary shard is allocated but replicas are not. Green + * means that all shards are allocated. The index level status is controlled by + * the worst shard status. + *

+ * One of the main benefits of the API is the ability to wait until the cluster + * reaches a certain high watermark health level. The cluster status is + * controlled by the worst index status. * * @see Documentation @@ -355,15 +380,19 @@ public CompletableFuture health(HealthRequest request) { } /** - * The cluster health API returns a simple status on the health of the cluster. - * You can also use the API to get the health status of only specified data - * streams and indices. For data streams, the API retrieves the health status of - * the stream’s backing indices. The cluster health status is: green, yellow or - * red. On the shard level, a red status indicates that the specific shard is - * not allocated in the cluster, yellow means that the primary shard is - * allocated but replicas are not, and green means that all shards are - * allocated. The index level status is controlled by the worst shard status. - * The cluster status is controlled by the worst index status. + * Get the cluster health status. You can also use the API to get the health + * status of only specified data streams and indices. For data streams, the API + * retrieves the health status of the stream’s backing indices. + *

+ * The cluster health status is: green, yellow or red. On the shard level, a red + * status indicates that the specific shard is not allocated in the cluster. + * Yellow means that the primary shard is allocated but replicas are not. Green + * means that all shards are allocated. The index level status is controlled by + * the worst shard status. + *

+ * One of the main benefits of the API is the ability to wait until the cluster + * reaches a certain high watermark health level. The cluster status is + * controlled by the worst index status. * * @param fn * a function that initializes a builder to create the @@ -379,15 +408,19 @@ public final CompletableFuture health( } /** - * The cluster health API returns a simple status on the health of the cluster. - * You can also use the API to get the health status of only specified data - * streams and indices. For data streams, the API retrieves the health status of - * the stream’s backing indices. The cluster health status is: green, yellow or - * red. On the shard level, a red status indicates that the specific shard is - * not allocated in the cluster, yellow means that the primary shard is - * allocated but replicas are not, and green means that all shards are - * allocated. The index level status is controlled by the worst shard status. - * The cluster status is controlled by the worst index status. + * Get the cluster health status. You can also use the API to get the health + * status of only specified data streams and indices. For data streams, the API + * retrieves the health status of the stream’s backing indices. + *

+ * The cluster health status is: green, yellow or red. On the shard level, a red + * status indicates that the specific shard is not allocated in the cluster. + * Yellow means that the primary shard is allocated but replicas are not. Green + * means that all shards are allocated. The index level status is controlled by + * the worst shard status. + *

+ * One of the main benefits of the API is the ability to wait until the cluster + * reaches a certain high watermark health level. The cluster status is + * controlled by the worst index status. * * @see Documentation @@ -435,14 +468,16 @@ public final CompletableFuture info( // ----- Endpoint: cluster.pending_tasks /** - * Returns cluster-level changes (such as create index, update mapping, allocate - * or fail shard) that have not yet been executed. NOTE: This API returns a list - * of any pending updates to the cluster state. These are distinct from the - * tasks reported by the Task Management API which include periodic tasks and - * tasks initiated by the user, such as node stats, search queries, or create - * index requests. However, if a user-initiated task such as a create index - * command causes a cluster state update, the activity of this task might be - * reported by both task api and pending cluster tasks API. + * Get the pending cluster tasks. Get information about cluster-level changes + * (such as create index, update mapping, allocate or fail shard) that have not + * yet taken effect. + *

+ * NOTE: This API returns a list of any pending updates to the cluster state. + * These are distinct from the tasks reported by the task management API which + * include periodic tasks and tasks initiated by the user, such as node stats, + * search queries, or create index requests. However, if a user-initiated task + * such as a create index command causes a cluster state update, the activity of + * this task might be reported by both task API and pending cluster tasks API. * * @see Documentation @@ -457,14 +492,16 @@ public CompletableFuture pendingTasks(PendingTasksRequest } /** - * Returns cluster-level changes (such as create index, update mapping, allocate - * or fail shard) that have not yet been executed. NOTE: This API returns a list - * of any pending updates to the cluster state. These are distinct from the - * tasks reported by the Task Management API which include periodic tasks and - * tasks initiated by the user, such as node stats, search queries, or create - * index requests. However, if a user-initiated task such as a create index - * command causes a cluster state update, the activity of this task might be - * reported by both task api and pending cluster tasks API. + * Get the pending cluster tasks. Get information about cluster-level changes + * (such as create index, update mapping, allocate or fail shard) that have not + * yet taken effect. + *

+ * NOTE: This API returns a list of any pending updates to the cluster state. + * These are distinct from the tasks reported by the task management API which + * include periodic tasks and tasks initiated by the user, such as node stats, + * search queries, or create index requests. However, if a user-initiated task + * such as a create index command causes a cluster state update, the activity of + * this task might be reported by both task API and pending cluster tasks API. * * @param fn * a function that initializes a builder to create the @@ -480,14 +517,16 @@ public final CompletableFuture pendingTasks( } /** - * Returns cluster-level changes (such as create index, update mapping, allocate - * or fail shard) that have not yet been executed. NOTE: This API returns a list - * of any pending updates to the cluster state. These are distinct from the - * tasks reported by the Task Management API which include periodic tasks and - * tasks initiated by the user, such as node stats, search queries, or create - * index requests. However, if a user-initiated task such as a create index - * command causes a cluster state update, the activity of this task might be - * reported by both task api and pending cluster tasks API. + * Get the pending cluster tasks. Get information about cluster-level changes + * (such as create index, update mapping, allocate or fail shard) that have not + * yet taken effect. + *

+ * NOTE: This API returns a list of any pending updates to the cluster state. + * These are distinct from the tasks reported by the task management API which + * include periodic tasks and tasks initiated by the user, such as node stats, + * search queries, or create index requests. However, if a user-initiated task + * such as a create index command causes a cluster state update, the activity of + * this task might be reported by both task API and pending cluster tasks API. * * @see Documentation @@ -502,7 +541,42 @@ public CompletableFuture pendingTasks() { // ----- Endpoint: cluster.post_voting_config_exclusions /** - * Updates the cluster voting config exclusions by node ids or node names. + * Update voting configuration exclusions. Update the cluster voting config + * exclusions by node IDs or node names. By default, if there are more than + * three master-eligible nodes in the cluster and you remove fewer than half of + * the master-eligible nodes in the cluster at once, the voting configuration + * automatically shrinks. If you want to shrink the voting configuration to + * contain fewer than three nodes or to remove half or more of the + * master-eligible nodes in the cluster at once, use this API to remove + * departing nodes from the voting configuration manually. The API adds an entry + * for each specified node to the cluster’s voting configuration exclusions + * list. It then waits until the cluster has reconfigured its voting + * configuration to exclude the specified nodes. + *

+ * Clusters should have no voting configuration exclusions in normal operation. + * Once the excluded nodes have stopped, clear the voting configuration + * exclusions with DELETE /_cluster/voting_config_exclusions. This + * API waits for the nodes to be fully removed from the cluster before it + * returns. If your cluster has voting configuration exclusions for nodes that + * you no longer intend to remove, use + * DELETE /_cluster/voting_config_exclusions?wait_for_removal=false + * to clear the voting configuration exclusions without waiting for the nodes to + * leave the cluster. + *

+ * A response to POST /_cluster/voting_config_exclusions with an + * HTTP status code of 200 OK guarantees that the node has been removed from the + * voting configuration and will not be reinstated until the voting + * configuration exclusions are cleared by calling + * DELETE /_cluster/voting_config_exclusions. If the call to + * POST /_cluster/voting_config_exclusions fails or returns a + * response with an HTTP status code other than 200 OK then the node may not + * have been removed from the voting configuration. In that case, you may safely + * retry the call. + *

+ * NOTE: Voting exclusions are required only when you remove at least half of + * the master-eligible nodes from a cluster in a short time period. They are not + * required when removing master-ineligible nodes or when removing fewer than + * half of the master-eligible nodes. * * @see Documentation @@ -517,7 +591,42 @@ public CompletableFuture postVotingConfigExclusions(PostVotingC } /** - * Updates the cluster voting config exclusions by node ids or node names. + * Update voting configuration exclusions. Update the cluster voting config + * exclusions by node IDs or node names. By default, if there are more than + * three master-eligible nodes in the cluster and you remove fewer than half of + * the master-eligible nodes in the cluster at once, the voting configuration + * automatically shrinks. If you want to shrink the voting configuration to + * contain fewer than three nodes or to remove half or more of the + * master-eligible nodes in the cluster at once, use this API to remove + * departing nodes from the voting configuration manually. The API adds an entry + * for each specified node to the cluster’s voting configuration exclusions + * list. It then waits until the cluster has reconfigured its voting + * configuration to exclude the specified nodes. + *

+ * Clusters should have no voting configuration exclusions in normal operation. + * Once the excluded nodes have stopped, clear the voting configuration + * exclusions with DELETE /_cluster/voting_config_exclusions. This + * API waits for the nodes to be fully removed from the cluster before it + * returns. If your cluster has voting configuration exclusions for nodes that + * you no longer intend to remove, use + * DELETE /_cluster/voting_config_exclusions?wait_for_removal=false + * to clear the voting configuration exclusions without waiting for the nodes to + * leave the cluster. + *

+ * A response to POST /_cluster/voting_config_exclusions with an + * HTTP status code of 200 OK guarantees that the node has been removed from the + * voting configuration and will not be reinstated until the voting + * configuration exclusions are cleared by calling + * DELETE /_cluster/voting_config_exclusions. If the call to + * POST /_cluster/voting_config_exclusions fails or returns a + * response with an HTTP status code other than 200 OK then the node may not + * have been removed from the voting configuration. In that case, you may safely + * retry the call. + *

+ * NOTE: Voting exclusions are required only when you remove at least half of + * the master-eligible nodes from a cluster in a short time period. They are not + * required when removing master-ineligible nodes or when removing fewer than + * half of the master-eligible nodes. * * @param fn * a function that initializes a builder to create the @@ -533,7 +642,42 @@ public final CompletableFuture postVotingConfigExclusions( } /** - * Updates the cluster voting config exclusions by node ids or node names. + * Update voting configuration exclusions. Update the cluster voting config + * exclusions by node IDs or node names. By default, if there are more than + * three master-eligible nodes in the cluster and you remove fewer than half of + * the master-eligible nodes in the cluster at once, the voting configuration + * automatically shrinks. If you want to shrink the voting configuration to + * contain fewer than three nodes or to remove half or more of the + * master-eligible nodes in the cluster at once, use this API to remove + * departing nodes from the voting configuration manually. The API adds an entry + * for each specified node to the cluster’s voting configuration exclusions + * list. It then waits until the cluster has reconfigured its voting + * configuration to exclude the specified nodes. + *

+ * Clusters should have no voting configuration exclusions in normal operation. + * Once the excluded nodes have stopped, clear the voting configuration + * exclusions with DELETE /_cluster/voting_config_exclusions. This + * API waits for the nodes to be fully removed from the cluster before it + * returns. If your cluster has voting configuration exclusions for nodes that + * you no longer intend to remove, use + * DELETE /_cluster/voting_config_exclusions?wait_for_removal=false + * to clear the voting configuration exclusions without waiting for the nodes to + * leave the cluster. + *

+ * A response to POST /_cluster/voting_config_exclusions with an + * HTTP status code of 200 OK guarantees that the node has been removed from the + * voting configuration and will not be reinstated until the voting + * configuration exclusions are cleared by calling + * DELETE /_cluster/voting_config_exclusions. If the call to + * POST /_cluster/voting_config_exclusions fails or returns a + * response with an HTTP status code other than 200 OK then the node may not + * have been removed from the voting configuration. In that case, you may safely + * retry the call. + *

+ * NOTE: Voting exclusions are required only when you remove at least half of + * the master-eligible nodes from a cluster in a short time period. They are not + * required when removing master-ineligible nodes or when removing fewer than + * half of the master-eligible nodes. * * @see Documentation @@ -621,7 +765,33 @@ public final CompletableFuture putComponentTemplat // ----- Endpoint: cluster.put_settings /** - * Updates the cluster settings. + * Update the cluster settings. Configure and update dynamic settings on a + * running cluster. You can also configure dynamic settings locally on an + * unstarted or shut down node in elasticsearch.yml. + *

+ * Updates made with this API can be persistent, which apply across cluster + * restarts, or transient, which reset after a cluster restart. You can also + * reset transient or persistent settings by assigning them a null value. + *

+ * If you configure the same setting using multiple methods, Elasticsearch + * applies the settings in the following order of precedence: 1) Transient setting; + * 2) Persistent setting; 3) elasticsearch.yml setting; 4) Default + * setting value. For example, you can apply a transient setting to override a + * persistent setting or elasticsearch.yml setting. However, a + * change to an elasticsearch.yml setting will not override a + * defined transient or persistent setting. + *

+ * TIP: In Elastic Cloud, use the user settings feature to configure all cluster + * settings. This method automatically rejects unsafe settings that could break + * your cluster. If you run Elasticsearch on your own hardware, use this API to + * configure dynamic cluster settings. Only use elasticsearch.yml + * for static cluster settings and node settings. The API doesn’t require a + * restart and ensures a setting’s value is the same on all nodes. + *

+ * WARNING: Transient cluster settings are no longer recommended. Use persistent + * cluster settings instead. If a cluster becomes unstable, transient settings + * can clear unexpectedly, resulting in a potentially undesired cluster + * configuration. * * @see Documentation @@ -636,7 +806,33 @@ public CompletableFuture putSettings(PutClusterSetti } /** - * Updates the cluster settings. + * Update the cluster settings. Configure and update dynamic settings on a + * running cluster. You can also configure dynamic settings locally on an + * unstarted or shut down node in elasticsearch.yml. + *

+ * Updates made with this API can be persistent, which apply across cluster + * restarts, or transient, which reset after a cluster restart. You can also + * reset transient or persistent settings by assigning them a null value. + *

+ * If you configure the same setting using multiple methods, Elasticsearch + * applies the settings in the following order of precedence: 1) Transient setting; + * 2) Persistent setting; 3) elasticsearch.yml setting; 4) Default + * setting value. For example, you can apply a transient setting to override a + * persistent setting or elasticsearch.yml setting. However, a + * change to an elasticsearch.yml setting will not override a + * defined transient or persistent setting. + *

+ * TIP: In Elastic Cloud, use the user settings feature to configure all cluster + * settings. This method automatically rejects unsafe settings that could break + * your cluster. If you run Elasticsearch on your own hardware, use this API to + * configure dynamic cluster settings. Only use elasticsearch.yml + * for static cluster settings and node settings. The API doesn’t require a + * restart and ensures a setting’s value is the same on all nodes. + *

+ * WARNING: Transient cluster settings are no longer recommended. Use persistent + * cluster settings instead. If a cluster becomes unstable, transient settings + * can clear unexpectedly, resulting in a potentially undesired cluster + * configuration. * * @param fn * a function that initializes a builder to create the @@ -652,7 +848,33 @@ public final CompletableFuture putSettings( } /** - * Updates the cluster settings. + * Update the cluster settings. Configure and update dynamic settings on a + * running cluster. You can also configure dynamic settings locally on an + * unstarted or shut down node in elasticsearch.yml. + *

+ * Updates made with this API can be persistent, which apply across cluster + * restarts, or transient, which reset after a cluster restart. You can also + * reset transient or persistent settings by assigning them a null value. + *

+ * If you configure the same setting using multiple methods, Elasticsearch + * applies the settings in the following order of precedence: 1) Transient setting; + * 2) Persistent setting; 3) elasticsearch.yml setting; 4) Default + * setting value. For example, you can apply a transient setting to override a + * persistent setting or elasticsearch.yml setting. However, a + * change to an elasticsearch.yml setting will not override a + * defined transient or persistent setting. + *

+ * TIP: In Elastic Cloud, use the user settings feature to configure all cluster + * settings. This method automatically rejects unsafe settings that could break + * your cluster. If you run Elasticsearch on your own hardware, use this API to + * configure dynamic cluster settings. Only use elasticsearch.yml + * for static cluster settings and node settings. The API doesn’t require a + * restart and ensures a setting’s value is the same on all nodes. + *

+ * WARNING: Transient cluster settings are no longer recommended. Use persistent + * cluster settings instead. If a cluster becomes unstable, transient settings + * can clear unexpectedly, resulting in a potentially undesired cluster + * configuration. * * @see Documentation @@ -667,9 +889,9 @@ public CompletableFuture putSettings() { // ----- Endpoint: cluster.remote_info /** - * The cluster remote info API allows you to retrieve all of the configured - * remote cluster information. It returns connection and endpoint information - * keyed by the configured remote cluster alias. + * Get remote cluster information. Get all of the configured remote cluster + * information. This API returns connection and endpoint information keyed by + * the configured remote cluster alias. * * @see Documentation @@ -683,7 +905,33 @@ public CompletableFuture remoteInfo() { // ----- Endpoint: cluster.reroute /** - * Allows to manually change the allocation of individual shards in the cluster. + * Reroute the cluster. Manually change the allocation of individual shards in + * the cluster. For example, a shard can be moved from one node to another + * explicitly, an allocation can be canceled, and an unassigned shard can be + * explicitly allocated to a specific node. + *

+ * It is important to note that after processing any reroute commands + * Elasticsearch will perform rebalancing as normal (respecting the values of + * settings such as cluster.routing.rebalance.enable) in order to + * remain in a balanced state. For example, if the requested allocation includes + * moving a shard from node1 to node2 then this may cause a shard to be moved + * from node2 back to node1 to even things out. + *

+ * The cluster can be set to disable allocations using the + * cluster.routing.allocation.enable setting. If allocations are + * disabled then the only allocations that will be performed are explicit ones + * given using the reroute command, and consequent allocations due to + * rebalancing. + *

+ * The cluster will attempt to allocate a shard a maximum of + * index.allocation.max_retries times in a row (defaults to + * 5), before giving up and leaving the shard unallocated. This + * scenario can be caused by structural problems such as having an analyzer + * which refers to a stopwords file which doesn’t exist on all nodes. + *

+ * Once the problem has been corrected, allocation can be manually retried by + * calling the reroute API with the ?retry_failed URI query + * parameter, which will attempt a single retry round for these shards. * * @see Documentation @@ -698,7 +946,33 @@ public CompletableFuture reroute(RerouteRequest request) { } /** - * Allows to manually change the allocation of individual shards in the cluster. + * Reroute the cluster. Manually change the allocation of individual shards in + * the cluster. For example, a shard can be moved from one node to another + * explicitly, an allocation can be canceled, and an unassigned shard can be + * explicitly allocated to a specific node. + *

+ * It is important to note that after processing any reroute commands + * Elasticsearch will perform rebalancing as normal (respecting the values of + * settings such as cluster.routing.rebalance.enable) in order to + * remain in a balanced state. For example, if the requested allocation includes + * moving a shard from node1 to node2 then this may cause a shard to be moved + * from node2 back to node1 to even things out. + *

+ * The cluster can be set to disable allocations using the + * cluster.routing.allocation.enable setting. If allocations are + * disabled then the only allocations that will be performed are explicit ones + * given using the reroute command, and consequent allocations due to + * rebalancing. + *

+ * The cluster will attempt to allocate a shard a maximum of + * index.allocation.max_retries times in a row (defaults to + * 5), before giving up and leaving the shard unallocated. This + * scenario can be caused by structural problems such as having an analyzer + * which refers to a stopwords file which doesn’t exist on all nodes. + *

+ * Once the problem has been corrected, allocation can be manually retried by + * calling the reroute API with the ?retry_failed URI query + * parameter, which will attempt a single retry round for these shards. * * @param fn * a function that initializes a builder to create the @@ -714,7 +988,33 @@ public final CompletableFuture reroute( } /** - * Allows to manually change the allocation of individual shards in the cluster. + * Reroute the cluster. Manually change the allocation of individual shards in + * the cluster. For example, a shard can be moved from one node to another + * explicitly, an allocation can be canceled, and an unassigned shard can be + * explicitly allocated to a specific node. + *

+ * It is important to note that after processing any reroute commands + * Elasticsearch will perform rebalancing as normal (respecting the values of + * settings such as cluster.routing.rebalance.enable) in order to + * remain in a balanced state. For example, if the requested allocation includes + * moving a shard from node1 to node2 then this may cause a shard to be moved + * from node2 back to node1 to even things out. + *

+ * The cluster can be set to disable allocations using the + * cluster.routing.allocation.enable setting. If allocations are + * disabled then the only allocations that will be performed are explicit ones + * given using the reroute command, and consequent allocations due to + * rebalancing. + *

+ * The cluster will attempt to allocate a shard a maximum of + * index.allocation.max_retries times in a row (defaults to + * 5), before giving up and leaving the shard unallocated. This + * scenario can be caused by structural problems such as having an analyzer + * which refers to a stopwords file which doesn’t exist on all nodes. + *

+ * Once the problem has been corrected, allocation can be manually retried by + * calling the reroute API with the ?retry_failed URI query + * parameter, which will attempt a single retry round for these shards. * * @see Documentation @@ -729,7 +1029,36 @@ public CompletableFuture reroute() { // ----- Endpoint: cluster.state /** - * Returns a comprehensive information about the state of the cluster. + * Get the cluster state. Get comprehensive information about the state of the + * cluster. + *

+ * The cluster state is an internal data structure which keeps track of a + * variety of information needed by every node, including the identity and + * attributes of the other nodes in the cluster; cluster-wide settings; index + * metadata, including the mapping and settings for each index; the location and + * status of every shard copy in the cluster. + *

+ * The elected master node ensures that every node in the cluster has a copy of + * the same cluster state. This API lets you retrieve a representation of this + * internal state for debugging or diagnostic purposes. You may need to consult + * the Elasticsearch source code to determine the precise meaning of the + * response. + *

+ * By default the API will route requests to the elected master node since this + * node is the authoritative source of cluster states. You can also retrieve the + * cluster state held on the node handling the API request by adding the + * ?local=true query parameter. + *

+ * Elasticsearch may need to expend significant effort to compute a response to + * this API in larger clusters, and the response may comprise a very large + * quantity of data. If you use this API repeatedly, your cluster may become + * unstable. + *

+ * WARNING: The response is a representation of an internal data structure. Its + * format is not subject to the same compatibility guarantees as other more + * stable APIs and may change from version to version. Do not query this API + * using external monitoring tools. Instead, obtain the information you require + * using other more stable cluster APIs. * * @see Documentation @@ -744,7 +1073,36 @@ public CompletableFuture state(StateRequest request) { } /** - * Returns a comprehensive information about the state of the cluster. + * Get the cluster state. Get comprehensive information about the state of the + * cluster. + *

+ * The cluster state is an internal data structure which keeps track of a + * variety of information needed by every node, including the identity and + * attributes of the other nodes in the cluster; cluster-wide settings; index + * metadata, including the mapping and settings for each index; the location and + * status of every shard copy in the cluster. + *

+ * The elected master node ensures that every node in the cluster has a copy of + * the same cluster state. This API lets you retrieve a representation of this + * internal state for debugging or diagnostic purposes. You may need to consult + * the Elasticsearch source code to determine the precise meaning of the + * response. + *

+ * By default the API will route requests to the elected master node since this + * node is the authoritative source of cluster states. You can also retrieve the + * cluster state held on the node handling the API request by adding the + * ?local=true query parameter. + *

+ * Elasticsearch may need to expend significant effort to compute a response to + * this API in larger clusters, and the response may comprise a very large + * quantity of data. If you use this API repeatedly, your cluster may become + * unstable. + *

+ * WARNING: The response is a representation of an internal data structure. Its + * format is not subject to the same compatibility guarantees as other more + * stable APIs and may change from version to version. Do not query this API + * using external monitoring tools. Instead, obtain the information you require + * using other more stable cluster APIs. * * @param fn * a function that initializes a builder to create the @@ -760,7 +1118,36 @@ public final CompletableFuture state( } /** - * Returns a comprehensive information about the state of the cluster. + * Get the cluster state. Get comprehensive information about the state of the + * cluster. + *

+ * The cluster state is an internal data structure which keeps track of a + * variety of information needed by every node, including the identity and + * attributes of the other nodes in the cluster; cluster-wide settings; index + * metadata, including the mapping and settings for each index; the location and + * status of every shard copy in the cluster. + *

+ * The elected master node ensures that every node in the cluster has a copy of + * the same cluster state. This API lets you retrieve a representation of this + * internal state for debugging or diagnostic purposes. You may need to consult + * the Elasticsearch source code to determine the precise meaning of the + * response. + *

+ * By default the API will route requests to the elected master node since this + * node is the authoritative source of cluster states. You can also retrieve the + * cluster state held on the node handling the API request by adding the + * ?local=true query parameter. + *

+ * Elasticsearch may need to expend significant effort to compute a response to + * this API in larger clusters, and the response may comprise a very large + * quantity of data. If you use this API repeatedly, your cluster may become + * unstable. + *

+ * WARNING: The response is a representation of an internal data structure. Its + * format is not subject to the same compatibility guarantees as other more + * stable APIs and may change from version to version. Do not query this API + * using external monitoring tools. Instead, obtain the information you require + * using other more stable cluster APIs. * * @see Documentation @@ -775,10 +1162,9 @@ public CompletableFuture state() { // ----- Endpoint: cluster.stats /** - * Returns cluster statistics. It returns basic index metrics (shard numbers, - * store size, memory usage) and information about the current nodes that form - * the cluster (number, roles, os, jvm versions, memory usage, cpu and installed - * plugins). + * Get cluster statistics. Get basic index metrics (shard numbers, store size, + * memory usage) and information about the current nodes that form the cluster + * (number, roles, os, jvm versions, memory usage, cpu and installed plugins). * * @see Documentation @@ -793,10 +1179,9 @@ public CompletableFuture stats(ClusterStatsRequest request } /** - * Returns cluster statistics. It returns basic index metrics (shard numbers, - * store size, memory usage) and information about the current nodes that form - * the cluster (number, roles, os, jvm versions, memory usage, cpu and installed - * plugins). + * Get cluster statistics. Get basic index metrics (shard numbers, store size, + * memory usage) and information about the current nodes that form the cluster + * (number, roles, os, jvm versions, memory usage, cpu and installed plugins). * * @param fn * a function that initializes a builder to create the @@ -812,10 +1197,9 @@ public final CompletableFuture stats( } /** - * Returns cluster statistics. It returns basic index metrics (shard numbers, - * store size, memory usage) and information about the current nodes that form - * the cluster (number, roles, os, jvm versions, memory usage, cpu and installed - * plugins). + * Get cluster statistics. 
Get basic index metrics (shard numbers, store size, + * memory usage) and information about the current nodes that form the cluster + * (number, roles, os, jvm versions, memory usage, cpu and installed plugins). * * @see Documentation diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/ElasticsearchClusterClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/ElasticsearchClusterClient.java index ca6848e1d..3eef299cb 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/ElasticsearchClusterClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/ElasticsearchClusterClient.java @@ -69,7 +69,13 @@ public ElasticsearchClusterClient withTransportOptions(@Nullable TransportOption // ----- Endpoint: cluster.allocation_explain /** - * Provides explanations for shard allocations in the cluster. + * Explain the shard allocations. Get explanations for shard allocations in the + * cluster. For unassigned shards, it provides an explanation for why the shard + * is unassigned. For assigned shards, it provides an explanation for why the + * shard is remaining on its current node and has not moved or rebalanced to + * another node. This API can be very useful when attempting to diagnose why a + * shard is unassigned or why a shard continues to remain on its current node + * when you might expect otherwise. * * @see Documentation @@ -85,7 +91,13 @@ public AllocationExplainResponse allocationExplain(AllocationExplainRequest requ } /** - * Provides explanations for shard allocations in the cluster. + * Explain the shard allocations. Get explanations for shard allocations in the + * cluster. For unassigned shards, it provides an explanation for why the shard + * is unassigned. For assigned shards, it provides an explanation for why the + * shard is remaining on its current node and has not moved or rebalanced to + * another node. 
This API can be very useful when attempting to diagnose why a + * shard is unassigned or why a shard continues to remain on its current node + * when you might expect otherwise. * * @param fn * a function that initializes a builder to create the @@ -102,7 +114,13 @@ public final AllocationExplainResponse allocationExplain( } /** - * Provides explanations for shard allocations in the cluster. + * Explain the shard allocations. Get explanations for shard allocations in the + * cluster. For unassigned shards, it provides an explanation for why the shard + * is unassigned. For assigned shards, it provides an explanation for why the + * shard is remaining on its current node and has not moved or rebalanced to + * another node. This API can be very useful when attempting to diagnose why a + * shard is unassigned or why a shard continues to remain on its current node + * when you might expect otherwise. * * @see Documentation @@ -156,7 +174,8 @@ public final DeleteComponentTemplateResponse deleteComponentTemplate( // ----- Endpoint: cluster.delete_voting_config_exclusions /** - * Clears cluster voting config exclusions. + * Clear cluster voting config exclusions. Remove master-eligible nodes from the + * voting configuration exclusion list. * * @see Documentation @@ -172,7 +191,8 @@ public BooleanResponse deleteVotingConfigExclusions(DeleteVotingConfigExclusions } /** - * Clears cluster voting config exclusions. + * Clear cluster voting config exclusions. Remove master-eligible nodes from the + * voting configuration exclusion list. * * @param fn * a function that initializes a builder to create the @@ -189,7 +209,8 @@ public final BooleanResponse deleteVotingConfigExclusions( } /** - * Clears cluster voting config exclusions. + * Clear cluster voting config exclusions. Remove master-eligible nodes from the + * voting configuration exclusion list. 
* * @see Documentation @@ -289,7 +310,7 @@ public GetComponentTemplateResponse getComponentTemplate() throws IOException, E // ----- Endpoint: cluster.get_settings /** - * Returns cluster-wide settings. By default, it returns only settings that have + * Get cluster-wide settings. By default, it returns only settings that have * been explicitly defined. * * @see + * The cluster health status is: green, yellow or red. On the shard level, a red + * status indicates that the specific shard is not allocated in the cluster. + * Yellow means that the primary shard is allocated but replicas are not. Green + * means that all shards are allocated. The index level status is controlled by + * the worst shard status. + *

+ * One of the main benefits of the API is the ability to wait until the cluster + * reaches a certain high watermark health level. The cluster status is + * controlled by the worst index status. * * @see Documentation @@ -363,15 +388,19 @@ public HealthResponse health(HealthRequest request) throws IOException, Elastics } /** - * The cluster health API returns a simple status on the health of the cluster. - * You can also use the API to get the health status of only specified data - * streams and indices. For data streams, the API retrieves the health status of - * the stream’s backing indices. The cluster health status is: green, yellow or - * red. On the shard level, a red status indicates that the specific shard is - * not allocated in the cluster, yellow means that the primary shard is - * allocated but replicas are not, and green means that all shards are - * allocated. The index level status is controlled by the worst shard status. - * The cluster status is controlled by the worst index status. + * Get the cluster health status. You can also use the API to get the health + * status of only specified data streams and indices. For data streams, the API + * retrieves the health status of the stream’s backing indices. + *

+ * The cluster health status is: green, yellow or red. On the shard level, a red + * status indicates that the specific shard is not allocated in the cluster. + * Yellow means that the primary shard is allocated but replicas are not. Green + * means that all shards are allocated. The index level status is controlled by + * the worst shard status. + *

+ * One of the main benefits of the API is the ability to wait until the cluster + * reaches a certain high watermark health level. The cluster status is + * controlled by the worst index status. * * @param fn * a function that initializes a builder to create the @@ -387,15 +416,19 @@ public final HealthResponse health(Function + * The cluster health status is: green, yellow or red. On the shard level, a red + * status indicates that the specific shard is not allocated in the cluster. + * Yellow means that the primary shard is allocated but replicas are not. Green + * means that all shards are allocated. The index level status is controlled by + * the worst shard status. + *

+ * One of the main benefits of the API is the ability to wait until the cluster + * reaches a certain high watermark health level. The cluster status is + * controlled by the worst index status. * * @see Documentation @@ -443,14 +476,16 @@ public final ClusterInfoResponse info(Function + * NOTE: This API returns a list of any pending updates to the cluster state. + * These are distinct from the tasks reported by the task management API which + * include periodic tasks and tasks initiated by the user, such as node stats, + * search queries, or create index requests. However, if a user-initiated task + * such as a create index command causes a cluster state update, the activity of + * this task might be reported by both task api and pending cluster tasks API. * * @see Documentation @@ -465,14 +500,16 @@ public PendingTasksResponse pendingTasks(PendingTasksRequest request) throws IOE } /** - * Returns cluster-level changes (such as create index, update mapping, allocate - * or fail shard) that have not yet been executed. NOTE: This API returns a list - * of any pending updates to the cluster state. These are distinct from the - * tasks reported by the Task Management API which include periodic tasks and - * tasks initiated by the user, such as node stats, search queries, or create - * index requests. However, if a user-initiated task such as a create index - * command causes a cluster state update, the activity of this task might be - * reported by both task api and pending cluster tasks API. + * Get the pending cluster tasks. Get information about cluster-level changes + * (such as create index, update mapping, allocate or fail shard) that have not + * yet taken effect. + *

+ * NOTE: This API returns a list of any pending updates to the cluster state. + * These are distinct from the tasks reported by the task management API which + * include periodic tasks and tasks initiated by the user, such as node stats, + * search queries, or create index requests. However, if a user-initiated task + * such as a create index command causes a cluster state update, the activity of + * this task might be reported by both task api and pending cluster tasks API. * * @param fn * a function that initializes a builder to create the @@ -489,14 +526,16 @@ public final PendingTasksResponse pendingTasks( } /** - * Returns cluster-level changes (such as create index, update mapping, allocate - * or fail shard) that have not yet been executed. NOTE: This API returns a list - * of any pending updates to the cluster state. These are distinct from the - * tasks reported by the Task Management API which include periodic tasks and - * tasks initiated by the user, such as node stats, search queries, or create - * index requests. However, if a user-initiated task such as a create index - * command causes a cluster state update, the activity of this task might be - * reported by both task api and pending cluster tasks API. + * Get the pending cluster tasks. Get information about cluster-level changes + * (such as create index, update mapping, allocate or fail shard) that have not + * yet taken effect. + *

+ * NOTE: This API returns a list of any pending updates to the cluster state. + * These are distinct from the tasks reported by the task management API which + * include periodic tasks and tasks initiated by the user, such as node stats, + * search queries, or create index requests. However, if a user-initiated task + * such as a create index command causes a cluster state update, the activity of + * this task might be reported by both task api and pending cluster tasks API. * * @see Documentation @@ -511,7 +550,42 @@ public PendingTasksResponse pendingTasks() throws IOException, ElasticsearchExce // ----- Endpoint: cluster.post_voting_config_exclusions /** - * Updates the cluster voting config exclusions by node ids or node names. + * Update voting configuration exclusions. Update the cluster voting config + * exclusions by node IDs or node names. By default, if there are more than + * three master-eligible nodes in the cluster and you remove fewer than half of + * the master-eligible nodes in the cluster at once, the voting configuration + * automatically shrinks. If you want to shrink the voting configuration to + * contain fewer than three nodes or to remove half or more of the + * master-eligible nodes in the cluster at once, use this API to remove + * departing nodes from the voting configuration manually. The API adds an entry + * for each specified node to the cluster’s voting configuration exclusions + * list. It then waits until the cluster has reconfigured its voting + * configuration to exclude the specified nodes. + *

+ * Clusters should have no voting configuration exclusions in normal operation. + * Once the excluded nodes have stopped, clear the voting configuration + * exclusions with DELETE /_cluster/voting_config_exclusions. This + * API waits for the nodes to be fully removed from the cluster before it + * returns. If your cluster has voting configuration exclusions for nodes that + * you no longer intend to remove, use + * DELETE /_cluster/voting_config_exclusions?wait_for_removal=false + * to clear the voting configuration exclusions without waiting for the nodes to + * leave the cluster. + *

+ * A response to POST /_cluster/voting_config_exclusions with an + * HTTP status code of 200 OK guarantees that the node has been removed from the + * voting configuration and will not be reinstated until the voting + * configuration exclusions are cleared by calling + * DELETE /_cluster/voting_config_exclusions. If the call to + * POST /_cluster/voting_config_exclusions fails or returns a + * response with an HTTP status code other than 200 OK then the node may not + * have been removed from the voting configuration. In that case, you may safely + * retry the call. + *

+ * NOTE: Voting exclusions are required only when you remove at least half of + * the master-eligible nodes from a cluster in a short time period. They are not + * required when removing master-ineligible nodes or when removing fewer than + * half of the master-eligible nodes. * * @see Documentation @@ -527,7 +601,42 @@ public BooleanResponse postVotingConfigExclusions(PostVotingConfigExclusionsRequ } /** - * Updates the cluster voting config exclusions by node ids or node names. + * Update voting configuration exclusions. Update the cluster voting config + * exclusions by node IDs or node names. By default, if there are more than + * three master-eligible nodes in the cluster and you remove fewer than half of + * the master-eligible nodes in the cluster at once, the voting configuration + * automatically shrinks. If you want to shrink the voting configuration to + * contain fewer than three nodes or to remove half or more of the + * master-eligible nodes in the cluster at once, use this API to remove + * departing nodes from the voting configuration manually. The API adds an entry + * for each specified node to the cluster’s voting configuration exclusions + * list. It then waits until the cluster has reconfigured its voting + * configuration to exclude the specified nodes. + *

+ * Clusters should have no voting configuration exclusions in normal operation. + * Once the excluded nodes have stopped, clear the voting configuration + * exclusions with DELETE /_cluster/voting_config_exclusions. This + * API waits for the nodes to be fully removed from the cluster before it + * returns. If your cluster has voting configuration exclusions for nodes that + * you no longer intend to remove, use + * DELETE /_cluster/voting_config_exclusions?wait_for_removal=false + * to clear the voting configuration exclusions without waiting for the nodes to + * leave the cluster. + *

+ * A response to POST /_cluster/voting_config_exclusions with an + * HTTP status code of 200 OK guarantees that the node has been removed from the + * voting configuration and will not be reinstated until the voting + * configuration exclusions are cleared by calling + * DELETE /_cluster/voting_config_exclusions. If the call to + * POST /_cluster/voting_config_exclusions fails or returns a + * response with an HTTP status code other than 200 OK then the node may not + * have been removed from the voting configuration. In that case, you may safely + * retry the call. + *

+ * NOTE: Voting exclusions are required only when you remove at least half of + * the master-eligible nodes from a cluster in a short time period. They are not + * required when removing master-ineligible nodes or when removing fewer than + * half of the master-eligible nodes. * * @param fn * a function that initializes a builder to create the @@ -544,7 +653,42 @@ public final BooleanResponse postVotingConfigExclusions( } /** - * Updates the cluster voting config exclusions by node ids or node names. + * Update voting configuration exclusions. Update the cluster voting config + * exclusions by node IDs or node names. By default, if there are more than + * three master-eligible nodes in the cluster and you remove fewer than half of + * the master-eligible nodes in the cluster at once, the voting configuration + * automatically shrinks. If you want to shrink the voting configuration to + * contain fewer than three nodes or to remove half or more of the + * master-eligible nodes in the cluster at once, use this API to remove + * departing nodes from the voting configuration manually. The API adds an entry + * for each specified node to the cluster’s voting configuration exclusions + * list. It then waits until the cluster has reconfigured its voting + * configuration to exclude the specified nodes. + *

+ * Clusters should have no voting configuration exclusions in normal operation. + * Once the excluded nodes have stopped, clear the voting configuration + * exclusions with DELETE /_cluster/voting_config_exclusions. This + * API waits for the nodes to be fully removed from the cluster before it + * returns. If your cluster has voting configuration exclusions for nodes that + * you no longer intend to remove, use + * DELETE /_cluster/voting_config_exclusions?wait_for_removal=false + * to clear the voting configuration exclusions without waiting for the nodes to + * leave the cluster. + *

+ * A response to POST /_cluster/voting_config_exclusions with an + * HTTP status code of 200 OK guarantees that the node has been removed from the + * voting configuration and will not be reinstated until the voting + * configuration exclusions are cleared by calling + * DELETE /_cluster/voting_config_exclusions. If the call to + * POST /_cluster/voting_config_exclusions fails or returns a + * response with an HTTP status code other than 200 OK then the node may not + * have been removed from the voting configuration. In that case, you may safely + * retry the call. + *

+ * NOTE: Voting exclusions are required only when you remove at least half of + * the master-eligible nodes from a cluster in a short time period. They are not + * required when removing master-ineligible nodes or when removing fewer than + * half of the master-eligible nodes. * * @see Documentation @@ -634,7 +778,33 @@ public final PutComponentTemplateResponse putComponentTemplate( // ----- Endpoint: cluster.put_settings /** - * Updates the cluster settings. + * Update the cluster settings. Configure and update dynamic settings on a + * running cluster. You can also configure dynamic settings locally on an + * unstarted or shut down node in elasticsearch.yml. + *

+ * Updates made with this API can be persistent, which apply across cluster + * restarts, or transient, which reset after a cluster restart. You can also + * reset transient or persistent settings by assigning them a null value. + *

+ * If you configure the same setting using multiple methods, Elasticsearch + * applies the settings in the following order of precedence: 1) Transient setting; + * 2) Persistent setting; 3) elasticsearch.yml setting; 4) Default + * setting value. For example, you can apply a transient setting to override a + * persistent setting or elasticsearch.yml setting. However, a + * change to an elasticsearch.yml setting will not override a + * defined transient or persistent setting. + *

+ * TIP: In Elastic Cloud, use the user settings feature to configure all cluster + * settings. This method automatically rejects unsafe settings that could break + * your cluster. If you run Elasticsearch on your own hardware, use this API to + * configure dynamic cluster settings. Only use elasticsearch.yml + * for static cluster settings and node settings. The API doesn’t require a + * restart and ensures a setting’s value is the same on all nodes. + *

+ * WARNING: Transient cluster settings are no longer recommended. Use persistent + * cluster settings instead. If a cluster becomes unstable, transient settings + * can clear unexpectedly, resulting in a potentially undesired cluster + * configuration. * * @see Documentation @@ -650,7 +820,33 @@ public PutClusterSettingsResponse putSettings(PutClusterSettingsRequest request) } /** - * Updates the cluster settings. + * Update the cluster settings. Configure and update dynamic settings on a + * running cluster. You can also configure dynamic settings locally on an + * unstarted or shut down node in elasticsearch.yml. + *

+ * Updates made with this API can be persistent, which apply across cluster + * restarts, or transient, which reset after a cluster restart. You can also + * reset transient or persistent settings by assigning them a null value. + *

+ * If you configure the same setting using multiple methods, Elasticsearch + * applies the settings in the following order of precedence: 1) Transient setting; + * 2) Persistent setting; 3) elasticsearch.yml setting; 4) Default + * setting value. For example, you can apply a transient setting to override a + * persistent setting or elasticsearch.yml setting. However, a + * change to an elasticsearch.yml setting will not override a + * defined transient or persistent setting. + *

+ * TIP: In Elastic Cloud, use the user settings feature to configure all cluster + * settings. This method automatically rejects unsafe settings that could break + * your cluster. If you run Elasticsearch on your own hardware, use this API to + * configure dynamic cluster settings. Only use elasticsearch.yml + * for static cluster settings and node settings. The API doesn’t require a + * restart and ensures a setting’s value is the same on all nodes. + *

+ * WARNING: Transient cluster settings are no longer recommended. Use persistent + * cluster settings instead. If a cluster becomes unstable, transient settings + * can clear unexpectedly, resulting in a potentially undesired cluster + * configuration. * * @param fn * a function that initializes a builder to create the @@ -667,7 +863,33 @@ public final PutClusterSettingsResponse putSettings( } /** - * Updates the cluster settings. + * Update the cluster settings. Configure and update dynamic settings on a + * running cluster. You can also configure dynamic settings locally on an + * unstarted or shut down node in elasticsearch.yml. + *

+ * Updates made with this API can be persistent, which apply across cluster + * restarts, or transient, which reset after a cluster restart. You can also + * reset transient or persistent settings by assigning them a null value. + *

+ * If you configure the same setting using multiple methods, Elasticsearch + * applies the settings in the following order of precedence: 1) Transient setting; + * 2) Persistent setting; 3) elasticsearch.yml setting; 4) Default + * setting value. For example, you can apply a transient setting to override a + * persistent setting or elasticsearch.yml setting. However, a + * change to an elasticsearch.yml setting will not override a + * defined transient or persistent setting. + *

+ * TIP: In Elastic Cloud, use the user settings feature to configure all cluster + * settings. This method automatically rejects unsafe settings that could break + * your cluster. If you run Elasticsearch on your own hardware, use this API to + * configure dynamic cluster settings. Only use elasticsearch.yml + * for static cluster settings and node settings. The API doesn’t require a + * restart and ensures a setting’s value is the same on all nodes. + *

+ * WARNING: Transient cluster settings are no longer recommended. Use persistent + * cluster settings instead. If a cluster becomes unstable, transient settings + * can clear unexpectedly, resulting in a potentially undesired cluster + * configuration. * * @see Documentation @@ -682,9 +904,9 @@ public PutClusterSettingsResponse putSettings() throws IOException, Elasticsearc // ----- Endpoint: cluster.remote_info /** - * The cluster remote info API allows you to retrieve all of the configured - * remote cluster information. It returns connection and endpoint information - * keyed by the configured remote cluster alias. + * Get remote cluster information. Get all of the configured remote cluster + * information. This API returns connection and endpoint information keyed by + * the configured remote cluster alias. * * @see Documentation @@ -698,7 +920,33 @@ public RemoteInfoResponse remoteInfo() throws IOException, ElasticsearchExceptio // ----- Endpoint: cluster.reroute /** - * Allows to manually change the allocation of individual shards in the cluster. + * Reroute the cluster. Manually change the allocation of individual shards in + * the cluster. For example, a shard can be moved from one node to another + * explicitly, an allocation can be canceled, and an unassigned shard can be + * explicitly allocated to a specific node. + *

+ * It is important to note that after processing any reroute commands + * Elasticsearch will perform rebalancing as normal (respecting the values of + * settings such as cluster.routing.rebalance.enable) in order to + * remain in a balanced state. For example, if the requested allocation includes + * moving a shard from node1 to node2 then this may cause a shard to be moved + * from node2 back to node1 to even things out. + *

+ * The cluster can be set to disable allocations using the + * cluster.routing.allocation.enable setting. If allocations are + * disabled then the only allocations that will be performed are explicit ones + * given using the reroute command, and consequent allocations due to + * rebalancing. + *

+ * The cluster will attempt to allocate a shard a maximum of + * index.allocation.max_retries times in a row (defaults to + * 5), before giving up and leaving the shard unallocated. This + * scenario can be caused by structural problems such as having an analyzer + * which refers to a stopwords file which doesn’t exist on all nodes. + *

+ * Once the problem has been corrected, allocation can be manually retried by + * calling the reroute API with the ?retry_failed URI query + * parameter, which will attempt a single retry round for these shards. * * @see Documentation @@ -713,7 +961,33 @@ public RerouteResponse reroute(RerouteRequest request) throws IOException, Elast } /** - * Allows to manually change the allocation of individual shards in the cluster. + * Reroute the cluster. Manually change the allocation of individual shards in + * the cluster. For example, a shard can be moved from one node to another + * explicitly, an allocation can be canceled, and an unassigned shard can be + * explicitly allocated to a specific node. + *

+ * It is important to note that after processing any reroute commands + * Elasticsearch will perform rebalancing as normal (respecting the values of + * settings such as cluster.routing.rebalance.enable) in order to + * remain in a balanced state. For example, if the requested allocation includes + * moving a shard from node1 to node2 then this may cause a shard to be moved + * from node2 back to node1 to even things out. + *

+ * The cluster can be set to disable allocations using the + * cluster.routing.allocation.enable setting. If allocations are + * disabled then the only allocations that will be performed are explicit ones + * given using the reroute command, and consequent allocations due to + * rebalancing. + *

+ * The cluster will attempt to allocate a shard a maximum of + * index.allocation.max_retries times in a row (defaults to + * 5), before giving up and leaving the shard unallocated. This + * scenario can be caused by structural problems such as having an analyzer + * which refers to a stopwords file which doesn’t exist on all nodes. + *

+ * Once the problem has been corrected, allocation can be manually retried by + * calling the reroute API with the ?retry_failed URI query + * parameter, which will attempt a single retry round for these shards. * * @param fn * a function that initializes a builder to create the @@ -729,7 +1003,33 @@ public final RerouteResponse reroute(Function + * It is important to note that after processing any reroute commands + * Elasticsearch will perform rebalancing as normal (respecting the values of + * settings such as cluster.routing.rebalance.enable) in order to + * remain in a balanced state. For example, if the requested allocation includes + * moving a shard from node1 to node2 then this may cause a shard to be moved + * from node2 back to node1 to even things out. + *

+ * The cluster can be set to disable allocations using the + * cluster.routing.allocation.enable setting. If allocations are + * disabled then the only allocations that will be performed are explicit ones + * given using the reroute command, and consequent allocations due to + * rebalancing. + *

+ * The cluster will attempt to allocate a shard a maximum of + * index.allocation.max_retries times in a row (defaults to + * 5), before giving up and leaving the shard unallocated. This + * scenario can be caused by structural problems such as having an analyzer + * which refers to a stopwords file which doesn’t exist on all nodes. + *

+ * Once the problem has been corrected, allocation can be manually retried by + * calling the reroute API with the ?retry_failed URI query + * parameter, which will attempt a single retry round for these shards. * * @see Documentation @@ -744,7 +1044,36 @@ public RerouteResponse reroute() throws IOException, ElasticsearchException { // ----- Endpoint: cluster.state /** - * Returns a comprehensive information about the state of the cluster. + * Get the cluster state. Get comprehensive information about the state of the + * cluster. + *

+ * The cluster state is an internal data structure which keeps track of a + * variety of information needed by every node, including the identity and + * attributes of the other nodes in the cluster; cluster-wide settings; index + * metadata, including the mapping and settings for each index; the location and + * status of every shard copy in the cluster. + *

+ * The elected master node ensures that every node in the cluster has a copy of + * the same cluster state. This API lets you retrieve a representation of this + * internal state for debugging or diagnostic purposes. You may need to consult + * the Elasticsearch source code to determine the precise meaning of the + * response. + *

+ * By default the API will route requests to the elected master node since this + * node is the authoritative source of cluster states. You can also retrieve the + * cluster state held on the node handling the API request by adding the + * ?local=true query parameter. + *

+ * Elasticsearch may need to expend significant effort to compute a response to + * this API in larger clusters, and the response may comprise a very large + * quantity of data. If you use this API repeatedly, your cluster may become + * unstable. + *

+ * WARNING: The response is a representation of an internal data structure. Its + * format is not subject to the same compatibility guarantees as other more + * stable APIs and may change from version to version. Do not query this API + * using external monitoring tools. Instead, obtain the information you require + * using other more stable cluster APIs. * * @see Documentation @@ -759,7 +1088,36 @@ public StateResponse state(StateRequest request) throws IOException, Elasticsear } /** - * Returns a comprehensive information about the state of the cluster. + * Get the cluster state. Get comprehensive information about the state of the + * cluster. + *

+ * The cluster state is an internal data structure which keeps track of a + * variety of information needed by every node, including the identity and + * attributes of the other nodes in the cluster; cluster-wide settings; index + * metadata, including the mapping and settings for each index; the location and + * status of every shard copy in the cluster. + *

+ * The elected master node ensures that every node in the cluster has a copy of + * the same cluster state. This API lets you retrieve a representation of this + * internal state for debugging or diagnostic purposes. You may need to consult + * the Elasticsearch source code to determine the precise meaning of the + * response. + *

+ * By default the API will route requests to the elected master node since this + * node is the authoritative source of cluster states. You can also retrieve the + * cluster state held on the node handling the API request by adding the + * ?local=true query parameter. + *

+ * Elasticsearch may need to expend significant effort to compute a response to + * this API in larger clusters, and the response may comprise a very large + * quantity of data. If you use this API repeatedly, your cluster may become + * unstable. + *

+ * WARNING: The response is a representation of an internal data structure. Its + * format is not subject to the same compatibility guarantees as other more + * stable APIs and may change from version to version. Do not query this API + * using external monitoring tools. Instead, obtain the information you require + * using other more stable cluster APIs. * * @param fn * a function that initializes a builder to create the @@ -775,7 +1133,36 @@ public final StateResponse state(Function + * The cluster state is an internal data structure which keeps track of a + * variety of information needed by every node, including the identity and + * attributes of the other nodes in the cluster; cluster-wide settings; index + * metadata, including the mapping and settings for each index; the location and + * status of every shard copy in the cluster. + *

+ * The elected master node ensures that every node in the cluster has a copy of + * the same cluster state. This API lets you retrieve a representation of this + * internal state for debugging or diagnostic purposes. You may need to consult + * the Elasticsearch source code to determine the precise meaning of the + * response. + *

+ * By default the API will route requests to the elected master node since this + * node is the authoritative source of cluster states. You can also retrieve the + * cluster state held on the node handling the API request by adding the + * ?local=true query parameter. + *

+ * Elasticsearch may need to expend significant effort to compute a response to + * this API in larger clusters, and the response may comprise a very large + * quantity of data. If you use this API repeatedly, your cluster may become + * unstable. + *

+ * WARNING: The response is a representation of an internal data structure. Its + * format is not subject to the same compatibility guarantees as other more + * stable APIs and may change from version to version. Do not query this API + * using external monitoring tools. Instead, obtain the information you require + * using other more stable cluster APIs. * * @see Documentation @@ -790,10 +1177,9 @@ public StateResponse state() throws IOException, ElasticsearchException { // ----- Endpoint: cluster.stats /** - * Returns cluster statistics. It returns basic index metrics (shard numbers, - * store size, memory usage) and information about the current nodes that form - * the cluster (number, roles, os, jvm versions, memory usage, cpu and installed - * plugins). + * Get cluster statistics. Get basic index metrics (shard numbers, store size, + * memory usage) and information about the current nodes that form the cluster + * (number, roles, os, jvm versions, memory usage, cpu and installed plugins). * * @see Documentation @@ -808,10 +1194,9 @@ public ClusterStatsResponse stats(ClusterStatsRequest request) throws IOExceptio } /** - * Returns cluster statistics. It returns basic index metrics (shard numbers, - * store size, memory usage) and information about the current nodes that form - * the cluster (number, roles, os, jvm versions, memory usage, cpu and installed - * plugins). + * Get cluster statistics. Get basic index metrics (shard numbers, store size, + * memory usage) and information about the current nodes that form the cluster + * (number, roles, os, jvm versions, memory usage, cpu and installed plugins). * * @param fn * a function that initializes a builder to create the @@ -828,10 +1213,9 @@ public final ClusterStatsResponse stats( } /** - * Returns cluster statistics. 
It returns basic index metrics (shard numbers, - * store size, memory usage) and information about the current nodes that form - * the cluster (number, roles, os, jvm versions, memory usage, cpu and installed - * plugins). + * Get cluster statistics. Get basic index metrics (shard numbers, store size, + * memory usage) and information about the current nodes that form the cluster + * (number, roles, os, jvm versions, memory usage, cpu and installed plugins). * * @see Documentation diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/GetClusterSettingsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/GetClusterSettingsRequest.java index eb5300a08..337291f80 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/GetClusterSettingsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/GetClusterSettingsRequest.java @@ -56,7 +56,7 @@ // typedef: cluster.get_settings.Request /** - * Returns cluster-wide settings. By default, it returns only settings that have + * Get cluster-wide settings. By default, it returns only settings that have * been explicitly defined. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/HealthRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/HealthRequest.java index 08bc59796..e3c230002 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/HealthRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/HealthRequest.java @@ -64,15 +64,19 @@ // typedef: cluster.health.Request /** - * The cluster health API returns a simple status on the health of the cluster. - * You can also use the API to get the health status of only specified data - * streams and indices. For data streams, the API retrieves the health status of - * the stream’s backing indices. The cluster health status is: green, yellow or - * red. 
On the shard level, a red status indicates that the specific shard is - * not allocated in the cluster, yellow means that the primary shard is - * allocated but replicas are not, and green means that all shards are - * allocated. The index level status is controlled by the worst shard status. - * The cluster status is controlled by the worst index status. + * Get the cluster health status. You can also use the API to get the health + * status of only specified data streams and indices. For data streams, the API + * retrieves the health status of the stream’s backing indices. + *

+ * The cluster health status is: green, yellow or red. On the shard level, a red + * status indicates that the specific shard is not allocated in the cluster. + * Yellow means that the primary shard is allocated but replicas are not. Green + * means that all shards are allocated. The index level status is controlled by + * the worst shard status. + *

+ * One of the main benefits of the API is the ability to wait until the cluster + * reaches a certain high watermark health level. The cluster status is + * controlled by the worst index status. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/PendingTasksRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/PendingTasksRequest.java index 1d3366fbf..dd3d45551 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/PendingTasksRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/PendingTasksRequest.java @@ -56,14 +56,16 @@ // typedef: cluster.pending_tasks.Request /** - * Returns cluster-level changes (such as create index, update mapping, allocate - * or fail shard) that have not yet been executed. NOTE: This API returns a list - * of any pending updates to the cluster state. These are distinct from the - * tasks reported by the Task Management API which include periodic tasks and - * tasks initiated by the user, such as node stats, search queries, or create - * index requests. However, if a user-initiated task such as a create index - * command causes a cluster state update, the activity of this task might be - * reported by both task api and pending cluster tasks API. + * Get the pending cluster tasks. Get information about cluster-level changes + * (such as create index, update mapping, allocate or fail shard) that have not + * yet taken effect. + *

+ * NOTE: This API returns a list of any pending updates to the cluster state. + * These are distinct from the tasks reported by the task management API which + * include periodic tasks and tasks initiated by the user, such as node stats, + * search queries, or create index requests. However, if a user-initiated task + * such as a create index command causes a cluster state update, the activity of + * this task might be reported by both task api and pending cluster tasks API. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/PostVotingConfigExclusionsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/PostVotingConfigExclusionsRequest.java index e4ac6af9e..f729d0b3c 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/PostVotingConfigExclusionsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/PostVotingConfigExclusionsRequest.java @@ -61,7 +61,42 @@ // typedef: cluster.post_voting_config_exclusions.Request /** - * Updates the cluster voting config exclusions by node ids or node names. + * Update voting configuration exclusions. Update the cluster voting config + * exclusions by node IDs or node names. By default, if there are more than + * three master-eligible nodes in the cluster and you remove fewer than half of + * the master-eligible nodes in the cluster at once, the voting configuration + * automatically shrinks. If you want to shrink the voting configuration to + * contain fewer than three nodes or to remove half or more of the + * master-eligible nodes in the cluster at once, use this API to remove + * departing nodes from the voting configuration manually. The API adds an entry + * for each specified node to the cluster’s voting configuration exclusions + * list. It then waits until the cluster has reconfigured its voting + * configuration to exclude the specified nodes. + *

+ * Clusters should have no voting configuration exclusions in normal operation. + * Once the excluded nodes have stopped, clear the voting configuration + * exclusions with DELETE /_cluster/voting_config_exclusions. This + * API waits for the nodes to be fully removed from the cluster before it + * returns. If your cluster has voting configuration exclusions for nodes that + * you no longer intend to remove, use + * DELETE /_cluster/voting_config_exclusions?wait_for_removal=false + * to clear the voting configuration exclusions without waiting for the nodes to + * leave the cluster. + *

+ * A response to POST /_cluster/voting_config_exclusions with an + * HTTP status code of 200 OK guarantees that the node has been removed from the + * voting configuration and will not be reinstated until the voting + * configuration exclusions are cleared by calling + * DELETE /_cluster/voting_config_exclusions. If the call to + * POST /_cluster/voting_config_exclusions fails or returns a + * response with an HTTP status code other than 200 OK then the node may not + * have been removed from the voting configuration. In that case, you may safely + * retry the call. + *

+ * NOTE: Voting exclusions are required only when you remove at least half of + * the master-eligible nodes from a cluster in a short time period. They are not + * required when removing master-ineligible nodes or when removing fewer than + * half of the master-eligible nodes. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/PutClusterSettingsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/PutClusterSettingsRequest.java index c62cf1082..d1d0f6cfd 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/PutClusterSettingsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/PutClusterSettingsRequest.java @@ -61,7 +61,33 @@ // typedef: cluster.put_settings.Request /** - * Updates the cluster settings. + * Update the cluster settings. Configure and update dynamic settings on a + * running cluster. You can also configure dynamic settings locally on an + * unstarted or shut down node in elasticsearch.yml. + *

+ * Updates made with this API can be persistent, which apply across cluster + * restarts, or transient, which reset after a cluster restart. You can also + * reset transient or persistent settings by assigning them a null value. + *

+ * If you configure the same setting using multiple methods, Elasticsearch + * applies the settings in the following order of precedence: 1) Transient setting; + * 2) Persistent setting; 3) elasticsearch.yml setting; 4) Default + * setting value. For example, you can apply a transient setting to override a + * persistent setting or elasticsearch.yml setting. However, a + * change to an elasticsearch.yml setting will not override a + * defined transient or persistent setting. + *

+ * TIP: In Elastic Cloud, use the user settings feature to configure all cluster + * settings. This method automatically rejects unsafe settings that could break + * your cluster. If you run Elasticsearch on your own hardware, use this API to + * configure dynamic cluster settings. Only use elasticsearch.yml + * for static cluster settings and node settings. The API doesn’t require a + * restart and ensures a setting’s value is the same on all nodes. + *

+ * WARNING: Transient cluster settings are no longer recommended. Use persistent + * cluster settings instead. If a cluster becomes unstable, transient settings + * can clear unexpectedly, resulting in a potentially undesired cluster + * configuration. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/RemoteInfoRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/RemoteInfoRequest.java index c804fee28..7fcef9eb8 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/RemoteInfoRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/RemoteInfoRequest.java @@ -50,9 +50,9 @@ // typedef: cluster.remote_info.Request /** - * The cluster remote info API allows you to retrieve all of the configured - * remote cluster information. It returns connection and endpoint information - * keyed by the configured remote cluster alias. + * Get remote cluster information. Get all of the configured remote cluster + * information. This API returns connection and endpoint information keyed by + * the configured remote cluster alias. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/RerouteRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/RerouteRequest.java index c9a9570a0..1f63486bf 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/RerouteRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/RerouteRequest.java @@ -63,7 +63,33 @@ // typedef: cluster.reroute.Request /** - * Allows to manually change the allocation of individual shards in the cluster. + * Reroute the cluster. Manually change the allocation of individual shards in + * the cluster. 
For example, a shard can be moved from one node to another + * explicitly, an allocation can be canceled, and an unassigned shard can be + * explicitly allocated to a specific node. + *

+ * It is important to note that after processing any reroute commands + * Elasticsearch will perform rebalancing as normal (respecting the values of + * settings such as cluster.routing.rebalance.enable) in order to + * remain in a balanced state. For example, if the requested allocation includes + * moving a shard from node1 to node2 then this may cause a shard to be moved + * from node2 back to node1 to even things out. + *

+ * The cluster can be set to disable allocations using the + * cluster.routing.allocation.enable setting. If allocations are + * disabled then the only allocations that will be performed are explicit ones + * given using the reroute command, and consequent allocations due to + * rebalancing. + *

+ * The cluster will attempt to allocate a shard a maximum of + * index.allocation.max_retries times in a row (defaults to + * 5), before giving up and leaving the shard unallocated. This + * scenario can be caused by structural problems such as having an analyzer + * which refers to a stopwords file which doesn’t exist on all nodes. + *

+ * Once the problem has been corrected, allocation can be manually retried by + * calling the reroute API with the ?retry_failed URI query + * parameter, which will attempt a single retry round for these shards. * * @see API * specification @@ -117,8 +143,10 @@ public final List commands() { } /** - * If true, then the request simulates the operation only and returns the - * resulting state. + * If true, then the request simulates the operation. It will calculate the + * result of applying the commands to the current cluster state and return the + * resulting cluster state after the commands (and rebalancing) have been + * applied; it will not actually perform the requested changes. *

* API name: {@code dry_run} */ @@ -129,7 +157,7 @@ public final Boolean dryRun() { /** * If true, then the response contains an explanation of why the commands can or - * cannot be executed. + * cannot run. *

* API name: {@code explain} */ @@ -268,8 +296,10 @@ public final Builder commands(Function> } /** - * If true, then the request simulates the operation only and returns the - * resulting state. + * If true, then the request simulates the operation. It will calculate the + * result of applying the commands to the current cluster state and return the + * resulting cluster state after the commands (and rebalancing) have been + * applied; it will not actually perform the requested changes. *

* API name: {@code dry_run} */ @@ -280,7 +310,7 @@ public final Builder dryRun(@Nullable Boolean value) { /** * If true, then the response contains an explanation of why the commands can or - * cannot be executed. + * cannot run. *

* API name: {@code explain} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/StateRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/StateRequest.java index 2728396f3..9ffea0b2f 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/StateRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/cluster/StateRequest.java @@ -61,7 +61,36 @@ // typedef: cluster.state.Request /** - * Returns a comprehensive information about the state of the cluster. + * Get the cluster state. Get comprehensive information about the state of the + * cluster. + *

+ * The cluster state is an internal data structure which keeps track of a + * variety of information needed by every node, including the identity and + * attributes of the other nodes in the cluster; cluster-wide settings; index + * metadata, including the mapping and settings for each index; the location and + * status of every shard copy in the cluster. + *

+ * The elected master node ensures that every node in the cluster has a copy of + * the same cluster state. This API lets you retrieve a representation of this + * internal state for debugging or diagnostic purposes. You may need to consult + * the Elasticsearch source code to determine the precise meaning of the + * response. + *

+ * By default the API will route requests to the elected master node since this + * node is the authoritative source of cluster states. You can also retrieve the + * cluster state held on the node handling the API request by adding the + * ?local=true query parameter. + *

+ * Elasticsearch may need to expend significant effort to compute a response to + * this API in larger clusters, and the response may comprise a very large + * quantity of data. If you use this API repeatedly, your cluster may become + * unstable. + *

+ * WARNING: The response is a representation of an internal data structure. Its + * format is not subject to the same compatibility guarantees as other more + * stable APIs and may change from version to version. Do not query this API + * using external monitoring tools. Instead, obtain the information you require + * using other more stable cluster APIs. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/BulkRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/BulkRequest.java index 7b237e3a7..b81e9ab31 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/BulkRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/BulkRequest.java @@ -86,6 +86,9 @@ public class BulkRequest extends RequestBase implements NdJsonpSerializable, Jso @Nullable private final String index; + @Nullable + private final Boolean listExecutedPipelines; + @Nullable private final String pipeline; @@ -95,6 +98,9 @@ public class BulkRequest extends RequestBase implements NdJsonpSerializable, Jso @Nullable private final Boolean requireAlias; + @Nullable + private final Boolean requireDataStream; + @Nullable private final String routing; @@ -114,9 +120,11 @@ private BulkRequest(Builder builder) { this.sourceExcludes = ApiTypeHelper.unmodifiable(builder.sourceExcludes); this.sourceIncludes = ApiTypeHelper.unmodifiable(builder.sourceIncludes); this.index = builder.index; + this.listExecutedPipelines = builder.listExecutedPipelines; this.pipeline = builder.pipeline; this.refresh = builder.refresh; this.requireAlias = builder.requireAlias; + this.requireDataStream = builder.requireDataStream; this.routing = builder.routing; this.timeout = builder.timeout; this.waitForActiveShards = builder.waitForActiveShards; @@ -171,6 +179,17 @@ public final String index() { return this.index; } + /** + * If true, the response will include the ingest pipelines that + * were executed for 
each index or create. + *

+ * API name: {@code list_executed_pipelines} + */ + @Nullable + public final Boolean listExecutedPipelines() { + return this.listExecutedPipelines; + } + /** * ID of the pipeline to use to preprocess incoming documents. If the index has * a default ingest pipeline specified, then setting the value to @@ -209,6 +228,17 @@ public final Boolean requireAlias() { return this.requireAlias; } + /** + * If true, the request's actions must target a data stream + * (existing or to-be-created). + *

+ * API name: {@code require_data_stream} + */ + @Nullable + public final Boolean requireDataStream() { + return this.requireDataStream; + } + /** * Custom value used to route operations to a specific shard. *

@@ -281,6 +311,9 @@ public static class Builder extends RequestBase.AbstractBuilder impleme @Nullable private String index; + @Nullable + private Boolean listExecutedPipelines; + @Nullable private String pipeline; @@ -290,6 +323,9 @@ public static class Builder extends RequestBase.AbstractBuilder impleme @Nullable private Boolean requireAlias; + @Nullable + private Boolean requireDataStream; + @Nullable private String routing; @@ -380,6 +416,17 @@ public final Builder index(@Nullable String value) { return this; } + /** + * If true, the response will include the ingest pipelines that + * were executed for each index or create. + *

+ * API name: {@code list_executed_pipelines} + */ + public final Builder listExecutedPipelines(@Nullable Boolean value) { + this.listExecutedPipelines = value; + return this; + } + /** * ID of the pipeline to use to preprocess incoming documents. If the index has * a default ingest pipeline specified, then setting the value to @@ -418,6 +465,17 @@ public final Builder requireAlias(@Nullable Boolean value) { return this; } + /** + * If true, the request's actions must target a data stream + * (existing or to-be-created). + *

+ * API name: {@code require_data_stream} + */ + public final Builder requireDataStream(@Nullable Boolean value) { + this.requireDataStream = value; + return this; + } + /** * Custom value used to route operations to a specific shard. *

@@ -605,6 +663,12 @@ public BulkRequest build() { params.put("_source_includes", request.sourceIncludes.stream().map(v -> v).collect(Collectors.joining(","))); } + if (request.requireDataStream != null) { + params.put("require_data_stream", String.valueOf(request.requireDataStream)); + } + if (request.listExecutedPipelines != null) { + params.put("list_executed_pipelines", String.valueOf(request.listExecutedPipelines)); + } if (request.timeout != null) { params.put("timeout", request.timeout._toJsonString()); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/HealthReportRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/HealthReportRequest.java index defa784dc..9d409bf66 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/HealthReportRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/HealthReportRequest.java @@ -60,7 +60,34 @@ // typedef: _global.health_report.Request /** - * Returns the health of the cluster. + * Get the cluster health. Get a report with the health status of an + * Elasticsearch cluster. The report contains a list of indicators that compose + * Elasticsearch functionality. + *

+ * Each indicator has a health status of: green, unknown, yellow or red. The + * indicator will provide an explanation and metadata describing the reason for + * its current health status. + *

+ * The cluster’s status is controlled by the worst indicator status. + *

+ * In the event that an indicator’s status is non-green, a list of impacts may + * be present in the indicator result which detail the functionalities that are + * negatively affected by the health issue. Each impact carries with it a + * severity level, an area of the system that is affected, and a simple + * description of the impact on the system. + *

+ * Some health indicators can determine the root cause of a health problem and + * prescribe a set of steps that can be performed in order to improve the health + * of the system. The root cause and remediation steps are encapsulated in a + * diagnosis. A diagnosis contains a cause detailing a root cause analysis, an + * action containing a brief description of the steps to take to fix the + * problem, the list of affected resources (if applicable), and a detailed + * step-by-step troubleshooting guide to fix the diagnosed problem. + *

+ * NOTE: The health indicators perform root cause analysis of non-green health + * statuses. This can be computationally expensive when called frequently. When + * setting up automated polling of the API for health status, set verbose to + * false to disable the more expensive analysis logic. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/PingRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/PingRequest.java index 8c7213ad5..2a1edd3e6 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/core/PingRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/core/PingRequest.java @@ -52,7 +52,7 @@ // typedef: _global.ping.Request /** - * Ping the cluster. Returns whether the cluster is running. + * Ping the cluster. Get information about whether the cluster is running. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/doc-files/api-spec.html b/java-client/src/main/java/co/elastic/clients/elasticsearch/doc-files/api-spec.html index 83473e9f6..38ab7e139 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/doc-files/api-spec.html +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/doc-files/api-spec.html @@ -10,7 +10,7 @@ '_global.bulk.OperationBase': '_global/bulk/types.ts#L90-L107', '_global.bulk.OperationContainer': '_global/bulk/types.ts#L145-L167', '_global.bulk.OperationType': '_global/bulk/types.ts#L83-L88', -'_global.bulk.Request': '_global/bulk/BulkRequest.ts#L32-L105', +'_global.bulk.Request': '_global/bulk/BulkRequest.ts#L32-L115', '_global.bulk.Response': '_global/bulk/BulkResponse.ts#L24-L31', '_global.bulk.ResponseItem': '_global/bulk/types.ts#L37-L81', '_global.bulk.UpdateAction': '_global/bulk/types.ts#L169-L205', @@ -78,7 +78,7 @@ '_global.health_report.MasterIsStableIndicatorExceptionFetchingHistory': '_global/health_report/types.ts#L96-L99', 
'_global.health_report.RepositoryIntegrityIndicator': '_global/health_report/types.ts#L137-L141', '_global.health_report.RepositoryIntegrityIndicatorDetails': '_global/health_report/types.ts#L142-L146', -'_global.health_report.Request': '_global/health_report/Request.ts#L24-L52', +'_global.health_report.Request': '_global/health_report/Request.ts#L24-L70', '_global.health_report.Response': '_global/health_report/Response.ts#L22-L28', '_global.health_report.ShardsAvailabilityIndicator': '_global/health_report/types.ts#L106-L110', '_global.health_report.ShardsAvailabilityIndicatorDetails': '_global/health_report/types.ts#L111-L122', @@ -1102,39 +1102,39 @@ 'ccr._types.FollowIndexStats': 'ccr/_types/FollowIndexStats.ts#L30-L33', 'ccr._types.ReadException': 'ccr/_types/FollowIndexStats.ts#L71-L75', 'ccr._types.ShardStats': 'ccr/_types/FollowIndexStats.ts#L35-L69', -'ccr.delete_auto_follow_pattern.Request': 'ccr/delete_auto_follow_pattern/DeleteAutoFollowPatternRequest.ts#L23-L32', +'ccr.delete_auto_follow_pattern.Request': 'ccr/delete_auto_follow_pattern/DeleteAutoFollowPatternRequest.ts#L23-L35', 'ccr.delete_auto_follow_pattern.Response': 'ccr/delete_auto_follow_pattern/DeleteAutoFollowPatternResponse.ts#L22-L24', -'ccr.follow.Request': 'ccr/follow/CreateFollowIndexRequest.ts#L26-L110', +'ccr.follow.Request': 'ccr/follow/CreateFollowIndexRequest.ts#L26-L113', 'ccr.follow.Response': 'ccr/follow/CreateFollowIndexResponse.ts#L20-L26', 'ccr.follow_info.FollowerIndex': 'ccr/follow_info/types.ts#L24-L30', 'ccr.follow_info.FollowerIndexParameters': 'ccr/follow_info/types.ts#L37-L83', 'ccr.follow_info.FollowerIndexStatus': 'ccr/follow_info/types.ts#L32-L35', -'ccr.follow_info.Request': 'ccr/follow_info/FollowInfoRequest.ts#L23-L32', +'ccr.follow_info.Request': 'ccr/follow_info/FollowInfoRequest.ts#L23-L36', 'ccr.follow_info.Response': 'ccr/follow_info/FollowInfoResponse.ts#L22-L24', -'ccr.follow_stats.Request': 'ccr/follow_stats/FollowIndexStatsRequest.ts#L23-L32', 
+'ccr.follow_stats.Request': 'ccr/follow_stats/FollowIndexStatsRequest.ts#L23-L36', 'ccr.follow_stats.Response': 'ccr/follow_stats/FollowIndexStatsResponse.ts#L22-L24', -'ccr.forget_follower.Request': 'ccr/forget_follower/ForgetFollowerIndexRequest.ts#L23-L38', +'ccr.forget_follower.Request': 'ccr/forget_follower/ForgetFollowerIndexRequest.ts#L23-L51', 'ccr.forget_follower.Response': 'ccr/forget_follower/ForgetFollowerIndexResponse.ts#L22-L24', 'ccr.get_auto_follow_pattern.AutoFollowPattern': 'ccr/get_auto_follow_pattern/types.ts#L23-L26', 'ccr.get_auto_follow_pattern.AutoFollowPatternSummary': 'ccr/get_auto_follow_pattern/types.ts#L28-L52', -'ccr.get_auto_follow_pattern.Request': 'ccr/get_auto_follow_pattern/GetAutoFollowPatternRequest.ts#L23-L33', +'ccr.get_auto_follow_pattern.Request': 'ccr/get_auto_follow_pattern/GetAutoFollowPatternRequest.ts#L23-L36', 'ccr.get_auto_follow_pattern.Response': 'ccr/get_auto_follow_pattern/GetAutoFollowPatternResponse.ts#L22-L24', -'ccr.pause_auto_follow_pattern.Request': 'ccr/pause_auto_follow_pattern/PauseAutoFollowPatternRequest.ts#L23-L32', +'ccr.pause_auto_follow_pattern.Request': 'ccr/pause_auto_follow_pattern/PauseAutoFollowPatternRequest.ts#L23-L41', 'ccr.pause_auto_follow_pattern.Response': 'ccr/pause_auto_follow_pattern/PauseAutoFollowPatternResponse.ts#L22-L24', -'ccr.pause_follow.Request': 'ccr/pause_follow/PauseFollowIndexRequest.ts#L23-L32', +'ccr.pause_follow.Request': 'ccr/pause_follow/PauseFollowIndexRequest.ts#L23-L37', 'ccr.pause_follow.Response': 'ccr/pause_follow/PauseFollowIndexResponse.ts#L22-L24', -'ccr.put_auto_follow_pattern.Request': 'ccr/put_auto_follow_pattern/PutAutoFollowPatternRequest.ts#L27-L112', +'ccr.put_auto_follow_pattern.Request': 'ccr/put_auto_follow_pattern/PutAutoFollowPatternRequest.ts#L27-L120', 'ccr.put_auto_follow_pattern.Response': 'ccr/put_auto_follow_pattern/PutAutoFollowPatternResponse.ts#L22-L24', -'ccr.resume_auto_follow_pattern.Request': 
'ccr/resume_auto_follow_pattern/ResumeAutoFollowPatternRequest.ts#L23-L32', +'ccr.resume_auto_follow_pattern.Request': 'ccr/resume_auto_follow_pattern/ResumeAutoFollowPatternRequest.ts#L23-L37', 'ccr.resume_auto_follow_pattern.Response': 'ccr/resume_auto_follow_pattern/ResumeAutoFollowPatternResponse.ts#L22-L24', -'ccr.resume_follow.Request': 'ccr/resume_follow/ResumeFollowIndexRequest.ts#L25-L46', +'ccr.resume_follow.Request': 'ccr/resume_follow/ResumeFollowIndexRequest.ts#L25-L52', 'ccr.resume_follow.Response': 'ccr/resume_follow/ResumeFollowIndexResponse.ts#L22-L24', 'ccr.stats.AutoFollowStats': 'ccr/stats/types.ts.ts#L32-L38', 'ccr.stats.AutoFollowedCluster': 'ccr/stats/types.ts.ts#L26-L30', 'ccr.stats.FollowStats': 'ccr/stats/types.ts.ts#L40-L42', -'ccr.stats.Request': 'ccr/stats/CcrStatsRequest.ts#L22-L27', +'ccr.stats.Request': 'ccr/stats/CcrStatsRequest.ts#L22-L29', 'ccr.stats.Response': 'ccr/stats/CcrStatsResponse.ts#L22-L27', -'ccr.unfollow.Request': 'ccr/unfollow/UnfollowIndexRequest.ts#L23-L32', +'ccr.unfollow.Request': 'ccr/unfollow/UnfollowIndexRequest.ts#L23-L39', 'ccr.unfollow.Response': 'ccr/unfollow/UnfollowIndexResponse.ts#L22-L24', 'cluster._types.ComponentTemplate': 'cluster/_types/ComponentTemplate.ts#L27-L30', 'cluster._types.ComponentTemplateNode': 'cluster/_types/ComponentTemplate.ts#L32-L37', @@ -1148,14 +1148,14 @@ 'cluster.allocation_explain.DiskUsage': 'cluster/allocation_explain/types.ts#L63-L70', 'cluster.allocation_explain.NodeAllocationExplanation': 'cluster/allocation_explain/types.ts#L103-L117', 'cluster.allocation_explain.NodeDiskUsage': 'cluster/allocation_explain/types.ts#L57-L61', -'cluster.allocation_explain.Request': 'cluster/allocation_explain/ClusterAllocationExplainRequest.ts#L24-L61', +'cluster.allocation_explain.Request': 'cluster/allocation_explain/ClusterAllocationExplainRequest.ts#L24-L66', 'cluster.allocation_explain.ReservedSize': 'cluster/allocation_explain/types.ts#L72-L77', 'cluster.allocation_explain.Response': 
'cluster/allocation_explain/ClusterAllocationExplainResponse.ts#L32-L64', 'cluster.allocation_explain.UnassignedInformation': 'cluster/allocation_explain/types.ts#L128-L136', 'cluster.allocation_explain.UnassignedInformationReason': 'cluster/allocation_explain/types.ts#L138-L157', 'cluster.delete_component_template.Request': 'cluster/delete_component_template/ClusterDeleteComponentTemplateRequest.ts#L24-L56', 'cluster.delete_component_template.Response': 'cluster/delete_component_template/ClusterDeleteComponentTemplateResponse.ts#L22-L24', -'cluster.delete_voting_config_exclusions.Request': 'cluster/delete_voting_config_exclusions/ClusterDeleteVotingConfigExclusionsRequest.ts#L22-L40', +'cluster.delete_voting_config_exclusions.Request': 'cluster/delete_voting_config_exclusions/ClusterDeleteVotingConfigExclusionsRequest.ts#L22-L43', 'cluster.exists_component_template.Request': 'cluster/exists_component_template/ClusterComponentTemplateExistsRequest.ts#L24-L56', 'cluster.get_component_template.Request': 'cluster/get_component_template/ClusterGetComponentTemplateRequest.ts#L24-L67', 'cluster.get_component_template.Response': 'cluster/get_component_template/ClusterGetComponentTemplateResponse.ts#L22-L24', @@ -1163,18 +1163,18 @@ 'cluster.get_settings.Response': 'cluster/get_settings/ClusterGetSettingsResponse.ts#L23-L29', 'cluster.health.HealthResponseBody': 'cluster/health/ClusterHealthResponse.ts#L39-L74', 'cluster.health.IndexHealthStats': 'cluster/health/types.ts#L24-L35', -'cluster.health.Request': 'cluster/health/ClusterHealthRequest.ts#L32-L99', +'cluster.health.Request': 'cluster/health/ClusterHealthRequest.ts#L32-L107', 'cluster.health.Response': 'cluster/health/ClusterHealthResponse.ts#L26-L37', 'cluster.health.ShardHealthStats': 'cluster/health/types.ts#L37-L45', 'cluster.info.Request': 'cluster/info/ClusterInfoRequest.ts#L23-L36', 'cluster.info.Response': 'cluster/info/ClusterInfoResponse.ts#L26-L34', 'cluster.pending_tasks.PendingTask': 
'cluster/pending_tasks/types.ts#L23-L47', -'cluster.pending_tasks.Request': 'cluster/pending_tasks/ClusterPendingTasksRequest.ts#L23-L48', +'cluster.pending_tasks.Request': 'cluster/pending_tasks/ClusterPendingTasksRequest.ts#L23-L50', 'cluster.pending_tasks.Response': 'cluster/pending_tasks/ClusterPendingTasksResponse.ts#L22-L24', -'cluster.post_voting_config_exclusions.Request': 'cluster/post_voting_config_exclusions/ClusterPostVotingConfigExclusionsRequest.ts#L24-L50', +'cluster.post_voting_config_exclusions.Request': 'cluster/post_voting_config_exclusions/ClusterPostVotingConfigExclusionsRequest.ts#L24-L69', 'cluster.put_component_template.Request': 'cluster/put_component_template/ClusterPutComponentTemplateRequest.ts#L25-L95', 'cluster.put_component_template.Response': 'cluster/put_component_template/ClusterPutComponentTemplateResponse.ts#L22-L24', -'cluster.put_settings.Request': 'cluster/put_settings/ClusterPutSettingsRequest.ts#L25-L43', +'cluster.put_settings.Request': 'cluster/put_settings/ClusterPutSettingsRequest.ts#L25-L61', 'cluster.put_settings.Response': 'cluster/put_settings/ClusterPutSettingsResponse.ts#L23-L29', 'cluster.remote_info.ClusterRemoteInfo': 'cluster/remote_info/ClusterRemoteInfoResponse.ts#L29-L30', 'cluster.remote_info.ClusterRemoteProxyInfo': 'cluster/remote_info/ClusterRemoteInfoResponse.ts#L42-L51', @@ -1186,12 +1186,12 @@ 'cluster.reroute.CommandAllocateReplicaAction': 'cluster/reroute/types.ts#L69-L76', 'cluster.reroute.CommandCancelAction': 'cluster/reroute/types.ts#L45-L50', 'cluster.reroute.CommandMoveAction': 'cluster/reroute/types.ts#L60-L67', -'cluster.reroute.Request': 'cluster/reroute/ClusterRerouteRequest.ts#L25-L70', +'cluster.reroute.Request': 'cluster/reroute/ClusterRerouteRequest.ts#L25-L85', 'cluster.reroute.RerouteDecision': 'cluster/reroute/types.ts#L86-L90', 'cluster.reroute.RerouteExplanation': 'cluster/reroute/types.ts#L92-L96', 'cluster.reroute.RerouteParameters': 'cluster/reroute/types.ts#L98-L105', 
'cluster.reroute.Response': 'cluster/reroute/ClusterRerouteResponse.ts#L23-L34', -'cluster.state.Request': 'cluster/state/ClusterStateRequest.ts#L29-L56', +'cluster.state.Request': 'cluster/state/ClusterStateRequest.ts#L29-L75', 'cluster.state.Response': 'cluster/state/ClusterStateResponse.ts#L22-L29', 'cluster.stats.CharFilterTypes': 'cluster/stats/types.ts#L228-L261', 'cluster.stats.ClusterFileSystem': 'cluster/stats/types.ts#L34-L49', @@ -1352,9 +1352,9 @@ 'esql.query.Request': 'esql/query/QueryRequest.ts#L26-L91', 'esql.query.Response': 'esql/query/QueryResponse.ts#L22-L25', 'features._types.Feature': 'features/_types/Feature.ts#L20-L23', -'features.get_features.Request': 'features/get_features/GetFeaturesRequest.ts#L22-L26', +'features.get_features.Request': 'features/get_features/GetFeaturesRequest.ts#L22-L37', 'features.get_features.Response': 'features/get_features/GetFeaturesResponse.ts#L22-L26', -'features.reset_features.Request': 'features/reset_features/ResetFeaturesRequest.ts#L22-L27', +'features.reset_features.Request': 'features/reset_features/ResetFeaturesRequest.ts#L22-L44', 'features.reset_features.Response': 'features/reset_features/ResetFeaturesResponse.ts#L22-L26', 'fleet.search.Request': 'fleet/search/SearchRequest.ts#L54-L259', 'fleet.search.Response': 'fleet/search/SearchResponse.ts#L33-L50', @@ -1387,27 +1387,27 @@ 'ilm.explain_lifecycle.LifecycleExplainManaged': 'ilm/explain_lifecycle/types.ts#L26-L52', 'ilm.explain_lifecycle.LifecycleExplainPhaseExecution': 'ilm/explain_lifecycle/types.ts#L64-L68', 'ilm.explain_lifecycle.LifecycleExplainUnmanaged': 'ilm/explain_lifecycle/types.ts#L54-L57', -'ilm.explain_lifecycle.Request': 'ilm/explain_lifecycle/ExplainLifecycleRequest.ts#L24-L58', +'ilm.explain_lifecycle.Request': 'ilm/explain_lifecycle/ExplainLifecycleRequest.ts#L24-L62', 'ilm.explain_lifecycle.Response': 'ilm/explain_lifecycle/ExplainLifecycleResponse.ts#L24-L28', 'ilm.get_lifecycle.Lifecycle': 'ilm/get_lifecycle/types.ts#L24-L28', 
'ilm.get_lifecycle.Request': 'ilm/get_lifecycle/GetLifecycleRequest.ts#L24-L50', 'ilm.get_lifecycle.Response': 'ilm/get_lifecycle/GetLifecycleResponse.ts#L23-L26', -'ilm.get_status.Request': 'ilm/get_status/GetIlmStatusRequest.ts#L22-L26', +'ilm.get_status.Request': 'ilm/get_status/GetIlmStatusRequest.ts#L22-L29', 'ilm.get_status.Response': 'ilm/get_status/GetIlmStatusResponse.ts#L22-L24', -'ilm.migrate_to_data_tiers.Request': 'ilm/migrate_to_data_tiers/Request.ts#L22-L43', +'ilm.migrate_to_data_tiers.Request': 'ilm/migrate_to_data_tiers/Request.ts#L22-L54', 'ilm.migrate_to_data_tiers.Response': 'ilm/migrate_to_data_tiers/Response.ts#L22-L32', -'ilm.move_to_step.Request': 'ilm/move_to_step/MoveToStepRequest.ts#L24-L36', +'ilm.move_to_step.Request': 'ilm/move_to_step/MoveToStepRequest.ts#L24-L51', 'ilm.move_to_step.Response': 'ilm/move_to_step/MoveToStepResponse.ts#L22-L24', 'ilm.move_to_step.StepKey': 'ilm/move_to_step/types.ts#L20-L25', -'ilm.put_lifecycle.Request': 'ilm/put_lifecycle/PutLifecycleRequest.ts#L25-L55', +'ilm.put_lifecycle.Request': 'ilm/put_lifecycle/PutLifecycleRequest.ts#L25-L59', 'ilm.put_lifecycle.Response': 'ilm/put_lifecycle/PutLifecycleResponse.ts#L22-L24', -'ilm.remove_policy.Request': 'ilm/remove_policy/RemovePolicyRequest.ts#L23-L31', +'ilm.remove_policy.Request': 'ilm/remove_policy/RemovePolicyRequest.ts#L23-L35', 'ilm.remove_policy.Response': 'ilm/remove_policy/RemovePolicyResponse.ts#L22-L27', -'ilm.retry.Request': 'ilm/retry/RetryIlmRequest.ts#L23-L31', +'ilm.retry.Request': 'ilm/retry/RetryIlmRequest.ts#L23-L36', 'ilm.retry.Response': 'ilm/retry/RetryIlmResponse.ts#L22-L24', -'ilm.start.Request': 'ilm/start/StartIlmRequest.ts#L23-L32', +'ilm.start.Request': 'ilm/start/StartIlmRequest.ts#L23-L37', 'ilm.start.Response': 'ilm/start/StartIlmResponse.ts#L22-L24', -'ilm.stop.Request': 'ilm/stop/StopIlmRequest.ts#L23-L32', +'ilm.stop.Request': 'ilm/stop/StopIlmRequest.ts#L23-L39', 'ilm.stop.Response': 'ilm/stop/StopIlmResponse.ts#L22-L24', 
'indices._types.Alias': 'indices/_types/Alias.ts#L23-L53', 'indices._types.AliasDefinition': 'indices/_types/AliasDefinition.ts#L22-L54', @@ -1502,13 +1502,13 @@ 'indices.analyze.Request': 'indices/analyze/IndicesAnalyzeRequest.ts#L27-L93', 'indices.analyze.Response': 'indices/analyze/IndicesAnalyzeResponse.ts#L22-L27', 'indices.analyze.TokenDetail': 'indices/analyze/types.ts#L71-L74', -'indices.clear_cache.Request': 'indices/clear_cache/IndicesIndicesClearCacheRequest.ts#L23-L77', +'indices.clear_cache.Request': 'indices/clear_cache/IndicesIndicesClearCacheRequest.ts#L23-L78', 'indices.clear_cache.Response': 'indices/clear_cache/IndicesClearCacheResponse.ts#L22-L24', -'indices.clone.Request': 'indices/clone/IndicesCloneRequest.ts#L27-L75', +'indices.clone.Request': 'indices/clone/IndicesCloneRequest.ts#L27-L98', 'indices.clone.Response': 'indices/clone/IndicesCloneResponse.ts#L22-L28', 'indices.close.CloseIndexResult': 'indices/close/CloseIndexResponse.ts#L32-L35', 'indices.close.CloseShardResult': 'indices/close/CloseIndexResponse.ts#L37-L39', -'indices.close.Request': 'indices/close/CloseIndexRequest.ts#L24-L77', +'indices.close.Request': 'indices/close/CloseIndexRequest.ts#L24-L94', 'indices.close.Response': 'indices/close/CloseIndexResponse.ts#L24-L30', 'indices.create.Request': 'indices/create/IndicesCreateRequest.ts#L28-L82', 'indices.create.Response': 'indices/create/IndicesCreateResponse.ts#L22-L28', @@ -1529,9 +1529,9 @@ 'indices.delete_index_template.Response': 'indices/delete_index_template/IndicesDeleteIndexTemplateResponse.ts#L22-L24', 'indices.delete_template.Request': 'indices/delete_template/IndicesDeleteTemplateRequest.ts#L24-L52', 'indices.delete_template.Response': 'indices/delete_template/IndicesDeleteTemplateResponse.ts#L22-L24', -'indices.disk_usage.Request': 'indices/disk_usage/IndicesDiskUsageRequest.ts#L23-L71', +'indices.disk_usage.Request': 'indices/disk_usage/IndicesDiskUsageRequest.ts#L23-L74', 'indices.disk_usage.Response': 
'indices/disk_usage/IndicesDiskUsageResponse.ts#L22-L25', -'indices.downsample.Request': 'indices/downsample/Request.ts#L24-L44', +'indices.downsample.Request': 'indices/downsample/Request.ts#L24-L51', 'indices.downsample.Response': 'indices/downsample/Response.ts#L22-L25', 'indices.exists.Request': 'indices/exists/IndicesExistsRequest.ts#L23-L73', 'indices.exists_alias.Request': 'indices/exists_alias/IndicesExistsAliasRequest.ts#L24-L70', @@ -1543,14 +1543,14 @@ 'indices.field_usage_stats.FieldSummary': 'indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L57-L66', 'indices.field_usage_stats.FieldsUsageBody': 'indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L32-L39', 'indices.field_usage_stats.InvertedIndex': 'indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L68-L76', -'indices.field_usage_stats.Request': 'indices/field_usage_stats/IndicesFieldUsageStatsRequest.ts#L29-L84', +'indices.field_usage_stats.Request': 'indices/field_usage_stats/IndicesFieldUsageStatsRequest.ts#L29-L87', 'indices.field_usage_stats.Response': 'indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L28-L30', 'indices.field_usage_stats.ShardsStats': 'indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L52-L55', 'indices.field_usage_stats.UsageStatsIndex': 'indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L41-L43', 'indices.field_usage_stats.UsageStatsShards': 'indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L45-L50', -'indices.flush.Request': 'indices/flush/IndicesFlushRequest.ts#L23-L71', +'indices.flush.Request': 'indices/flush/IndicesFlushRequest.ts#L23-L81', 'indices.flush.Response': 'indices/flush/IndicesFlushResponse.ts#L22-L24', -'indices.forcemerge.Request': 'indices/forcemerge/IndicesForceMergeRequest.ts#L24-L42', +'indices.forcemerge.Request': 'indices/forcemerge/IndicesForceMergeRequest.ts#L24-L56', 'indices.forcemerge.Response': 'indices/forcemerge/IndicesForceMergeResponse.ts#L22-L24', 
'indices.forcemerge._types.ForceMergeResponseBody': 'indices/forcemerge/_types/response.ts#L22-L28', 'indices.get.Feature': 'indices/get/IndicesGetRequest.ts#L91-L95', @@ -1585,7 +1585,7 @@ 'indices.modify_data_stream.Response': 'indices/modify_data_stream/IndicesModifyDataStreamResponse.ts#L22-L24', 'indices.open.Request': 'indices/open/IndicesOpenRequest.ts#L24-L82', 'indices.open.Response': 'indices/open/IndicesOpenResponse.ts#L20-L25', -'indices.promote_data_stream.Request': 'indices/promote_data_stream/IndicesPromoteDataStreamRequest.ts#L24-L39', +'indices.promote_data_stream.Request': 'indices/promote_data_stream/IndicesPromoteDataStreamRequest.ts#L24-L50', 'indices.promote_data_stream.Response': 'indices/promote_data_stream/IndicesPromoteDataStreamResponse.ts#L22-L25', 'indices.put_alias.Request': 'indices/put_alias/IndicesPutAliasRequest.ts#L25-L92', 'indices.put_alias.Response': 'indices/put_alias/IndicesPutAliasResponse.ts#L22-L24', @@ -1598,7 +1598,7 @@ 'indices.put_mapping.Response': 'indices/put_mapping/IndicesPutMappingResponse.ts#L22-L24', 'indices.put_settings.Request': 'indices/put_settings/IndicesPutSettingsRequest.ts#L25-L93', 'indices.put_settings.Response': 'indices/put_settings/IndicesPutSettingsResponse.ts#L22-L24', -'indices.put_template.Request': 'indices/put_template/IndicesPutTemplateRequest.ts#L29-L94', +'indices.put_template.Request': 'indices/put_template/IndicesPutTemplateRequest.ts#L29-L106', 'indices.put_template.Response': 'indices/put_template/IndicesPutTemplateResponse.ts#L22-L24', 'indices.recovery.FileDetails': 'indices/recovery/types.ts#L50-L54', 'indices.recovery.RecoveryBytes': 'indices/recovery/types.ts#L38-L48', @@ -1607,7 +1607,7 @@ 'indices.recovery.RecoveryOrigin': 'indices/recovery/types.ts#L76-L89', 'indices.recovery.RecoveryStartStatus': 'indices/recovery/types.ts#L91-L96', 'indices.recovery.RecoveryStatus': 'indices/recovery/types.ts#L98-L100', -'indices.recovery.Request': 
'indices/recovery/IndicesRecoveryRequest.ts#L23-L51', +'indices.recovery.Request': 'indices/recovery/IndicesRecoveryRequest.ts#L23-L70', 'indices.recovery.Response': 'indices/recovery/IndicesRecoveryResponse.ts#L24-L27', 'indices.recovery.ShardRecovery': 'indices/recovery/types.ts#L118-L135', 'indices.recovery.TranslogStatus': 'indices/recovery/types.ts#L102-L109', @@ -1616,9 +1616,9 @@ 'indices.refresh.Response': 'indices/refresh/IndicesRefreshResponse.ts#L22-L24', 'indices.reload_search_analyzers.ReloadDetails': 'indices/reload_search_analyzers/types.ts#L27-L31', 'indices.reload_search_analyzers.ReloadResult': 'indices/reload_search_analyzers/types.ts#L22-L25', -'indices.reload_search_analyzers.Request': 'indices/reload_search_analyzers/ReloadSearchAnalyzersRequest.ts#L23-L36', +'indices.reload_search_analyzers.Request': 'indices/reload_search_analyzers/ReloadSearchAnalyzersRequest.ts#L23-L51', 'indices.reload_search_analyzers.Response': 'indices/reload_search_analyzers/ReloadSearchAnalyzersResponse.ts#L22-L24', -'indices.resolve_cluster.Request': 'indices/resolve_cluster/ResolveClusterRequest.ts#L23-L62', +'indices.resolve_cluster.Request': 'indices/resolve_cluster/ResolveClusterRequest.ts#L23-L76', 'indices.resolve_cluster.ResolveClusterInfo': 'indices/resolve_cluster/ResolveClusterResponse.ts#L29-L55', 'indices.resolve_cluster.Response': 'indices/resolve_cluster/ResolveClusterResponse.ts#L24-L27', 'indices.resolve_index.Request': 'indices/resolve_index/ResolveIndexRequest.ts#L23-L61', @@ -1630,13 +1630,13 @@ 'indices.rollover.Response': 'indices/rollover/IndicesRolloverResponse.ts#L22-L32', 'indices.rollover.RolloverConditions': 'indices/rollover/types.ts#L24-L40', 'indices.segments.IndexSegment': 'indices/segments/types.ts#L24-L26', -'indices.segments.Request': 'indices/segments/IndicesSegmentsRequest.ts#L23-L59', +'indices.segments.Request': 'indices/segments/IndicesSegmentsRequest.ts#L23-L61', 'indices.segments.Response': 
'indices/segments/IndicesSegmentsResponse.ts#L24-L29', 'indices.segments.Segment': 'indices/segments/types.ts#L28-L38', 'indices.segments.ShardSegmentRouting': 'indices/segments/types.ts#L40-L44', 'indices.segments.ShardsSegment': 'indices/segments/types.ts#L46-L51', 'indices.shard_stores.IndicesShardStores': 'indices/shard_stores/types.ts#L25-L27', -'indices.shard_stores.Request': 'indices/shard_stores/IndicesShardStoresRequest.ts#L24-L60', +'indices.shard_stores.Request': 'indices/shard_stores/IndicesShardStoresRequest.ts#L24-L71', 'indices.shard_stores.Response': 'indices/shard_stores/IndicesShardStoresResponse.ts#L24-L26', 'indices.shard_stores.ShardStore': 'indices/shard_stores/types.ts#L29-L36', 'indices.shard_stores.ShardStoreAllocation': 'indices/shard_stores/types.ts#L47-L51', @@ -1644,7 +1644,7 @@ 'indices.shard_stores.ShardStoreNode': 'indices/shard_stores/types.ts#L38-L45', 'indices.shard_stores.ShardStoreStatus': 'indices/shard_stores/types.ts#L62-L71', 'indices.shard_stores.ShardStoreWrapper': 'indices/shard_stores/types.ts#L58-L60', -'indices.shrink.Request': 'indices/shrink/IndicesShrinkRequest.ts#L27-L75', +'indices.shrink.Request': 'indices/shrink/IndicesShrinkRequest.ts#L27-L107', 'indices.shrink.Response': 'indices/shrink/IndicesShrinkResponse.ts#L22-L28', 'indices.simulate_index_template.Request': 'indices/simulate_index_template/IndicesSimulateIndexTemplateRequest.ts#L24-L50', 'indices.simulate_index_template.Response': 'indices/simulate_index_template/IndicesSimulateIndexTemplateResponse.ts#L25-L30', @@ -1652,13 +1652,13 @@ 'indices.simulate_template.Request': 'indices/simulate_template/IndicesSimulateTemplateRequest.ts#L27-L120', 'indices.simulate_template.Response': 'indices/simulate_template/IndicesSimulateTemplateResponse.ts#L26-L31', 'indices.simulate_template.Template': 'indices/simulate_template/IndicesSimulateTemplateResponse.ts#L33-L37', -'indices.split.Request': 'indices/split/IndicesSplitRequest.ts#L27-L74', 
+'indices.split.Request': 'indices/split/IndicesSplitRequest.ts#L27-L98', 'indices.split.Response': 'indices/split/IndicesSplitResponse.ts#L22-L28', 'indices.stats.IndexMetadataState': 'indices/stats/types.ts#L225-L232', 'indices.stats.IndexStats': 'indices/stats/types.ts#L52-L93', 'indices.stats.IndicesStats': 'indices/stats/types.ts#L95-L110', 'indices.stats.MappingStats': 'indices/stats/types.ts#L186-L190', -'indices.stats.Request': 'indices/stats/IndicesStatsRequest.ts#L29-L85', +'indices.stats.Request': 'indices/stats/IndicesStatsRequest.ts#L29-L94', 'indices.stats.Response': 'indices/stats/IndicesStatsResponse.ts#L24-L30', 'indices.stats.ShardCommit': 'indices/stats/types.ts#L112-L117', 'indices.stats.ShardFileSizeInfo': 'indices/stats/types.ts#L124-L131', @@ -1671,7 +1671,7 @@ 'indices.stats.ShardSequenceNumber': 'indices/stats/types.ts#L176-L180', 'indices.stats.ShardStats': 'indices/stats/types.ts#L192-L223', 'indices.stats.ShardsTotalStats': 'indices/stats/types.ts#L182-L184', -'indices.unfreeze.Request': 'indices/unfreeze/IndicesUnfreezeRequest.ts#L24-L75', +'indices.unfreeze.Request': 'indices/unfreeze/IndicesUnfreezeRequest.ts#L24-L77', 'indices.unfreeze.Response': 'indices/unfreeze/IndicesUnfreezeResponse.ts#L20-L25', 'indices.update_aliases.Action': 'indices/update_aliases/types.ts#L23-L39', 'indices.update_aliases.AddAction': 'indices/update_aliases/types.ts#L41-L95', @@ -1698,7 +1698,7 @@ 'inference.get.Response': 'inference/get/GetResponse.ts#L22-L26', 'inference.inference.Request': 'inference/inference/InferenceRequest.ts#L26-L66', 'inference.inference.Response': 'inference/inference/InferenceResponse.ts#L22-L24', -'inference.put.Request': 'inference/put/PutRequest.ts#L25-L44', +'inference.put.Request': 'inference/put/PutRequest.ts#L25-L54', 'inference.put.Response': 'inference/put/PutResponse.ts#L22-L24', 'ingest._types.AppendProcessor': 'ingest/_types/Processors.ts#L328-L343', 'ingest._types.AttachmentProcessor': 
'ingest/_types/Processors.ts#L345-L386', @@ -1791,41 +1791,42 @@ 'license._types.License': 'license/_types/License.ts#L42-L53', 'license._types.LicenseStatus': 'license/_types/License.ts#L35-L40', 'license._types.LicenseType': 'license/_types/License.ts#L23-L33', -'license.delete.Request': 'license/delete/DeleteLicenseRequest.ts#L22-L26', +'license.delete.Request': 'license/delete/DeleteLicenseRequest.ts#L22-L32', 'license.delete.Response': 'license/delete/DeleteLicenseResponse.ts#L22-L24', 'license.get.LicenseInformation': 'license/get/types.ts#L25-L38', -'license.get.Request': 'license/get/GetLicenseRequest.ts#L22-L45', +'license.get.Request': 'license/get/GetLicenseRequest.ts#L22-L47', 'license.get.Response': 'license/get/GetLicenseResponse.ts#L22-L24', -'license.get_basic_status.Request': 'license/get_basic_status/GetBasicLicenseStatusRequest.ts#L22-L26', +'license.get_basic_status.Request': 'license/get_basic_status/GetBasicLicenseStatusRequest.ts#L22-L28', 'license.get_basic_status.Response': 'license/get_basic_status/GetBasicLicenseStatusResponse.ts#L20-L22', -'license.get_trial_status.Request': 'license/get_trial_status/GetTrialLicenseStatusRequest.ts#L22-L26', +'license.get_trial_status.Request': 'license/get_trial_status/GetTrialLicenseStatusRequest.ts#L22-L28', 'license.get_trial_status.Response': 'license/get_trial_status/GetTrialLicenseStatusResponse.ts#L20-L22', 'license.post.Acknowledgement': 'license/post/types.ts#L20-L23', -'license.post.Request': 'license/post/PostLicenseRequest.ts#L23-L43', +'license.post.Request': 'license/post/PostLicenseRequest.ts#L23-L51', 'license.post.Response': 'license/post/PostLicenseResponse.ts#L23-L29', -'license.post_start_basic.Request': 'license/post_start_basic/StartBasicLicenseRequest.ts#L22-L32', +'license.post_start_basic.Request': 'license/post_start_basic/StartBasicLicenseRequest.ts#L22-L40', 'license.post_start_basic.Response': 'license/post_start_basic/StartBasicLicenseResponse.ts#L23-L31', 
-'license.post_start_trial.Request': 'license/post_start_trial/StartTrialLicenseRequest.ts#L22-L33', +'license.post_start_trial.Request': 'license/post_start_trial/StartTrialLicenseRequest.ts#L22-L39', 'license.post_start_trial.Response': 'license/post_start_trial/StartTrialLicenseResponse.ts#L22-L29', -'logstash._types.Pipeline': 'logstash/_types/Pipeline.ts#L60-L92', +'logstash._types.Pipeline': 'logstash/_types/Pipeline.ts#L60-L91', 'logstash._types.PipelineMetadata': 'logstash/_types/Pipeline.ts#L23-L26', 'logstash._types.PipelineSettings': 'logstash/_types/Pipeline.ts#L28-L59', -'logstash.delete_pipeline.Request': 'logstash/delete_pipeline/LogstashDeletePipelineRequest.ts#L23-L37', -'logstash.get_pipeline.Request': 'logstash/get_pipeline/LogstashGetPipelineRequest.ts#L23-L37', +'logstash.delete_pipeline.Request': 'logstash/delete_pipeline/LogstashDeletePipelineRequest.ts#L23-L40', +'logstash.get_pipeline.Request': 'logstash/get_pipeline/LogstashGetPipelineRequest.ts#L23-L40', 'logstash.get_pipeline.Response': 'logstash/get_pipeline/LogstashGetPipelineResponse.ts#L24-L27', -'logstash.put_pipeline.Request': 'logstash/put_pipeline/LogstashPutPipelineRequest.ts#L24-L39', +'logstash.put_pipeline.Request': 'logstash/put_pipeline/LogstashPutPipelineRequest.ts#L24-L44', 'migration.deprecations.Deprecation': 'migration/deprecations/types.ts#L32-L40', 'migration.deprecations.DeprecationLevel': 'migration/deprecations/types.ts#L23-L30', -'migration.deprecations.Request': 'migration/deprecations/DeprecationInfoRequest.ts#L23-L32', +'migration.deprecations.Request': 'migration/deprecations/DeprecationInfoRequest.ts#L23-L37', 'migration.deprecations.Response': 'migration/deprecations/DeprecationInfoResponse.ts#L23-L31', 'migration.get_feature_upgrade_status.MigrationFeature': 'migration/get_feature_upgrade_status/GetFeatureUpgradeStatusResponse.ts#L37-L42', 'migration.get_feature_upgrade_status.MigrationFeatureIndexInfo': 
'migration/get_feature_upgrade_status/GetFeatureUpgradeStatusResponse.ts#L44-L48', 'migration.get_feature_upgrade_status.MigrationStatus': 'migration/get_feature_upgrade_status/GetFeatureUpgradeStatusResponse.ts#L30-L35', -'migration.get_feature_upgrade_status.Request': 'migration/get_feature_upgrade_status/GetFeatureUpgradeStatusRequest.ts#L22-L27', +'migration.get_feature_upgrade_status.Request': 'migration/get_feature_upgrade_status/GetFeatureUpgradeStatusRequest.ts#L22-L34', 'migration.get_feature_upgrade_status.Response': 'migration/get_feature_upgrade_status/GetFeatureUpgradeStatusResponse.ts#L23-L28', 'migration.post_feature_upgrade.MigrationFeature': 'migration/post_feature_upgrade/PostFeatureUpgradeResponse.ts#L27-L29', -'migration.post_feature_upgrade.Request': 'migration/post_feature_upgrade/PostFeatureUpgradeRequest.ts#L22-L27', +'migration.post_feature_upgrade.Request': 'migration/post_feature_upgrade/PostFeatureUpgradeRequest.ts#L22-L35', 'migration.post_feature_upgrade.Response': 'migration/post_feature_upgrade/PostFeatureUpgradeResponse.ts#L20-L25', +'ml._types.AdaptiveAllocationsSettings': 'ml/_types/TrainedModel.ts#L109-L113', 'ml._types.AnalysisConfig': 'ml/_types/Analysis.ts#L29-L77', 'ml._types.AnalysisConfigRead': 'ml/_types/Analysis.ts#L79-L148', 'ml._types.AnalysisLimits': 'ml/_types/Analysis.ts#L161-L172', @@ -1840,46 +1841,47 @@ 'ml._types.CalendarEvent': 'ml/_types/CalendarEvent.ts#L24-L44', 'ml._types.CategorizationAnalyzer': 'ml/_types/Analysis.ts#L181-L182', 'ml._types.CategorizationAnalyzerDefinition': 'ml/_types/Analysis.ts#L184-L198', -'ml._types.CategorizationStatus': 'ml/_types/Model.ts#L83-L86', +'ml._types.CategorizationStatus': 'ml/_types/Model.ts#L84-L87', 'ml._types.Category': 'ml/_types/Category.ts#L23-L49', -'ml._types.ChunkingConfig': 'ml/_types/Datafeed.ts#L241-L254', -'ml._types.ChunkingMode': 'ml/_types/Datafeed.ts#L235-L239', +'ml._types.ChunkingConfig': 'ml/_types/Datafeed.ts#L251-L264', +'ml._types.ChunkingMode': 
'ml/_types/Datafeed.ts#L245-L249', 'ml._types.ClassificationInferenceOptions': 'ml/_types/inference.ts#L93-L108', +'ml._types.CommonTokenizationConfig': 'ml/_types/inference.ts#L133-L159', 'ml._types.ConditionOperator': 'ml/_types/Rule.ts#L74-L79', 'ml._types.DataCounts': 'ml/_types/Job.ts#L352-L372', 'ml._types.DataDescription': 'ml/_types/Job.ts#L374-L390', -'ml._types.Datafeed': 'ml/_types/Datafeed.ts#L36-L60', +'ml._types.Datafeed': 'ml/_types/Datafeed.ts#L37-L61', 'ml._types.DatafeedAuthorization': 'ml/_types/Authorization.ts#L31-L43', -'ml._types.DatafeedConfig': 'ml/_types/Datafeed.ts#L62-L119', -'ml._types.DatafeedRunningState': 'ml/_types/Datafeed.ts#L200-L214', -'ml._types.DatafeedState': 'ml/_types/Datafeed.ts#L135-L140', -'ml._types.DatafeedStats': 'ml/_types/Datafeed.ts#L142-L171', -'ml._types.DatafeedTimingStats': 'ml/_types/Datafeed.ts#L173-L198', -'ml._types.DataframeAnalysis': 'ml/_types/DataframeAnalytics.ts#L133-L212', -'ml._types.DataframeAnalysisAnalyzedFields': 'ml/_types/DataframeAnalytics.ts#L237-L243', -'ml._types.DataframeAnalysisClassification': 'ml/_types/DataframeAnalytics.ts#L226-L235', -'ml._types.DataframeAnalysisContainer': 'ml/_types/DataframeAnalytics.ts#L83-L100', -'ml._types.DataframeAnalysisFeatureProcessor': 'ml/_types/DataframeAnalytics.ts#L245-L257', -'ml._types.DataframeAnalysisFeatureProcessorFrequencyEncoding': 'ml/_types/DataframeAnalytics.ts#L259-L266', -'ml._types.DataframeAnalysisFeatureProcessorMultiEncoding': 'ml/_types/DataframeAnalytics.ts#L268-L271', -'ml._types.DataframeAnalysisFeatureProcessorNGramEncoding': 'ml/_types/DataframeAnalytics.ts#L273-L285', -'ml._types.DataframeAnalysisFeatureProcessorOneHotEncoding': 'ml/_types/DataframeAnalytics.ts#L287-L292', -'ml._types.DataframeAnalysisFeatureProcessorTargetMeanEncoding': 'ml/_types/DataframeAnalytics.ts#L294-L303', -'ml._types.DataframeAnalysisOutlierDetection': 'ml/_types/DataframeAnalytics.ts#L102-L131', -'ml._types.DataframeAnalysisRegression': 
'ml/_types/DataframeAnalytics.ts#L214-L224', -'ml._types.DataframeAnalytics': 'ml/_types/DataframeAnalytics.ts#L323-L343', +'ml._types.DatafeedConfig': 'ml/_types/Datafeed.ts#L63-L120', +'ml._types.DatafeedRunningState': 'ml/_types/Datafeed.ts#L210-L224', +'ml._types.DatafeedState': 'ml/_types/Datafeed.ts#L136-L141', +'ml._types.DatafeedStats': 'ml/_types/Datafeed.ts#L143-L172', +'ml._types.DatafeedTimingStats': 'ml/_types/Datafeed.ts#L174-L202', +'ml._types.DataframeAnalysis': 'ml/_types/DataframeAnalytics.ts#L134-L213', +'ml._types.DataframeAnalysisAnalyzedFields': 'ml/_types/DataframeAnalytics.ts#L238-L244', +'ml._types.DataframeAnalysisClassification': 'ml/_types/DataframeAnalytics.ts#L227-L236', +'ml._types.DataframeAnalysisContainer': 'ml/_types/DataframeAnalytics.ts#L84-L101', +'ml._types.DataframeAnalysisFeatureProcessor': 'ml/_types/DataframeAnalytics.ts#L246-L258', +'ml._types.DataframeAnalysisFeatureProcessorFrequencyEncoding': 'ml/_types/DataframeAnalytics.ts#L260-L267', +'ml._types.DataframeAnalysisFeatureProcessorMultiEncoding': 'ml/_types/DataframeAnalytics.ts#L269-L272', +'ml._types.DataframeAnalysisFeatureProcessorNGramEncoding': 'ml/_types/DataframeAnalytics.ts#L274-L286', +'ml._types.DataframeAnalysisFeatureProcessorOneHotEncoding': 'ml/_types/DataframeAnalytics.ts#L288-L293', +'ml._types.DataframeAnalysisFeatureProcessorTargetMeanEncoding': 'ml/_types/DataframeAnalytics.ts#L295-L304', +'ml._types.DataframeAnalysisOutlierDetection': 'ml/_types/DataframeAnalytics.ts#L103-L132', +'ml._types.DataframeAnalysisRegression': 'ml/_types/DataframeAnalytics.ts#L215-L225', +'ml._types.DataframeAnalytics': 'ml/_types/DataframeAnalytics.ts#L325-L345', 'ml._types.DataframeAnalyticsAuthorization': 'ml/_types/Authorization.ts#L45-L57', -'ml._types.DataframeAnalyticsDestination': 'ml/_types/DataframeAnalytics.ts#L76-L81', -'ml._types.DataframeAnalyticsFieldSelection': 'ml/_types/DataframeAnalytics.ts#L54-L67', -'ml._types.DataframeAnalyticsMemoryEstimation': 
'ml/_types/DataframeAnalytics.ts#L69-L74', -'ml._types.DataframeAnalyticsSource': 'ml/_types/DataframeAnalytics.ts#L38-L52', -'ml._types.DataframeAnalyticsStatsContainer': 'ml/_types/DataframeAnalytics.ts#L372-L380', -'ml._types.DataframeAnalyticsStatsDataCounts': 'ml/_types/DataframeAnalytics.ts#L363-L370', -'ml._types.DataframeAnalyticsStatsHyperparameters': 'ml/_types/DataframeAnalytics.ts#L382-L401', -'ml._types.DataframeAnalyticsStatsMemoryUsage': 'ml/_types/DataframeAnalytics.ts#L352-L361', -'ml._types.DataframeAnalyticsStatsOutlierDetection': 'ml/_types/DataframeAnalytics.ts#L403-L416', -'ml._types.DataframeAnalyticsStatsProgress': 'ml/_types/DataframeAnalytics.ts#L345-L350', -'ml._types.DataframeAnalyticsSummary': 'ml/_types/DataframeAnalytics.ts#L305-L321', +'ml._types.DataframeAnalyticsDestination': 'ml/_types/DataframeAnalytics.ts#L77-L82', +'ml._types.DataframeAnalyticsFieldSelection': 'ml/_types/DataframeAnalytics.ts#L55-L68', +'ml._types.DataframeAnalyticsMemoryEstimation': 'ml/_types/DataframeAnalytics.ts#L70-L75', +'ml._types.DataframeAnalyticsSource': 'ml/_types/DataframeAnalytics.ts#L39-L53', +'ml._types.DataframeAnalyticsStatsContainer': 'ml/_types/DataframeAnalytics.ts#L374-L382', +'ml._types.DataframeAnalyticsStatsDataCounts': 'ml/_types/DataframeAnalytics.ts#L365-L372', +'ml._types.DataframeAnalyticsStatsHyperparameters': 'ml/_types/DataframeAnalytics.ts#L384-L403', +'ml._types.DataframeAnalyticsStatsMemoryUsage': 'ml/_types/DataframeAnalytics.ts#L354-L363', +'ml._types.DataframeAnalyticsStatsOutlierDetection': 'ml/_types/DataframeAnalytics.ts#L405-L418', +'ml._types.DataframeAnalyticsStatsProgress': 'ml/_types/DataframeAnalytics.ts#L347-L352', +'ml._types.DataframeAnalyticsSummary': 'ml/_types/DataframeAnalytics.ts#L306-L323', 'ml._types.DataframeEvaluationClassification': 'ml/_types/DataframeEvaluation.ts#L35-L44', 'ml._types.DataframeEvaluationClassificationMetrics': 'ml/_types/DataframeEvaluation.ts#L73-L78', 
'ml._types.DataframeEvaluationClassificationMetricsAucRoc': 'ml/_types/DataframeEvaluation.ts#L85-L90', @@ -1892,26 +1894,29 @@ 'ml._types.DataframeEvaluationRegressionMetricsHuber': 'ml/_types/DataframeEvaluation.ts#L117-L120', 'ml._types.DataframeEvaluationRegressionMetricsMsle': 'ml/_types/DataframeEvaluation.ts#L112-L115', 'ml._types.DataframeState': 'ml/_types/Dataframe.ts#L20-L26', -'ml._types.DelayedDataCheckConfig': 'ml/_types/Datafeed.ts#L121-L132', -'ml._types.DeploymentAllocationState': 'ml/_types/TrainedModel.ts#L273-L286', -'ml._types.DeploymentAssignmentState': 'ml/_types/TrainedModel.ts#L288-L305', +'ml._types.DelayedDataCheckConfig': 'ml/_types/Datafeed.ts#L122-L133', +'ml._types.DeploymentAllocationState': 'ml/_types/TrainedModel.ts#L318-L331', +'ml._types.DeploymentAssignmentState': 'ml/_types/TrainedModel.ts#L333-L350', 'ml._types.DetectionRule': 'ml/_types/Rule.ts#L25-L39', 'ml._types.Detector': 'ml/_types/Detector.ts#L25-L67', 'ml._types.DetectorRead': 'ml/_types/Detector.ts#L69-L125', -'ml._types.DiscoveryNode': 'ml/_types/DiscoveryNode.ts#L24-L30', -'ml._types.ExcludeFrequent': 'ml/_types/Detector.ts#L127-L132', -'ml._types.FillMaskInferenceOptions': 'ml/_types/inference.ts#L266-L280', -'ml._types.FillMaskInferenceUpdateOptions': 'ml/_types/inference.ts#L411-L418', +'ml._types.DetectorUpdate': 'ml/_types/Detector.ts#L127-L143', +'ml._types.DiscoveryNodeCompact': 'ml/_types/DiscoveryNode.ts#L39-L48', +'ml._types.DiscoveryNodeContent': 'ml/_types/DiscoveryNode.ts#L27-L37', +'ml._types.ExcludeFrequent': 'ml/_types/Detector.ts#L145-L150', +'ml._types.ExponentialAverageCalculationContext': 'ml/_types/Datafeed.ts#L204-L208', +'ml._types.FillMaskInferenceOptions': 'ml/_types/inference.ts#L253-L268', +'ml._types.FillMaskInferenceUpdateOptions': 'ml/_types/inference.ts#L399-L406', 'ml._types.Filter': 'ml/_types/Filter.ts#L22-L29', 'ml._types.FilterRef': 'ml/_types/Filter.ts#L31-L41', 'ml._types.FilterType': 'ml/_types/Filter.ts#L43-L46', 
'ml._types.GeoResults': 'ml/_types/Anomaly.ts#L145-L154', -'ml._types.Hyperparameter': 'ml/_types/TrainedModel.ts#L216-L230', -'ml._types.Hyperparameters': 'ml/_types/DataframeAnalytics.ts#L418-L524', +'ml._types.Hyperparameter': 'ml/_types/TrainedModel.ts#L261-L275', +'ml._types.Hyperparameters': 'ml/_types/DataframeAnalytics.ts#L420-L526', 'ml._types.Include': 'ml/_types/Include.ts#L20-L47', 'ml._types.InferenceConfigCreateContainer': 'ml/_types/inference.ts#L23-L80', -'ml._types.InferenceConfigUpdateContainer': 'ml/_types/inference.ts#L296-L318', -'ml._types.InferenceResponseResult': 'ml/_types/inference.ts#L459-L507', +'ml._types.InferenceConfigUpdateContainer': 'ml/_types/inference.ts#L284-L306', +'ml._types.InferenceResponseResult': 'ml/_types/inference.ts#L447-L495', 'ml._types.Influence': 'ml/_types/Anomaly.ts#L140-L143', 'ml._types.Influencer': 'ml/_types/Influencer.ts#L24-L76', 'ml._types.Job': 'ml/_types/Job.ts#L61-L180', @@ -1923,69 +1928,70 @@ 'ml._types.JobStatistics': 'ml/_types/Job.ts#L54-L59', 'ml._types.JobStats': 'ml/_types/Job.ts#L284-L330', 'ml._types.JobTimingStats': 'ml/_types/Job.ts#L332-L341', -'ml._types.MemoryStatus': 'ml/_types/Model.ts#L88-L92', +'ml._types.MemoryStatus': 'ml/_types/Model.ts#L89-L93', +'ml._types.ModelPackageConfig': 'ml/_types/TrainedModel.ts#L244-L259', 'ml._types.ModelPlotConfig': 'ml/_types/ModelPlot.ts#L23-L42', -'ml._types.ModelSizeStats': 'ml/_types/Model.ts#L59-L81', +'ml._types.ModelSizeStats': 'ml/_types/Model.ts#L59-L82', 'ml._types.ModelSnapshot': 'ml/_types/Model.ts#L25-L46', 'ml._types.ModelSnapshotUpgrade': 'ml/_types/Model.ts#L48-L57', -'ml._types.NerInferenceOptions': 'ml/_types/inference.ts#L255-L264', -'ml._types.NerInferenceUpdateOptions': 'ml/_types/inference.ts#L404-L409', -'ml._types.NlpBertTokenizationConfig': 'ml/_types/inference.ts#L131-L158', -'ml._types.NlpRobertaTokenizationConfig': 'ml/_types/inference.ts#L160-L187', -'ml._types.NlpTokenizationUpdateOptions': 
'ml/_types/inference.ts#L356-L361', -'ml._types.OutlierDetectionParameters': 'ml/_types/DataframeAnalytics.ts#L526-L560', +'ml._types.NerInferenceOptions': 'ml/_types/inference.ts#L242-L251', +'ml._types.NerInferenceUpdateOptions': 'ml/_types/inference.ts#L392-L397', +'ml._types.NlpBertTokenizationConfig': 'ml/_types/inference.ts#L161-L162', +'ml._types.NlpRobertaTokenizationConfig': 'ml/_types/inference.ts#L164-L171', +'ml._types.NlpTokenizationUpdateOptions': 'ml/_types/inference.ts#L344-L349', +'ml._types.OutlierDetectionParameters': 'ml/_types/DataframeAnalytics.ts#L528-L562', 'ml._types.OverallBucket': 'ml/_types/Bucket.ts#L129-L144', 'ml._types.OverallBucketJob': 'ml/_types/Bucket.ts#L145-L148', 'ml._types.Page': 'ml/_types/Page.ts#L22-L33', -'ml._types.PassThroughInferenceOptions': 'ml/_types/inference.ts#L224-L231', -'ml._types.PassThroughInferenceUpdateOptions': 'ml/_types/inference.ts#L385-L390', +'ml._types.PassThroughInferenceOptions': 'ml/_types/inference.ts#L208-L215', +'ml._types.PassThroughInferenceUpdateOptions': 'ml/_types/inference.ts#L373-L378', 'ml._types.PerPartitionCategorization': 'ml/_types/Analysis.ts#L150-L159', -'ml._types.QuestionAnsweringInferenceOptions': 'ml/_types/inference.ts#L282-L292', -'ml._types.QuestionAnsweringInferenceUpdateOptions': 'ml/_types/inference.ts#L420-L431', +'ml._types.QuestionAnsweringInferenceOptions': 'ml/_types/inference.ts#L270-L280', +'ml._types.QuestionAnsweringInferenceUpdateOptions': 'ml/_types/inference.ts#L408-L419', 'ml._types.RegressionInferenceOptions': 'ml/_types/inference.ts#L82-L91', -'ml._types.RoutingState': 'ml/_types/TrainedModel.ts#L347-L368', +'ml._types.RoutingState': 'ml/_types/TrainedModel.ts#L395-L416', 'ml._types.RuleAction': 'ml/_types/Rule.ts#L41-L50', 'ml._types.RuleCondition': 'ml/_types/Rule.ts#L52-L65', -'ml._types.RunningStateSearchInterval': 'ml/_types/Datafeed.ts#L216-L233', -'ml._types.SnapshotUpgradeState': 'ml/_types/Model.ts#L94-L99', 
-'ml._types.TextClassificationInferenceOptions': 'ml/_types/inference.ts#L189-L199', -'ml._types.TextClassificationInferenceUpdateOptions': 'ml/_types/inference.ts#L363-L372', -'ml._types.TextEmbeddingInferenceOptions': 'ml/_types/inference.ts#L237-L245', -'ml._types.TextEmbeddingInferenceUpdateOptions': 'ml/_types/inference.ts#L392-L396', -'ml._types.TextExpansionInferenceOptions': 'ml/_types/inference.ts#L247-L253', -'ml._types.TextExpansionInferenceUpdateOptions': 'ml/_types/inference.ts#L398-L402', -'ml._types.TimingStats': 'ml/_types/DataframeAnalytics.ts#L562-L567', -'ml._types.TokenizationConfigContainer': 'ml/_types/inference.ts#L110-L129', -'ml._types.TokenizationTruncate': 'ml/_types/inference.ts#L350-L354', -'ml._types.TopClassEntry': 'ml/_types/inference.ts#L440-L444', -'ml._types.TotalFeatureImportance': 'ml/_types/TrainedModel.ts#L232-L239', -'ml._types.TotalFeatureImportanceClass': 'ml/_types/TrainedModel.ts#L241-L246', -'ml._types.TotalFeatureImportanceStatistics': 'ml/_types/TrainedModel.ts#L248-L255', -'ml._types.TrainedModelAssignment': 'ml/_types/TrainedModel.ts#L399-L414', -'ml._types.TrainedModelAssignmentRoutingTable': 'ml/_types/TrainedModel.ts#L370-L388', -'ml._types.TrainedModelAssignmentTaskParameters': 'ml/_types/TrainedModel.ts#L312-L345', -'ml._types.TrainedModelConfig': 'ml/_types/TrainedModel.ts#L164-L199', -'ml._types.TrainedModelConfigInput': 'ml/_types/TrainedModel.ts#L201-L204', -'ml._types.TrainedModelConfigMetadata': 'ml/_types/TrainedModel.ts#L206-L214', -'ml._types.TrainedModelDeploymentAllocationStatus': 'ml/_types/TrainedModel.ts#L390-L397', -'ml._types.TrainedModelDeploymentNodesStats': 'ml/_types/TrainedModel.ts#L132-L162', -'ml._types.TrainedModelDeploymentStats': 'ml/_types/TrainedModel.ts#L61-L101', -'ml._types.TrainedModelEntities': 'ml/_types/inference.ts#L433-L439', -'ml._types.TrainedModelInferenceClassImportance': 'ml/_types/inference.ts#L446-L449', -'ml._types.TrainedModelInferenceFeatureImportance': 
'ml/_types/inference.ts#L451-L455', -'ml._types.TrainedModelInferenceStats': 'ml/_types/TrainedModel.ts#L103-L123', -'ml._types.TrainedModelLocation': 'ml/_types/TrainedModel.ts#L416-L418', -'ml._types.TrainedModelLocationIndex': 'ml/_types/TrainedModel.ts#L420-L422', -'ml._types.TrainedModelPrefixStrings': 'ml/_types/TrainedModel.ts#L424-L433', -'ml._types.TrainedModelSizeStats': 'ml/_types/TrainedModel.ts#L125-L130', -'ml._types.TrainedModelStats': 'ml/_types/TrainedModel.ts#L41-L59', -'ml._types.TrainedModelType': 'ml/_types/TrainedModel.ts#L257-L271', -'ml._types.TrainingPriority': 'ml/_types/TrainedModel.ts#L307-L310', +'ml._types.RunningStateSearchInterval': 'ml/_types/Datafeed.ts#L226-L243', +'ml._types.SnapshotUpgradeState': 'ml/_types/Model.ts#L95-L100', +'ml._types.TextClassificationInferenceOptions': 'ml/_types/inference.ts#L173-L183', +'ml._types.TextClassificationInferenceUpdateOptions': 'ml/_types/inference.ts#L351-L360', +'ml._types.TextEmbeddingInferenceOptions': 'ml/_types/inference.ts#L221-L231', +'ml._types.TextEmbeddingInferenceUpdateOptions': 'ml/_types/inference.ts#L380-L384', +'ml._types.TextExpansionInferenceOptions': 'ml/_types/inference.ts#L233-L240', +'ml._types.TextExpansionInferenceUpdateOptions': 'ml/_types/inference.ts#L386-L390', +'ml._types.TimingStats': 'ml/_types/DataframeAnalytics.ts#L564-L569', +'ml._types.TokenizationConfigContainer': 'ml/_types/inference.ts#L110-L131', +'ml._types.TokenizationTruncate': 'ml/_types/inference.ts#L338-L342', +'ml._types.TopClassEntry': 'ml/_types/inference.ts#L428-L432', +'ml._types.TotalFeatureImportance': 'ml/_types/TrainedModel.ts#L277-L284', +'ml._types.TotalFeatureImportanceClass': 'ml/_types/TrainedModel.ts#L286-L291', +'ml._types.TotalFeatureImportanceStatistics': 'ml/_types/TrainedModel.ts#L293-L300', +'ml._types.TrainedModelAssignment': 'ml/_types/TrainedModel.ts#L447-L464', +'ml._types.TrainedModelAssignmentRoutingTable': 'ml/_types/TrainedModel.ts#L418-L436', 
+'ml._types.TrainedModelAssignmentTaskParameters': 'ml/_types/TrainedModel.ts#L357-L393', +'ml._types.TrainedModelConfig': 'ml/_types/TrainedModel.ts#L191-L227', +'ml._types.TrainedModelConfigInput': 'ml/_types/TrainedModel.ts#L229-L232', +'ml._types.TrainedModelConfigMetadata': 'ml/_types/TrainedModel.ts#L234-L242', +'ml._types.TrainedModelDeploymentAllocationStatus': 'ml/_types/TrainedModel.ts#L438-L445', +'ml._types.TrainedModelDeploymentNodesStats': 'ml/_types/TrainedModel.ts#L144-L189', +'ml._types.TrainedModelDeploymentStats': 'ml/_types/TrainedModel.ts#L62-L107', +'ml._types.TrainedModelEntities': 'ml/_types/inference.ts#L421-L427', +'ml._types.TrainedModelInferenceClassImportance': 'ml/_types/inference.ts#L434-L437', +'ml._types.TrainedModelInferenceFeatureImportance': 'ml/_types/inference.ts#L439-L443', +'ml._types.TrainedModelInferenceStats': 'ml/_types/TrainedModel.ts#L115-L135', +'ml._types.TrainedModelLocation': 'ml/_types/TrainedModel.ts#L466-L468', +'ml._types.TrainedModelLocationIndex': 'ml/_types/TrainedModel.ts#L470-L472', +'ml._types.TrainedModelPrefixStrings': 'ml/_types/TrainedModel.ts#L474-L483', +'ml._types.TrainedModelSizeStats': 'ml/_types/TrainedModel.ts#L137-L142', +'ml._types.TrainedModelStats': 'ml/_types/TrainedModel.ts#L42-L60', +'ml._types.TrainedModelType': 'ml/_types/TrainedModel.ts#L302-L316', +'ml._types.TrainingPriority': 'ml/_types/TrainedModel.ts#L352-L355', 'ml._types.TransformAuthorization': 'ml/_types/Authorization.ts#L59-L71', -'ml._types.ValidationLoss': 'ml/_types/DataframeAnalytics.ts#L569-L574', -'ml._types.Vocabulary': 'ml/_types/inference.ts#L233-L235', -'ml._types.ZeroShotClassificationInferenceOptions': 'ml/_types/inference.ts#L201-L222', -'ml._types.ZeroShotClassificationInferenceUpdateOptions': 'ml/_types/inference.ts#L374-L383', +'ml._types.ValidationLoss': 'ml/_types/DataframeAnalytics.ts#L571-L576', +'ml._types.Vocabulary': 'ml/_types/inference.ts#L217-L219', 
+'ml._types.ZeroShotClassificationInferenceOptions': 'ml/_types/inference.ts#L185-L206', +'ml._types.ZeroShotClassificationInferenceUpdateOptions': 'ml/_types/inference.ts#L362-L371', 'ml.clear_trained_model_deployment_cache.Request': 'ml/clear_trained_model_deployment_cache/MlClearTrainedModelDeploymentCacheRequest.ts#L23-L42', 'ml.clear_trained_model_deployment_cache.Response': 'ml/clear_trained_model_deployment_cache/MlClearTrainedModelDeploymentCacheResponse.ts#L20-L24', 'ml.close_job.Request': 'ml/close_job/MlCloseJobRequest.ts#L24-L78', @@ -2077,16 +2083,16 @@ 'ml.get_overall_buckets.Response': 'ml/get_overall_buckets/MlGetOverallBucketsResponse.ts#L23-L29', 'ml.get_records.Request': 'ml/get_records/MlGetAnomalyRecordsRequest.ts#L26-L128', 'ml.get_records.Response': 'ml/get_records/MlGetAnomalyRecordsResponse.ts#L23-L28', -'ml.get_trained_models.Request': 'ml/get_trained_models/MlGetTrainedModelRequest.ts#L25-L92', +'ml.get_trained_models.Request': 'ml/get_trained_models/MlGetTrainedModelRequest.ts#L25-L99', 'ml.get_trained_models.Response': 'ml/get_trained_models/MlGetTrainedModelResponse.ts#L23-L34', 'ml.get_trained_models_stats.Request': 'ml/get_trained_models_stats/MlGetTrainedModelStatsRequest.ts#L24-L66', 'ml.get_trained_models_stats.Response': 'ml/get_trained_models_stats/MlGetTrainedModelStatsResponse.ts#L23-L33', 'ml.infer_trained_model.Request': 'ml/infer_trained_model/MlInferTrainedModelRequest.ts#L27-L60', 'ml.infer_trained_model.Response': 'ml/infer_trained_model/MlInferTrainedModelResponse.ts#L22-L26', -'ml.info.AnomalyDetectors': 'ml/info/types.ts#L44-L50', -'ml.info.Datafeeds': 'ml/info/types.ts#L40-L42', +'ml.info.AnomalyDetectors': 'ml/info/types.ts#L46-L52', +'ml.info.Datafeeds': 'ml/info/types.ts#L42-L44', 'ml.info.Defaults': 'ml/info/types.ts#L24-L27', -'ml.info.Limits': 'ml/info/types.ts#L34-L38', +'ml.info.Limits': 'ml/info/types.ts#L34-L40', 'ml.info.NativeCode': 'ml/info/types.ts#L29-L32', 'ml.info.Request': 
'ml/info/MlInfoRequest.ts#L22-L36', 'ml.info.Response': 'ml/info/MlInfoResponse.ts#L22-L29', @@ -2095,7 +2101,7 @@ 'ml.post_calendar_events.Request': 'ml/post_calendar_events/MlPostCalendarEventsRequest.ts#L24-L41', 'ml.post_calendar_events.Response': 'ml/post_calendar_events/MlPostCalendarEventsResponse.ts#L22-L24', 'ml.post_data.Request': 'ml/post_data/MlPostJobDataRequest.ts#L24-L69', -'ml.post_data.Response': 'ml/post_data/MlPostJobDataResponse.ts#L23-L41', +'ml.post_data.Response': 'ml/post_data/MlPostJobDataResponse.ts#L24-L45', 'ml.preview_data_frame_analytics.DataframePreviewConfig': 'ml/preview_data_frame_analytics/types.ts#L27-L33', 'ml.preview_data_frame_analytics.Request': 'ml/preview_data_frame_analytics/MlPreviewDataFrameAnalyticsRequest.ts#L24-L49', 'ml.preview_data_frame_analytics.Response': 'ml/preview_data_frame_analytics/MlPreviewDataFrameAnalyticsResponse.ts#L23-L28', @@ -2105,13 +2111,13 @@ 'ml.put_calendar.Response': 'ml/put_calendar/MlPutCalendarResponse.ts#L22-L31', 'ml.put_calendar_job.Request': 'ml/put_calendar_job/MlPutCalendarJobRequest.ts#L23-L38', 'ml.put_calendar_job.Response': 'ml/put_calendar_job/MlPutCalendarJobResponse.ts#L22-L31', -'ml.put_data_frame_analytics.Request': 'ml/put_data_frame_analytics/MlPutDataFrameAnalyticsRequest.ts#L30-L142', -'ml.put_data_frame_analytics.Response': 'ml/put_data_frame_analytics/MlPutDataFrameAnalyticsResponse.ts#L31-L46', -'ml.put_datafeed.Request': 'ml/put_datafeed/MlPutDatafeedRequest.ts#L37-L173', +'ml.put_data_frame_analytics.Request': 'ml/put_data_frame_analytics/MlPutDataFrameAnalyticsRequest.ts#L30-L144', +'ml.put_data_frame_analytics.Response': 'ml/put_data_frame_analytics/MlPutDataFrameAnalyticsResponse.ts#L31-L47', +'ml.put_datafeed.Request': 'ml/put_datafeed/MlPutDatafeedRequest.ts#L37-L175', 'ml.put_datafeed.Response': 'ml/put_datafeed/MlPutDatafeedResponse.ts#L31-L49', 'ml.put_filter.Request': 'ml/put_filter/MlPutFilterRequest.ts#L23-L51', 'ml.put_filter.Response': 
'ml/put_filter/MlPutFilterResponse.ts#L22-L28', -'ml.put_job.Request': 'ml/put_job/MlPutJobRequest.ts#L30-L113', +'ml.put_job.Request': 'ml/put_job/MlPutJobRequest.ts#L30-L148', 'ml.put_job.Response': 'ml/put_job/MlPutJobResponse.ts#L29-L52', 'ml.put_trained_model.AggregateOutput': 'ml/put_trained_model/types.ts#L101-L106', 'ml.put_trained_model.Definition': 'ml/put_trained_model/types.ts#L24-L29', @@ -2167,9 +2173,9 @@ 'ml.upgrade_job_snapshot.Response': 'ml/upgrade_job_snapshot/MlUpgradeJobSnapshotResponse.ts#L22-L31', 'ml.validate.Request': 'ml/validate/MlValidateJobRequest.ts#L27-L44', 'ml.validate.Response': 'ml/validate/MlValidateJobResponse.ts#L22-L24', -'ml.validate_detector.Request': 'ml/validate_detector/MlValidateDetectorRequest.ts#L23-L31', +'ml.validate_detector.Request': 'ml/validate_detector/MlValidateDetectorRequest.ts#L23-L33', 'ml.validate_detector.Response': 'ml/validate_detector/MlValidateDetectorResponse.ts#L22-L24', -'monitoring.bulk.Request': 'monitoring/bulk/BulkMonitoringRequest.ts#L24-L59', +'monitoring.bulk.Request': 'monitoring/bulk/BulkMonitoringRequest.ts#L24-L61', 'monitoring.bulk.Response': 'monitoring/bulk/BulkMonitoringResponse.ts#L23-L32', 'nodes._types.AdaptiveSelection': 'nodes/_types/Stats.ts#L439-L468', 'nodes._types.Breaker': 'nodes/_types/Stats.ts#L470-L495', @@ -2232,13 +2238,13 @@ 'nodes._types.TimeHttpHistogram': 'nodes/_types/Stats.ts#L708-L712', 'nodes._types.Transport': 'nodes/_types/Stats.ts#L1118-L1161', 'nodes._types.TransportHistogram': 'nodes/_types/Stats.ts#L1163-L1177', -'nodes.clear_repositories_metering_archive.Request': 'nodes/clear_repositories_metering_archive/ClearRepositoriesMeteringArchiveRequest.ts#L24-L44', +'nodes.clear_repositories_metering_archive.Request': 'nodes/clear_repositories_metering_archive/ClearRepositoriesMeteringArchiveRequest.ts#L24-L45', 'nodes.clear_repositories_metering_archive.Response': 'nodes/clear_repositories_metering_archive/ClearRepositoriesMeteringArchiveResponse.ts#L36-L38', 
'nodes.clear_repositories_metering_archive.ResponseBase': 'nodes/clear_repositories_metering_archive/ClearRepositoriesMeteringArchiveResponse.ts#L25-L34', 'nodes.get_repositories_metering_info.Request': 'nodes/get_repositories_metering_info/GetRepositoriesMeteringInfoRequest.ts#L23-L42', 'nodes.get_repositories_metering_info.Response': 'nodes/get_repositories_metering_info/GetRepositoriesMeteringInfoResponse.ts#L36-L38', 'nodes.get_repositories_metering_info.ResponseBase': 'nodes/get_repositories_metering_info/GetRepositoriesMeteringInfoResponse.ts#L25-L34', -'nodes.hot_threads.Request': 'nodes/hot_threads/NodesHotThreadsRequest.ts#L25-L84', +'nodes.hot_threads.Request': 'nodes/hot_threads/NodesHotThreadsRequest.ts#L25-L85', 'nodes.hot_threads.Response': 'nodes/hot_threads/NodesHotThreadsResponse.ts#L20-L22', 'nodes.info.DeprecationIndexing': 'nodes/info/types.ts#L144-L146', 'nodes.info.NodeInfo': 'nodes/info/types.ts#L31-L67', @@ -2289,13 +2295,13 @@ 'nodes.info.NodeOperatingSystemInfo': 'nodes/info/types.ts#L380-L397', 'nodes.info.NodeProcessInfo': 'nodes/info/types.ts#L399-L406', 'nodes.info.NodeThreadPoolInfo': 'nodes/info/types.ts#L302-L309', -'nodes.info.Request': 'nodes/info/NodesInfoRequest.ts#L24-L56', +'nodes.info.Request': 'nodes/info/NodesInfoRequest.ts#L24-L57', 'nodes.info.Response': 'nodes/info/NodesInfoResponse.ts#L30-L32', 'nodes.info.ResponseBase': 'nodes/info/NodesInfoResponse.ts#L25-L28', -'nodes.reload_secure_settings.Request': 'nodes/reload_secure_settings/ReloadSecureSettingsRequest.ts#L24-L51', +'nodes.reload_secure_settings.Request': 'nodes/reload_secure_settings/ReloadSecureSettingsRequest.ts#L24-L59', 'nodes.reload_secure_settings.Response': 'nodes/reload_secure_settings/ReloadSecureSettingsResponse.ts#L30-L32', 'nodes.reload_secure_settings.ResponseBase': 'nodes/reload_secure_settings/ReloadSecureSettingsResponse.ts#L25-L28', -'nodes.stats.Request': 'nodes/stats/NodesStatsRequest.ts#L24-L76', +'nodes.stats.Request': 
'nodes/stats/NodesStatsRequest.ts#L24-L78', 'nodes.stats.Response': 'nodes/stats/NodesStatsResponse.ts#L30-L32', 'nodes.stats.ResponseBase': 'nodes/stats/NodesStatsResponse.ts#L25-L28', 'nodes.usage.NodeUsage': 'nodes/usage/types.ts#L25-L30', @@ -2332,32 +2338,32 @@ 'rollup._types.HistogramGrouping': 'rollup/_types/Groupings.ts#L84-L97', 'rollup._types.Metric': 'rollup/_types/Metric.ts#L22-L28', 'rollup._types.TermsGrouping': 'rollup/_types/Groupings.ts#L75-L82', -'rollup.delete_job.Request': 'rollup/delete_job/DeleteRollupJobRequest.ts#L23-L35', +'rollup.delete_job.Request': 'rollup/delete_job/DeleteRollupJobRequest.ts#L23-L59', 'rollup.delete_job.Response': 'rollup/delete_job/DeleteRollupJobResponse.ts#L22-L27', 'rollup.get_jobs.IndexingJobState': 'rollup/get_jobs/types.ts#L66-L72', -'rollup.get_jobs.Request': 'rollup/get_jobs/GetRollupJobRequest.ts#L23-L36', +'rollup.get_jobs.Request': 'rollup/get_jobs/GetRollupJobRequest.ts#L23-L42', 'rollup.get_jobs.Response': 'rollup/get_jobs/GetRollupJobResponse.ts#L22-L24', 'rollup.get_jobs.RollupJob': 'rollup/get_jobs/types.ts#L28-L32', 'rollup.get_jobs.RollupJobConfiguration': 'rollup/get_jobs/types.ts#L34-L43', 'rollup.get_jobs.RollupJobStats': 'rollup/get_jobs/types.ts#L45-L58', 'rollup.get_jobs.RollupJobStatus': 'rollup/get_jobs/types.ts#L60-L64', -'rollup.get_rollup_caps.Request': 'rollup/get_rollup_caps/GetRollupCapabilitiesRequest.ts#L23-L36', +'rollup.get_rollup_caps.Request': 'rollup/get_rollup_caps/GetRollupCapabilitiesRequest.ts#L23-L45', 'rollup.get_rollup_caps.Response': 'rollup/get_rollup_caps/GetRollupCapabilitiesResponse.ts#L24-L27', 'rollup.get_rollup_caps.RollupCapabilities': 'rollup/get_rollup_caps/types.ts#L24-L26', 'rollup.get_rollup_caps.RollupCapabilitySummary': 'rollup/get_rollup_caps/types.ts#L28-L33', 'rollup.get_rollup_caps.RollupFieldSummary': 'rollup/get_rollup_caps/types.ts#L35-L39', 'rollup.get_rollup_index_caps.IndexCapabilities': 'rollup/get_rollup_index_caps/types.ts#L24-L26', 
-'rollup.get_rollup_index_caps.Request': 'rollup/get_rollup_index_caps/GetRollupIndexCapabilitiesRequest.ts#L23-L36', +'rollup.get_rollup_index_caps.Request': 'rollup/get_rollup_index_caps/GetRollupIndexCapabilitiesRequest.ts#L23-L42', 'rollup.get_rollup_index_caps.Response': 'rollup/get_rollup_index_caps/GetRollupIndexCapabilitiesResponse.ts#L24-L27', 'rollup.get_rollup_index_caps.RollupJobSummary': 'rollup/get_rollup_index_caps/types.ts#L28-L33', 'rollup.get_rollup_index_caps.RollupJobSummaryField': 'rollup/get_rollup_index_caps/types.ts#L35-L39', -'rollup.put_job.Request': 'rollup/put_job/CreateRollupJobRequest.ts#L27-L89', +'rollup.put_job.Request': 'rollup/put_job/CreateRollupJobRequest.ts#L27-L97', 'rollup.put_job.Response': 'rollup/put_job/CreateRollupJobResponse.ts#L22-L24', -'rollup.rollup_search.Request': 'rollup/rollup_search/RollupSearchRequest.ts#L27-L57', +'rollup.rollup_search.Request': 'rollup/rollup_search/RollupSearchRequest.ts#L27-L59', 'rollup.rollup_search.Response': 'rollup/rollup_search/RollupSearchResponse.ts#L27-L36', -'rollup.start_job.Request': 'rollup/start_job/StartRollupJobRequest.ts#L23-L35', +'rollup.start_job.Request': 'rollup/start_job/StartRollupJobRequest.ts#L23-L38', 'rollup.start_job.Response': 'rollup/start_job/StartRollupJobResponse.ts#L20-L22', -'rollup.stop_job.Request': 'rollup/stop_job/StopRollupJobRequest.ts#L24-L50', +'rollup.stop_job.Request': 'rollup/stop_job/StopRollupJobRequest.ts#L24-L53', 'rollup.stop_job.Response': 'rollup/stop_job/StopRollupJobResponse.ts#L20-L22', 'search_application._types.AnalyticsCollection': 'search_application/_types/BehavioralAnalytics.ts#L22-L27', 'search_application._types.EventDataStream': 'search_application/_types/BehavioralAnalytics.ts#L29-L31', @@ -2383,15 +2389,15 @@ 'search_application.search.Response': 'search_application/search/SearchApplicationsSearchResponse.ts#L22-L24', 'searchable_snapshots._types.StatsLevel': 'searchable_snapshots/_types/stats.ts#L20-L24', 
'searchable_snapshots.cache_stats.Node': 'searchable_snapshots/cache_stats/Response.ts#L30-L32', -'searchable_snapshots.cache_stats.Request': 'searchable_snapshots/cache_stats/Request.ts#L24-L35', +'searchable_snapshots.cache_stats.Request': 'searchable_snapshots/cache_stats/Request.ts#L24-L38', 'searchable_snapshots.cache_stats.Response': 'searchable_snapshots/cache_stats/Response.ts#L24-L28', 'searchable_snapshots.cache_stats.Shared': 'searchable_snapshots/cache_stats/Response.ts#L34-L43', -'searchable_snapshots.clear_cache.Request': 'searchable_snapshots/clear_cache/SearchableSnapshotsClearCacheRequest.ts#L23-L38', +'searchable_snapshots.clear_cache.Request': 'searchable_snapshots/clear_cache/SearchableSnapshotsClearCacheRequest.ts#L23-L42', 'searchable_snapshots.clear_cache.Response': 'searchable_snapshots/clear_cache/SearchableSnapshotsClearCacheResponse.ts#L22-L25', 'searchable_snapshots.mount.MountedSnapshot': 'searchable_snapshots/mount/types.ts#L23-L27', -'searchable_snapshots.mount.Request': 'searchable_snapshots/mount/SearchableSnapshotsMountRequest.ts#L26-L49', +'searchable_snapshots.mount.Request': 'searchable_snapshots/mount/SearchableSnapshotsMountRequest.ts#L26-L55', 'searchable_snapshots.mount.Response': 'searchable_snapshots/mount/SearchableSnapshotsMountResponse.ts#L22-L26', -'searchable_snapshots.stats.Request': 'searchable_snapshots/stats/SearchableSnapshotsStatsRequest.ts#L24-L35', +'searchable_snapshots.stats.Request': 'searchable_snapshots/stats/SearchableSnapshotsStatsRequest.ts#L24-L38', 'searchable_snapshots.stats.Response': 'searchable_snapshots/stats/SearchableSnapshotsStatsResponse.ts#L22-L27', 'security._types.Access': 'security/_types/Access.ts#L22-L31', 'security._types.ApiKey': 'security/_types/ApiKey.ts#L27-L113', @@ -2578,17 +2584,17 @@ 'security.update_user_profile_data.Request': 'security/update_user_profile_data/Request.ts#L27-L72', 'security.update_user_profile_data.Response': 
'security/update_user_profile_data/Response.ts#L22-L24', 'shutdown._types.Type': 'shutdown/_types/types.ts#L20-L24', -'shutdown.delete_node.Request': 'shutdown/delete_node/ShutdownDeleteNodeRequest.ts#L24-L44', +'shutdown.delete_node.Request': 'shutdown/delete_node/ShutdownDeleteNodeRequest.ts#L24-L54', 'shutdown.delete_node.Response': 'shutdown/delete_node/ShutdownDeleteNodeResponse.ts#L22-L24', 'shutdown.get_node.NodeShutdownStatus': 'shutdown/get_node/ShutdownGetNodeResponse.ts#L29-L38', 'shutdown.get_node.PersistentTaskStatus': 'shutdown/get_node/ShutdownGetNodeResponse.ts#L56-L58', 'shutdown.get_node.PluginsStatus': 'shutdown/get_node/ShutdownGetNodeResponse.ts#L60-L62', -'shutdown.get_node.Request': 'shutdown/get_node/ShutdownGetNodeRequest.ts#L24-L44', +'shutdown.get_node.Request': 'shutdown/get_node/ShutdownGetNodeRequest.ts#L24-L53', 'shutdown.get_node.Response': 'shutdown/get_node/ShutdownGetNodeResponse.ts#L23-L27', 'shutdown.get_node.ShardMigrationStatus': 'shutdown/get_node/ShutdownGetNodeResponse.ts#L52-L54', 'shutdown.get_node.ShutdownStatus': 'shutdown/get_node/ShutdownGetNodeResponse.ts#L45-L50', 'shutdown.get_node.ShutdownType': 'shutdown/get_node/ShutdownGetNodeResponse.ts#L40-L43', -'shutdown.put_node.Request': 'shutdown/put_node/ShutdownPutNodeRequest.ts#L25-L76', +'shutdown.put_node.Request': 'shutdown/put_node/ShutdownPutNodeRequest.ts#L25-L91', 'shutdown.put_node.Response': 'shutdown/put_node/ShutdownPutNodeResponse.ts#L22-L24', 'slm._types.Configuration': 'slm/_types/SnapshotLifecycle.ts#L99-L129', 'slm._types.InProgress': 'slm/_types/SnapshotLifecycle.ts#L131-L136', @@ -2597,23 +2603,23 @@ 'slm._types.Retention': 'slm/_types/SnapshotLifecycle.ts#L84-L97', 'slm._types.SnapshotLifecycle': 'slm/_types/SnapshotLifecycle.ts#L38-L49', 'slm._types.Statistics': 'slm/_types/SnapshotLifecycle.ts#L51-L74', -'slm.delete_lifecycle.Request': 'slm/delete_lifecycle/DeleteSnapshotLifecycleRequest.ts#L23-L32', +'slm.delete_lifecycle.Request': 
'slm/delete_lifecycle/DeleteSnapshotLifecycleRequest.ts#L23-L36', 'slm.delete_lifecycle.Response': 'slm/delete_lifecycle/DeleteSnapshotLifecycleResponse.ts#L22-L24', -'slm.execute_lifecycle.Request': 'slm/execute_lifecycle/ExecuteSnapshotLifecycleRequest.ts#L23-L32', +'slm.execute_lifecycle.Request': 'slm/execute_lifecycle/ExecuteSnapshotLifecycleRequest.ts#L23-L36', 'slm.execute_lifecycle.Response': 'slm/execute_lifecycle/ExecuteSnapshotLifecycleResponse.ts#L22-L24', -'slm.execute_retention.Request': 'slm/execute_retention/ExecuteRetentionRequest.ts#L22-L27', +'slm.execute_retention.Request': 'slm/execute_retention/ExecuteRetentionRequest.ts#L22-L31', 'slm.execute_retention.Response': 'slm/execute_retention/ExecuteRetentionResponse.ts#L22-L24', -'slm.get_lifecycle.Request': 'slm/get_lifecycle/GetSnapshotLifecycleRequest.ts#L23-L32', +'slm.get_lifecycle.Request': 'slm/get_lifecycle/GetSnapshotLifecycleRequest.ts#L23-L38', 'slm.get_lifecycle.Response': 'slm/get_lifecycle/GetSnapshotLifecycleResponse.ts#L24-L27', -'slm.get_stats.Request': 'slm/get_stats/GetSnapshotLifecycleStatsRequest.ts#L22-L27', +'slm.get_stats.Request': 'slm/get_stats/GetSnapshotLifecycleStatsRequest.ts#L22-L30', 'slm.get_stats.Response': 'slm/get_stats/GetSnapshotLifecycleStatsResponse.ts#L23-L36', -'slm.get_status.Request': 'slm/get_status/GetSnapshotLifecycleManagementStatusRequest.ts#L22-L27', +'slm.get_status.Request': 'slm/get_status/GetSnapshotLifecycleManagementStatusRequest.ts#L22-L29', 'slm.get_status.Response': 'slm/get_status/GetSnapshotLifecycleManagementStatusResponse.ts#L22-L24', -'slm.put_lifecycle.Request': 'slm/put_lifecycle/PutSnapshotLifecycleRequest.ts#L26-L72', +'slm.put_lifecycle.Request': 'slm/put_lifecycle/PutSnapshotLifecycleRequest.ts#L26-L78', 'slm.put_lifecycle.Response': 'slm/put_lifecycle/PutSnapshotLifecycleResponse.ts#L22-L24', -'slm.start.Request': 'slm/start/StartSnapshotLifecycleManagementRequest.ts#L22-L27', +'slm.start.Request': 
'slm/start/StartSnapshotLifecycleManagementRequest.ts#L22-L31', 'slm.start.Response': 'slm/start/StartSnapshotLifecycleManagementResponse.ts#L22-L24', -'slm.stop.Request': 'slm/stop/StopSnapshotLifecycleManagementRequest.ts#L22-L27', +'slm.stop.Request': 'slm/stop/StopSnapshotLifecycleManagementRequest.ts#L22-L35', 'slm.stop.Response': 'slm/stop/StopSnapshotLifecycleManagementResponse.ts#L22-L24', 'snapshot._types.AzureRepository': 'snapshot/_types/SnapshotRepository.ts#L40-L43', 'snapshot._types.AzureRepositorySettings': 'snapshot/_types/SnapshotRepository.ts#L77-L83', @@ -2645,32 +2651,32 @@ 'snapshot._types.SourceOnlyRepositorySettings': 'snapshot/_types/SnapshotRepository.ts#L117-L124', 'snapshot._types.Status': 'snapshot/_types/SnapshotStatus.ts#L26-L35', 'snapshot.cleanup_repository.CleanupRepositoryResults': 'snapshot/cleanup_repository/SnapshotCleanupRepositoryResponse.ts#L29-L34', -'snapshot.cleanup_repository.Request': 'snapshot/cleanup_repository/SnapshotCleanupRepositoryRequest.ts#L24-L49', +'snapshot.cleanup_repository.Request': 'snapshot/cleanup_repository/SnapshotCleanupRepositoryRequest.ts#L24-L52', 'snapshot.cleanup_repository.Response': 'snapshot/cleanup_repository/SnapshotCleanupRepositoryResponse.ts#L22-L27', -'snapshot.clone.Request': 'snapshot/clone/SnapshotCloneRequest.ts#L24-L42', +'snapshot.clone.Request': 'snapshot/clone/SnapshotCloneRequest.ts#L24-L45', 'snapshot.clone.Response': 'snapshot/clone/SnapshotCloneResponse.ts#L22-L24', -'snapshot.create.Request': 'snapshot/create/SnapshotCreateRequest.ts#L24-L81', +'snapshot.create.Request': 'snapshot/create/SnapshotCreateRequest.ts#L24-L85', 'snapshot.create.Response': 'snapshot/create/SnapshotCreateResponse.ts#L22-L35', -'snapshot.create_repository.Request': 'snapshot/create_repository/SnapshotCreateRepositoryRequest.ts#L25-L42', +'snapshot.create_repository.Request': 'snapshot/create_repository/SnapshotCreateRepositoryRequest.ts#L25-L48', 'snapshot.create_repository.Response': 
'snapshot/create_repository/SnapshotCreateRepositoryResponse.ts#L22-L24', -'snapshot.delete.Request': 'snapshot/delete/SnapshotDeleteRequest.ts#L24-L37', +'snapshot.delete.Request': 'snapshot/delete/SnapshotDeleteRequest.ts#L24-L39', 'snapshot.delete.Response': 'snapshot/delete/SnapshotDeleteResponse.ts#L22-L24', -'snapshot.delete_repository.Request': 'snapshot/delete_repository/SnapshotDeleteRepositoryRequest.ts#L24-L38', +'snapshot.delete_repository.Request': 'snapshot/delete_repository/SnapshotDeleteRepositoryRequest.ts#L24-L42', 'snapshot.delete_repository.Response': 'snapshot/delete_repository/SnapshotDeleteRepositoryResponse.ts#L22-L24', -'snapshot.get.Request': 'snapshot/get/SnapshotGetRequest.ts#L27-L127', +'snapshot.get.Request': 'snapshot/get/SnapshotGetRequest.ts#L27-L129', 'snapshot.get.Response': 'snapshot/get/SnapshotGetResponse.ts#L25-L42', 'snapshot.get.SnapshotResponseItem': 'snapshot/get/SnapshotGetResponse.ts#L44-L48', -'snapshot.get_repository.Request': 'snapshot/get_repository/SnapshotGetRepositoryRequest.ts#L24-L38', +'snapshot.get_repository.Request': 'snapshot/get_repository/SnapshotGetRepositoryRequest.ts#L24-L40', 'snapshot.get_repository.Response': 'snapshot/get_repository/SnapshotGetRepositoryResponse.ts#L23-L25', -'snapshot.repository_verify_integrity.Request': 'snapshot/repository_verify_integrity/SnapshotRepositoryVerifyIntegrityRequest.ts#L24-L43', +'snapshot.repository_verify_integrity.Request': 'snapshot/repository_verify_integrity/SnapshotRepositoryVerifyIntegrityRequest.ts#L24-L72', 'snapshot.repository_verify_integrity.Response': 'snapshot/repository_verify_integrity/SnapshotRepositoryVerifyIntegrityResponse.ts#L22-L24', -'snapshot.restore.Request': 'snapshot/restore/SnapshotRestoreRequest.ts#L25-L51', +'snapshot.restore.Request': 'snapshot/restore/SnapshotRestoreRequest.ts#L25-L71', 'snapshot.restore.Response': 'snapshot/restore/SnapshotRestoreResponse.ts#L23-L28', 'snapshot.restore.SnapshotRestore': 
'snapshot/restore/SnapshotRestoreResponse.ts#L30-L34', -'snapshot.status.Request': 'snapshot/status/SnapshotStatusRequest.ts#L24-L38', +'snapshot.status.Request': 'snapshot/status/SnapshotStatusRequest.ts#L24-L50', 'snapshot.status.Response': 'snapshot/status/SnapshotStatusResponse.ts#L22-L24', 'snapshot.verify_repository.CompactNodeInfo': 'snapshot/verify_repository/SnapshotVerifyRepositoryResponse.ts#L27-L29', -'snapshot.verify_repository.Request': 'snapshot/verify_repository/SnapshotVerifyRepositoryRequest.ts#L24-L38', +'snapshot.verify_repository.Request': 'snapshot/verify_repository/SnapshotVerifyRepositoryRequest.ts#L24-L42', 'snapshot.verify_repository.Response': 'snapshot/verify_repository/SnapshotVerifyRepositoryResponse.ts#L23-L25', 'sql.Column': 'sql/types.ts#L23-L26', 'sql.clear_cursor.Request': 'sql/clear_cursor/ClearSqlCursorRequest.ts#L22-L35', @@ -2961,10 +2967,10 @@ if (hash.length > 1) { hash = hash.substring(1); } - window.location = "https://github.com/elastic/elasticsearch-specification/tree/54858439f77f353d64fec90ccdb70bc54b43a7c8/specification/" + (paths[hash] || ""); + window.location = "https://github.com/elastic/elasticsearch-specification/tree/fd5d7cff3f7e72348ac3ebf32bfadd7be9243b86/specification/" + (paths[hash] || ""); - Please see the Elasticsearch API specification. + Please see the Elasticsearch API specification. 
diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/features/ElasticsearchFeaturesAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/features/ElasticsearchFeaturesAsyncClient.java index 1c8e84d32..ed2d8e7f9 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/features/ElasticsearchFeaturesAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/features/ElasticsearchFeaturesAsyncClient.java @@ -68,8 +68,20 @@ public ElasticsearchFeaturesAsyncClient withTransportOptions(@Nullable Transport // ----- Endpoint: features.get_features /** - * Gets a list of features which can be included in snapshots using the - * feature_states field when creating a snapshot + * Get the features. Get a list of features that can be included in snapshots + * using the feature_states field when creating a snapshot. You can + * use this API to determine which feature states to include when taking a + * snapshot. By default, all feature states are included in a snapshot if that + * snapshot includes the global state, or none if it does not. + *

+ * A feature state includes one or more system indices necessary for a given + * feature to function. In order to ensure data integrity, all system indices + * that comprise a feature state are snapshotted and restored together. + *

+ * The features listed by this API are a combination of built-in features and + * features defined by plugins. In order for a feature state to be listed in + * this API and recognized as a valid feature state by the create snapshot API, + * the plugin that defines that feature must be installed on the master node. * * @see Documentation @@ -83,7 +95,29 @@ public CompletableFuture getFeatures() { // ----- Endpoint: features.reset_features /** - * Resets the internal state of features, usually by deleting system indices + * Reset the features. Clear all of the state information stored in system + * indices by Elasticsearch features, including the security and machine + * learning indices. + *

+ * WARNING: Intended for development and testing use only. Do not reset features + * on a production cluster. + *

+ * Return a cluster to the same state as a new installation by resetting the + * feature state for all Elasticsearch features. This deletes all state + * information stored in system indices. + *

+ * The response code is HTTP 200 if the state is successfully reset for all + * features. It is HTTP 500 if the reset operation failed for any feature. + *

+ * Note that select features might provide a way to reset particular system + * indices. Using this API resets all features, both those that are built-in and + * implemented as plugins. + *

+ * To list the features that will be affected, use the get features API. + *

+ * IMPORTANT: The features installed on the node you submit this request to are + * the features that will be reset. Run on the master node if you have any + * doubts about which plugins are installed on individual nodes. * * @see Documentation diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/features/ElasticsearchFeaturesClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/features/ElasticsearchFeaturesClient.java index 966d0072f..1f00d20f0 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/features/ElasticsearchFeaturesClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/features/ElasticsearchFeaturesClient.java @@ -66,8 +66,20 @@ public ElasticsearchFeaturesClient withTransportOptions(@Nullable TransportOptio // ----- Endpoint: features.get_features /** - * Gets a list of features which can be included in snapshots using the - * feature_states field when creating a snapshot + * Get the features. Get a list of features that can be included in snapshots + * using the feature_states field when creating a snapshot. You can + * use this API to determine which feature states to include when taking a + * snapshot. By default, all feature states are included in a snapshot if that + * snapshot includes the global state, or none if it does not. + *

+ * A feature state includes one or more system indices necessary for a given + * feature to function. In order to ensure data integrity, all system indices + * that comprise a feature state are snapshotted and restored together. + *

+ * The features listed by this API are a combination of built-in features and + * features defined by plugins. In order for a feature state to be listed in + * this API and recognized as a valid feature state by the create snapshot API, + * the plugin that defines that feature must be installed on the master node. * * @see Documentation @@ -81,7 +93,29 @@ public GetFeaturesResponse getFeatures() throws IOException, ElasticsearchExcept // ----- Endpoint: features.reset_features /** - * Resets the internal state of features, usually by deleting system indices + * Reset the features. Clear all of the state information stored in system + * indices by Elasticsearch features, including the security and machine + * learning indices. + *

+ * WARNING: Intended for development and testing use only. Do not reset features + * on a production cluster. + *

+ * Return a cluster to the same state as a new installation by resetting the + * feature state for all Elasticsearch features. This deletes all state + * information stored in system indices. + *

+ * The response code is HTTP 200 if the state is successfully reset for all + * features. It is HTTP 500 if the reset operation failed for any feature. + *

+ * Note that select features might provide a way to reset particular system + * indices. Using this API resets all features, both those that are built-in and + * implemented as plugins. + *

+ * To list the features that will be affected, use the get features API. + *

+ * IMPORTANT: The features installed on the node you submit this request to are + * the features that will be reset. Run on the master node if you have any + * doubts about which plugins are installed on individual nodes. * * @see Documentation diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/features/GetFeaturesRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/features/GetFeaturesRequest.java index 605b769ee..19780899f 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/features/GetFeaturesRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/features/GetFeaturesRequest.java @@ -50,8 +50,20 @@ // typedef: features.get_features.Request /** - * Gets a list of features which can be included in snapshots using the - * feature_states field when creating a snapshot + * Get the features. Get a list of features that can be included in snapshots + * using the feature_states field when creating a snapshot. You can + * use this API to determine which feature states to include when taking a + * snapshot. By default, all feature states are included in a snapshot if that + * snapshot includes the global state, or none if it does not. + *

+ * A feature state includes one or more system indices necessary for a given + * feature to function. In order to ensure data integrity, all system indices + * that comprise a feature state are snapshotted and restored together. + *

+ * The features listed by this API are a combination of built-in features and + * features defined by plugins. In order for a feature state to be listed in + * this API and recognized as a valid feature state by the create snapshot API, + * the plugin that defines that feature must be installed on the master node. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/features/ResetFeaturesRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/features/ResetFeaturesRequest.java index 970689148..ecfabbcda 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/features/ResetFeaturesRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/features/ResetFeaturesRequest.java @@ -50,7 +50,29 @@ // typedef: features.reset_features.Request /** - * Resets the internal state of features, usually by deleting system indices + * Reset the features. Clear all of the state information stored in system + * indices by Elasticsearch features, including the security and machine + * learning indices. + *

+ * WARNING: Intended for development and testing use only. Do not reset features + * on a production cluster. + *

+ * Return a cluster to the same state as a new installation by resetting the + * feature state for all Elasticsearch features. This deletes all state + * information stored in system indices. + *

+ * The response code is HTTP 200 if the state is successfully reset for all + * features. It is HTTP 500 if the reset operation failed for any feature. + *

+ * Note that select features might provide a way to reset particular system + * indices. Using this API resets all features, both those that are built-in and + * implemented as plugins. + *

+ * To list the features that will be affected, use the get features API. + *

+ * IMPORTANT: The features installed on the node you submit this request to are + * the features that will be reset. Run on the master node if you have any + * doubts about which plugins are installed on individual nodes. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/DeleteLifecycleRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/DeleteLifecycleRequest.java index ddc52159a..006b224a1 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/DeleteLifecycleRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/DeleteLifecycleRequest.java @@ -56,9 +56,9 @@ // typedef: ilm.delete_lifecycle.Request /** - * Deletes the specified lifecycle policy definition. You cannot delete policies - * that are currently in use. If the policy is being used to manage any indices, - * the request fails and returns an error. + * Delete a lifecycle policy. You cannot delete policies that are currently in + * use. If the policy is being used to manage any indices, the request fails and + * returns an error. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/ElasticsearchIlmAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/ElasticsearchIlmAsyncClient.java index 02b185d2d..e33bb0ace 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/ElasticsearchIlmAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/ElasticsearchIlmAsyncClient.java @@ -67,9 +67,9 @@ public ElasticsearchIlmAsyncClient withTransportOptions(@Nullable TransportOptio // ----- Endpoint: ilm.delete_lifecycle /** - * Deletes the specified lifecycle policy definition. You cannot delete policies - * that are currently in use. If the policy is being used to manage any indices, - * the request fails and returns an error. + * Delete a lifecycle policy. 
You cannot delete policies that are currently in + * use. If the policy is being used to manage any indices, the request fails and + * returns an error. * * @see Documentation @@ -84,9 +84,9 @@ public CompletableFuture deleteLifecycle(DeleteLifecycl } /** - * Deletes the specified lifecycle policy definition. You cannot delete policies - * that are currently in use. If the policy is being used to manage any indices, - * the request fails and returns an error. + * Delete a lifecycle policy. You cannot delete policies that are currently in + * use. If the policy is being used to manage any indices, the request fails and + * returns an error. * * @param fn * a function that initializes a builder to create the @@ -104,10 +104,12 @@ public final CompletableFuture deleteLifecycle( // ----- Endpoint: ilm.explain_lifecycle /** - * Retrieves information about the index’s current lifecycle state, such as the - * currently executing phase, action, and step. Shows when the index entered - * each one, the definition of the running phase, and information about any - * failures. + * Explain the lifecycle state. Get the current lifecycle status for one or more + * indices. For data streams, the API retrieves the current lifecycle status for + * the stream's backing indices. + *

+ * The response indicates when the index entered each lifecycle state, provides + * the definition of the running phase, and information about any failures. * * @see Documentation @@ -122,10 +124,12 @@ public CompletableFuture explainLifecycle(ExplainLifec } /** - * Retrieves information about the index’s current lifecycle state, such as the - * currently executing phase, action, and step. Shows when the index entered - * each one, the definition of the running phase, and information about any - * failures. + * Explain the lifecycle state. Get the current lifecycle status for one or more + * indices. For data streams, the API retrieves the current lifecycle status for + * the stream's backing indices. + *

+ * The response indicates when the index entered each lifecycle state, provides + * the definition of the running phase, and information about any failures. * * @param fn * a function that initializes a builder to create the @@ -143,7 +147,7 @@ public final CompletableFuture explainLifecycle( // ----- Endpoint: ilm.get_lifecycle /** - * Retrieves a lifecycle policy. + * Get lifecycle policies. * * @see Documentation @@ -158,7 +162,7 @@ public CompletableFuture getLifecycle(GetLifecycleRequest } /** - * Retrieves a lifecycle policy. + * Get lifecycle policies. * * @param fn * a function that initializes a builder to create the @@ -174,7 +178,7 @@ public final CompletableFuture getLifecycle( } /** - * Retrieves a lifecycle policy. + * Get lifecycle policies. * * @see Documentation @@ -189,7 +193,7 @@ public CompletableFuture getLifecycle() { // ----- Endpoint: ilm.get_status /** - * Retrieves the current index lifecycle management (ILM) status. + * Get the ILM status. Get the current index lifecycle management status. * * @see Documentation @@ -203,11 +207,25 @@ public CompletableFuture getStatus() { // ----- Endpoint: ilm.migrate_to_data_tiers /** - * Switches the indices, ILM policies, and legacy, composable and component - * templates from using custom node attributes and attribute-based allocation - * filters to using data tiers, and optionally deletes one legacy index - * template.+ Using node roles enables ILM to automatically move the indices - * between data tiers. + * Migrate to data tiers routing. Switch the indices, ILM policies, and legacy, + * composable, and component templates from using custom node attributes and + * attribute-based allocation filters to using data tiers. Optionally, delete + * one legacy index template. Using node roles enables ILM to automatically move + * the indices between data tiers. + *

+ * Migrating away from custom node attributes routing can be manually performed. + * This API provides an automated way of performing three out of the four manual + * steps listed in the migration guide: + *

    + *
  1. Stop setting the custom hot attribute on new indices.
  2. + *
  3. Remove custom allocation settings from existing ILM policies.
  4. + *
  5. Replace custom allocation settings from existing indices with the + * corresponding tier preference.
  6. + *
+ *

+ * ILM must be stopped before performing the migration. Use the stop ILM and get + * ILM status APIs to wait until the reported operation mode is + * STOPPED. * * @see Documentation @@ -222,11 +240,25 @@ public CompletableFuture migrateToDataTiers(MigrateT } /** - * Switches the indices, ILM policies, and legacy, composable and component - * templates from using custom node attributes and attribute-based allocation - * filters to using data tiers, and optionally deletes one legacy index - * template.+ Using node roles enables ILM to automatically move the indices - * between data tiers. + * Migrate to data tiers routing. Switch the indices, ILM policies, and legacy, + * composable, and component templates from using custom node attributes and + * attribute-based allocation filters to using data tiers. Optionally, delete + * one legacy index template. Using node roles enables ILM to automatically move + * the indices between data tiers. + *

+ * Migrating away from custom node attributes routing can be manually performed. + * This API provides an automated way of performing three out of the four manual + * steps listed in the migration guide: + *

    + *
  1. Stop setting the custom hot attribute on new indices.
  2. + *
  3. Remove custom allocation settings from existing ILM policies.
  4. + *
  5. Replace custom allocation settings from existing indices with the + * corresponding tier preference.
  6. + *
+ *

+ * ILM must be stopped before performing the migration. Use the stop ILM and get + * ILM status APIs to wait until the reported operation mode is + * STOPPED. * * @param fn * a function that initializes a builder to create the @@ -242,11 +274,25 @@ public final CompletableFuture migrateToDataTiers( } /** - * Switches the indices, ILM policies, and legacy, composable and component - * templates from using custom node attributes and attribute-based allocation - * filters to using data tiers, and optionally deletes one legacy index - * template.+ Using node roles enables ILM to automatically move the indices - * between data tiers. + * Migrate to data tiers routing. Switch the indices, ILM policies, and legacy, + * composable, and component templates from using custom node attributes and + * attribute-based allocation filters to using data tiers. Optionally, delete + * one legacy index template. Using node roles enables ILM to automatically move + * the indices between data tiers. + *

+ * Migrating away from custom node attributes routing can be manually performed. + * This API provides an automated way of performing three out of the four manual + * steps listed in the migration guide: + *

    + *
  1. Stop setting the custom hot attribute on new indices.
  2. + *
  3. Remove custom allocation settings from existing ILM policies.
  4. + *
  5. Replace custom allocation settings from existing indices with the + * corresponding tier preference.
  6. + *
+ *

+ * ILM must be stopped before performing the migration. Use the stop ILM and get + * ILM status APIs to wait until the reported operation mode is + * STOPPED. * * @see Documentation @@ -261,7 +307,26 @@ public CompletableFuture migrateToDataTiers() { // ----- Endpoint: ilm.move_to_step /** - * Manually moves an index into the specified step and executes that step. + * Move to a lifecycle step. Manually move an index into a specific step in the + * lifecycle policy and run that step. + *

+ * WARNING: This operation can result in the loss of data. Manually moving an + * index into a specific step runs that step even if it has already been + * performed. This is a potentially destructive action and this should be + * considered an expert level API. + *

+ * You must specify both the current step and the step to be executed in the + * body of the request. The request will fail if the current step does not match + * the step currently running for the index This is to prevent the index from + * being moved from an unexpected step into the next step. + *

+ * When specifying the target (next_step) to which the index will + * be moved, either the name or both the action and name fields are optional. If + * only the phase is specified, the index will move to the first step of the + * first action in the target phase. If the phase and action are specified, the + * index will move to the first step of the specified action in the specified + * phase. Only actions specified in the ILM policy are considered valid. An + * index cannot move to a step that is not part of its policy. * * @see Documentation @@ -276,7 +341,26 @@ public CompletableFuture moveToStep(MoveToStepRequest reques } /** - * Manually moves an index into the specified step and executes that step. + * Move to a lifecycle step. Manually move an index into a specific step in the + * lifecycle policy and run that step. + *

+ * WARNING: This operation can result in the loss of data. Manually moving an + * index into a specific step runs that step even if it has already been + * performed. This is a potentially destructive action and this should be + * considered an expert level API. + *

+ * You must specify both the current step and the step to be executed in the + * body of the request. The request will fail if the current step does not match + * the step currently running for the index This is to prevent the index from + * being moved from an unexpected step into the next step. + *

+ * When specifying the target (next_step) to which the index will + * be moved, either the name or both the action and name fields are optional. If + * only the phase is specified, the index will move to the first step of the + * first action in the target phase. If the phase and action are specified, the + * index will move to the first step of the specified action in the specified + * phase. Only actions specified in the ILM policy are considered valid. An + * index cannot move to a step that is not part of its policy. * * @param fn * a function that initializes a builder to create the @@ -294,8 +378,11 @@ public final CompletableFuture moveToStep( // ----- Endpoint: ilm.put_lifecycle /** - * Creates a lifecycle policy. If the specified policy exists, the policy is + * Create or update a lifecycle policy. If the specified policy exists, it is * replaced and the policy version is incremented. + *

+ * NOTE: Only the latest version of the policy is stored, you cannot revert to + * previous versions. * * @see Documentation @@ -310,8 +397,11 @@ public CompletableFuture putLifecycle(PutLifecycleRequest } /** - * Creates a lifecycle policy. If the specified policy exists, the policy is + * Create or update a lifecycle policy. If the specified policy exists, it is * replaced and the policy version is incremented. + *

+ * NOTE: Only the latest version of the policy is stored, you cannot revert to + * previous versions. * * @param fn * a function that initializes a builder to create the @@ -329,7 +419,8 @@ public final CompletableFuture putLifecycle( // ----- Endpoint: ilm.remove_policy /** - * Removes the assigned lifecycle policy and stops managing the specified index + * Remove policies from an index. Remove the assigned lifecycle policies from an + * index or a data stream's backing indices. It also stops managing the indices. * * @see Documentation @@ -344,7 +435,8 @@ public CompletableFuture removePolicy(RemovePolicyRequest } /** - * Removes the assigned lifecycle policy and stops managing the specified index + * Remove policies from an index. Remove the assigned lifecycle policies from an + * index or a data stream's backing indices. It also stops managing the indices. * * @param fn * a function that initializes a builder to create the @@ -362,7 +454,10 @@ public final CompletableFuture removePolicy( // ----- Endpoint: ilm.retry /** - * Retries executing the policy for an index that is in the ERROR step. + * Retry a policy. Retry running the lifecycle policy for an index that is in + * the ERROR step. The API sets the policy back to the step where the error + * occurred and runs the step. Use the explain lifecycle state API to determine + * whether an index is in the ERROR step. * * @see Documentation @@ -377,7 +472,10 @@ public CompletableFuture retry(RetryRequest request) { } /** - * Retries executing the policy for an index that is in the ERROR step. + * Retry a policy. Retry running the lifecycle policy for an index that is in + * the ERROR step. The API sets the policy back to the step where the error + * occurred and runs the step. Use the explain lifecycle state API to determine + * whether an index is in the ERROR step. 
* * @param fn * a function that initializes a builder to create the @@ -395,7 +493,10 @@ public final CompletableFuture retry( // ----- Endpoint: ilm.start /** - * Start the index lifecycle management (ILM) plugin. + * Start the ILM plugin. Start the index lifecycle management plugin if it is + * currently stopped. ILM is started automatically when the cluster is formed. + * Restarting ILM is necessary only when it has been stopped using the stop ILM + * API. * * @see Documentation @@ -410,7 +511,10 @@ public CompletableFuture start(StartIlmRequest request) { } /** - * Start the index lifecycle management (ILM) plugin. + * Start the ILM plugin. Start the index lifecycle management plugin if it is + * currently stopped. ILM is started automatically when the cluster is formed. + * Restarting ILM is necessary only when it has been stopped using the stop ILM + * API. * * @param fn * a function that initializes a builder to create the @@ -426,7 +530,10 @@ public final CompletableFuture start( } /** - * Start the index lifecycle management (ILM) plugin. + * Start the ILM plugin. Start the index lifecycle management plugin if it is + * currently stopped. ILM is started automatically when the cluster is formed. + * Restarting ILM is necessary only when it has been stopped using the stop ILM + * API. * * @see Documentation @@ -441,8 +548,15 @@ public CompletableFuture start() { // ----- Endpoint: ilm.stop /** - * Halts all lifecycle management operations and stops the index lifecycle - * management (ILM) plugin + * Stop the ILM plugin. Halt all lifecycle management operations and stop the + * index lifecycle management plugin. This is useful when you are performing + * maintenance on the cluster and need to prevent ILM from performing any + * actions on your indices. + *

+ * The API returns as soon as the stop request has been acknowledged, but the + * plugin might continue to run until in-progress operations complete and the + * plugin can be safely stopped. Use the get ILM status API to check whether ILM + * is running. * * @see Documentation @@ -457,8 +571,15 @@ public CompletableFuture stop(StopIlmRequest request) { } /** - * Halts all lifecycle management operations and stops the index lifecycle - * management (ILM) plugin + * Stop the ILM plugin. Halt all lifecycle management operations and stop the + * index lifecycle management plugin. This is useful when you are performing + * maintenance on the cluster and need to prevent ILM from performing any + * actions on your indices. + *

+ * The API returns as soon as the stop request has been acknowledged, but the + * plugin might continue to run until in-progress operations complete and the + * plugin can be safely stopped. Use the get ILM status API to check whether ILM + * is running. * * @param fn * a function that initializes a builder to create the @@ -474,8 +595,15 @@ public final CompletableFuture stop( } /** - * Halts all lifecycle management operations and stops the index lifecycle - * management (ILM) plugin + * Stop the ILM plugin. Halt all lifecycle management operations and stop the + * index lifecycle management plugin. This is useful when you are performing + * maintenance on the cluster and need to prevent ILM from performing any + * actions on your indices. + *

+ * The API returns as soon as the stop request has been acknowledged, but the + * plugin might continue to run until in-progress operations complete and the + * plugin can be safely stopped. Use the get ILM status API to check whether ILM + * is running. * * @see Documentation diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/ElasticsearchIlmClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/ElasticsearchIlmClient.java index f41489680..6b3e3a8f5 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/ElasticsearchIlmClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/ElasticsearchIlmClient.java @@ -68,9 +68,9 @@ public ElasticsearchIlmClient withTransportOptions(@Nullable TransportOptions tr // ----- Endpoint: ilm.delete_lifecycle /** - * Deletes the specified lifecycle policy definition. You cannot delete policies - * that are currently in use. If the policy is being used to manage any indices, - * the request fails and returns an error. + * Delete a lifecycle policy. You cannot delete policies that are currently in + * use. If the policy is being used to manage any indices, the request fails and + * returns an error. * * @see Documentation @@ -86,9 +86,9 @@ public DeleteLifecycleResponse deleteLifecycle(DeleteLifecycleRequest request) } /** - * Deletes the specified lifecycle policy definition. You cannot delete policies - * that are currently in use. If the policy is being used to manage any indices, - * the request fails and returns an error. + * Delete a lifecycle policy. You cannot delete policies that are currently in + * use. If the policy is being used to manage any indices, the request fails and + * returns an error. 
* * @param fn * a function that initializes a builder to create the @@ -107,10 +107,12 @@ public final DeleteLifecycleResponse deleteLifecycle( // ----- Endpoint: ilm.explain_lifecycle /** - * Retrieves information about the index’s current lifecycle state, such as the - * currently executing phase, action, and step. Shows when the index entered - * each one, the definition of the running phase, and information about any - * failures. + * Explain the lifecycle state. Get the current lifecycle status for one or more + * indices. For data streams, the API retrieves the current lifecycle status for + * the stream's backing indices. + *

+ * The response indicates when the index entered each lifecycle state, provides + * the definition of the running phase, and information about any failures. * * @see Documentation @@ -126,10 +128,12 @@ public ExplainLifecycleResponse explainLifecycle(ExplainLifecycleRequest request } /** - * Retrieves information about the index’s current lifecycle state, such as the - * currently executing phase, action, and step. Shows when the index entered - * each one, the definition of the running phase, and information about any - * failures. + * Explain the lifecycle state. Get the current lifecycle status for one or more + * indices. For data streams, the API retrieves the current lifecycle status for + * the stream's backing indices. + *

+ * The response indicates when the index entered each lifecycle state, provides + * the definition of the running phase, and information about any failures. * * @param fn * a function that initializes a builder to create the @@ -148,7 +152,7 @@ public final ExplainLifecycleResponse explainLifecycle( // ----- Endpoint: ilm.get_lifecycle /** - * Retrieves a lifecycle policy. + * Get lifecycle policies. * * @see Documentation @@ -163,7 +167,7 @@ public GetLifecycleResponse getLifecycle(GetLifecycleRequest request) throws IOE } /** - * Retrieves a lifecycle policy. + * Get lifecycle policies. * * @param fn * a function that initializes a builder to create the @@ -180,7 +184,7 @@ public final GetLifecycleResponse getLifecycle( } /** - * Retrieves a lifecycle policy. + * Get lifecycle policies. * * @see Documentation @@ -195,7 +199,7 @@ public GetLifecycleResponse getLifecycle() throws IOException, ElasticsearchExce // ----- Endpoint: ilm.get_status /** - * Retrieves the current index lifecycle management (ILM) status. + * Get the ILM status. Get the current index lifecycle management status. * * @see Documentation @@ -209,11 +213,25 @@ public GetIlmStatusResponse getStatus() throws IOException, ElasticsearchExcepti // ----- Endpoint: ilm.migrate_to_data_tiers /** - * Switches the indices, ILM policies, and legacy, composable and component - * templates from using custom node attributes and attribute-based allocation - * filters to using data tiers, and optionally deletes one legacy index - * template.+ Using node roles enables ILM to automatically move the indices - * between data tiers. + * Migrate to data tiers routing. Switch the indices, ILM policies, and legacy, + * composable, and component templates from using custom node attributes and + * attribute-based allocation filters to using data tiers. Optionally, delete + * one legacy index template. Using node roles enables ILM to automatically move + * the indices between data tiers. + *

+ * Migrating away from custom node attributes routing can be manually performed. + * This API provides an automated way of performing three out of the four manual + * steps listed in the migration guide: + *

    + *
+ * <ol>
+ * <li>Stop setting the custom hot attribute on new indices.</li>
+ * <li>Remove custom allocation settings from existing ILM policies.</li>
+ * <li>Replace custom allocation settings from existing indices with the
+ * corresponding tier preference.</li>
+ * </ol>
+ *

+ * ILM must be stopped before performing the migration. Use the stop ILM and get + * ILM status APIs to wait until the reported operation mode is + * STOPPED. * * @see Documentation @@ -229,11 +247,25 @@ public MigrateToDataTiersResponse migrateToDataTiers(MigrateToDataTiersRequest r } /** - * Switches the indices, ILM policies, and legacy, composable and component - * templates from using custom node attributes and attribute-based allocation - * filters to using data tiers, and optionally deletes one legacy index - * template.+ Using node roles enables ILM to automatically move the indices - * between data tiers. + * Migrate to data tiers routing. Switch the indices, ILM policies, and legacy, + * composable, and component templates from using custom node attributes and + * attribute-based allocation filters to using data tiers. Optionally, delete + * one legacy index template. Using node roles enables ILM to automatically move + * the indices between data tiers. + *

+ * Migrating away from custom node attributes routing can be manually performed. + * This API provides an automated way of performing three out of the four manual + * steps listed in the migration guide: + *

    + *
+ * <ol>
+ * <li>Stop setting the custom hot attribute on new indices.</li>
+ * <li>Remove custom allocation settings from existing ILM policies.</li>
+ * <li>Replace custom allocation settings from existing indices with the
+ * corresponding tier preference.</li>
+ * </ol>
+ *

+ * ILM must be stopped before performing the migration. Use the stop ILM and get + * ILM status APIs to wait until the reported operation mode is + * STOPPED. * * @param fn * a function that initializes a builder to create the @@ -250,11 +282,25 @@ public final MigrateToDataTiersResponse migrateToDataTiers( } /** - * Switches the indices, ILM policies, and legacy, composable and component - * templates from using custom node attributes and attribute-based allocation - * filters to using data tiers, and optionally deletes one legacy index - * template.+ Using node roles enables ILM to automatically move the indices - * between data tiers. + * Migrate to data tiers routing. Switch the indices, ILM policies, and legacy, + * composable, and component templates from using custom node attributes and + * attribute-based allocation filters to using data tiers. Optionally, delete + * one legacy index template. Using node roles enables ILM to automatically move + * the indices between data tiers. + *

+ * Migrating away from custom node attributes routing can be manually performed. + * This API provides an automated way of performing three out of the four manual + * steps listed in the migration guide: + *

    + *
+ * <ol>
+ * <li>Stop setting the custom hot attribute on new indices.</li>
+ * <li>Remove custom allocation settings from existing ILM policies.</li>
+ * <li>Replace custom allocation settings from existing indices with the
+ * corresponding tier preference.</li>
+ * </ol>
+ *

+ * ILM must be stopped before performing the migration. Use the stop ILM and get + * ILM status APIs to wait until the reported operation mode is + * STOPPED. * * @see Documentation @@ -269,7 +315,26 @@ public MigrateToDataTiersResponse migrateToDataTiers() throws IOException, Elast // ----- Endpoint: ilm.move_to_step /** - * Manually moves an index into the specified step and executes that step. + * Move to a lifecycle step. Manually move an index into a specific step in the + * lifecycle policy and run that step. + *

+ * WARNING: This operation can result in the loss of data. Manually moving an + * index into a specific step runs that step even if it has already been + * performed. This is a potentially destructive action and this should be + * considered an expert level API. + *

+ * You must specify both the current step and the step to be executed in the + * body of the request. The request will fail if the current step does not match + * the step currently running for the index. This is to prevent the index from + * being moved from an unexpected step into the next step. + *

+ * When specifying the target (next_step) to which the index will + * be moved, either the name or both the action and name fields are optional. If + * only the phase is specified, the index will move to the first step of the + * first action in the target phase. If the phase and action are specified, the + * index will move to the first step of the specified action in the specified + * phase. Only actions specified in the ILM policy are considered valid. An + * index cannot move to a step that is not part of its policy. * * @see Documentation @@ -284,7 +349,26 @@ public MoveToStepResponse moveToStep(MoveToStepRequest request) throws IOExcepti } /** - * Manually moves an index into the specified step and executes that step. + * Move to a lifecycle step. Manually move an index into a specific step in the + * lifecycle policy and run that step. + *

+ * WARNING: This operation can result in the loss of data. Manually moving an + * index into a specific step runs that step even if it has already been + * performed. This is a potentially destructive action and this should be + * considered an expert level API. + *

+ * You must specify both the current step and the step to be executed in the + * body of the request. The request will fail if the current step does not match + * the step currently running for the index. This is to prevent the index from + * being moved from an unexpected step into the next step. + *

+ * When specifying the target (next_step) to which the index will + * be moved, either the name or both the action and name fields are optional. If + * only the phase is specified, the index will move to the first step of the + * first action in the target phase. If the phase and action are specified, the + * index will move to the first step of the specified action in the specified + * phase. Only actions specified in the ILM policy are considered valid. An + * index cannot move to a step that is not part of its policy. * * @param fn * a function that initializes a builder to create the @@ -302,8 +386,11 @@ public final MoveToStepResponse moveToStep(Function + * NOTE: Only the latest version of the policy is stored, you cannot revert to + * previous versions. * * @see Documentation @@ -318,8 +405,11 @@ public PutLifecycleResponse putLifecycle(PutLifecycleRequest request) throws IOE } /** - * Creates a lifecycle policy. If the specified policy exists, the policy is + * Create or update a lifecycle policy. If the specified policy exists, it is * replaced and the policy version is incremented. + *

+ * NOTE: Only the latest version of the policy is stored, you cannot revert to + * previous versions. * * @param fn * a function that initializes a builder to create the @@ -338,7 +428,8 @@ public final PutLifecycleResponse putLifecycle( // ----- Endpoint: ilm.remove_policy /** - * Removes the assigned lifecycle policy and stops managing the specified index + * Remove policies from an index. Remove the assigned lifecycle policies from an + * index or a data stream's backing indices. It also stops managing the indices. * * @see Documentation @@ -353,7 +444,8 @@ public RemovePolicyResponse removePolicy(RemovePolicyRequest request) throws IOE } /** - * Removes the assigned lifecycle policy and stops managing the specified index + * Remove policies from an index. Remove the assigned lifecycle policies from an + * index or a data stream's backing indices. It also stops managing the indices. * * @param fn * a function that initializes a builder to create the @@ -372,7 +464,10 @@ public final RemovePolicyResponse removePolicy( // ----- Endpoint: ilm.retry /** - * Retries executing the policy for an index that is in the ERROR step. + * Retry a policy. Retry running the lifecycle policy for an index that is in + * the ERROR step. The API sets the policy back to the step where the error + * occurred and runs the step. Use the explain lifecycle state API to determine + * whether an index is in the ERROR step. * * @see Documentation @@ -387,7 +482,10 @@ public RetryResponse retry(RetryRequest request) throws IOException, Elasticsear } /** - * Retries executing the policy for an index that is in the ERROR step. + * Retry a policy. Retry running the lifecycle policy for an index that is in + * the ERROR step. The API sets the policy back to the step where the error + * occurred and runs the step. Use the explain lifecycle state API to determine + * whether an index is in the ERROR step. 
* * @param fn * a function that initializes a builder to create the @@ -405,7 +503,10 @@ public final RetryResponse retry(FunctionDocumentation @@ -420,7 +521,10 @@ public StartIlmResponse start(StartIlmRequest request) throws IOException, Elast } /** - * Start the index lifecycle management (ILM) plugin. + * Start the ILM plugin. Start the index lifecycle management plugin if it is + * currently stopped. ILM is started automatically when the cluster is formed. + * Restarting ILM is necessary only when it has been stopped using the stop ILM + * API. * * @param fn * a function that initializes a builder to create the @@ -436,7 +540,10 @@ public final StartIlmResponse start(FunctionDocumentation @@ -451,8 +558,15 @@ public StartIlmResponse start() throws IOException, ElasticsearchException { // ----- Endpoint: ilm.stop /** - * Halts all lifecycle management operations and stops the index lifecycle - * management (ILM) plugin + * Stop the ILM plugin. Halt all lifecycle management operations and stop the + * index lifecycle management plugin. This is useful when you are performing + * maintenance on the cluster and need to prevent ILM from performing any + * actions on your indices. + *

+ * The API returns as soon as the stop request has been acknowledged, but the + * plugin might continue to run until in-progress operations complete and the + * plugin can be safely stopped. Use the get ILM status API to check whether ILM + * is running. * * @see Documentation @@ -467,8 +581,15 @@ public StopIlmResponse stop(StopIlmRequest request) throws IOException, Elastics } /** - * Halts all lifecycle management operations and stops the index lifecycle - * management (ILM) plugin + * Stop the ILM plugin. Halt all lifecycle management operations and stop the + * index lifecycle management plugin. This is useful when you are performing + * maintenance on the cluster and need to prevent ILM from performing any + * actions on your indices. + *

+ * The API returns as soon as the stop request has been acknowledged, but the + * plugin might continue to run until in-progress operations complete and the + * plugin can be safely stopped. Use the get ILM status API to check whether ILM + * is running. * * @param fn * a function that initializes a builder to create the @@ -484,8 +605,15 @@ public final StopIlmResponse stop(Function + * The API returns as soon as the stop request has been acknowledged, but the + * plugin might continue to run until in-progress operations complete and the + * plugin can be safely stopped. Use the get ILM status API to check whether ILM + * is running. * * @see Documentation diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/ExplainLifecycleRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/ExplainLifecycleRequest.java index 1f0884e87..00cc32243 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/ExplainLifecycleRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/ExplainLifecycleRequest.java @@ -57,10 +57,12 @@ // typedef: ilm.explain_lifecycle.Request /** - * Retrieves information about the index’s current lifecycle state, such as the - * currently executing phase, action, and step. Shows when the index entered - * each one, the definition of the running phase, and information about any - * failures. + * Explain the lifecycle state. Get the current lifecycle status for one or more + * indices. For data streams, the API retrieves the current lifecycle status for + * the stream's backing indices. + *

+ * The response indicates when the index entered each lifecycle state, provides + * the definition of the running phase, and information about any failures. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/GetIlmStatusRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/GetIlmStatusRequest.java index 656d6d1e7..b3640093b 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/GetIlmStatusRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/GetIlmStatusRequest.java @@ -50,7 +50,7 @@ // typedef: ilm.get_status.Request /** - * Retrieves the current index lifecycle management (ILM) status. + * Get the ILM status. Get the current index lifecycle management status. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/GetLifecycleRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/GetLifecycleRequest.java index 7affc2ee2..2ddc221c1 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/GetLifecycleRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/GetLifecycleRequest.java @@ -55,7 +55,7 @@ // typedef: ilm.get_lifecycle.Request /** - * Retrieves a lifecycle policy. + * Get lifecycle policies. 
* * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/MigrateToDataTiersRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/MigrateToDataTiersRequest.java index 9dc3bdf94..1ed5a4431 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/MigrateToDataTiersRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/MigrateToDataTiersRequest.java @@ -58,11 +58,25 @@ // typedef: ilm.migrate_to_data_tiers.Request /** - * Switches the indices, ILM policies, and legacy, composable and component - * templates from using custom node attributes and attribute-based allocation - * filters to using data tiers, and optionally deletes one legacy index - * template.+ Using node roles enables ILM to automatically move the indices - * between data tiers. + * Migrate to data tiers routing. Switch the indices, ILM policies, and legacy, + * composable, and component templates from using custom node attributes and + * attribute-based allocation filters to using data tiers. Optionally, delete + * one legacy index template. Using node roles enables ILM to automatically move + * the indices between data tiers. + *

+ * Migrating away from custom node attributes routing can be manually performed. + * This API provides an automated way of performing three out of the four manual + * steps listed in the migration guide: + *

    + *
+ * <ol>
+ * <li>Stop setting the custom hot attribute on new indices.</li>
+ * <li>Remove custom allocation settings from existing ILM policies.</li>
+ * <li>Replace custom allocation settings from existing indices with the
+ * corresponding tier preference.</li>
+ * </ol>
+ *

+ * ILM must be stopped before performing the migration. Use the stop ILM and get + * ILM status APIs to wait until the reported operation mode is + * STOPPED. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/MoveToStepRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/MoveToStepRequest.java index 2691be8ab..8dafaf75c 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/MoveToStepRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/MoveToStepRequest.java @@ -59,7 +59,26 @@ // typedef: ilm.move_to_step.Request /** - * Manually moves an index into the specified step and executes that step. + * Move to a lifecycle step. Manually move an index into a specific step in the + * lifecycle policy and run that step. + *

+ * WARNING: This operation can result in the loss of data. Manually moving an + * index into a specific step runs that step even if it has already been + * performed. This is a potentially destructive action and this should be + * considered an expert level API. + *

+ * You must specify both the current step and the step to be executed in the + * body of the request. The request will fail if the current step does not match + * the step currently running for the index. This is to prevent the index from + * being moved from an unexpected step into the next step. + *

+ * When specifying the target (next_step) to which the index will + * be moved, either the name or both the action and name fields are optional. If + * only the phase is specified, the index will move to the first step of the + * first action in the target phase. If the phase and action are specified, the + * index will move to the first step of the specified action in the specified + * phase. Only actions specified in the ILM policy are considered valid. An + * index cannot move to a step that is not part of its policy. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/PutLifecycleRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/PutLifecycleRequest.java index 5766c5f74..b9e4a2837 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/PutLifecycleRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/PutLifecycleRequest.java @@ -58,8 +58,11 @@ // typedef: ilm.put_lifecycle.Request /** - * Creates a lifecycle policy. If the specified policy exists, the policy is + * Create or update a lifecycle policy. If the specified policy exists, it is * replaced and the policy version is incremented. + *

+ * NOTE: Only the latest version of the policy is stored, you cannot revert to + * previous versions. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/RemovePolicyRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/RemovePolicyRequest.java index 317d01f54..6b102a157 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/RemovePolicyRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/RemovePolicyRequest.java @@ -56,7 +56,8 @@ // typedef: ilm.remove_policy.Request /** - * Removes the assigned lifecycle policy and stops managing the specified index + * Remove policies from an index. Remove the assigned lifecycle policies from an + * index or a data stream's backing indices. It also stops managing the indices. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/RetryRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/RetryRequest.java index 0c261f62a..37213aa6b 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/RetryRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/RetryRequest.java @@ -56,7 +56,10 @@ // typedef: ilm.retry.Request /** - * Retries executing the policy for an index that is in the ERROR step. + * Retry a policy. Retry running the lifecycle policy for an index that is in + * the ERROR step. The API sets the policy back to the step where the error + * occurred and runs the step. Use the explain lifecycle state API to determine + * whether an index is in the ERROR step. 
* * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/StartIlmRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/StartIlmRequest.java index 69488af3e..78af2f967 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/StartIlmRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/StartIlmRequest.java @@ -55,7 +55,10 @@ // typedef: ilm.start.Request /** - * Start the index lifecycle management (ILM) plugin. + * Start the ILM plugin. Start the index lifecycle management plugin if it is + * currently stopped. ILM is started automatically when the cluster is formed. + * Restarting ILM is necessary only when it has been stopped using the stop ILM + * API. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/StopIlmRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/StopIlmRequest.java index 91dbc8e13..7f3ebe90d 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/StopIlmRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ilm/StopIlmRequest.java @@ -55,8 +55,15 @@ // typedef: ilm.stop.Request /** - * Halts all lifecycle management operations and stops the index lifecycle - * management (ILM) plugin + * Stop the ILM plugin. Halt all lifecycle management operations and stop the + * index lifecycle management plugin. This is useful when you are performing + * maintenance on the cluster and need to prevent ILM from performing any + * actions on your indices. + *

+ * The API returns as soon as the stop request has been acknowledged, but the + * plugin might continue to run until in-progress operations complete and the + * plugin can be safely stopped. Use the get ILM status API to check whether ILM + * is running. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ClearCacheRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ClearCacheRequest.java index 86e774c2b..589c3e312 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ClearCacheRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ClearCacheRequest.java @@ -59,8 +59,8 @@ // typedef: indices.clear_cache.Request /** - * Clears the caches of one or more indices. For data streams, the API clears - * the caches of the stream’s backing indices. + * Clear the cache. Clear the cache of one or more indices. For data streams, + * the API clears the caches of the stream's backing indices. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/CloneIndexRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/CloneIndexRequest.java index 78c58675a..9e25c3ccb 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/CloneIndexRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/CloneIndexRequest.java @@ -60,8 +60,42 @@ // typedef: indices.clone.Request /** - * Clones an existing index. - * + * Clone an index. Clone an existing index into a new index. Each original + * primary shard is cloned into a new primary shard in the new index. + *

+ * IMPORTANT: Elasticsearch does not apply index templates to the resulting + * index. The API also does not copy index metadata from the original index. + * Index metadata includes aliases, index lifecycle management phase + * definitions, and cross-cluster replication (CCR) follower information. For + * example, if you clone a CCR follower index, the resulting clone will not be a + * follower index. + *

+ * The clone API copies most index settings from the source index to the + * resulting index, with the exception of index.number_of_replicas + * and index.auto_expand_replicas. To set the number of replicas in + * the resulting index, configure these settings in the clone request. + *

+ * Cloning works as follows: + *

    + *
  • First, it creates a new target index with the same definition as the + * source index.
  • + *
  • Then it hard-links segments from the source index into the target index. + * If the file system does not support hard-linking, all segments are copied + * into the new index, which is a much more time-consuming process.
  • + *
  • Finally, it recovers the target index as though it were a closed index + * which had just been re-opened.
  • + *
+ *

+ * IMPORTANT: Indices can only be cloned if they meet the following + * requirements: + *

    + *
  • The target index must not exist.
  • + *
  • The source index must have the same number of primary shards as the + * target index.
  • + *
  • The node handling the clone process must have sufficient free disk space + * to accommodate a second copy of the existing index.
  • + *
+ * * @see API * specification */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/CloseIndexRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/CloseIndexRequest.java index cf7fc3538..ae7f21fc6 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/CloseIndexRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/CloseIndexRequest.java @@ -61,7 +61,33 @@ // typedef: indices.close.Request /** - * Closes an index. + * Close an index. A closed index is blocked for read or write operations and + * does not allow all operations that opened indices allow. It is not possible + * to index documents or to search for documents in a closed index. Closed + * indices do not have to maintain internal data structures for indexing or + * searching documents, which results in a smaller overhead on the cluster. + *

+ * When opening or closing an index, the master node is responsible for + * restarting the index shards to reflect the new state of the index. The shards + * will then go through the normal recovery process. The data of opened and + * closed indices is automatically replicated by the cluster to ensure that + * enough shard copies are safely kept around at all times. + *

+ * You can open and close multiple indices. An error is thrown if the request + * explicitly refers to a missing index. This behavior can be turned off using + * the ignore_unavailable=true parameter. + *

+ * By default, you must explicitly name the indices you are opening or closing. + * To open or close indices with _all, *, or other + * wildcard expressions, change + * the action.destructive_requires_name setting to + * false. This setting can also be changed with the cluster update + * settings API. + *

+ * Closed indices consume a significant amount of disk-space which can cause + * problems in managed environments. Closing indices can be turned off with the + * cluster settings API by setting cluster.indices.close.enable to + * false. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DiskUsageRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DiskUsageRequest.java index eebe6a7ed..3c026fcfa 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DiskUsageRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DiskUsageRequest.java @@ -59,7 +59,10 @@ // typedef: indices.disk_usage.Request /** - * Analyzes the disk usage of each field of an index or data stream. + * Analyze the index disk usage. Analyze the disk usage of each field of an + * index or data stream. This API might not support indices created in previous + * Elasticsearch versions. The result of a small index can be inaccurate as some + * parts of an index might not be analyzed by the API. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DownsampleRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DownsampleRequest.java index 3434cb100..09d6a128d 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DownsampleRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/DownsampleRequest.java @@ -59,10 +59,17 @@ // typedef: indices.downsample.Request /** - * Aggregates a time series (TSDS) index and stores pre-computed statistical - * summaries (min, max, sum, - * value_count and avg) for each metric field grouped - * by a configured time interval. + * Downsample an index. 
Aggregate a time series (TSDS) index and store + * pre-computed statistical summaries (min, max, + * sum, value_count and avg) for each + * metric field grouped by a configured time interval. For example, a TSDS index + * that contains metrics sampled every 10 seconds can be downsampled to an + * hourly index. All documents within an hour interval are summarized and stored + * as a single document in the downsample index. + *

+ * NOTE: Only indices in a time series data stream are supported. Neither field + * nor document level security can be defined on the source index. The source + * index must be read only (index.blocks.write: true). * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ElasticsearchIndicesAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ElasticsearchIndicesAsyncClient.java index b651cf821..29577b4e1 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ElasticsearchIndicesAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ElasticsearchIndicesAsyncClient.java @@ -158,8 +158,8 @@ public CompletableFuture analyze() { // ----- Endpoint: indices.clear_cache /** - * Clears the caches of one or more indices. For data streams, the API clears - * the caches of the stream’s backing indices. + * Clear the cache. Clear the cache of one or more indices. For data streams, + * the API clears the caches of the stream's backing indices. * * @see Documentation @@ -174,8 +174,8 @@ public CompletableFuture clearCache(ClearCacheRequest reques } /** - * Clears the caches of one or more indices. For data streams, the API clears - * the caches of the stream’s backing indices. + * Clear the cache. Clear the cache of one or more indices. For data streams, + * the API clears the caches of the stream's backing indices. * * @param fn * a function that initializes a builder to create the @@ -191,8 +191,8 @@ public final CompletableFuture clearCache( } /** - * Clears the caches of one or more indices. For data streams, the API clears - * the caches of the stream’s backing indices. + * Clear the cache. Clear the cache of one or more indices. For data streams, + * the API clears the caches of the stream's backing indices. 
* * @see Documentation @@ -207,8 +207,42 @@ public CompletableFuture clearCache() { // ----- Endpoint: indices.clone /** - * Clones an existing index. - * + * Clone an index. Clone an existing index into a new index. Each original + * primary shard is cloned into a new primary shard in the new index. + *

+ * IMPORTANT: Elasticsearch does not apply index templates to the resulting + * index. The API also does not copy index metadata from the original index. + * Index metadata includes aliases, index lifecycle management phase + * definitions, and cross-cluster replication (CCR) follower information. For + * example, if you clone a CCR follower index, the resulting clone will not be a + * follower index. + *

+ * The clone API copies most index settings from the source index to the + * resulting index, with the exception of index.number_of_replicas + * and index.auto_expand_replicas. To set the number of replicas in + * the resulting index, configure these settings in the clone request. + *

+ * Cloning works as follows: + *

+ *

+ * IMPORTANT: Indices can only be cloned if they meet the following + * requirements: + *

    + *
  • The target index must not exist.
  • + *
  • The source index must have the same number of primary shards as the + * target index.
  • + *
  • The node handling the clone process must have sufficient free disk space + * to accommodate a second copy of the existing index.
  • + *
+ * * @see
Documentation * on elastic.co @@ -222,8 +256,42 @@ public CompletableFuture clone(CloneIndexRequest request) { } /** - * Clones an existing index. - * + * Clone an index. Clone an existing index into a new index. Each original + * primary shard is cloned into a new primary shard in the new index. + *

+ * IMPORTANT: Elasticsearch does not apply index templates to the resulting + * index. The API also does not copy index metadata from the original index. + * Index metadata includes aliases, index lifecycle management phase + * definitions, and cross-cluster replication (CCR) follower information. For + * example, if you clone a CCR follower index, the resulting clone will not be a + * follower index. + *

+ * The clone API copies most index settings from the source index to the + * resulting index, with the exception of index.number_of_replicas + * and index.auto_expand_replicas. To set the number of replicas in + * the resulting index, configure these settings in the clone request. + *

+ * Cloning works as follows: + *

    + *
  • First, it creates a new target index with the same definition as the + * source index.
  • + *
  • Then it hard-links segments from the source index into the target index. + * If the file system does not support hard-linking, all segments are copied + * into the new index, which is a much more time-consuming process.
  • + *
  • Finally, it recovers the target index as though it were a closed index + * which had just been re-opened.
  • + *
+ *

+ * IMPORTANT: Indices can only be cloned if they meet the following + * requirements: + *

    + *
  • The target index must not exist.
  • + *
  • The source index must have the same number of primary shards as the + * target index.
  • + *
  • The node handling the clone process must have sufficient free disk space + * to accommodate a second copy of the existing index.
  • + *
+ * * @param fn * a function that initializes a builder to create the * {@link CloneIndexRequest} @@ -240,7 +308,33 @@ public final CompletableFuture clone( // ----- Endpoint: indices.close /** - * Closes an index. + * Close an index. A closed index is blocked for read or write operations and + * does not allow all operations that opened indices allow. It is not possible + * to index documents or to search for documents in a closed index. Closed + * indices do not have to maintain internal data structures for indexing or + * searching documents, which results in a smaller overhead on the cluster. + *

+ * When opening or closing an index, the master node is responsible for + * restarting the index shards to reflect the new state of the index. The shards + * will then go through the normal recovery process. The data of opened and + * closed indices is automatically replicated by the cluster to ensure that + * enough shard copies are safely kept around at all times. + *

+ * You can open and close multiple indices. An error is thrown if the request + * explicitly refers to a missing index. This behavior can be turned off using + * the ignore_unavailable=true parameter. + *

+ * By default, you must explicitly name the indices you are opening or closing. + * To open or close indices with _all, *, or other + * wildcard expressions, change + * the action.destructive_requires_name setting to + * false. This setting can also be changed with the cluster update + * settings API. + *

+ * Closed indices consume a significant amount of disk-space which can cause + * problems in managed environments. Closing indices can be turned off with the + * cluster settings API by setting cluster.indices.close.enable to + * false. * * @see Documentation @@ -255,7 +349,33 @@ public CompletableFuture close(CloseIndexRequest request) { } /** - * Closes an index. + * Close an index. A closed index is blocked for read or write operations and + * does not allow all operations that opened indices allow. It is not possible + * to index documents or to search for documents in a closed index. Closed + * indices do not have to maintain internal data structures for indexing or + * searching documents, which results in a smaller overhead on the cluster. + *

+ * When opening or closing an index, the master node is responsible for + * restarting the index shards to reflect the new state of the index. The shards + * will then go through the normal recovery process. The data of opened and + * closed indices is automatically replicated by the cluster to ensure that + * enough shard copies are safely kept around at all times. + *

+ * You can open and close multiple indices. An error is thrown if the request + * explicitly refers to a missing index. This behavior can be turned off using + * the ignore_unavailable=true parameter. + *

+ * By default, you must explicitly name the indices you are opening or closing. + * To open or close indices with _all, *, or other + * wildcard expressions, change + * the action.destructive_requires_name setting to + * false. This setting can also be changed with the cluster update + * settings API. + *

+ * Closed indices consume a significant amount of disk-space which can cause + * problems in managed environments. Closing indices can be turned off with the + * cluster settings API by setting cluster.indices.close.enable to + * false. * * @param fn * a function that initializes a builder to create the @@ -595,7 +715,10 @@ public final CompletableFuture deleteTemplate( // ----- Endpoint: indices.disk_usage /** - * Analyzes the disk usage of each field of an index or data stream. + * Analyze the index disk usage. Analyze the disk usage of each field of an + * index or data stream. This API might not support indices created in previous + * Elasticsearch versions. The result of a small index can be inaccurate as some + * parts of an index might not be analyzed by the API. * * @see Documentation @@ -610,7 +733,10 @@ public CompletableFuture diskUsage(DiskUsageRequest request) } /** - * Analyzes the disk usage of each field of an index or data stream. + * Analyze the index disk usage. Analyze the disk usage of each field of an + * index or data stream. This API might not support indices created in previous + * Elasticsearch versions. The result of a small index can be inaccurate as some + * parts of an index might not be analyzed by the API. * * @param fn * a function that initializes a builder to create the @@ -628,10 +754,17 @@ public final CompletableFuture diskUsage( // ----- Endpoint: indices.downsample /** - * Aggregates a time series (TSDS) index and stores pre-computed statistical - * summaries (min, max, sum, - * value_count and avg) for each metric field grouped - * by a configured time interval. + * Downsample an index. Aggregate a time series (TSDS) index and store + * pre-computed statistical summaries (min, max, + * sum, value_count and avg) for each + * metric field grouped by a configured time interval. For example, a TSDS index + * that contains metrics sampled every 10 seconds can be downsampled to an + * hourly index. 
All documents within an hour interval are summarized and stored + * as a single document in the downsample index. + *

+ * NOTE: Only indices in a time series data stream are supported. Neither field + * nor document level security can be defined on the source index. The source + * index must be read only (index.blocks.write: true). * * @see Documentation @@ -646,10 +779,17 @@ public CompletableFuture downsample(DownsampleRequest reques } /** - * Aggregates a time series (TSDS) index and stores pre-computed statistical - * summaries (min, max, sum, - * value_count and avg) for each metric field grouped - * by a configured time interval. + * Downsample an index. Aggregate a time series (TSDS) index and store + * pre-computed statistical summaries (min, max, + * sum, value_count and avg) for each + * metric field grouped by a configured time interval. For example, a TSDS index + * that contains metrics sampled every 10 seconds can be downsampled to an + * hourly index. All documents within an hour interval are summarized and stored + * as a single document in the downsample index. + *

+ * NOTE: Only indices in a time series data stream are supported. Neither field + * nor document level security can be defined on the source index. The source + * index must be read only (index.blocks.write: true). * * @param fn * a function that initializes a builder to create the @@ -803,10 +943,10 @@ public final CompletableFuture existsTemplate( // ----- Endpoint: indices.explain_data_lifecycle /** - * Get the status for a data stream lifecycle. Retrieves information about an - * index or data stream’s current data stream lifecycle status, such as time - * since index creation, time since rollover, the lifecycle configuration - * managing the index, or any errors encountered during lifecycle execution. + * Get the status for a data stream lifecycle. Get information about an index or + * data stream's current data stream lifecycle status, such as time since index + * creation, time since rollover, the lifecycle configuration managing the + * index, or any errors encountered during lifecycle execution. * * @see Documentation @@ -821,10 +961,10 @@ public CompletableFuture explainDataLifecycle(Expl } /** - * Get the status for a data stream lifecycle. Retrieves information about an - * index or data stream’s current data stream lifecycle status, such as time - * since index creation, time since rollover, the lifecycle configuration - * managing the index, or any errors encountered during lifecycle execution. + * Get the status for a data stream lifecycle. Get information about an index or + * data stream's current data stream lifecycle status, such as time since index + * creation, time since rollover, the lifecycle configuration managing the + * index, or any errors encountered during lifecycle execution. * * @param fn * a function that initializes a builder to create the @@ -842,7 +982,11 @@ public final CompletableFuture explainDataLifecycl // ----- Endpoint: indices.field_usage_stats /** - * Returns field usage information for each shard and field of an index. 
+ * Get field usage stats. Get field usage information for each shard and field + * of an index. Field usage statistics are automatically captured when queries + * are running on a cluster. A shard-level search request that accesses a given + * field, even if multiple times during that request, is counted as a single + * use. * * @see Documentation @@ -857,7 +1001,11 @@ public CompletableFuture fieldUsageStats(FieldUsageStat } /** - * Returns field usage information for each shard and field of an index. + * Get field usage stats. Get field usage information for each shard and field + * of an index. Field usage statistics are automatically captured when queries + * are running on a cluster. A shard-level search request that accesses a given + * field, even if multiple times during that request, is counted as a single + * use. * * @param fn * a function that initializes a builder to create the @@ -875,7 +1023,26 @@ public final CompletableFuture fieldUsageStats( // ----- Endpoint: indices.flush /** - * Flushes one or more data streams or indices. + * Flush data streams or indices. Flushing a data stream or index is the process + * of making sure that any data that is currently only stored in the transaction + * log is also permanently stored in the Lucene index. When restarting, + * Elasticsearch replays any unflushed operations from the transaction log into + * the Lucene index to bring it back into the state that it was in before the + * restart. Elasticsearch automatically triggers flushes as needed, using + * heuristics that trade off the size of the unflushed transaction log against + * the cost of performing each flush. + *

+ * After each operation has been flushed it is permanently stored in the Lucene + * index. This may mean that there is no need to maintain an additional copy of + * it in the transaction log. The transaction log is made up of multiple files, + * called generations, and Elasticsearch will delete any generation files when + * they are no longer needed, freeing up disk space. + *

+ * It is also possible to trigger a flush on one or more indices using the flush + * API, although it is rare for users to need to call this API directly. If you + * call the flush API after indexing some documents then a successful response + * indicates that Elasticsearch has flushed all the documents that were indexed + * before the flush API was called. * * @see Documentation @@ -890,7 +1057,26 @@ public CompletableFuture flush(FlushRequest request) { } /** - * Flushes one or more data streams or indices. + * Flush data streams or indices. Flushing a data stream or index is the process + * of making sure that any data that is currently only stored in the transaction + * log is also permanently stored in the Lucene index. When restarting, + * Elasticsearch replays any unflushed operations from the transaction log into + * the Lucene index to bring it back into the state that it was in before the + * restart. Elasticsearch automatically triggers flushes as needed, using + * heuristics that trade off the size of the unflushed transaction log against + * the cost of performing each flush. + *

+ * After each operation has been flushed it is permanently stored in the Lucene + * index. This may mean that there is no need to maintain an additional copy of + * it in the transaction log. The transaction log is made up of multiple files, + * called generations, and Elasticsearch will delete any generation files when + * they are no longer needed, freeing up disk space. + *

+ * It is also possible to trigger a flush on one or more indices using the flush + * API, although it is rare for users to need to call this API directly. If you + * call the flush API after indexing some documents then a successful response + * indicates that Elasticsearch has flushed all the documents that were indexed + * before the flush API was called. * * @param fn * a function that initializes a builder to create the @@ -906,7 +1092,26 @@ public final CompletableFuture flush( } /** - * Flushes one or more data streams or indices. + * Flush data streams or indices. Flushing a data stream or index is the process + * of making sure that any data that is currently only stored in the transaction + * log is also permanently stored in the Lucene index. When restarting, + * Elasticsearch replays any unflushed operations from the transaction log into + * the Lucene index to bring it back into the state that it was in before the + * restart. Elasticsearch automatically triggers flushes as needed, using + * heuristics that trade off the size of the unflushed transaction log against + * the cost of performing each flush. + *

+ * After each operation has been flushed it is permanently stored in the Lucene + * index. This may mean that there is no need to maintain an additional copy of + * it in the transaction log. The transaction log is made up of multiple files, + * called generations, and Elasticsearch will delete any generation files when + * they are no longer needed, freeing up disk space. + *

+ * It is also possible to trigger a flush on one or more indices using the flush + * API, although it is rare for users to need to call this API directly. If you + * call the flush API after indexing some documents then a successful response + * indicates that Elasticsearch has flushed all the documents that were indexed + * before the flush API was called. * * @see Documentation @@ -921,7 +1126,25 @@ public CompletableFuture flush() { // ----- Endpoint: indices.forcemerge /** - * Performs the force merge operation on one or more indices. + * Force a merge. Perform the force merge operation on the shards of one or more + * indices. For data streams, the API forces a merge on the shards of the + * stream's backing indices. + *

+ * Merging reduces the number of segments in each shard by merging some of them + * together and also frees up the space used by deleted documents. Merging + * normally happens automatically, but sometimes it is useful to trigger a merge + * manually. + *

+ * WARNING: We recommend force merging only a read-only index (meaning the index + * is no longer receiving writes). When documents are updated or deleted, the + * old version is not immediately removed but instead soft-deleted and marked + * with a "tombstone". These soft-deleted documents are automatically + * cleaned up during regular segment merges. But force merge can cause very + * large (greater than 5 GB) segments to be produced, which are not eligible for + * regular merges. So the number of soft-deleted documents can then grow + * rapidly, resulting in higher disk usage and worse search performance. If you + * regularly force merge an index receiving writes, this can also make snapshots + * more expensive, since the new documents can't be backed up incrementally. * * @see Documentation @@ -936,7 +1159,25 @@ public CompletableFuture forcemerge(ForcemergeRequest reques } /** - * Performs the force merge operation on one or more indices. + * Force a merge. Perform the force merge operation on the shards of one or more + * indices. For data streams, the API forces a merge on the shards of the + * stream's backing indices. + *

+ * Merging reduces the number of segments in each shard by merging some of them + * together and also frees up the space used by deleted documents. Merging + * normally happens automatically, but sometimes it is useful to trigger a merge + * manually. + *

+ * WARNING: We recommend force merging only a read-only index (meaning the index + * is no longer receiving writes). When documents are updated or deleted, the + * old version is not immediately removed but instead soft-deleted and marked + * with a "tombstone". These soft-deleted documents are automatically + * cleaned up during regular segment merges. But force merge can cause very + * large (greater than 5 GB) segments to be produced, which are not eligible for + * regular merges. So the number of soft-deleted documents can then grow + * rapidly, resulting in higher disk usage and worse search performance. If you + * regularly force merge an index receiving writes, this can also make snapshots + * more expensive, since the new documents can't be backed up incrementally. * * @param fn * a function that initializes a builder to create the @@ -952,7 +1193,25 @@ public final CompletableFuture forcemerge( } /** - * Performs the force merge operation on one or more indices. + * Force a merge. Perform the force merge operation on the shards of one or more + * indices. For data streams, the API forces a merge on the shards of the + * stream's backing indices. + *

+ * Merging reduces the number of segments in each shard by merging some of them + * together and also frees up the space used by deleted documents. Merging + * normally happens automatically, but sometimes it is useful to trigger a merge + * manually. + *

+ * WARNING: We recommend force merging only a read-only index (meaning the index + * is no longer receiving writes). When documents are updated or deleted, the + * old version is not immediately removed but instead soft-deleted and marked + * with a "tombstone". These soft-deleted documents are automatically + * cleaned up during regular segment merges. But force merge can cause very + * large (greater than 5 GB) segments to be produced, which are not eligible for + * regular merges. So the number of soft-deleted documents can then grow + * rapidly, resulting in higher disk usage and worse search performance. If you + * regularly force merge an index receiving writes, this can also make snapshots + * more expensive, since the new documents can't be backed up incrementally. * * @see Documentation @@ -1480,8 +1739,22 @@ public final CompletableFuture open(Function + * With CCR auto following, a data stream from a remote cluster can be + * replicated to the local cluster. These data streams can't be rolled over in + * the local cluster. These replicated data streams roll over only if the + * upstream data stream rolls over. In the event that the remote cluster is no + * longer available, the data stream in the local cluster can be promoted to a + * regular data stream, which allows these data streams to be rolled over in the + * local cluster. + *

+ * NOTE: When promoting a data stream, ensure the local cluster has a data + * stream enabled index template that matches the data stream. If this is + * missing, the data stream will not be able to roll over until a matching index + * template is created. This will affect the lifecycle management of the data + * stream and interfere with the data stream size and retention. * * @see Documentation @@ -1496,8 +1769,22 @@ public CompletableFuture promoteDataStream(PromoteDat } /** - * Promotes a data stream from a replicated data stream managed by CCR to a - * regular data stream + * Promote a data stream. Promote a data stream from a replicated data stream + * managed by cross-cluster replication (CCR) to a regular data stream. + *

+ * With CCR auto following, a data stream from a remote cluster can be + * replicated to the local cluster. These data streams can't be rolled over in + * the local cluster. These replicated data streams roll over only if the + * upstream data stream rolls over. In the event that the remote cluster is no + * longer available, the data stream in the local cluster can be promoted to a + * regular data stream, which allows these data streams to be rolled over in the + * local cluster. + *

+ * NOTE: When promoting a data stream, ensure the local cluster has a data + * stream enabled index template that matches the data stream. If this is + * missing, the data stream will not be able to roll over until a matching index + * template is created. This will affect the lifecycle management of the data + * stream and interfere with the data stream size and retention. * * @param fn * a function that initializes a builder to create the @@ -1708,6 +1995,21 @@ public CompletableFuture putSettings() { /** * Create or update an index template. Index templates define settings, * mappings, and aliases that can be applied automatically to new indices. + * Elasticsearch applies templates to new indices based on an index pattern that + * matches the index name. + *

+ * IMPORTANT: This documentation is about legacy index templates, which are + * deprecated and will be replaced by the composable templates introduced in + * Elasticsearch 7.8. + *

+ * Composable templates always take precedence over legacy templates. If no + * composable template matches a new index, matching legacy templates are + * applied according to their order. + *

+ * Index templates are only applied during index creation. Changes to index + * templates do not affect existing indices. Settings and mappings specified in + * create index API requests override any settings or mappings specified in an + * index template. * * @see Documentation @@ -1724,6 +2026,21 @@ public CompletableFuture putTemplate(PutTemplateRequest req /** * Create or update an index template. Index templates define settings, * mappings, and aliases that can be applied automatically to new indices. + * Elasticsearch applies templates to new indices based on an index pattern that + * matches the index name. + *

+ * IMPORTANT: This documentation is about legacy index templates, which are + * deprecated and will be replaced by the composable templates introduced in + * Elasticsearch 7.8. + *

+ * Composable templates always take precedence over legacy templates. If no + * composable template matches a new index, matching legacy templates are + * applied according to their order. + *

+ * Index templates are only applied during index creation. Changes to index + * templates do not affect existing indices. Settings and mappings specified in + * create index API requests override any settings or mappings specified in an + * index template. * * @param fn * a function that initializes a builder to create the @@ -1741,9 +2058,37 @@ public final CompletableFuture putTemplate( // ----- Endpoint: indices.recovery /** - * Returns information about ongoing and completed shard recoveries for one or - * more indices. For data streams, the API returns information for the stream’s - * backing indices. + * Get index recovery information. Get information about ongoing and completed + * shard recoveries for one or more indices. For data streams, the API returns + * information for the stream's backing indices. + *

+ * Shard recovery is the process of initializing a shard copy, such as restoring + * a primary shard from a snapshot or creating a replica shard from a primary + * shard. When a shard recovery completes, the recovered shard is available for + * search and indexing. + *

+ * Recovery automatically occurs during the following processes: + *

+ *

+ * You can determine the cause of a shard recovery using the recovery or cat + * recovery APIs. + *

+ * The index recovery API reports information about completed recoveries only + * for shard copies that currently exist in the cluster. It only reports the + * last recovery for each shard copy and does not report historical information + * about earlier recoveries, nor does it report information about the recoveries + * of shard copies that no longer exist. This means that if a shard copy + * completes a recovery and then Elasticsearch relocates it onto a different + * node then the information about the original recovery will not be shown in + * the recovery API. * * @see Documentation @@ -1758,9 +2103,37 @@ public CompletableFuture recovery(RecoveryRequest request) { } /** - * Returns information about ongoing and completed shard recoveries for one or - * more indices. For data streams, the API returns information for the stream’s - * backing indices. + * Get index recovery information. Get information about ongoing and completed + * shard recoveries for one or more indices. For data streams, the API returns + * information for the stream's backing indices. + *

+ * Shard recovery is the process of initializing a shard copy, such as restoring + * a primary shard from a snapshot or creating a replica shard from a primary + * shard. When a shard recovery completes, the recovered shard is available for + * search and indexing. + *

+ * Recovery automatically occurs during the following processes: + *

+ *

+ * You can determine the cause of a shard recovery using the recovery or cat + * recovery APIs. + *

+ * The index recovery API reports information about completed recoveries only + * for shard copies that currently exist in the cluster. It only reports the + * last recovery for each shard copy and does not report historical information + * about earlier recoveries, nor does it report information about the recoveries + * of shard copies that no longer exist. This means that if a shard copy + * completes a recovery and then Elasticsearch relocates it onto a different + * node then the information about the original recovery will not be shown in + * the recovery API. * * @param fn * a function that initializes a builder to create the @@ -1776,9 +2149,37 @@ public final CompletableFuture recovery( } /** - * Returns information about ongoing and completed shard recoveries for one or - * more indices. For data streams, the API returns information for the stream’s - * backing indices. + * Get index recovery information. Get information about ongoing and completed + * shard recoveries for one or more indices. For data streams, the API returns + * information for the stream's backing indices. + *

+ * Shard recovery is the process of initializing a shard copy, such as restoring + * a primary shard from a snapshot or creating a replica shard from a primary + * shard. When a shard recovery completes, the recovered shard is available for + * search and indexing. + *

+ * Recovery automatically occurs during the following processes: + *

    + *
+ * <ul>
+ * <li>When creating an index for the first time.</li>
+ * <li>When a node rejoins the cluster and starts up any missing primary shard
+ * copies using the data that it holds in its data path.</li>
+ * <li>Creation of new replica shard copies from the primary.</li>
+ * <li>Relocation of a shard copy to a different node in the same cluster.</li>
+ * <li>A snapshot restore operation.</li>
+ * <li>A clone, shrink, or split operation.</li>
+ * </ul>
+ *

+ * You can determine the cause of a shard recovery using the recovery or cat + * recovery APIs. + *

+ * The index recovery API reports information about completed recoveries only + * for shard copies that currently exist in the cluster. It only reports the + * last recovery for each shard copy and does not report historical information + * about earlier recoveries, nor does it report information about the recoveries + * of shard copies that no longer exist. This means that if a shard copy + * completes a recovery and then Elasticsearch relocates it onto a different + * node then the information about the original recovery will not be shown in + * the recovery API. * * @see Documentation @@ -1845,7 +2246,28 @@ public CompletableFuture refresh() { // ----- Endpoint: indices.reload_search_analyzers /** - * Reloads an index's search analyzers and their resources. + * Reload search analyzers. Reload an index's search analyzers and their + * resources. For data streams, the API reloads search analyzers and resources + * for the stream's backing indices. + *

+ * IMPORTANT: After reloading the search analyzers you should clear the request + * cache to make sure it doesn't contain responses derived from the previous + * versions of the analyzer. + *

+ * You can use the reload search analyzers API to pick up changes to synonym + * files used in the synonym_graph or synonym token + * filter of a search analyzer. To be eligible, the token filter must have an + * updateable flag of true and only be used in search + * analyzers. + *

+ * NOTE: This API does not perform a reload for each shard of an index. Instead, + * it performs a reload for each node containing index shards. As a result, the + * total shard count returned by the API can differ from the number of index + * shards. Because reloading affects every node with an index shard, it is + * important to update the synonym file on every data node in the + * cluster--including nodes that don't contain a shard replica--before using + * this API. This ensures the synonym file is updated everywhere in the cluster + * in case shards are relocated in the future. * * @see Documentation @@ -1861,7 +2283,28 @@ public CompletableFuture reloadSearchAnalyzers( } /** - * Reloads an index's search analyzers and their resources. + * Reload search analyzers. Reload an index's search analyzers and their + * resources. For data streams, the API reloads search analyzers and resources + * for the stream's backing indices. + *

+ * IMPORTANT: After reloading the search analyzers you should clear the request + * cache to make sure it doesn't contain responses derived from the previous + * versions of the analyzer. + *

+ * You can use the reload search analyzers API to pick up changes to synonym + * files used in the synonym_graph or synonym token + * filter of a search analyzer. To be eligible, the token filter must have an + * updateable flag of true and only be used in search + * analyzers. + *

+ * NOTE: This API does not perform a reload for each shard of an index. Instead, + * it performs a reload for each node containing index shards. As a result, the + * total shard count returned by the API can differ from the number of index + * shards. Because reloading affects every node with an index shard, it is + * important to update the synonym file on every data node in the + * cluster--including nodes that don't contain a shard replica--before using + * this API. This ensures the synonym file is updated everywhere in the cluster + * in case shards are relocated in the future. * * @param fn * a function that initializes a builder to create the @@ -1879,10 +2322,33 @@ public final CompletableFuture reloadSearchAnalyz // ----- Endpoint: indices.resolve_cluster /** - * Resolves the specified index expressions to return information about each - * cluster, including the local cluster, if included. Multiple patterns and - * remote clusters are supported. - * + * Resolve the cluster. Resolve the specified index expressions to return + * information about each cluster, including the local cluster, if included. + * Multiple patterns and remote clusters are supported. + *

+ * This endpoint is useful before doing a cross-cluster search in order to + * determine which remote clusters should be included in a search. + *

+ * You use the same index expression with this endpoint as you would for + * cross-cluster search. Index and cluster exclusions are also supported with + * this endpoint. + *

+ * For each cluster in the index expression, information is returned about: + *

+ * * @see Documentation * on elastic.co @@ -1896,10 +2362,33 @@ public CompletableFuture resolveCluster(ResolveClusterRe } /** - * Resolves the specified index expressions to return information about each - * cluster, including the local cluster, if included. Multiple patterns and - * remote clusters are supported. - * + * Resolve the cluster. Resolve the specified index expressions to return + * information about each cluster, including the local cluster, if included. + * Multiple patterns and remote clusters are supported. + *

+ * This endpoint is useful before doing a cross-cluster search in order to + * determine which remote clusters should be included in a search. + *

+ * You use the same index expression with this endpoint as you would for + * cross-cluster search. Index and cluster exclusions are also supported with + * this endpoint. + *

+ * For each cluster in the index expression, information is returned about: + *

    + *
+ * <ul>
+ * <li>Whether the querying (&quot;local&quot;) cluster is currently connected
+ * to each remote cluster in the index expression scope.</li>
+ * <li>Whether each remote cluster is configured with
+ * <code>skip_unavailable</code> as <code>true</code> or
+ * <code>false</code>.</li>
+ * <li>Whether there are any indices, aliases, or data streams on that cluster
+ * that match the index expression.</li>
+ * <li>Whether the search is likely to have errors returned when you do the
+ * cross-cluster search (including any authorization errors if you do not have
+ * permission to query the index).</li>
+ * <li>Cluster version information, including the Elasticsearch server
+ * version.</li>
+ * </ul>
+ * * @param fn * a function that initializes a builder to create the * {@link ResolveClusterRequest} @@ -1988,8 +2477,9 @@ public final CompletableFuture rollover( // ----- Endpoint: indices.segments /** - * Returns low-level information about the Lucene segments in index shards. For - * data streams, the API returns information about the stream’s backing indices. + * Get index segments. Get low-level information about the Lucene segments in + * index shards. For data streams, the API returns information about the + * stream's backing indices. * * @see Documentation @@ -2004,8 +2494,9 @@ public CompletableFuture segments(SegmentsRequest request) { } /** - * Returns low-level information about the Lucene segments in index shards. For - * data streams, the API returns information about the stream’s backing indices. + * Get index segments. Get low-level information about the Lucene segments in + * index shards. For data streams, the API returns information about the + * stream's backing indices. * * @param fn * a function that initializes a builder to create the @@ -2021,8 +2512,9 @@ public final CompletableFuture segments( } /** - * Returns low-level information about the Lucene segments in index shards. For - * data streams, the API returns information about the stream’s backing indices. + * Get index segments. Get low-level information about the Lucene segments in + * index shards. For data streams, the API returns information about the + * stream's backing indices. * * @see Documentation @@ -2037,9 +2529,21 @@ public CompletableFuture segments() { // ----- Endpoint: indices.shard_stores /** - * Retrieves store information about replica shards in one or more indices. For - * data streams, the API retrieves store information for the stream’s backing - * indices. + * Get index shard stores. Get store information about replica shards in one or + * more indices. For data streams, the API retrieves store information for the + * stream's backing indices. + *

+ * The index shard stores API returns the following information: + *

    + *
+ * <ul>
+ * <li>The node on which each replica shard exists.</li>
+ * <li>The allocation ID for each replica shard.</li>
+ * <li>A unique ID for each replica shard.</li>
+ * <li>Any errors encountered while opening the shard index or from an earlier
+ * failure.</li>
+ * </ul>
+ *

+ * By default, the API returns store information only for primary shards that + * are unassigned or have one or more unassigned replica shards. * * @see Documentation @@ -2054,9 +2558,21 @@ public CompletableFuture shardStores(ShardStoresRequest req } /** - * Retrieves store information about replica shards in one or more indices. For - * data streams, the API retrieves store information for the stream’s backing - * indices. + * Get index shard stores. Get store information about replica shards in one or + * more indices. For data streams, the API retrieves store information for the + * stream's backing indices. + *

+ * The index shard stores API returns the following information: + *

+ *

+ * By default, the API returns store information only for primary shards that + * are unassigned or have one or more unassigned replica shards. * * @param fn * a function that initializes a builder to create the @@ -2072,9 +2588,21 @@ public final CompletableFuture shardStores( } /** - * Retrieves store information about replica shards in one or more indices. For - * data streams, the API retrieves store information for the stream’s backing - * indices. + * Get index shard stores. Get store information about replica shards in one or + * more indices. For data streams, the API retrieves store information for the + * stream's backing indices. + *

+ * The index shard stores API returns the following information: + *

    + *
+ * <ul>
+ * <li>The node on which each replica shard exists.</li>
+ * <li>The allocation ID for each replica shard.</li>
+ * <li>A unique ID for each replica shard.</li>
+ * <li>Any errors encountered while opening the shard index or from an earlier
+ * failure.</li>
+ * </ul>
+ *

+ * By default, the API returns store information only for primary shards that + * are unassigned or have one or more unassigned replica shards. * * @see Documentation @@ -2089,8 +2617,62 @@ public CompletableFuture shardStores() { // ----- Endpoint: indices.shrink /** - * Shrinks an existing index into a new index with fewer primary shards. - * + * Shrink an index. Shrink an index into a new index with fewer primary shards. + *

+ * Before you can shrink an index: + *

+ *

+ * To make shard allocation easier, we recommend you also remove the index's + * replica shards. You can later re-add replica shards as part of the shrink + * operation. + *

+ * The requested number of primary shards in the target index must be a factor
+ * of the number of shards in the source index. For example an index with 8
+ * primary shards can be shrunk into 4, 2 or 1 primary shards or an index with
+ * 15 primary shards can be shrunk into 5, 3 or 1. If the number of shards in
+ * the index is a prime number it can only be shrunk into a single primary
+ * shard. Before shrinking, a (primary or replica) copy of every shard in the
+ * index must be present on the same node.
+ * <p>

+ * The current write index on a data stream cannot be shrunk. In order to shrink + * the current write index, the data stream must first be rolled over so that a + * new write index is created and then the previous write index can be shrunk. + *

+ * A shrink operation: + *

    + *
+ * <ul>
+ * <li>Creates a new target index with the same definition as the source index,
+ * but with a smaller number of primary shards.</li>
+ * <li>Hard-links segments from the source index into the target index. If the
+ * file system does not support hard-linking, then all segments are copied into
+ * the new index, which is a much more time consuming process. Also if using
+ * multiple data paths, shards on different data paths require a full copy of
+ * segment files if they are not on the same disk since hardlinks do not work
+ * across disks.</li>
+ * <li>Recovers the target index as though it were a closed index which had just
+ * been re-opened. Recovers shards to the
+ * <code>.routing.allocation.initial_recovery._id</code> index setting.</li>
+ * </ul>
+ *

+ * IMPORTANT: Indices can only be shrunk if they satisfy the following + * requirements: + *

    + *
+ * <ul>
+ * <li>The target index must not exist.</li>
+ * <li>The source index must have more primary shards than the target
+ * index.</li>
+ * <li>The number of primary shards in the target index must be a factor of the
+ * number of primary shards in the source index. The source index must have more
+ * primary shards than the target index.</li>
+ * <li>The index must not contain more than 2,147,483,519 documents in total
+ * across all shards that will be shrunk into a single shard on the target index
+ * as this is the maximum number of docs that can fit into a single shard.</li>
+ * <li>The node handling the shrink process must have sufficient free disk space
+ * to accommodate a second copy of the existing index.</li>
+ * </ul>
+ * * @see
Documentation * on elastic.co @@ -2104,8 +2686,62 @@ public CompletableFuture shrink(ShrinkRequest request) { } /** - * Shrinks an existing index into a new index with fewer primary shards. - * + * Shrink an index. Shrink an index into a new index with fewer primary shards. + *

+ * Before you can shrink an index: + *

    + *
+ * <ul>
+ * <li>The index must be read-only.</li>
+ * <li>A copy of every shard in the index must reside on the same node.</li>
+ * <li>The index must have a green health status.</li>
+ * </ul>
+ *

+ * To make shard allocation easier, we recommend you also remove the index's + * replica shards. You can later re-add replica shards as part of the shrink + * operation. + *

+ * The requested number of primary shards in the target index must be a factor
+ * of the number of shards in the source index. For example an index with 8
+ * primary shards can be shrunk into 4, 2 or 1 primary shards or an index with
+ * 15 primary shards can be shrunk into 5, 3 or 1. If the number of shards in
+ * the index is a prime number it can only be shrunk into a single primary
+ * shard. Before shrinking, a (primary or replica) copy of every shard in the
+ * index must be present on the same node.
+ * <p>

+ * The current write index on a data stream cannot be shrunk. In order to shrink + * the current write index, the data stream must first be rolled over so that a + * new write index is created and then the previous write index can be shrunk. + *

+ * A shrink operation: + *

    + *
  • Creates a new target index with the same definition as the source index, + * but with a smaller number of primary shards.
  • + *
  • Hard-links segments from the source index into the target index. If the + * file system does not support hard-linking, then all segments are copied into + * the new index, which is a much more time consuming process. Also if using + * multiple data paths, shards on different data paths require a full copy of + * segment files if they are not on the same disk since hardlinks do not work + * across disks.
  • + *
  • Recovers the target index as though it were a closed index which had just + * been re-opened. Recovers shards to the + * .routing.allocation.initial_recovery._id index setting.
  • + *
+ *

+ * IMPORTANT: Indices can only be shrunk if they satisfy the following + * requirements: + *

    + *
  • The target index must not exist.
  • + *
  • The source index must have more primary shards than the target + * index.
  • + *
  • The number of primary shards in the target index must be a factor of the + * number of primary shards in the source index. The source index must have more + * primary shards than the target index.
  • + *
  • The index must not contain more than 2,147,483,519 documents in total + * across all shards that will be shrunk into a single shard on the target index + * as this is the maximum number of docs that can fit into a single shard.
  • + *
  • The node handling the shrink process must have sufficient free disk space + * to accommodate a second copy of the existing index.
  • + *
+ * * @param fn * a function that initializes a builder to create the * {@link ShrinkRequest} @@ -2207,8 +2843,58 @@ public CompletableFuture simulateTemplate() { // ----- Endpoint: indices.split /** - * Splits an existing index into a new index with more primary shards. - * + * Split an index. Split an index into a new index with more primary shards. + *
    + *
  • + *

+ * Before you can split an index:
+ * <ul>
+ * <li>The index must be read-only.</li>
+ * <li>The cluster health status must be green.</li>
+ * </ul>
+ *

+ * The number of times the index can be split (and the number of shards that + * each original shard can be split into) is determined by the + * index.number_of_routing_shards setting. The number of routing + * shards specifies the hashing space that is used internally to distribute + * documents across shards with consistent hashing. For instance, a 5 shard + * index with number_of_routing_shards set to 30 (5 x 2 x 3) could + * be split by a factor of 2 or 3. + *

+ * A split operation: + *

    + *
+ * <ul>
+ * <li>Creates a new target index with the same definition as the source index,
+ * but with a larger number of primary shards.</li>
+ * <li>Hard-links segments from the source index into the target index. If the
+ * file system doesn't support hard-linking, all segments are copied into the
+ * new index, which is a much more time consuming process.</li>
+ * <li>Hashes all documents again, after low level files are created, to delete
+ * documents that belong to a different shard.</li>
+ * <li>Recovers the target index as though it were a closed index which had just
+ * been re-opened.</li>
+ * </ul>
+ *

+ * IMPORTANT: Indices can only be split if they satisfy the following + * requirements: + *

    + *
+ * <ul>
+ * <li>The target index must not exist.</li>
+ * <li>The source index must have fewer primary shards than the target
+ * index.</li>
+ * <li>The number of primary shards in the target index must be a multiple of
+ * the number of primary shards in the source index.</li>
+ * <li>The node handling the split process must have sufficient free disk space
+ * to accommodate a second copy of the existing index.</li>
+ * </ul>
+ * * @see Documentation * on elastic.co @@ -2222,8 +2908,58 @@ public CompletableFuture split(SplitRequest request) { } /** - * Splits an existing index into a new index with more primary shards. - * + * Split an index. Split an index into a new index with more primary shards. + *
    + *
  • + *

    + * Before you can split an index: + *

    + *
  • + *
  • + *

    + * The index must be read-only. + *

    + *
  • + *
  • + *

    + * The cluster health status must be green. + *

    + *
  • + *
+ *

+ * The number of times the index can be split (and the number of shards that + * each original shard can be split into) is determined by the + * index.number_of_routing_shards setting. The number of routing + * shards specifies the hashing space that is used internally to distribute + * documents across shards with consistent hashing. For instance, a 5 shard + * index with number_of_routing_shards set to 30 (5 x 2 x 3) could + * be split by a factor of 2 or 3. + *

+ * A split operation: + *

    + *
  • Creates a new target index with the same definition as the source index, + * but with a larger number of primary shards.
  • + *
  • Hard-links segments from the source index into the target index. If the + * file system doesn't support hard-linking, all segments are copied into the + * new index, which is a much more time consuming process.
  • + *
  • Hashes all documents again, after low level files are created, to delete + * documents that belong to a different shard.
  • + *
  • Recovers the target index as though it were a closed index which had just + * been re-opened.
  • + *
+ *

+ * IMPORTANT: Indices can only be split if they satisfy the following + * requirements: + *

    + *
  • The target index must not exist.
  • + *
  • The source index must have fewer primary shards than the target + * index.
  • + *
  • The number of primary shards in the target index must be a multiple of + * the number of primary shards in the source index.
  • + *
  • The node handling the split process must have sufficient free disk space + * to accommodate a second copy of the existing index.
  • + *
+ * * @param fn * a function that initializes a builder to create the * {@link SplitRequest} @@ -2240,8 +2976,21 @@ public final CompletableFuture split( // ----- Endpoint: indices.stats /** - * Returns statistics for one or more indices. For data streams, the API - * retrieves statistics for the stream’s backing indices. + * Get index statistics. For data streams, the API retrieves statistics for the + * stream's backing indices. + *

+ * By default, the returned statistics are index-level with + * primaries and total aggregations. + * primaries are the values for only the primary shards. + * total are the accumulated values for both primary and replica + * shards. + *

+ * To get shard-level statistics, set the level parameter to + * shards. + *

+ * NOTE: When moving to another node, the shard-level statistics for a shard are + * cleared. Although the shard is no longer part of the node, that node retains + * any node-level statistics to which the shard contributed. * * @see Documentation @@ -2256,8 +3005,21 @@ public CompletableFuture stats(IndicesStatsRequest request } /** - * Returns statistics for one or more indices. For data streams, the API - * retrieves statistics for the stream’s backing indices. + * Get index statistics. For data streams, the API retrieves statistics for the + * stream's backing indices. + *

+ * By default, the returned statistics are index-level with + * primaries and total aggregations. + * primaries are the values for only the primary shards. + * total are the accumulated values for both primary and replica + * shards. + *

+ * To get shard-level statistics, set the level parameter to + * shards. + *

+ * NOTE: When moving to another node, the shard-level statistics for a shard are + * cleared. Although the shard is no longer part of the node, that node retains + * any node-level statistics to which the shard contributed. * * @param fn * a function that initializes a builder to create the @@ -2273,8 +3035,21 @@ public final CompletableFuture stats( } /** - * Returns statistics for one or more indices. For data streams, the API - * retrieves statistics for the stream’s backing indices. + * Get index statistics. For data streams, the API retrieves statistics for the + * stream's backing indices. + *

+ * By default, the returned statistics are index-level with + * primaries and total aggregations. + * primaries are the values for only the primary shards. + * total are the accumulated values for both primary and replica + * shards. + *

+ * To get shard-level statistics, set the level parameter to + * shards. + *

+ * NOTE: When moving to another node, the shard-level statistics for a shard are + * cleared. Although the shard is no longer part of the node, that node retains + * any node-level statistics to which the shard contributed. * * @see Documentation @@ -2289,7 +3064,8 @@ public CompletableFuture stats() { // ----- Endpoint: indices.unfreeze /** - * Unfreezes an index. + * Unfreeze an index. When a frozen index is unfrozen, the index goes through + * the normal recovery process and becomes writeable again. * * @see Documentation @@ -2304,7 +3080,8 @@ public CompletableFuture unfreeze(UnfreezeRequest request) { } /** - * Unfreezes an index. + * Unfreeze an index. When a frozen index is unfrozen, the index goes through + * the normal recovery process and becomes writeable again. * * @param fn * a function that initializes a builder to create the diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ElasticsearchIndicesClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ElasticsearchIndicesClient.java index c4887ea43..7effcb255 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ElasticsearchIndicesClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ElasticsearchIndicesClient.java @@ -156,8 +156,8 @@ public AnalyzeResponse analyze() throws IOException, ElasticsearchException { // ----- Endpoint: indices.clear_cache /** - * Clears the caches of one or more indices. For data streams, the API clears - * the caches of the stream’s backing indices. + * Clear the cache. Clear the cache of one or more indices. For data streams, + * the API clears the caches of the stream's backing indices. * * @see Documentation @@ -172,8 +172,8 @@ public ClearCacheResponse clearCache(ClearCacheRequest request) throws IOExcepti } /** - * Clears the caches of one or more indices. For data streams, the API clears - * the caches of the stream’s backing indices. + * Clear the cache. 
Clear the cache of one or more indices. For data streams, + * the API clears the caches of the stream's backing indices. * * @param fn * a function that initializes a builder to create the @@ -189,8 +189,8 @@ public final ClearCacheResponse clearCache(FunctionDocumentation @@ -205,8 +205,42 @@ public ClearCacheResponse clearCache() throws IOException, ElasticsearchExceptio // ----- Endpoint: indices.clone /** - * Clones an existing index. - * + * Clone an index. Clone an existing index into a new index. Each original + * primary shard is cloned into a new primary shard in the new index. + *

+ * IMPORTANT: Elasticsearch does not apply index templates to the resulting + * index. The API also does not copy index metadata from the original index. + * Index metadata includes aliases, index lifecycle management phase + * definitions, and cross-cluster replication (CCR) follower information. For + * example, if you clone a CCR follower index, the resulting clone will not be a + * follower index. + *

+ * The clone API copies most index settings from the source index to the + * resulting index, with the exception of index.number_of_replicas + * and index.auto_expand_replicas. To set the number of replicas in + * the resulting index, configure these settings in the clone request. + *

+ * Cloning works as follows: + *

+ *

+ * IMPORTANT: Indices can only be cloned if they meet the following + * requirements: + *

+ * <ul>
+ * <li>The target index must not exist.</li>
+ * <li>The source index must have the same number of primary shards as the
+ * target index.</li>
+ * <li>The node handling the clone process must have sufficient free disk space
+ * to accommodate a second copy of the existing index.</li>
+ * </ul>
+ * * @see
Documentation * on elastic.co @@ -220,8 +254,42 @@ public CloneIndexResponse clone(CloneIndexRequest request) throws IOException, E } /** - * Clones an existing index. - * + * Clone an index. Clone an existing index into a new index. Each original + * primary shard is cloned into a new primary shard in the new index. + *

+ * IMPORTANT: Elasticsearch does not apply index templates to the resulting + * index. The API also does not copy index metadata from the original index. + * Index metadata includes aliases, index lifecycle management phase + * definitions, and cross-cluster replication (CCR) follower information. For + * example, if you clone a CCR follower index, the resulting clone will not be a + * follower index. + *

+ * The clone API copies most index settings from the source index to the + * resulting index, with the exception of index.number_of_replicas + * and index.auto_expand_replicas. To set the number of replicas in + * the resulting index, configure these settings in the clone request. + *

+ * Cloning works as follows: + *

+ * <ul>
+ * <li>First, it creates a new target index with the same definition as the
+ * source index.</li>
+ * <li>Then it hard-links segments from the source index into the target index.
+ * If the file system does not support hard-linking, all segments are copied
+ * into the new index, which is a much more time-consuming process.</li>
+ * <li>Finally, it recovers the target index as though it were a closed index
+ * which had just been re-opened.</li>
+ * </ul>
+ *

+ * IMPORTANT: Indices can only be cloned if they meet the following + * requirements: + *

+ * <ul>
+ * <li>The target index must not exist.</li>
+ * <li>The source index must have the same number of primary shards as the
+ * target index.</li>
+ * <li>The node handling the clone process must have sufficient free disk space
+ * to accommodate a second copy of the existing index.</li>
+ * </ul>
+ * * @param fn * a function that initializes a builder to create the * {@link CloneIndexRequest} @@ -238,7 +306,33 @@ public final CloneIndexResponse clone(Function + * When opening or closing an index, the master node is responsible for + * restarting the index shards to reflect the new state of the index. The shards + * will then go through the normal recovery process. The data of opened and + * closed indices is automatically replicated by the cluster to ensure that + * enough shard copies are safely kept around at all times. + *

+ * You can open and close multiple indices. An error is thrown if the request + * explicitly refers to a missing index. This behaviour can be turned off using + * the ignore_unavailable=true parameter. + *

+ * By default, you must explicitly name the indices you are opening or closing. + * To open or close indices with _all, *, or other + * wildcard expressions, change + * the action.destructive_requires_name setting to + * false. This setting can also be changed with the cluster update + * settings API. + *

+ * Closed indices consume a significant amount of disk-space which can cause + * problems in managed environments. Closing indices can be turned off with the + * cluster settings API by setting cluster.indices.close.enable to + * false. * * @see Documentation @@ -253,7 +347,33 @@ public CloseIndexResponse close(CloseIndexRequest request) throws IOException, E } /** - * Closes an index. + * Close an index. A closed index is blocked for read or write operations and + * does not allow all operations that opened indices allow. It is not possible + * to index documents or to search for documents in a closed index. Closed + * indices do not have to maintain internal data structures for indexing or + * searching documents, which results in a smaller overhead on the cluster. + *

+ * When opening or closing an index, the master node is responsible for + * restarting the index shards to reflect the new state of the index. The shards + * will then go through the normal recovery process. The data of opened and + * closed indices is automatically replicated by the cluster to ensure that + * enough shard copies are safely kept around at all times. + *

+ * You can open and close multiple indices. An error is thrown if the request + * explicitly refers to a missing index. This behaviour can be turned off using + * the ignore_unavailable=true parameter. + *

+ * By default, you must explicitly name the indices you are opening or closing. + * To open or close indices with _all, *, or other + * wildcard expressions, change + * the action.destructive_requires_name setting to + * false. This setting can also be changed with the cluster update + * settings API. + *

+ * Closed indices consume a significant amount of disk-space which can cause + * problems in managed environments. Closing indices can be turned off with the + * cluster settings API by setting cluster.indices.close.enable to + * false. * * @param fn * a function that initializes a builder to create the @@ -606,7 +726,10 @@ public final DeleteTemplateResponse deleteTemplate( // ----- Endpoint: indices.disk_usage /** - * Analyzes the disk usage of each field of an index or data stream. + * Analyze the index disk usage. Analyze the disk usage of each field of an + * index or data stream. This API might not support indices created in previous + * Elasticsearch versions. The result of a small index can be inaccurate as some + * parts of an index might not be analyzed by the API. * * @see Documentation @@ -621,7 +744,10 @@ public DiskUsageResponse diskUsage(DiskUsageRequest request) throws IOException, } /** - * Analyzes the disk usage of each field of an index or data stream. + * Analyze the index disk usage. Analyze the disk usage of each field of an + * index or data stream. This API might not support indices created in previous + * Elasticsearch versions. The result of a small index can be inaccurate as some + * parts of an index might not be analyzed by the API. * * @param fn * a function that initializes a builder to create the @@ -639,10 +765,17 @@ public final DiskUsageResponse diskUsage(Functionmin, max, sum, - * value_count and avg) for each metric field grouped - * by a configured time interval. + * Downsample an index. Aggregate a time series (TSDS) index and store + * pre-computed statistical summaries (min, max, + * sum, value_count and avg) for each + * metric field grouped by a configured time interval. For example, a TSDS index + * that contains metrics sampled every 10 seconds can be downsampled to an + * hourly index. All documents within an hour interval are summarized and stored + * as a single document in the downsample index. + *

+ * NOTE: Only indices in a time series data stream are supported. Neither field + * nor document level security can be defined on the source index. The source + * index must be read only (index.blocks.write: true). * * @see Documentation @@ -657,10 +790,17 @@ public DownsampleResponse downsample(DownsampleRequest request) throws IOExcepti } /** - * Aggregates a time series (TSDS) index and stores pre-computed statistical - * summaries (min, max, sum, - * value_count and avg) for each metric field grouped - * by a configured time interval. + * Downsample an index. Aggregate a time series (TSDS) index and store + * pre-computed statistical summaries (min, max, + * sum, value_count and avg) for each + * metric field grouped by a configured time interval. For example, a TSDS index + * that contains metrics sampled every 10 seconds can be downsampled to an + * hourly index. All documents within an hour interval are summarized and stored + * as a single document in the downsample index. + *

+ * NOTE: Only indices in a time series data stream are supported. Neither field + * nor document level security can be defined on the source index. The source + * index must be read only (index.blocks.write: true). * * @param fn * a function that initializes a builder to create the @@ -817,10 +957,10 @@ public final BooleanResponse existsTemplate( // ----- Endpoint: indices.explain_data_lifecycle /** - * Get the status for a data stream lifecycle. Retrieves information about an - * index or data stream’s current data stream lifecycle status, such as time - * since index creation, time since rollover, the lifecycle configuration - * managing the index, or any errors encountered during lifecycle execution. + * Get the status for a data stream lifecycle. Get information about an index or + * data stream's current data stream lifecycle status, such as time since index + * creation, time since rollover, the lifecycle configuration managing the + * index, or any errors encountered during lifecycle execution. * * @see Documentation @@ -836,10 +976,10 @@ public ExplainDataLifecycleResponse explainDataLifecycle(ExplainDataLifecycleReq } /** - * Get the status for a data stream lifecycle. Retrieves information about an - * index or data stream’s current data stream lifecycle status, such as time - * since index creation, time since rollover, the lifecycle configuration - * managing the index, or any errors encountered during lifecycle execution. + * Get the status for a data stream lifecycle. Get information about an index or + * data stream's current data stream lifecycle status, such as time since index + * creation, time since rollover, the lifecycle configuration managing the + * index, or any errors encountered during lifecycle execution. 
* * @param fn * a function that initializes a builder to create the @@ -858,7 +998,11 @@ public final ExplainDataLifecycleResponse explainDataLifecycle( // ----- Endpoint: indices.field_usage_stats /** - * Returns field usage information for each shard and field of an index. + * Get field usage stats. Get field usage information for each shard and field + * of an index. Field usage statistics are automatically captured when queries + * are running on a cluster. A shard-level search request that accesses a given + * field, even if multiple times during that request, is counted as a single + * use. * * @see Documentation @@ -874,7 +1018,11 @@ public FieldUsageStatsResponse fieldUsageStats(FieldUsageStatsRequest request) } /** - * Returns field usage information for each shard and field of an index. + * Get field usage stats. Get field usage information for each shard and field + * of an index. Field usage statistics are automatically captured when queries + * are running on a cluster. A shard-level search request that accesses a given + * field, even if multiple times during that request, is counted as a single + * use. * * @param fn * a function that initializes a builder to create the @@ -893,7 +1041,26 @@ public final FieldUsageStatsResponse fieldUsageStats( // ----- Endpoint: indices.flush /** - * Flushes one or more data streams or indices. + * Flush data streams or indices. Flushing a data stream or index is the process + * of making sure that any data that is currently only stored in the transaction + * log is also permanently stored in the Lucene index. When restarting, + * Elasticsearch replays any unflushed operations from the transaction log into + * the Lucene index to bring it back into the state that it was in before the + * restart. Elasticsearch automatically triggers flushes as needed, using + * heuristics that trade off the size of the unflushed transaction log against + * the cost of performing each flush. + *

+ * After each operation has been flushed it is permanently stored in the Lucene + * index. This may mean that there is no need to maintain an additional copy of + * it in the transaction log. The transaction log is made up of multiple files, + * called generations, and Elasticsearch will delete any generation files when + * they are no longer needed, freeing up disk space. + *

+ * It is also possible to trigger a flush on one or more indices using the flush + * API, although it is rare for users to need to call this API directly. If you + * call the flush API after indexing some documents then a successful response + * indicates that Elasticsearch has flushed all the documents that were indexed + * before the flush API was called. * * @see Documentation @@ -908,7 +1075,26 @@ public FlushResponse flush(FlushRequest request) throws IOException, Elasticsear } /** - * Flushes one or more data streams or indices. + * Flush data streams or indices. Flushing a data stream or index is the process + * of making sure that any data that is currently only stored in the transaction + * log is also permanently stored in the Lucene index. When restarting, + * Elasticsearch replays any unflushed operations from the transaction log into + * the Lucene index to bring it back into the state that it was in before the + * restart. Elasticsearch automatically triggers flushes as needed, using + * heuristics that trade off the size of the unflushed transaction log against + * the cost of performing each flush. + *

+ * After each operation has been flushed it is permanently stored in the Lucene + * index. This may mean that there is no need to maintain an additional copy of + * it in the transaction log. The transaction log is made up of multiple files, + * called generations, and Elasticsearch will delete any generation files when + * they are no longer needed, freeing up disk space. + *

+ * It is also possible to trigger a flush on one or more indices using the flush + * API, although it is rare for users to need to call this API directly. If you + * call the flush API after indexing some documents then a successful response + * indicates that Elasticsearch has flushed all the documents that were indexed + * before the flush API was called. * * @param fn * a function that initializes a builder to create the @@ -924,7 +1110,26 @@ public final FlushResponse flush(Function + * After each operation has been flushed it is permanently stored in the Lucene + * index. This may mean that there is no need to maintain an additional copy of + * it in the transaction log. The transaction log is made up of multiple files, + * called generations, and Elasticsearch will delete any generation files when + * they are no longer needed, freeing up disk space. + *

+ * It is also possible to trigger a flush on one or more indices using the flush + * API, although it is rare for users to need to call this API directly. If you + * call the flush API after indexing some documents then a successful response + * indicates that Elasticsearch has flushed all the documents that were indexed + * before the flush API was called. * * @see Documentation @@ -939,7 +1144,25 @@ public FlushResponse flush() throws IOException, ElasticsearchException { // ----- Endpoint: indices.forcemerge /** - * Performs the force merge operation on one or more indices. + * Force a merge. Perform the force merge operation on the shards of one or more + * indices. For data streams, the API forces a merge on the shards of the + * stream's backing indices. + *

+ * Merging reduces the number of segments in each shard by merging some of them + * together and also frees up the space used by deleted documents. Merging + * normally happens automatically, but sometimes it is useful to trigger a merge + * manually. + *

+ * WARNING: We recommend force merging only a read-only index (meaning the index + * is no longer receiving writes). When documents are updated or deleted, the + * old version is not immediately removed but instead soft-deleted and marked + * with a "tombstone". These soft-deleted documents are automatically + * cleaned up during regular segment merges. But force merge can cause very + * large (greater than 5 GB) segments to be produced, which are not eligible for + * regular merges. So the number of soft-deleted documents can then grow + * rapidly, resulting in higher disk usage and worse search performance. If you + * regularly force merge an index receiving writes, this can also make snapshots + * more expensive, since the new documents can't be backed up incrementally. * * @see Documentation @@ -954,7 +1177,25 @@ public ForcemergeResponse forcemerge(ForcemergeRequest request) throws IOExcepti } /** - * Performs the force merge operation on one or more indices. + * Force a merge. Perform the force merge operation on the shards of one or more + * indices. For data streams, the API forces a merge on the shards of the + * stream's backing indices. + *

+ * Merging reduces the number of segments in each shard by merging some of them + * together and also frees up the space used by deleted documents. Merging + * normally happens automatically, but sometimes it is useful to trigger a merge + * manually. + *

+ * WARNING: We recommend force merging only a read-only index (meaning the index + * is no longer receiving writes). When documents are updated or deleted, the + * old version is not immediately removed but instead soft-deleted and marked + * with a "tombstone". These soft-deleted documents are automatically + * cleaned up during regular segment merges. But force merge can cause very + * large (greater than 5 GB) segments to be produced, which are not eligible for + * regular merges. So the number of soft-deleted documents can then grow + * rapidly, resulting in higher disk usage and worse search performance. If you + * regularly force merge an index receiving writes, this can also make snapshots + * more expensive, since the new documents can't be backed up incrementally. * * @param fn * a function that initializes a builder to create the @@ -970,7 +1211,25 @@ public final ForcemergeResponse forcemerge(Function + * Merging reduces the number of segments in each shard by merging some of them + * together and also frees up the space used by deleted documents. Merging + * normally happens automatically, but sometimes it is useful to trigger a merge + * manually. + *

+ * WARNING: We recommend force merging only a read-only index (meaning the index + * is no longer receiving writes). When documents are updated or deleted, the + * old version is not immediately removed but instead soft-deleted and marked + * with a "tombstone". These soft-deleted documents are automatically + * cleaned up during regular segment merges. But force merge can cause very + * large (greater than 5 GB) segments to be produced, which are not eligible for + * regular merges. So the number of soft-deleted documents can then grow + * rapidly, resulting in higher disk usage and worse search performance. If you + * regularly force merge an index receiving writes, this can also make snapshots + * more expensive, since the new documents can't be backed up incrementally. * * @see Documentation @@ -1514,8 +1773,22 @@ public final OpenResponse open(Function + * With CCR auto following, a data stream from a remote cluster can be + * replicated to the local cluster. These data streams can't be rolled over in + * the local cluster. These replicated data streams roll over only if the + * upstream data stream rolls over. In the event that the remote cluster is no + * longer available, the data stream in the local cluster can be promoted to a + * regular data stream, which allows these data streams to be rolled over in the + * local cluster. + *

+ * NOTE: When promoting a data stream, ensure the local cluster has a data + * stream enabled index template that matches the data stream. If this is + * missing, the data stream will not be able to roll over until a matching index + * template is created. This will affect the lifecycle management of the data + * stream and interfere with the data stream size and retention. * * @see Documentation @@ -1531,8 +1804,22 @@ public PromoteDataStreamResponse promoteDataStream(PromoteDataStreamRequest requ } /** - * Promotes a data stream from a replicated data stream managed by CCR to a - * regular data stream + * Promote a data stream. Promote a data stream from a replicated data stream + * managed by cross-cluster replication (CCR) to a regular data stream. + *

+ * With CCR auto following, a data stream from a remote cluster can be + * replicated to the local cluster. These data streams can't be rolled over in + * the local cluster. These replicated data streams roll over only if the + * upstream data stream rolls over. In the event that the remote cluster is no + * longer available, the data stream in the local cluster can be promoted to a + * regular data stream, which allows these data streams to be rolled over in the + * local cluster. + *

+ * NOTE: When promoting a data stream, ensure the local cluster has a data + * stream enabled index template that matches the data stream. If this is + * missing, the data stream will not be able to roll over until a matching index + * template is created. This will affect the lifecycle management of the data + * stream and interfere with the data stream size and retention. * * @param fn * a function that initializes a builder to create the @@ -1750,6 +2037,21 @@ public PutIndicesSettingsResponse putSettings() throws IOException, Elasticsearc /** * Create or update an index template. Index templates define settings, * mappings, and aliases that can be applied automatically to new indices. + * Elasticsearch applies templates to new indices based on an index pattern that + * matches the index name. + *

+ * IMPORTANT: This documentation is about legacy index templates, which are + * deprecated and will be replaced by the composable templates introduced in + * Elasticsearch 7.8. + *

+ * Composable templates always take precedence over legacy templates. If no + * composable template matches a new index, matching legacy templates are + * applied according to their order. + *

+ * Index templates are only applied during index creation. Changes to index + * templates do not affect existing indices. Settings and mappings specified in + * create index API requests override any settings or mappings specified in an + * index template. * * @see Documentation @@ -1766,6 +2068,21 @@ public PutTemplateResponse putTemplate(PutTemplateRequest request) throws IOExce /** * Create or update an index template. Index templates define settings, * mappings, and aliases that can be applied automatically to new indices. + * Elasticsearch applies templates to new indices based on an index pattern that + * matches the index name. + *

+ * IMPORTANT: This documentation is about legacy index templates, which are + * deprecated and will be replaced by the composable templates introduced in + * Elasticsearch 7.8. + *

+ * Composable templates always take precedence over legacy templates. If no + * composable template matches a new index, matching legacy templates are + * applied according to their order. + *

+ * Index templates are only applied during index creation. Changes to index + * templates do not affect existing indices. Settings and mappings specified in + * create index API requests override any settings or mappings specified in an + * index template. * * @param fn * a function that initializes a builder to create the @@ -1784,9 +2101,37 @@ public final PutTemplateResponse putTemplate( // ----- Endpoint: indices.recovery /** - * Returns information about ongoing and completed shard recoveries for one or - * more indices. For data streams, the API returns information for the stream’s - * backing indices. + * Get index recovery information. Get information about ongoing and completed + * shard recoveries for one or more indices. For data streams, the API returns + * information for the stream's backing indices. + *

+ * Shard recovery is the process of initializing a shard copy, such as restoring + * a primary shard from a snapshot or creating a replica shard from a primary + * shard. When a shard recovery completes, the recovered shard is available for + * search and indexing. + *

+ * Recovery automatically occurs during the following processes: + *

+ *

+ * You can determine the cause of a shard recovery using the recovery or cat + * recovery APIs. + *

+ * The index recovery API reports information about completed recoveries only + * for shard copies that currently exist in the cluster. It only reports the + * last recovery for each shard copy and does not report historical information + * about earlier recoveries, nor does it report information about the recoveries + * of shard copies that no longer exist. This means that if a shard copy + * completes a recovery and then Elasticsearch relocates it onto a different + * node then the information about the original recovery will not be shown in + * the recovery API. * * @see Documentation @@ -1801,9 +2146,37 @@ public RecoveryResponse recovery(RecoveryRequest request) throws IOException, El } /** - * Returns information about ongoing and completed shard recoveries for one or - * more indices. For data streams, the API returns information for the stream’s - * backing indices. + * Get index recovery information. Get information about ongoing and completed + * shard recoveries for one or more indices. For data streams, the API returns + * information for the stream's backing indices. + *

+ * Shard recovery is the process of initializing a shard copy, such as restoring + * a primary shard from a snapshot or creating a replica shard from a primary + * shard. When a shard recovery completes, the recovered shard is available for + * search and indexing. + *

+ * Recovery automatically occurs during the following processes: + *

+ *

+ * You can determine the cause of a shard recovery using the recovery or cat + * recovery APIs. + *

+ * The index recovery API reports information about completed recoveries only + * for shard copies that currently exist in the cluster. It only reports the + * last recovery for each shard copy and does not report historical information + * about earlier recoveries, nor does it report information about the recoveries + * of shard copies that no longer exist. This means that if a shard copy + * completes a recovery and then Elasticsearch relocates it onto a different + * node then the information about the original recovery will not be shown in + * the recovery API. * * @param fn * a function that initializes a builder to create the @@ -1819,9 +2192,37 @@ public final RecoveryResponse recovery(Function + * Shard recovery is the process of initializing a shard copy, such as restoring + * a primary shard from a snapshot or creating a replica shard from a primary + * shard. When a shard recovery completes, the recovered shard is available for + * search and indexing. + *

+ * Recovery automatically occurs during the following processes: + *

+ * <ul>
+ * <li>When creating an index for the first time.</li>
+ * <li>When a node rejoins the cluster and starts up any missing primary shard
+ * copies using the data that it holds in its data path.</li>
+ * <li>Creation of new replica shard copies from the primary.</li>
+ * <li>Relocation of a shard copy to a different node in the same cluster.</li>
+ * <li>A snapshot restore operation.</li>
+ * <li>A clone, shrink, or split operation.</li>
+ * </ul>
+ *

+ * You can determine the cause of a shard recovery using the recovery or cat + * recovery APIs. + *

+ * The index recovery API reports information about completed recoveries only + * for shard copies that currently exist in the cluster. It only reports the + * last recovery for each shard copy and does not report historical information + * about earlier recoveries, nor does it report information about the recoveries + * of shard copies that no longer exist. This means that if a shard copy + * completes a recovery and then Elasticsearch relocates it onto a different + * node then the information about the original recovery will not be shown in + * the recovery API. * * @see Documentation @@ -1888,7 +2289,28 @@ public RefreshResponse refresh() throws IOException, ElasticsearchException { // ----- Endpoint: indices.reload_search_analyzers /** - * Reloads an index's search analyzers and their resources. + * Reload search analyzers. Reload an index's search analyzers and their + * resources. For data streams, the API reloads search analyzers and resources + * for the stream's backing indices. + *

+ * IMPORTANT: After reloading the search analyzers you should clear the request + * cache to make sure it doesn't contain responses derived from the previous + * versions of the analyzer. + *

+ * You can use the reload search analyzers API to pick up changes to synonym + * files used in the synonym_graph or synonym token + * filter of a search analyzer. To be eligible, the token filter must have an + * updateable flag of true and only be used in search + * analyzers. + *

+ * NOTE: This API does not perform a reload for each shard of an index. Instead, + * it performs a reload for each node containing index shards. As a result, the + * total shard count returned by the API can differ from the number of index + * shards. Because reloading affects every node with an index shard, it is + * important to update the synonym file on every data node in the + * cluster--including nodes that don't contain a shard replica--before using + * this API. This ensures the synonym file is updated everywhere in the cluster + * in case shards are relocated in the future. * * @see Documentation @@ -1904,7 +2326,28 @@ public ReloadSearchAnalyzersResponse reloadSearchAnalyzers(ReloadSearchAnalyzers } /** - * Reloads an index's search analyzers and their resources. + * Reload search analyzers. Reload an index's search analyzers and their + * resources. For data streams, the API reloads search analyzers and resources + * for the stream's backing indices. + *

+ * IMPORTANT: After reloading the search analyzers you should clear the request + * cache to make sure it doesn't contain responses derived from the previous + * versions of the analyzer. + *

+ * You can use the reload search analyzers API to pick up changes to synonym + * files used in the synonym_graph or synonym token + * filter of a search analyzer. To be eligible, the token filter must have an + * updateable flag of true and only be used in search + * analyzers. + *

+ * NOTE: This API does not perform a reload for each shard of an index. Instead, + * it performs a reload for each node containing index shards. As a result, the + * total shard count returned by the API can differ from the number of index + * shards. Because reloading affects every node with an index shard, it is + * important to update the synonym file on every data node in the + * cluster--including nodes that don't contain a shard replica--before using + * this API. This ensures the synonym file is updated everywhere in the cluster + * in case shards are relocated in the future. * * @param fn * a function that initializes a builder to create the @@ -1923,10 +2366,33 @@ public final ReloadSearchAnalyzersResponse reloadSearchAnalyzers( // ----- Endpoint: indices.resolve_cluster /** - * Resolves the specified index expressions to return information about each - * cluster, including the local cluster, if included. Multiple patterns and - * remote clusters are supported. - * + * Resolve the cluster. Resolve the specified index expressions to return + * information about each cluster, including the local cluster, if included. + * Multiple patterns and remote clusters are supported. + *

+ * This endpoint is useful before doing a cross-cluster search in order to + * determine which remote clusters should be included in a search. + *

+ * You use the same index expression with this endpoint as you would for + * cross-cluster search. Index and cluster exclusions are also supported with + * this endpoint. + *

+ * For each cluster in the index expression, information is returned about: + *

+ * * @see Documentation * on elastic.co @@ -1941,10 +2407,33 @@ public ResolveClusterResponse resolveCluster(ResolveClusterRequest request) } /** - * Resolves the specified index expressions to return information about each - * cluster, including the local cluster, if included. Multiple patterns and - * remote clusters are supported. - * + * Resolve the cluster. Resolve the specified index expressions to return + * information about each cluster, including the local cluster, if included. + * Multiple patterns and remote clusters are supported. + *

+ * This endpoint is useful before doing a cross-cluster search in order to + * determine which remote clusters should be included in a search. + *

+ * You use the same index expression with this endpoint as you would for + * cross-cluster search. Index and cluster exclusions are also supported with + * this endpoint. + *

+ * For each cluster in the index expression, information is returned about: + *

    + *
+ * <ul>
+ * <li>Whether the querying (&quot;local&quot;) cluster is currently connected
+ * to each remote cluster in the index expression scope.</li>
+ * <li>Whether each remote cluster is configured with
+ * <code>skip_unavailable</code> as <code>true</code> or
+ * <code>false</code>.</li>
+ * <li>Whether there are any indices, aliases, or data streams on that cluster
+ * that match the index expression.</li>
+ * <li>Whether the search is likely to have errors returned when you do the
+ * cross-cluster search (including any authorization errors if you do not have
+ * permission to query the index).</li>
+ * <li>Cluster version information, including the Elasticsearch server
+ * version.</li>
+ * </ul>
+ * * @param fn * a function that initializes a builder to create the * {@link ResolveClusterRequest} @@ -2035,8 +2524,9 @@ public final RolloverResponse rollover(FunctionDocumentation @@ -2051,8 +2541,9 @@ public SegmentsResponse segments(SegmentsRequest request) throws IOException, El } /** - * Returns low-level information about the Lucene segments in index shards. For - * data streams, the API returns information about the stream’s backing indices. + * Get index segments. Get low-level information about the Lucene segments in + * index shards. For data streams, the API returns information about the + * stream's backing indices. * * @param fn * a function that initializes a builder to create the @@ -2068,8 +2559,9 @@ public final SegmentsResponse segments(FunctionDocumentation @@ -2084,9 +2576,21 @@ public SegmentsResponse segments() throws IOException, ElasticsearchException { // ----- Endpoint: indices.shard_stores /** - * Retrieves store information about replica shards in one or more indices. For - * data streams, the API retrieves store information for the stream’s backing - * indices. + * Get index shard stores. Get store information about replica shards in one or + * more indices. For data streams, the API retrieves store information for the + * stream's backing indices. + *

+ * The index shard stores API returns the following information: + *

    + *
+ * <ul>
+ * <li>The node on which each replica shard exists.</li>
+ * <li>The allocation ID for each replica shard.</li>
+ * <li>A unique ID for each replica shard.</li>
+ * <li>Any errors encountered while opening the shard index or from an earlier
+ * failure.</li>
+ * </ul>

+ * By default, the API returns store information only for primary shards that + * are unassigned or have one or more unassigned replica shards. * * @see Documentation @@ -2101,9 +2605,21 @@ public ShardStoresResponse shardStores(ShardStoresRequest request) throws IOExce } /** - * Retrieves store information about replica shards in one or more indices. For - * data streams, the API retrieves store information for the stream’s backing - * indices. + * Get index shard stores. Get store information about replica shards in one or + * more indices. For data streams, the API retrieves store information for the + * stream's backing indices. + *

+ * The index shard stores API returns the following information: + *

+ * <ul>
+ * <li>The node on which each replica shard exists.</li>
+ * <li>The allocation ID for each replica shard.</li>
+ * <li>A unique ID for each replica shard.</li>
+ * <li>Any errors encountered while opening the shard index or from an earlier
+ * failure.</li>
+ * </ul>

+ * By default, the API returns store information only for primary shards that + * are unassigned or have one or more unassigned replica shards. * * @param fn * a function that initializes a builder to create the @@ -2120,9 +2636,21 @@ public final ShardStoresResponse shardStores( } /** - * Retrieves store information about replica shards in one or more indices. For - * data streams, the API retrieves store information for the stream’s backing - * indices. + * Get index shard stores. Get store information about replica shards in one or + * more indices. For data streams, the API retrieves store information for the + * stream's backing indices. + *

+ * The index shard stores API returns the following information: + *

    + *
+ * <ul>
+ * <li>The node on which each replica shard exists.</li>
+ * <li>The allocation ID for each replica shard.</li>
+ * <li>A unique ID for each replica shard.</li>
+ * <li>Any errors encountered while opening the shard index or from an earlier
+ * failure.</li>
+ * </ul>

+ * By default, the API returns store information only for primary shards that + * are unassigned or have one or more unassigned replica shards. * * @see Documentation @@ -2137,8 +2665,62 @@ public ShardStoresResponse shardStores() throws IOException, ElasticsearchExcept // ----- Endpoint: indices.shrink /** - * Shrinks an existing index into a new index with fewer primary shards. - * + * Shrink an index. Shrink an index into a new index with fewer primary shards. + *

+ * Before you can shrink an index: + *

+ * <ul>
+ * <li>The index must be read-only.</li>
+ * <li>A copy of every shard in the index must reside on the same node.</li>
+ * <li>The index must have a green health status.</li>
+ * </ul>

+ * To make shard allocation easier, we recommend you also remove the index's + * replica shards. You can later re-add replica shards as part of the shrink + * operation. + *

+ * The requested number of primary shards in the target index must be a factor + * of the number of shards in the source index. For example an index with 8 + * primary shards can be shrunk into 4, 2 or 1 primary shards or an index with + * 15 primary shards can be shrunk into 5, 3 or 1. If the number of shards in + * the index is a prime number it can only be shrunk into a single primary shard + * Before shrinking, a (primary or replica) copy of every shard in the index + * must be present on the same node. + *

+ * The current write index on a data stream cannot be shrunk. In order to shrink + * the current write index, the data stream must first be rolled over so that a + * new write index is created and then the previous write index can be shrunk. + *

+ * A shrink operation: + *

    + *
+ * <ul>
+ * <li>Creates a new target index with the same definition as the source index,
+ * but with a smaller number of primary shards.</li>
+ * <li>Hard-links segments from the source index into the target index. If the
+ * file system does not support hard-linking, then all segments are copied into
+ * the new index, which is a much more time consuming process. Also if using
+ * multiple data paths, shards on different data paths require a full copy of
+ * segment files if they are not on the same disk since hardlinks do not work
+ * across disks.</li>
+ * <li>Recovers the target index as though it were a closed index which had just
+ * been re-opened. Recovers shards to the
+ * <code>.routing.allocation.initial_recovery._id</code> index setting.</li>
+ * </ul>

+ * IMPORTANT: Indices can only be shrunk if they satisfy the following + * requirements: + *

    + *
+ * <ul>
+ * <li>The target index must not exist.</li>
+ * <li>The source index must have more primary shards than the target
+ * index.</li>
+ * <li>The number of primary shards in the target index must be a factor of the
+ * number of primary shards in the source index. The source index must have more
+ * primary shards than the target index.</li>
+ * <li>The index must not contain more than 2,147,483,519 documents in total
+ * across all shards that will be shrunk into a single shard on the target index
+ * as this is the maximum number of docs that can fit into a single shard.</li>
+ * <li>The node handling the shrink process must have sufficient free disk space
+ * to accommodate a second copy of the existing index.</li>
+ * </ul>
+ * * @see
Documentation * on elastic.co @@ -2152,8 +2734,62 @@ public ShrinkResponse shrink(ShrinkRequest request) throws IOException, Elastics } /** - * Shrinks an existing index into a new index with fewer primary shards. - * + * Shrink an index. Shrink an index into a new index with fewer primary shards. + *

+ * Before you can shrink an index: + *

    + *
+ * <ul>
+ * <li>The index must be read-only.</li>
+ * <li>A copy of every shard in the index must reside on the same node.</li>
+ * <li>The index must have a green health status.</li>
+ * </ul>

+ * To make shard allocation easier, we recommend you also remove the index's + * replica shards. You can later re-add replica shards as part of the shrink + * operation. + *

+ * The requested number of primary shards in the target index must be a factor + * of the number of shards in the source index. For example an index with 8 + * primary shards can be shrunk into 4, 2 or 1 primary shards or an index with + * 15 primary shards can be shrunk into 5, 3 or 1. If the number of shards in + * the index is a prime number it can only be shrunk into a single primary shard + * Before shrinking, a (primary or replica) copy of every shard in the index + * must be present on the same node. + *

+ * The current write index on a data stream cannot be shrunk. In order to shrink + * the current write index, the data stream must first be rolled over so that a + * new write index is created and then the previous write index can be shrunk. + *

+ * A shrink operation: + *

    + *
+ * <ul>
+ * <li>Creates a new target index with the same definition as the source index,
+ * but with a smaller number of primary shards.</li>
+ * <li>Hard-links segments from the source index into the target index. If the
+ * file system does not support hard-linking, then all segments are copied into
+ * the new index, which is a much more time consuming process. Also if using
+ * multiple data paths, shards on different data paths require a full copy of
+ * segment files if they are not on the same disk since hardlinks do not work
+ * across disks.</li>
+ * <li>Recovers the target index as though it were a closed index which had just
+ * been re-opened. Recovers shards to the
+ * <code>.routing.allocation.initial_recovery._id</code> index setting.</li>
+ * </ul>

+ * IMPORTANT: Indices can only be shrunk if they satisfy the following + * requirements: + *

    + *
+ * <ul>
+ * <li>The target index must not exist.</li>
+ * <li>The source index must have more primary shards than the target
+ * index.</li>
+ * <li>The number of primary shards in the target index must be a factor of the
+ * number of primary shards in the source index. The source index must have more
+ * primary shards than the target index.</li>
+ * <li>The index must not contain more than 2,147,483,519 documents in total
+ * across all shards that will be shrunk into a single shard on the target index
+ * as this is the maximum number of docs that can fit into a single shard.</li>
+ * <li>The node handling the shrink process must have sufficient free disk space
+ * to accommodate a second copy of the existing index.</li>
+ * </ul>
+ * * @param fn * a function that initializes a builder to create the * {@link ShrinkRequest} @@ -2258,8 +2894,58 @@ public SimulateTemplateResponse simulateTemplate() throws IOException, Elasticse // ----- Endpoint: indices.split /** - * Splits an existing index into a new index with more primary shards. - * + * Split an index. Split an index into a new index with more primary shards. + *
+ * <p>
+ * Before you can split an index:
+ * <ul>
+ * <li>The index must be read-only.</li>
+ * <li>The cluster health status must be green.</li>
+ * </ul>

+ * The number of times the index can be split (and the number of shards that + * each original shard can be split into) is determined by the + * index.number_of_routing_shards setting. The number of routing + * shards specifies the hashing space that is used internally to distribute + * documents across shards with consistent hashing. For instance, a 5 shard + * index with number_of_routing_shards set to 30 (5 x 2 x 3) could + * be split by a factor of 2 or 3. + *

+ * A split operation: + *

    + *
+ * <ul>
+ * <li>Creates a new target index with the same definition as the source index,
+ * but with a larger number of primary shards.</li>
+ * <li>Hard-links segments from the source index into the target index. If the
+ * file system doesn't support hard-linking, all segments are copied into the
+ * new index, which is a much more time consuming process.</li>
+ * <li>Hashes all documents again, after low level files are created, to delete
+ * documents that belong to a different shard.</li>
+ * <li>Recovers the target index as though it were a closed index which had just
+ * been re-opened.</li>
+ * </ul>

+ * IMPORTANT: Indices can only be split if they satisfy the following + * requirements: + *

    + *
+ * <ul>
+ * <li>The target index must not exist.</li>
+ * <li>The source index must have fewer primary shards than the target
+ * index.</li>
+ * <li>The number of primary shards in the target index must be a multiple of
+ * the number of primary shards in the source index.</li>
+ * <li>The node handling the split process must have sufficient free disk space
+ * to accommodate a second copy of the existing index.</li>
+ * </ul>
+ * * @see Documentation * on elastic.co @@ -2273,8 +2959,58 @@ public SplitResponse split(SplitRequest request) throws IOException, Elasticsear } /** - * Splits an existing index into a new index with more primary shards. - * + * Split an index. Split an index into a new index with more primary shards. + *
+ * <p>
+ * Before you can split an index:
+ * <ul>
+ * <li>The index must be read-only.</li>
+ * <li>The cluster health status must be green.</li>
+ * </ul>

+ * The number of times the index can be split (and the number of shards that + * each original shard can be split into) is determined by the + * index.number_of_routing_shards setting. The number of routing + * shards specifies the hashing space that is used internally to distribute + * documents across shards with consistent hashing. For instance, a 5 shard + * index with number_of_routing_shards set to 30 (5 x 2 x 3) could + * be split by a factor of 2 or 3. + *

+ * A split operation: + *

    + *
+ * <ul>
+ * <li>Creates a new target index with the same definition as the source index,
+ * but with a larger number of primary shards.</li>
+ * <li>Hard-links segments from the source index into the target index. If the
+ * file system doesn't support hard-linking, all segments are copied into the
+ * new index, which is a much more time consuming process.</li>
+ * <li>Hashes all documents again, after low level files are created, to delete
+ * documents that belong to a different shard.</li>
+ * <li>Recovers the target index as though it were a closed index which had just
+ * been re-opened.</li>
+ * </ul>

+ * IMPORTANT: Indices can only be split if they satisfy the following + * requirements: + *

    + *
+ * <ul>
+ * <li>The target index must not exist.</li>
+ * <li>The source index must have fewer primary shards than the target
+ * index.</li>
+ * <li>The number of primary shards in the target index must be a multiple of
+ * the number of primary shards in the source index.</li>
+ * <li>The node handling the split process must have sufficient free disk space
+ * to accommodate a second copy of the existing index.</li>
+ * </ul>
+ * * @param fn * a function that initializes a builder to create the * {@link SplitRequest} @@ -2291,8 +3027,21 @@ public final SplitResponse split(Function + * By default, the returned statistics are index-level with + * primaries and total aggregations. + * primaries are the values for only the primary shards. + * total are the accumulated values for both primary and replica + * shards. + *

+ * To get shard-level statistics, set the level parameter to + * shards. + *

+ * NOTE: When moving to another node, the shard-level statistics for a shard are + * cleared. Although the shard is no longer part of the node, that node retains + * any node-level statistics to which the shard contributed. * * @see Documentation @@ -2307,8 +3056,21 @@ public IndicesStatsResponse stats(IndicesStatsRequest request) throws IOExceptio } /** - * Returns statistics for one or more indices. For data streams, the API - * retrieves statistics for the stream’s backing indices. + * Get index statistics. For data streams, the API retrieves statistics for the + * stream's backing indices. + *

+ * By default, the returned statistics are index-level with + * primaries and total aggregations. + * primaries are the values for only the primary shards. + * total are the accumulated values for both primary and replica + * shards. + *

+ * To get shard-level statistics, set the level parameter to + * shards. + *

+ * NOTE: When moving to another node, the shard-level statistics for a shard are + * cleared. Although the shard is no longer part of the node, that node retains + * any node-level statistics to which the shard contributed. * * @param fn * a function that initializes a builder to create the @@ -2325,8 +3087,21 @@ public final IndicesStatsResponse stats( } /** - * Returns statistics for one or more indices. For data streams, the API - * retrieves statistics for the stream’s backing indices. + * Get index statistics. For data streams, the API retrieves statistics for the + * stream's backing indices. + *

+ * By default, the returned statistics are index-level with + * primaries and total aggregations. + * primaries are the values for only the primary shards. + * total are the accumulated values for both primary and replica + * shards. + *

+ * To get shard-level statistics, set the level parameter to + * shards. + *

+ * NOTE: When moving to another node, the shard-level statistics for a shard are + * cleared. Although the shard is no longer part of the node, that node retains + * any node-level statistics to which the shard contributed. * * @see Documentation @@ -2341,7 +3116,8 @@ public IndicesStatsResponse stats() throws IOException, ElasticsearchException { // ----- Endpoint: indices.unfreeze /** - * Unfreezes an index. + * Unfreeze an index. When a frozen index is unfrozen, the index goes through + * the normal recovery process and becomes writeable again. * * @see Documentation @@ -2356,7 +3132,8 @@ public UnfreezeResponse unfreeze(UnfreezeRequest request) throws IOException, El } /** - * Unfreezes an index. + * Unfreeze an index. When a frozen index is unfrozen, the index goes through + * the normal recovery process and becomes writeable again. * * @param fn * a function that initializes a builder to create the diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ExplainDataLifecycleRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ExplainDataLifecycleRequest.java index 1af1f6e3d..a4a4a4d5f 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ExplainDataLifecycleRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ExplainDataLifecycleRequest.java @@ -59,10 +59,10 @@ // typedef: indices.explain_data_lifecycle.Request /** - * Get the status for a data stream lifecycle. Retrieves information about an - * index or data stream’s current data stream lifecycle status, such as time - * since index creation, time since rollover, the lifecycle configuration - * managing the index, or any errors encountered during lifecycle execution. + * Get the status for a data stream lifecycle. 
Get information about an index or + * data stream's current data stream lifecycle status, such as time since index + * creation, time since rollover, the lifecycle configuration managing the + * index, or any errors encountered during lifecycle execution. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/FieldUsageStatsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/FieldUsageStatsRequest.java index 27cd7608f..9a5822f96 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/FieldUsageStatsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/FieldUsageStatsRequest.java @@ -61,7 +61,11 @@ // typedef: indices.field_usage_stats.Request /** - * Returns field usage information for each shard and field of an index. + * Get field usage stats. Get field usage information for each shard and field + * of an index. Field usage statistics are automatically captured when queries + * are running on a cluster. A shard-level search request that accesses a given + * field, even if multiple times during that request, is counted as a single + * use. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/FlushRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/FlushRequest.java index cc937d327..f23846239 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/FlushRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/FlushRequest.java @@ -59,7 +59,26 @@ // typedef: indices.flush.Request /** - * Flushes one or more data streams or indices. + * Flush data streams or indices. Flushing a data stream or index is the process + * of making sure that any data that is currently only stored in the transaction + * log is also permanently stored in the Lucene index. 
When restarting, + * Elasticsearch replays any unflushed operations from the transaction log into + * the Lucene index to bring it back into the state that it was in before the + * restart. Elasticsearch automatically triggers flushes as needed, using + * heuristics that trade off the size of the unflushed transaction log against + * the cost of performing each flush. + *

+ * After each operation has been flushed it is permanently stored in the Lucene + * index. This may mean that there is no need to maintain an additional copy of + * it in the transaction log. The transaction log is made up of multiple files, + * called generations, and Elasticsearch will delete any generation files when + * they are no longer needed, freeing up disk space. + *

+ * It is also possible to trigger a flush on one or more indices using the flush + * API, although it is rare for users to need to call this API directly. If you + * call the flush API after indexing some documents then a successful response + * indicates that Elasticsearch has flushed all the documents that were indexed + * before the flush API was called. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ForcemergeRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ForcemergeRequest.java index 2eb961c31..81db9161d 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ForcemergeRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ForcemergeRequest.java @@ -60,7 +60,25 @@ // typedef: indices.forcemerge.Request /** - * Performs the force merge operation on one or more indices. + * Force a merge. Perform the force merge operation on the shards of one or more + * indices. For data streams, the API forces a merge on the shards of the + * stream's backing indices. + *

+ * Merging reduces the number of segments in each shard by merging some of them + * together and also frees up the space used by deleted documents. Merging + * normally happens automatically, but sometimes it is useful to trigger a merge + * manually. + *

+ * WARNING: We recommend force merging only a read-only index (meaning the index + * is no longer receiving writes). When documents are updated or deleted, the + * old version is not immediately removed but instead soft-deleted and marked + * with a "tombstone". These soft-deleted documents are automatically + * cleaned up during regular segment merges. But force merge can cause very + * large (greater than 5 GB) segments to be produced, which are not eligible for + * regular merges. So the number of soft-deleted documents can then grow + * rapidly, resulting in higher disk usage and worse search performance. If you + * regularly force merge an index receiving writes, this can also make snapshots + * more expensive, since the new documents can't be backed up incrementally. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/IndicesStatsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/IndicesStatsRequest.java index 0e9993414..0d752ea00 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/IndicesStatsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/IndicesStatsRequest.java @@ -60,8 +60,21 @@ // typedef: indices.stats.Request /** - * Returns statistics for one or more indices. For data streams, the API - * retrieves statistics for the stream’s backing indices. + * Get index statistics. For data streams, the API retrieves statistics for the + * stream's backing indices. + *

+ * By default, the returned statistics are index-level with + * primaries and total aggregations. + * primaries are the values for only the primary shards. + * total are the accumulated values for both primary and replica + * shards. + *

+ * To get shard-level statistics, set the level parameter to + * shards. + *

+ * NOTE: When moving to another node, the shard-level statistics for a shard are + * cleared. Although the shard is no longer part of the node, that node retains + * any node-level statistics to which the shard contributed. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/PromoteDataStreamRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/PromoteDataStreamRequest.java index 9be86a794..3dcbf5299 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/PromoteDataStreamRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/PromoteDataStreamRequest.java @@ -56,8 +56,22 @@ // typedef: indices.promote_data_stream.Request /** - * Promotes a data stream from a replicated data stream managed by CCR to a - * regular data stream + * Promote a data stream. Promote a data stream from a replicated data stream + * managed by cross-cluster replication (CCR) to a regular data stream. + *

+ * With CCR auto following, a data stream from a remote cluster can be + * replicated to the local cluster. These data streams can't be rolled over in + * the local cluster. These replicated data streams roll over only if the + * upstream data stream rolls over. In the event that the remote cluster is no + * longer available, the data stream in the local cluster can be promoted to a + * regular data stream, which allows these data streams to be rolled over in the + * local cluster. + *

+ * NOTE: When promoting a data stream, ensure the local cluster has a data + * stream enabled index template that matches the data stream. If this is + * missing, the data stream will not be able to roll over until a matching index + * template is created. This will affect the lifecycle management of the data + * stream and interfere with the data stream size and retention. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/PutTemplateRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/PutTemplateRequest.java index 5565de8d4..c62cb7bb2 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/PutTemplateRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/PutTemplateRequest.java @@ -65,6 +65,21 @@ /** * Create or update an index template. Index templates define settings, * mappings, and aliases that can be applied automatically to new indices. + * Elasticsearch applies templates to new indices based on an index pattern that + * matches the index name. + *

+ * IMPORTANT: This documentation is about legacy index templates, which are + * deprecated and will be replaced by the composable templates introduced in + * Elasticsearch 7.8. + *

+ * Composable templates always take precedence over legacy templates. If no + * composable template matches a new index, matching legacy templates are + * applied according to their order. + *

+ * Index templates are only applied during index creation. Changes to index + * templates do not affect existing indices. Settings and mappings specified in + * create index API requests override any settings or mappings specified in an + * index template. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/RecoveryRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/RecoveryRequest.java index 065bddde6..80e7c436a 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/RecoveryRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/RecoveryRequest.java @@ -58,9 +58,37 @@ // typedef: indices.recovery.Request /** - * Returns information about ongoing and completed shard recoveries for one or - * more indices. For data streams, the API returns information for the stream’s - * backing indices. + * Get index recovery information. Get information about ongoing and completed + * shard recoveries for one or more indices. For data streams, the API returns + * information for the stream's backing indices. + *

+ * Shard recovery is the process of initializing a shard copy, such as restoring + * a primary shard from a snapshot or creating a replica shard from a primary + * shard. When a shard recovery completes, the recovered shard is available for + * search and indexing. + *

+ * Recovery automatically occurs during the following processes: + *

    + *
+ * <ul>
+ * <li>When creating an index for the first time.</li>
+ * <li>When a node rejoins the cluster and starts up any missing primary shard
+ * copies using the data that it holds in its data path.</li>
+ * <li>Creation of new replica shard copies from the primary.</li>
+ * <li>Relocation of a shard copy to a different node in the same cluster.</li>
+ * <li>A snapshot restore operation.</li>
+ * <li>A clone, shrink, or split operation.</li>
+ * </ul>

+ * You can determine the cause of a shard recovery using the recovery or cat + * recovery APIs. + *

+ * The index recovery API reports information about completed recoveries only + * for shard copies that currently exist in the cluster. It only reports the + * last recovery for each shard copy and does not report historical information + * about earlier recoveries, nor does it report information about the recoveries + * of shard copies that no longer exist. This means that if a shard copy + * completes a recovery and then Elasticsearch relocates it onto a different + * node then the information about the original recovery will not be shown in + * the recovery API. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ReloadSearchAnalyzersRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ReloadSearchAnalyzersRequest.java index 557144e68..fe15077bf 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ReloadSearchAnalyzersRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ReloadSearchAnalyzersRequest.java @@ -59,7 +59,28 @@ // typedef: indices.reload_search_analyzers.Request /** - * Reloads an index's search analyzers and their resources. + * Reload search analyzers. Reload an index's search analyzers and their + * resources. For data streams, the API reloads search analyzers and resources + * for the stream's backing indices. + *

+ * IMPORTANT: After reloading the search analyzers you should clear the request + * cache to make sure it doesn't contain responses derived from the previous + * versions of the analyzer. + *

+ * You can use the reload search analyzers API to pick up changes to synonym + * files used in the synonym_graph or synonym token + * filter of a search analyzer. To be eligible, the token filter must have an + * updateable flag of true and only be used in search + * analyzers. + *

+ * NOTE: This API does not perform a reload for each shard of an index. Instead, + * it performs a reload for each node containing index shards. As a result, the + * total shard count returned by the API can differ from the number of index + * shards. Because reloading affects every node with an index shard, it is + * important to update the synonym file on every data node in the + * cluster--including nodes that don't contain a shard replica--before using + * this API. This ensures the synonym file is updated everywhere in the cluster + * in case shards are relocated in the future. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ResolveClusterRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ResolveClusterRequest.java index 8656ec355..b19c7a765 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ResolveClusterRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ResolveClusterRequest.java @@ -59,10 +59,33 @@ // typedef: indices.resolve_cluster.Request /** - * Resolves the specified index expressions to return information about each - * cluster, including the local cluster, if included. Multiple patterns and - * remote clusters are supported. - * + * Resolve the cluster. Resolve the specified index expressions to return + * information about each cluster, including the local cluster, if included. + * Multiple patterns and remote clusters are supported. + *

+ * This endpoint is useful before doing a cross-cluster search in order to + * determine which remote clusters should be included in a search. + *

+ * You use the same index expression with this endpoint as you would for + * cross-cluster search. Index and cluster exclusions are also supported with + * this endpoint. + *

+ * For each cluster in the index expression, information is returned about: + *

+ * * @see API * specification */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/SegmentsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/SegmentsRequest.java index 0de776681..87cd28ed4 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/SegmentsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/SegmentsRequest.java @@ -59,8 +59,9 @@ // typedef: indices.segments.Request /** - * Returns low-level information about the Lucene segments in index shards. For - * data streams, the API returns information about the stream’s backing indices. + * Get index segments. Get low-level information about the Lucene segments in + * index shards. For data streams, the API returns information about the + * stream's backing indices. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ShardStoresRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ShardStoresRequest.java index 04e059dd7..e7584b2f1 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ShardStoresRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ShardStoresRequest.java @@ -60,9 +60,21 @@ // typedef: indices.shard_stores.Request /** - * Retrieves store information about replica shards in one or more indices. For - * data streams, the API retrieves store information for the stream’s backing - * indices. + * Get index shard stores. Get store information about replica shards in one or + * more indices. For data streams, the API retrieves store information for the + * stream's backing indices. + *

+ * The index shard stores API returns the following information: + *

    + *
  • The node on which each replica shard exists.
  • + *
  • The allocation ID for each replica shard.
  • + *
  • A unique ID for each replica shard.
  • + *
  • Any errors encountered while opening the shard index or from an earlier + * failure.
  • + *
+ *

+ * By default, the API returns store information only for primary shards that + * are unassigned or have one or more unassigned replica shards. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ShrinkRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ShrinkRequest.java index 7614e021c..58bc421f1 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ShrinkRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/ShrinkRequest.java @@ -60,8 +60,62 @@ // typedef: indices.shrink.Request /** - * Shrinks an existing index into a new index with fewer primary shards. - * + * Shrink an index. Shrink an index into a new index with fewer primary shards. + *

+ * Before you can shrink an index: + *

    + *
  • The index must be read-only.
  • + *
  • A copy of every shard in the index must reside on the same node.
  • + *
  • The index must have a green health status.
  • + *
+ *

+ * To make shard allocation easier, we recommend you also remove the index's + * replica shards. You can later re-add replica shards as part of the shrink + * operation. + *

+ * The requested number of primary shards in the target index must be a factor + * of the number of shards in the source index. For example, an index with 8 + * primary shards can be shrunk into 4, 2 or 1 primary shards or an index with + * 15 primary shards can be shrunk into 5, 3 or 1. If the number of shards in + * the index is a prime number it can only be shrunk into a single primary shard. + * Before shrinking, a (primary or replica) copy of every shard in the index + * must be present on the same node. + *

+ * The current write index on a data stream cannot be shrunk. In order to shrink + * the current write index, the data stream must first be rolled over so that a + * new write index is created and then the previous write index can be shrunk. + *

+ * A shrink operation: + *

    + *
  • Creates a new target index with the same definition as the source index, + * but with a smaller number of primary shards.
  • + *
  • Hard-links segments from the source index into the target index. If the + * file system does not support hard-linking, then all segments are copied into + * the new index, which is a much more time consuming process. Also if using + * multiple data paths, shards on different data paths require a full copy of + * segment files if they are not on the same disk since hardlinks do not work + * across disks.
  • + *
  • Recovers the target index as though it were a closed index which had just + * been re-opened. Recovers shards to the + * .routing.allocation.initial_recovery._id index setting.
  • + *
+ *

+ * IMPORTANT: Indices can only be shrunk if they satisfy the following + * requirements: + *

    + *
  • The target index must not exist.
  • + *
  • The source index must have more primary shards than the target + * index.
  • + *
  • The number of primary shards in the target index must be a factor of the + * number of primary shards in the source index. The source index must have more + * primary shards than the target index.
  • + *
  • The index must not contain more than 2,147,483,519 documents in total + * across all shards that will be shrunk into a single shard on the target index + * as this is the maximum number of docs that can fit into a single shard.
  • + *
  • The node handling the shrink process must have sufficient free disk space + * to accommodate a second copy of the existing index.
  • + *
+ * * @see API * specification */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/SplitRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/SplitRequest.java index 1cf7e7758..0fc0cdcba 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/SplitRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/SplitRequest.java @@ -60,8 +60,58 @@ // typedef: indices.split.Request /** - * Splits an existing index into a new index with more primary shards. - * + * Split an index. Split an index into a new index with more primary shards. + *
 
+ * Before you can split an index: + *
 
    + *
  • The index must be read-only.
  • + *
  • The cluster health status must be green.
  • + *
+ *

+ * The number of times the index can be split (and the number of shards that + * each original shard can be split into) is determined by the + * index.number_of_routing_shards setting. The number of routing + * shards specifies the hashing space that is used internally to distribute + * documents across shards with consistent hashing. For instance, a 5 shard + * index with number_of_routing_shards set to 30 (5 x 2 x 3) could + * be split by a factor of 2 or 3. + *

+ * A split operation: + *

    + *
  • Creates a new target index with the same definition as the source index, + * but with a larger number of primary shards.
  • + *
  • Hard-links segments from the source index into the target index. If the + * file system doesn't support hard-linking, all segments are copied into the + * new index, which is a much more time consuming process.
  • + *
  • Hashes all documents again, after low level files are created, to delete + * documents that belong to a different shard.
  • + *
  • Recovers the target index as though it were a closed index which had just + * been re-opened.
  • + *
+ *

+ * IMPORTANT: Indices can only be split if they satisfy the following + * requirements: + *

    + *
  • The target index must not exist.
  • + *
  • The source index must have fewer primary shards than the target + * index.
  • + *
  • The number of primary shards in the target index must be a multiple of + * the number of primary shards in the source index.
  • + *
  • The node handling the split process must have sufficient free disk space + * to accommodate a second copy of the existing index.
  • + *
+ * * @see API * specification */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/UnfreezeRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/UnfreezeRequest.java index 9e4b98fc1..562ae0122 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/UnfreezeRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/indices/UnfreezeRequest.java @@ -60,7 +60,8 @@ // typedef: indices.unfreeze.Request /** - * Unfreezes an index. + * Unfreeze an index. When a frozen index is unfrozen, the index goes through + * the normal recovery process and becomes writeable again. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/ElasticsearchInferenceAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/ElasticsearchInferenceAsyncClient.java index 64685ed53..12346a688 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/ElasticsearchInferenceAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/ElasticsearchInferenceAsyncClient.java @@ -182,7 +182,25 @@ public final CompletableFuture inference( // ----- Endpoint: inference.put /** - * Create an inference endpoint + * Create an inference endpoint. When you create an inference endpoint, the + * associated machine learning model is automatically deployed if it is not + * already running. After creating the endpoint, wait for the model deployment + * to complete before using it. To verify the deployment status, use the get + * trained model statistics API. Look for + * "state": "fully_allocated" in the response + * and ensure that the "allocation_count" matches the + * "target_allocation_count". Avoid creating multiple + * endpoints for the same model unless required, as each endpoint consumes + * significant resources. + *

+ * IMPORTANT: The inference APIs enable you to use certain services, such as + * built-in machine learning models (ELSER, E5), models uploaded through Eland, + * Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, + * Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models + * uploaded through Eland, the inference APIs offer an alternative way to use + * and manage trained models. However, if you do not plan to use the inference + * APIs to use these models or if you want to use non-NLP models, use the + * machine learning trained model APIs. * * @see Documentation @@ -197,7 +215,25 @@ public CompletableFuture put(PutRequest request) { } /** - * Create an inference endpoint + * Create an inference endpoint. When you create an inference endpoint, the + * associated machine learning model is automatically deployed if it is not + * already running. After creating the endpoint, wait for the model deployment + * to complete before using it. To verify the deployment status, use the get + * trained model statistics API. Look for + * "state": "fully_allocated" in the response + * and ensure that the "allocation_count" matches the + * "target_allocation_count". Avoid creating multiple + * endpoints for the same model unless required, as each endpoint consumes + * significant resources. + *

+ * IMPORTANT: The inference APIs enable you to use certain services, such as + * built-in machine learning models (ELSER, E5), models uploaded through Eland, + * Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, + * Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models + * uploaded through Eland, the inference APIs offer an alternative way to use + * and manage trained models. However, if you do not plan to use the inference + * APIs to use these models or if you want to use non-NLP models, use the + * machine learning trained model APIs. * * @param fn * a function that initializes a builder to create the diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/ElasticsearchInferenceClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/ElasticsearchInferenceClient.java index 753d3646c..9910b5717 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/ElasticsearchInferenceClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/ElasticsearchInferenceClient.java @@ -181,7 +181,25 @@ public final InferenceResponse inference(Function"state": "fully_allocated" in the response + * and ensure that the "allocation_count" matches the + * "target_allocation_count". Avoid creating multiple + * endpoints for the same model unless required, as each endpoint consumes + * significant resources. + *

+ * IMPORTANT: The inference APIs enable you to use certain services, such as + * built-in machine learning models (ELSER, E5), models uploaded through Eland, + * Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, + * Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models + * uploaded through Eland, the inference APIs offer an alternative way to use + * and manage trained models. However, if you do not plan to use the inference + * APIs to use these models or if you want to use non-NLP models, use the + * machine learning trained model APIs. * * @see Documentation @@ -196,7 +214,25 @@ public PutResponse put(PutRequest request) throws IOException, ElasticsearchExce } /** - * Create an inference endpoint + * Create an inference endpoint. When you create an inference endpoint, the + * associated machine learning model is automatically deployed if it is not + * already running. After creating the endpoint, wait for the model deployment + * to complete before using it. To verify the deployment status, use the get + * trained model statistics API. Look for + * "state": "fully_allocated" in the response + * and ensure that the "allocation_count" matches the + * "target_allocation_count". Avoid creating multiple + * endpoints for the same model unless required, as each endpoint consumes + * significant resources. + *

+ * IMPORTANT: The inference APIs enable you to use certain services, such as + * built-in machine learning models (ELSER, E5), models uploaded through Eland, + * Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, + * Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models + * uploaded through Eland, the inference APIs offer an alternative way to use + * and manage trained models. However, if you do not plan to use the inference + * APIs to use these models or if you want to use non-NLP models, use the + * machine learning trained model APIs. * * @param fn * a function that initializes a builder to create the diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutRequest.java index 3de1fbd28..6f734f226 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/inference/PutRequest.java @@ -59,7 +59,25 @@ // typedef: inference.put.Request /** - * Create an inference endpoint + * Create an inference endpoint. When you create an inference endpoint, the + * associated machine learning model is automatically deployed if it is not + * already running. After creating the endpoint, wait for the model deployment + * to complete before using it. To verify the deployment status, use the get + * trained model statistics API. Look for + * "state": "fully_allocated" in the response + * and ensure that the "allocation_count" matches the + * "target_allocation_count". Avoid creating multiple + * endpoints for the same model unless required, as each endpoint consumes + * significant resources. + *

+ * IMPORTANT: The inference APIs enable you to use certain services, such as + * built-in machine learning models (ELSER, E5), models uploaded through Eland, + * Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, + * Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models + * uploaded through Eland, the inference APIs offer an alternative way to use + * and manage trained models. However, if you do not plan to use the inference + * APIs to use these models or if you want to use non-NLP models, use the + * machine learning trained model APIs. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/license/DeleteLicenseRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/license/DeleteLicenseRequest.java index 1ad0663e4..be5baec04 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/license/DeleteLicenseRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/license/DeleteLicenseRequest.java @@ -50,7 +50,11 @@ // typedef: license.delete.Request /** - * Deletes licensing information for the cluster + * Delete the license. When the license expires, your subscription level reverts + * to Basic. + *

+ * If the operator privileges feature is enabled, only operator users can use + * this API. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/license/ElasticsearchLicenseAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/license/ElasticsearchLicenseAsyncClient.java index 78e40884a..d1f95c494 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/license/ElasticsearchLicenseAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/license/ElasticsearchLicenseAsyncClient.java @@ -70,7 +70,11 @@ public ElasticsearchLicenseAsyncClient withTransportOptions(@Nullable TransportO // ----- Endpoint: license.delete /** - * Deletes licensing information for the cluster + * Delete the license. When the license expires, your subscription level reverts + * to Basic. + *

+ * If the operator privileges feature is enabled, only operator users can use + * this API. * * @see Documentation @@ -84,11 +88,13 @@ public CompletableFuture delete() { // ----- Endpoint: license.get /** - * Get license information. Returns information about your Elastic license, - * including its type, its status, when it was issued, and when it expires. For - * more information about the different types of licenses, refer to - * Elastic Stack - * subscriptions. + * Get license information. Get information about your Elastic license including + * its type, its status, when it was issued, and when it expires. + *

+ * NOTE: If the master node is generating a new cluster state, the get license + * API may return a 404 Not Found response. If you receive an + * unexpected 404 response after cluster startup, wait a short period and retry + * the request. * * @see Documentation @@ -103,11 +109,13 @@ public CompletableFuture get(GetLicenseRequest request) { } /** - * Get license information. Returns information about your Elastic license, - * including its type, its status, when it was issued, and when it expires. For - * more information about the different types of licenses, refer to - * Elastic Stack - * subscriptions. + * Get license information. Get information about your Elastic license including + * its type, its status, when it was issued, and when it expires. + *

+ * NOTE: If the master node is generating a new cluster state, the get license + * API may return a 404 Not Found response. If you receive an + * unexpected 404 response after cluster startup, wait a short period and retry + * the request. * * @param fn * a function that initializes a builder to create the @@ -123,11 +131,13 @@ public final CompletableFuture get( } /** - * Get license information. Returns information about your Elastic license, - * including its type, its status, when it was issued, and when it expires. For - * more information about the different types of licenses, refer to - * Elastic Stack - * subscriptions. + * Get license information. Get information about your Elastic license including + * its type, its status, when it was issued, and when it expires. + *

+ * NOTE: If the master node is generating a new cluster state, the get license + * API may return a 404 Not Found response. If you receive an + * unexpected 404 response after cluster startup, wait a short period and retry + * the request. * * @see Documentation @@ -142,7 +152,7 @@ public CompletableFuture get() { // ----- Endpoint: license.get_basic_status /** - * Retrieves information about the status of the basic license. + * Get the basic license status. * * @see Documentation @@ -156,7 +166,7 @@ public CompletableFuture getBasicStatus() { // ----- Endpoint: license.get_trial_status /** - * Retrieves information about the status of the trial license. + * Get the trial status. * * @see Documentation @@ -170,7 +180,16 @@ public CompletableFuture getTrialStatus() { // ----- Endpoint: license.post /** - * Updates the license for the cluster. + * Update the license. You can update your license at runtime without shutting + * down your nodes. License updates take effect immediately. If the license you + * are installing does not support all of the features that were available with + * your previous license, however, you are notified in the response. You must + * then re-submit the API request with the acknowledge parameter set to true. + *

+ * NOTE: If Elasticsearch security features are enabled and you are installing a + * gold or higher license, you must enable TLS on the transport networking layer + * before you install the license. If the operator privileges feature is + * enabled, only operator users can use this API. * * @see Documentation @@ -185,7 +204,16 @@ public CompletableFuture post(PostRequest request) { } /** - * Updates the license for the cluster. + * Update the license. You can update your license at runtime without shutting + * down your nodes. License updates take effect immediately. If the license you + * are installing does not support all of the features that were available with + * your previous license, however, you are notified in the response. You must + * then re-submit the API request with the acknowledge parameter set to true. + *

+ * NOTE: If Elasticsearch security features are enabled and you are installing a + * gold or higher license, you must enable TLS on the transport networking layer + * before you install the license. If the operator privileges feature is + * enabled, only operator users can use this API. * * @param fn * a function that initializes a builder to create the @@ -200,7 +228,16 @@ public final CompletableFuture post(Function + * NOTE: If Elasticsearch security features are enabled and you are installing a + * gold or higher license, you must enable TLS on the transport networking layer + * before you install the license. If the operator privileges feature is + * enabled, only operator users can use this API. * * @see Documentation @@ -215,14 +252,18 @@ public CompletableFuture post() { // ----- Endpoint: license.post_start_basic /** - * The start basic API enables you to initiate an indefinite basic license, - * which gives access to all the basic features. If the basic license does not - * support all of the features that are available with your current license, - * however, you are notified in the response. You must then re-submit the API - * request with the acknowledge parameter set to true. To check the status of - * your basic license, use the following API: Get - * basic status. + * Start a basic license. Start an indefinite basic license, which gives access + * to all the basic features. + *

+ * NOTE: In order to start a basic license, you must not currently have a basic + * license. + *

+ * If the basic license does not support all of the features that are available + * with your current license, however, you are notified in the response. You + * must then re-submit the API request with the acknowledge + * parameter set to true. + *

+ * To check the status of your basic license, use the get basic license API. * * @see Documentation @@ -237,14 +278,18 @@ public CompletableFuture postStartBasic(PostStartBasicRe } /** - * The start basic API enables you to initiate an indefinite basic license, - * which gives access to all the basic features. If the basic license does not - * support all of the features that are available with your current license, - * however, you are notified in the response. You must then re-submit the API - * request with the acknowledge parameter set to true. To check the status of - * your basic license, use the following API: Get - * basic status. + * Start a basic license. Start an indefinite basic license, which gives access + * to all the basic features. + *

+ * NOTE: In order to start a basic license, you must not currently have a basic + * license. + *

+ * If the basic license does not support all of the features that are available + * with your current license, however, you are notified in the response. You + * must then re-submit the API request with the acknowledge + * parameter set to true. + *

+ * To check the status of your basic license, use the get basic license API. * * @param fn * a function that initializes a builder to create the @@ -260,14 +305,18 @@ public final CompletableFuture postStartBasic( } /** - * The start basic API enables you to initiate an indefinite basic license, - * which gives access to all the basic features. If the basic license does not - * support all of the features that are available with your current license, - * however, you are notified in the response. You must then re-submit the API - * request with the acknowledge parameter set to true. To check the status of - * your basic license, use the following API: Get - * basic status. + * Start a basic license. Start an indefinite basic license, which gives access + * to all the basic features. + *

+ * NOTE: In order to start a basic license, you must not currently have a basic + * license. + *

+ * If the basic license does not support all of the features that are available + * with your current license, however, you are notified in the response. You + * must then re-submit the API request with the acknowledge + * parameter set to true. + *

+ * To check the status of your basic license, use the get basic license API. * * @see Documentation @@ -282,8 +331,16 @@ public CompletableFuture postStartBasic() { // ----- Endpoint: license.post_start_trial /** - * The start trial API enables you to start a 30-day trial, which gives access - * to all subscription features. + * Start a trial. Start a 30-day trial, which gives access to all subscription + * features. + *

+ * NOTE: You are allowed to start a trial only if your cluster has not already + * activated a trial for the current major product version. For example, if you + * have already activated a trial for v8.0, you cannot start a new trial until + * v9.0. You can, however, request an extended trial at https://www.elastic.co/trialextension. + *

+ * To check the status of your trial, use the get trial status API. * * @see Documentation @@ -298,8 +355,16 @@ public CompletableFuture postStartTrial(PostStartTrialRe } /** - * The start trial API enables you to start a 30-day trial, which gives access - * to all subscription features. + * Start a trial. Start a 30-day trial, which gives access to all subscription + * features. + *

+ * NOTE: You are allowed to start a trial only if your cluster has not already + * activated a trial for the current major product version. For example, if you + * have already activated a trial for v8.0, you cannot start a new trial until + * v9.0. You can, however, request an extended trial at https://www.elastic.co/trialextension. + *

+ * To check the status of your trial, use the get trial status API. * * @param fn * a function that initializes a builder to create the @@ -315,8 +380,16 @@ public final CompletableFuture postStartTrial( } /** - * The start trial API enables you to start a 30-day trial, which gives access - * to all subscription features. + * Start a trial. Start a 30-day trial, which gives access to all subscription + * features. + *

+ * NOTE: You are allowed to start a trial only if your cluster has not already + * activated a trial for the current major product version. For example, if you + * have already activated a trial for v8.0, you cannot start a new trial until + * v9.0. You can, however, request an extended trial at https://www.elastic.co/trialextension. + *

+ * To check the status of your trial, use the get trial status API. * * @see Documentation diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/license/ElasticsearchLicenseClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/license/ElasticsearchLicenseClient.java index d05f20af2..f6e8662b6 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/license/ElasticsearchLicenseClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/license/ElasticsearchLicenseClient.java @@ -68,7 +68,11 @@ public ElasticsearchLicenseClient withTransportOptions(@Nullable TransportOption // ----- Endpoint: license.delete /** - * Deletes licensing information for the cluster + * Delete the license. When the license expires, your subscription level reverts + * to Basic. + *

+ * If the operator privileges feature is enabled, only operator users can use + * this API. * * @see Documentation @@ -82,11 +86,13 @@ public DeleteLicenseResponse delete() throws IOException, ElasticsearchException // ----- Endpoint: license.get /** - * Get license information. Returns information about your Elastic license, - * including its type, its status, when it was issued, and when it expires. For - * more information about the different types of licenses, refer to - * Elastic Stack - * subscriptions. + * Get license information. Get information about your Elastic license including + * its type, its status, when it was issued, and when it expires. + *

+ * NOTE: If the master node is generating a new cluster state, the get license + * API may return a 404 Not Found response. If you receive an + * unexpected 404 response after cluster startup, wait a short period and retry + * the request. * * @see Documentation @@ -101,11 +107,13 @@ public GetLicenseResponse get(GetLicenseRequest request) throws IOException, Ela } /** - * Get license information. Returns information about your Elastic license, - * including its type, its status, when it was issued, and when it expires. For - * more information about the different types of licenses, refer to - * Elastic Stack - * subscriptions. + * Get license information. Get information about your Elastic license including + * its type, its status, when it was issued, and when it expires. + *

+ * NOTE: If the master node is generating a new cluster state, the get license + * API may return a 404 Not Found response. If you receive an + * unexpected 404 response after cluster startup, wait a short period and retry + * the request. * * @param fn * a function that initializes a builder to create the @@ -121,11 +129,13 @@ public final GetLicenseResponse get(FunctionElastic Stack - * subscriptions. + * Get license information. Get information about your Elastic license including + * its type, its status, when it was issued, and when it expires. + *

+ * NOTE: If the master node is generating a new cluster state, the get license + * API may return a 404 Not Found response. If you receive an + * unexpected 404 response after cluster startup, wait a short period and retry + * the request. * * @see Documentation @@ -140,7 +150,7 @@ public GetLicenseResponse get() throws IOException, ElasticsearchException { // ----- Endpoint: license.get_basic_status /** - * Retrieves information about the status of the basic license. + * Get the basic license status. * * @see Documentation @@ -154,7 +164,7 @@ public GetBasicStatusResponse getBasicStatus() throws IOException, Elasticsearch // ----- Endpoint: license.get_trial_status /** - * Retrieves information about the status of the trial license. + * Get the trial status. * * @see Documentation @@ -168,7 +178,16 @@ public GetTrialStatusResponse getTrialStatus() throws IOException, Elasticsearch // ----- Endpoint: license.post /** - * Updates the license for the cluster. + * Update the license. You can update your license at runtime without shutting + * down your nodes. License updates take effect immediately. If the license you + * are installing does not support all of the features that were available with + * your previous license, however, you are notified in the response. You must + * then re-submit the API request with the acknowledge parameter set to true. + *

+ * NOTE: If Elasticsearch security features are enabled and you are installing a + * gold or higher license, you must enable TLS on the transport networking layer + * before you install the license. If the operator privileges feature is + * enabled, only operator users can use this API. * * @see Documentation @@ -183,7 +202,16 @@ public PostResponse post(PostRequest request) throws IOException, ElasticsearchE } /** - * Updates the license for the cluster. + * Update the license. You can update your license at runtime without shutting + * down your nodes. License updates take effect immediately. If the license you + * are installing does not support all of the features that were available with + * your previous license, however, you are notified in the response. You must + * then re-submit the API request with the acknowledge parameter set to true. + *

+ * NOTE: If Elasticsearch security features are enabled and you are installing a + * gold or higher license, you must enable TLS on the transport networking layer + * before you install the license. If the operator privileges feature is + * enabled, only operator users can use this API. * * @param fn * a function that initializes a builder to create the @@ -199,7 +227,16 @@ public final PostResponse post(Function + * NOTE: If Elasticsearch security features are enabled and you are installing a + * gold or higher license, you must enable TLS on the transport networking layer + * before you install the license. If the operator privileges feature is + * enabled, only operator users can use this API. * * @see Documentation @@ -214,14 +251,18 @@ public PostResponse post() throws IOException, ElasticsearchException { // ----- Endpoint: license.post_start_basic /** - * The start basic API enables you to initiate an indefinite basic license, - * which gives access to all the basic features. If the basic license does not - * support all of the features that are available with your current license, - * however, you are notified in the response. You must then re-submit the API - * request with the acknowledge parameter set to true. To check the status of - * your basic license, use the following API: Get - * basic status. + * Start a basic license. Start an indefinite basic license, which gives access + * to all the basic features. + *

+ * NOTE: In order to start a basic license, you must not currently have a basic + * license. + *

+ * If the basic license does not support all of the features that are available + * with your current license, however, you are notified in the response. You + * must then re-submit the API request with the acknowledge + * parameter set to true. + *

+ * To check the status of your basic license, use the get basic license API. * * @see Documentation @@ -237,14 +278,18 @@ public PostStartBasicResponse postStartBasic(PostStartBasicRequest request) } /** - * The start basic API enables you to initiate an indefinite basic license, - * which gives access to all the basic features. If the basic license does not - * support all of the features that are available with your current license, - * however, you are notified in the response. You must then re-submit the API - * request with the acknowledge parameter set to true. To check the status of - * your basic license, use the following API: Get - * basic status. + * Start a basic license. Start an indefinite basic license, which gives access + * to all the basic features. + *

+ * NOTE: In order to start a basic license, you must not currently have a basic + * license. + *

+ * If the basic license does not support all of the features that are available + * with your current license, however, you are notified in the response. You + * must then re-submit the API request with the acknowledge + * parameter set to true. + *

+ * To check the status of your basic license, use the get basic license API. * * @param fn * a function that initializes a builder to create the @@ -261,14 +306,18 @@ public final PostStartBasicResponse postStartBasic( } /** - * The start basic API enables you to initiate an indefinite basic license, - * which gives access to all the basic features. If the basic license does not - * support all of the features that are available with your current license, - * however, you are notified in the response. You must then re-submit the API - * request with the acknowledge parameter set to true. To check the status of - * your basic license, use the following API: Get - * basic status. + * Start a basic license. Start an indefinite basic license, which gives access + * to all the basic features. + *

+ * NOTE: In order to start a basic license, you must not currently have a basic + * license. + *

+ * If the basic license does not support all of the features that are available + * with your current license, however, you are notified in the response. You + * must then re-submit the API request with the acknowledge + * parameter set to true. + *

+ * To check the status of your basic license, use the get basic license API. * * @see Documentation @@ -283,8 +332,16 @@ public PostStartBasicResponse postStartBasic() throws IOException, Elasticsearch // ----- Endpoint: license.post_start_trial /** - * The start trial API enables you to start a 30-day trial, which gives access - * to all subscription features. + * Start a trial. Start a 30-day trial, which gives access to all subscription + * features. + *

+ * NOTE: You are allowed to start a trial only if your cluster has not already + * activated a trial for the current major product version. For example, if you + * have already activated a trial for v8.0, you cannot start a new trial until + * v9.0. You can, however, request an extended trial at https://www.elastic.co/trialextension. + *

+ * To check the status of your trial, use the get trial status API. * * @see Documentation @@ -300,8 +357,16 @@ public PostStartTrialResponse postStartTrial(PostStartTrialRequest request) } /** - * The start trial API enables you to start a 30-day trial, which gives access - * to all subscription features. + * Start a trial. Start a 30-day trial, which gives access to all subscription + * features. + *

+ * NOTE: You are allowed to start a trial only if your cluster has not already + * activated a trial for the current major product version. For example, if you + * have already activated a trial for v8.0, you cannot start a new trial until + * v9.0. You can, however, request an extended trial at https://www.elastic.co/trialextension. + *

+ * To check the status of your trial, use the get trial status API. * * @param fn * a function that initializes a builder to create the @@ -318,8 +383,16 @@ public final PostStartTrialResponse postStartTrial( } /** - * The start trial API enables you to start a 30-day trial, which gives access - * to all subscription features. + * Start a trial. Start a 30-day trial, which gives access to all subscription + * features. + *

+ * NOTE: You are allowed to start a trial only if your cluster has not already + * activated a trial for the current major product version. For example, if you + * have already activated a trial for v8.0, you cannot start a new trial until + * v9.0. You can, however, request an extended trial at https://www.elastic.co/trialextension. + *

+ * To check the status of your trial, use the get trial status API. * * @see Documentation diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/license/GetBasicStatusRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/license/GetBasicStatusRequest.java index 8d6761900..c4a087525 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/license/GetBasicStatusRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/license/GetBasicStatusRequest.java @@ -50,7 +50,7 @@ // typedef: license.get_basic_status.Request /** - * Retrieves information about the status of the basic license. + * Get the basic license status. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/license/GetLicenseRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/license/GetLicenseRequest.java index 029998023..4b8d3e20d 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/license/GetLicenseRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/license/GetLicenseRequest.java @@ -55,11 +55,13 @@ // typedef: license.get.Request /** - * Get license information. Returns information about your Elastic license, - * including its type, its status, when it was issued, and when it expires. For - * more information about the different types of licenses, refer to - * Elastic Stack - * subscriptions. + * Get license information. Get information about your Elastic license including + * its type, its status, when it was issued, and when it expires. + *

+ * NOTE: If the master node is generating a new cluster state, the get license + * API may return a 404 Not Found response. If you receive an + * unexpected 404 response after cluster startup, wait a short period and retry + * the request. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/license/GetTrialStatusRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/license/GetTrialStatusRequest.java index 86b5476f1..9c8825929 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/license/GetTrialStatusRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/license/GetTrialStatusRequest.java @@ -50,7 +50,7 @@ // typedef: license.get_trial_status.Request /** - * Retrieves information about the status of the trial license. + * Get the trial status. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/license/PostRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/license/PostRequest.java index 669883385..11620e9b3 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/license/PostRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/license/PostRequest.java @@ -59,7 +59,16 @@ // typedef: license.post.Request /** - * Updates the license for the cluster. + * Update the license. You can update your license at runtime without shutting + * down your nodes. License updates take effect immediately. If the license you + * are installing does not support all of the features that were available with + * your previous license, however, you are notified in the response. You must + * then re-submit the API request with the acknowledge parameter set to true. + *

+ * NOTE: If Elasticsearch security features are enabled and you are installing a + * gold or higher license, you must enable TLS on the transport networking layer + * before you install the license. If the operator privileges feature is + * enabled, only operator users can use this API. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/license/PostStartBasicRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/license/PostStartBasicRequest.java index 1db385d95..e00ca6eeb 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/license/PostStartBasicRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/license/PostStartBasicRequest.java @@ -55,14 +55,18 @@ // typedef: license.post_start_basic.Request /** - * The start basic API enables you to initiate an indefinite basic license, - * which gives access to all the basic features. If the basic license does not - * support all of the features that are available with your current license, - * however, you are notified in the response. You must then re-submit the API - * request with the acknowledge parameter set to true. To check the status of - * your basic license, use the following API: Get - * basic status. + * Start a basic license. Start an indefinite basic license, which gives access + * to all the basic features. + *

+ * NOTE: In order to start a basic license, you must not currently have a basic + * license. + *

+ * If the basic license does not support all of the features that are available + * with your current license, however, you are notified in the response. You + * must then re-submit the API request with the acknowledge + * parameter set to true. + *

+ * To check the status of your basic license, use the get basic license API. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/license/PostStartTrialRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/license/PostStartTrialRequest.java index 05783ca55..b51dcb1dc 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/license/PostStartTrialRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/license/PostStartTrialRequest.java @@ -56,8 +56,16 @@ // typedef: license.post_start_trial.Request /** - * The start trial API enables you to start a 30-day trial, which gives access - * to all subscription features. + * Start a trial. Start a 30-day trial, which gives access to all subscription + * features. + *

+ * NOTE: You are allowed to start a trial only if your cluster has not already + * activated a trial for the current major product version. For example, if you + * have already activated a trial for v8.0, you cannot start a new trial until + * v9.0. You can, however, request an extended trial at https://www.elastic.co/trialextension. + *

+ * To check the status of your trial, use the get trial status API. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/DeletePipelineRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/DeletePipelineRequest.java index ffa231af7..ae6b4a3c0 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/DeletePipelineRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/DeletePipelineRequest.java @@ -58,7 +58,9 @@ // typedef: logstash.delete_pipeline.Request /** - * Deletes a pipeline used for Logstash Central Management. + * Delete a Logstash pipeline. + *

+ * Delete a pipeline that is used for Logstash Central Management. * * @see API @@ -81,7 +83,7 @@ public static DeletePipelineRequest of(Function * API name: {@code id} */ @@ -101,7 +103,7 @@ public static class Builder extends RequestBase.AbstractBuilder private String id; /** - * Required - Identifier for the pipeline. + * Required - An identifier for the pipeline. *

* API name: {@code id} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/ElasticsearchLogstashAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/ElasticsearchLogstashAsyncClient.java index af70c837a..772cd4613 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/ElasticsearchLogstashAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/ElasticsearchLogstashAsyncClient.java @@ -71,7 +71,9 @@ public ElasticsearchLogstashAsyncClient withTransportOptions(@Nullable Transport // ----- Endpoint: logstash.delete_pipeline /** - * Deletes a pipeline used for Logstash Central Management. + * Delete a Logstash pipeline. + *

+ * Delete a pipeline that is used for Logstash Central Management. * * @see Documentation @@ -86,7 +88,9 @@ public CompletableFuture deletePipeline(DeletePipelineRequest r } /** - * Deletes a pipeline used for Logstash Central Management. + * Delete a Logstash pipeline. + *

+ * Delete a pipeline that is used for Logstash Central Management. * * @param fn * a function that initializes a builder to create the @@ -104,7 +108,9 @@ public final CompletableFuture deletePipeline( // ----- Endpoint: logstash.get_pipeline /** - * Retrieves pipelines used for Logstash Central Management. + * Get Logstash pipelines. + *

+ * Get pipelines that are used for Logstash Central Management. * * @see Documentation @@ -119,7 +125,9 @@ public CompletableFuture getPipeline(GetPipelineRequest req } /** - * Retrieves pipelines used for Logstash Central Management. + * Get Logstash pipelines. + *

+ * Get pipelines that are used for Logstash Central Management. * * @param fn * a function that initializes a builder to create the @@ -135,7 +143,9 @@ public final CompletableFuture getPipeline( } /** - * Retrieves pipelines used for Logstash Central Management. + * Get Logstash pipelines. + *

+ * Get pipelines that are used for Logstash Central Management. * * @see Documentation @@ -150,7 +160,10 @@ public CompletableFuture getPipeline() { // ----- Endpoint: logstash.put_pipeline /** - * Creates or updates a pipeline used for Logstash Central Management. + * Create or update a Logstash pipeline. + *

+ * Create a pipeline that is used for Logstash Central Management. If the + * specified pipeline exists, it is replaced. * * @see Documentation @@ -165,7 +178,10 @@ public CompletableFuture putPipeline(PutPipelineRequest request } /** - * Creates or updates a pipeline used for Logstash Central Management. + * Create or update a Logstash pipeline. + *

+ * Create a pipeline that is used for Logstash Central Management. If the + * specified pipeline exists, it is replaced. * * @param fn * a function that initializes a builder to create the diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/ElasticsearchLogstashClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/ElasticsearchLogstashClient.java index d0fa36a78..874f6cdd7 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/ElasticsearchLogstashClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/ElasticsearchLogstashClient.java @@ -69,7 +69,9 @@ public ElasticsearchLogstashClient withTransportOptions(@Nullable TransportOptio // ----- Endpoint: logstash.delete_pipeline /** - * Deletes a pipeline used for Logstash Central Management. + * Delete a Logstash pipeline. + *

+ * Delete a pipeline that is used for Logstash Central Management. * * @see Documentation @@ -84,7 +86,9 @@ public BooleanResponse deletePipeline(DeletePipelineRequest request) throws IOEx } /** - * Deletes a pipeline used for Logstash Central Management. + * Delete a Logstash pipeline. + *

+ * Delete a pipeline that is used for Logstash Central Management. * * @param fn * a function that initializes a builder to create the @@ -103,7 +107,9 @@ public final BooleanResponse deletePipeline( // ----- Endpoint: logstash.get_pipeline /** - * Retrieves pipelines used for Logstash Central Management. + * Get Logstash pipelines. + *

+ * Get pipelines that are used for Logstash Central Management. * * @see Documentation @@ -118,7 +124,9 @@ public GetPipelineResponse getPipeline(GetPipelineRequest request) throws IOExce } /** - * Retrieves pipelines used for Logstash Central Management. + * Get Logstash pipelines. + *

+ * Get pipelines that are used for Logstash Central Management. * * @param fn * a function that initializes a builder to create the @@ -135,7 +143,9 @@ public final GetPipelineResponse getPipeline( } /** - * Retrieves pipelines used for Logstash Central Management. + * Get Logstash pipelines. + *

+ * Get pipelines that are used for Logstash Central Management. * * @see Documentation @@ -150,7 +160,10 @@ public GetPipelineResponse getPipeline() throws IOException, ElasticsearchExcept // ----- Endpoint: logstash.put_pipeline /** - * Creates or updates a pipeline used for Logstash Central Management. + * Create or update a Logstash pipeline. + *

+ * Create a pipeline that is used for Logstash Central Management. If the + * specified pipeline exists, it is replaced. * * @see Documentation @@ -165,7 +178,10 @@ public BooleanResponse putPipeline(PutPipelineRequest request) throws IOExceptio } /** - * Creates or updates a pipeline used for Logstash Central Management. + * Create or update a Logstash pipeline. + *

+ * Create a pipeline that is used for Logstash Central Management. If the + * specified pipeline exists, it is replaced. * * @param fn * a function that initializes a builder to create the diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/GetPipelineRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/GetPipelineRequest.java index bedf522d9..b3e608143 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/GetPipelineRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/GetPipelineRequest.java @@ -58,7 +58,9 @@ // typedef: logstash.get_pipeline.Request /** - * Retrieves pipelines used for Logstash Central Management. + * Get Logstash pipelines. + *

+ * Get pipelines that are used for Logstash Central Management. * * @see API * specification @@ -80,7 +82,7 @@ public static GetPipelineRequest of(Function * API name: {@code id} */ @@ -101,7 +103,7 @@ public static class Builder extends RequestBase.AbstractBuilder private List id; /** - * Comma-separated list of pipeline identifiers. + * A comma-separated list of pipeline identifiers. *

* API name: {@code id} *

@@ -113,7 +115,7 @@ public final Builder id(List list) { } /** - * Comma-separated list of pipeline identifiers. + * A comma-separated list of pipeline identifiers. *

* API name: {@code id} *

diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/Pipeline.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/Pipeline.java index 069e7da6d..3619ab418 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/Pipeline.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/logstash/Pipeline.java @@ -64,24 +64,24 @@ public class Pipeline implements JsonpSerializable { private final DateTime lastModified; - private final PipelineMetadata pipelineMetadata; - - private final String username; - private final String pipeline; + private final PipelineMetadata pipelineMetadata; + private final PipelineSettings pipelineSettings; + private final String username; + // --------------------------------------------------------------------------------------------- private Pipeline(Builder builder) { this.description = ApiTypeHelper.requireNonNull(builder.description, this, "description"); this.lastModified = ApiTypeHelper.requireNonNull(builder.lastModified, this, "lastModified"); - this.pipelineMetadata = ApiTypeHelper.requireNonNull(builder.pipelineMetadata, this, "pipelineMetadata"); - this.username = ApiTypeHelper.requireNonNull(builder.username, this, "username"); this.pipeline = ApiTypeHelper.requireNonNull(builder.pipeline, this, "pipeline"); + this.pipelineMetadata = ApiTypeHelper.requireNonNull(builder.pipelineMetadata, this, "pipelineMetadata"); this.pipelineSettings = ApiTypeHelper.requireNonNull(builder.pipelineSettings, this, "pipelineSettings"); + this.username = ApiTypeHelper.requireNonNull(builder.username, this, "username"); } @@ -90,7 +90,7 @@ public static Pipeline of(Function> fn) { } /** - * Required - Description of the pipeline. This description is not used by + * Required - A description of the pipeline. This description is not used by * Elasticsearch or Logstash. *

* API name: {@code description} @@ -100,7 +100,7 @@ public final String description() { } /** - * Required - Date the pipeline was last updated. Must be in the + * Required - The date the pipeline was last updated. It must be in the * yyyy-MM-dd'T'HH:mm:ss.SSSZZ strict_date_time format. *

* API name: {@code last_modified} @@ -110,41 +110,41 @@ public final DateTime lastModified() { } /** - * Required - Optional metadata about the pipeline. May have any contents. This - * metadata is not generated or used by Elasticsearch or Logstash. + * Required - The configuration for the pipeline. *

- * API name: {@code pipeline_metadata} + * API name: {@code pipeline} */ - public final PipelineMetadata pipelineMetadata() { - return this.pipelineMetadata; + public final String pipeline() { + return this.pipeline; } /** - * Required - User who last updated the pipeline. + * Required - Optional metadata about the pipeline, which can have any contents. + * This metadata is not generated or used by Elasticsearch or Logstash. *

- * API name: {@code username} + * API name: {@code pipeline_metadata} */ - public final String username() { - return this.username; + public final PipelineMetadata pipelineMetadata() { + return this.pipelineMetadata; } /** - * Required - Configuration for the pipeline. + * Required - Settings for the pipeline. It supports only flat keys in dot + * notation. *

- * API name: {@code pipeline} + * API name: {@code pipeline_settings} */ - public final String pipeline() { - return this.pipeline; + public final PipelineSettings pipelineSettings() { + return this.pipelineSettings; } /** - * Required - Settings for the pipeline. Supports only flat keys in dot - * notation. + * Required - The user who last updated the pipeline. *

- * API name: {@code pipeline_settings} + * API name: {@code username} */ - public final PipelineSettings pipelineSettings() { - return this.pipelineSettings; + public final String username() { + return this.username; } /** @@ -163,18 +163,18 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { generator.writeKey("last_modified"); this.lastModified.serialize(generator, mapper); - generator.writeKey("pipeline_metadata"); - this.pipelineMetadata.serialize(generator, mapper); - - generator.writeKey("username"); - generator.write(this.username); - generator.writeKey("pipeline"); generator.write(this.pipeline); + generator.writeKey("pipeline_metadata"); + this.pipelineMetadata.serialize(generator, mapper); + generator.writeKey("pipeline_settings"); this.pipelineSettings.serialize(generator, mapper); + generator.writeKey("username"); + generator.write(this.username); + } @Override @@ -193,16 +193,16 @@ public static class Builder extends WithJsonObjectBuilderBase implement private DateTime lastModified; - private PipelineMetadata pipelineMetadata; - - private String username; - private String pipeline; + private PipelineMetadata pipelineMetadata; + private PipelineSettings pipelineSettings; + private String username; + /** - * Required - Description of the pipeline. This description is not used by + * Required - A description of the pipeline. This description is not used by * Elasticsearch or Logstash. *

* API name: {@code description} @@ -213,7 +213,7 @@ public final Builder description(String value) { } /** - * Required - Date the pipeline was last updated. Must be in the + * Required - The date the pipeline was last updated. It must be in the * yyyy-MM-dd'T'HH:mm:ss.SSSZZ strict_date_time format. *

* API name: {@code last_modified} @@ -224,48 +224,38 @@ public final Builder lastModified(DateTime value) { } /** - * Required - Optional metadata about the pipeline. May have any contents. This - * metadata is not generated or used by Elasticsearch or Logstash. + * Required - The configuration for the pipeline. *

- * API name: {@code pipeline_metadata} + * API name: {@code pipeline} */ - public final Builder pipelineMetadata(PipelineMetadata value) { - this.pipelineMetadata = value; + public final Builder pipeline(String value) { + this.pipeline = value; return this; } /** - * Required - Optional metadata about the pipeline. May have any contents. This - * metadata is not generated or used by Elasticsearch or Logstash. + * Required - Optional metadata about the pipeline, which can have any contents. + * This metadata is not generated or used by Elasticsearch or Logstash. *

* API name: {@code pipeline_metadata} */ - public final Builder pipelineMetadata(Function> fn) { - return this.pipelineMetadata(fn.apply(new PipelineMetadata.Builder()).build()); - } - - /** - * Required - User who last updated the pipeline. - *

- * API name: {@code username} - */ - public final Builder username(String value) { - this.username = value; + public final Builder pipelineMetadata(PipelineMetadata value) { + this.pipelineMetadata = value; return this; } /** - * Required - Configuration for the pipeline. + * Required - Optional metadata about the pipeline, which can have any contents. + * This metadata is not generated or used by Elasticsearch or Logstash. *

- * API name: {@code pipeline} + * API name: {@code pipeline_metadata} */ - public final Builder pipeline(String value) { - this.pipeline = value; - return this; + public final Builder pipelineMetadata(Function> fn) { + return this.pipelineMetadata(fn.apply(new PipelineMetadata.Builder()).build()); } /** - * Required - Settings for the pipeline. Supports only flat keys in dot + * Required - Settings for the pipeline. It supports only flat keys in dot * notation. *

* API name: {@code pipeline_settings} @@ -276,7 +266,7 @@ public final Builder pipelineSettings(PipelineSettings value) { } /** - * Required - Settings for the pipeline. Supports only flat keys in dot + * Required - Settings for the pipeline. It supports only flat keys in dot * notation. *

* API name: {@code pipeline_settings} @@ -285,6 +275,16 @@ public final Builder pipelineSettings(Function + * API name: {@code username} + */ + public final Builder username(String value) { + this.username = value; + return this; + } + @Override protected Builder self() { return this; @@ -315,10 +315,10 @@ protected static void setupPipelineDeserializer(ObjectDeserializer + * Create a pipeline that is used for Logstash Central Management. If the + * specified pipeline exists, it is replaced. * * @see API * specification @@ -86,7 +89,7 @@ public static PutPipelineRequest of(Function * API name: {@code id} */ @@ -123,7 +126,7 @@ public static class Builder extends RequestBase.AbstractBuilder private Pipeline pipeline; /** - * Required - Identifier for the pipeline. + * Required - An identifier for the pipeline. *

* API name: {@code id} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/DeprecationsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/DeprecationsRequest.java index 852165c22..394685c4c 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/DeprecationsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/DeprecationsRequest.java @@ -55,9 +55,12 @@ // typedef: migration.deprecations.Request /** - * Retrieves information about different cluster, node, and index level settings - * that use deprecated features that will be removed or changed in the next - * major version. + * Get deprecation information. Get information about different cluster, node, + * and index level settings that use deprecated features that will be removed or + * changed in the next major version. + *

+ * TIP: This API is designed for indirect use by the Upgrade Assistant. We + * strongly recommend you use the Upgrade Assistant. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/ElasticsearchMigrationAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/ElasticsearchMigrationAsyncClient.java index a897b8d56..194c9aa01 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/ElasticsearchMigrationAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/ElasticsearchMigrationAsyncClient.java @@ -70,9 +70,12 @@ public ElasticsearchMigrationAsyncClient withTransportOptions(@Nullable Transpor // ----- Endpoint: migration.deprecations /** - * Retrieves information about different cluster, node, and index level settings - * that use deprecated features that will be removed or changed in the next - * major version. + * Get deprecation information. Get information about different cluster, node, + * and index level settings that use deprecated features that will be removed or + * changed in the next major version. + *

+ * TIP: This API is designed for indirect use by the Upgrade Assistant. We + * strongly recommend you use the Upgrade Assistant. * * @see Documentation @@ -87,9 +90,12 @@ public CompletableFuture deprecations(DeprecationsRequest } /** - * Retrieves information about different cluster, node, and index level settings - * that use deprecated features that will be removed or changed in the next - * major version. + * Get deprecation information. Get information about different cluster, node, + * and index level settings that use deprecated features that will be removed or + * changed in the next major version. + *

+ * TIP: This API is designed for indirect use by the Upgrade Assistant. We + * strongly recommend you use the Upgrade Assistant. * * @param fn * a function that initializes a builder to create the @@ -105,9 +111,12 @@ public final CompletableFuture deprecations( } /** - * Retrieves information about different cluster, node, and index level settings - * that use deprecated features that will be removed or changed in the next - * major version. + * Get deprecation information. Get information about different cluster, node, + * and index level settings that use deprecated features that will be removed or + * changed in the next major version. + *

+ * TIP: This API is designed for indirect use by the Upgrade Assistant. We + * strongly recommend you use the Upgrade Assistant. * * @see Documentation @@ -122,7 +131,13 @@ public CompletableFuture deprecations() { // ----- Endpoint: migration.get_feature_upgrade_status /** - * Find out whether system features need to be upgraded or not + * Get feature migration information. Version upgrades sometimes require changes + * to how features store configuration information and data in system indices. + * Check which features need to be migrated and the status of any migrations + * that are in progress. + *

+ * TIP: This API is designed for indirect use by the Upgrade Assistant. We + * strongly recommend you use the Upgrade Assistant. * * @see Documentation @@ -136,7 +151,15 @@ public CompletableFuture getFeatureUpgradeStatu // ----- Endpoint: migration.post_feature_upgrade /** - * Begin upgrades for system features + * Start the feature migration. Version upgrades sometimes require changes to + * how features store configuration information and data in system indices. This + * API starts the automatic migration process. + *

+ * Some functionality might be temporarily unavailable during the migration + * process. + *

+ * TIP: The API is designed for indirect use by the Upgrade Assistant. We + * strongly recommend you use the Upgrade Assistant. * * @see Documentation diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/ElasticsearchMigrationClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/ElasticsearchMigrationClient.java index 0ec1c3ffd..b9e21bb84 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/ElasticsearchMigrationClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/ElasticsearchMigrationClient.java @@ -68,9 +68,12 @@ public ElasticsearchMigrationClient withTransportOptions(@Nullable TransportOpti // ----- Endpoint: migration.deprecations /** - * Retrieves information about different cluster, node, and index level settings - * that use deprecated features that will be removed or changed in the next - * major version. + * Get deprecation information. Get information about different cluster, node, + * and index level settings that use deprecated features that will be removed or + * changed in the next major version. + *

+ * TIP: This API is designed for indirect use by the Upgrade Assistant. We + * strongly recommend you use the Upgrade Assistant. * * @see Documentation @@ -85,9 +88,12 @@ public DeprecationsResponse deprecations(DeprecationsRequest request) throws IOE } /** - * Retrieves information about different cluster, node, and index level settings - * that use deprecated features that will be removed or changed in the next - * major version. + * Get deprecation information. Get information about different cluster, node, + * and index level settings that use deprecated features that will be removed or + * changed in the next major version. + *

+ * TIP: This API is designed for indirect use by the Upgrade Assistant. We + * strongly recommend you use the Upgrade Assistant. * * @param fn * a function that initializes a builder to create the @@ -104,9 +110,12 @@ public final DeprecationsResponse deprecations( } /** - * Retrieves information about different cluster, node, and index level settings - * that use deprecated features that will be removed or changed in the next - * major version. + * Get deprecation information. Get information about different cluster, node, + * and index level settings that use deprecated features that will be removed or + * changed in the next major version. + *

+ * TIP: This API is designed for indirect use by the Upgrade Assistant. We + * strongly recommend you use the Upgrade Assistant. * * @see Documentation @@ -121,7 +130,13 @@ public DeprecationsResponse deprecations() throws IOException, ElasticsearchExce // ----- Endpoint: migration.get_feature_upgrade_status /** - * Find out whether system features need to be upgraded or not + * Get feature migration information. Version upgrades sometimes require changes + * to how features store configuration information and data in system indices. + * Check which features need to be migrated and the status of any migrations + * that are in progress. + *

+ * TIP: This API is designed for indirect use by the Upgrade Assistant. We + * strongly recommend you use the Upgrade Assistant. * * @see Documentation @@ -135,7 +150,15 @@ public GetFeatureUpgradeStatusResponse getFeatureUpgradeStatus() throws IOExcept // ----- Endpoint: migration.post_feature_upgrade /** - * Begin upgrades for system features + * Start the feature migration. Version upgrades sometimes require changes to + * how features store configuration information and data in system indices. This + * API starts the automatic migration process. + *

+ * Some functionality might be temporarily unavailable during the migration + * process. + *

+ * TIP: The API is designed for indirect use by the Upgrade Assistant. We + * strongly recommend you use the Upgrade Assistant. * * @see Documentation diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/GetFeatureUpgradeStatusRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/GetFeatureUpgradeStatusRequest.java index 27411dc69..f9d0cf485 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/GetFeatureUpgradeStatusRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/GetFeatureUpgradeStatusRequest.java @@ -50,7 +50,13 @@ // typedef: migration.get_feature_upgrade_status.Request /** - * Find out whether system features need to be upgraded or not + * Get feature migration information. Version upgrades sometimes require changes + * to how features store configuration information and data in system indices. + * Check which features need to be migrated and the status of any migrations + * that are in progress. + *

+ * TIP: This API is designed for indirect use by the Upgrade Assistant. We + * strongly recommend you use the Upgrade Assistant. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/PostFeatureUpgradeRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/PostFeatureUpgradeRequest.java index c016b79f8..95e329319 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/PostFeatureUpgradeRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/migration/PostFeatureUpgradeRequest.java @@ -50,7 +50,15 @@ // typedef: migration.post_feature_upgrade.Request /** - * Begin upgrades for system features + * Start the feature migration. Version upgrades sometimes require changes to + * how features store configuration information and data in system indices. This + * API starts the automatic migration process. + *

+ * Some functionality might be temporarily unavailable during the migration + * process. + *

+ * TIP: The API is designed for indirect use by the Upgrade Assistant. We + * strongly recommend you use the Upgrade Assistant. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/AdaptiveAllocationsSettings.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/AdaptiveAllocationsSettings.java new file mode 100644 index 000000000..0753bdd7f --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/AdaptiveAllocationsSettings.java @@ -0,0 +1,217 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.elasticsearch.ml; + +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.JsonpUtils; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import co.elastic.clients.util.WithJsonObjectBuilderBase; +import jakarta.json.stream.JsonGenerator; +import java.lang.Boolean; +import java.lang.Integer; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. 
+// +//---------------------------------------------------------------- + +// typedef: ml._types.AdaptiveAllocationsSettings + +/** + * + * @see API + * specification + */ +@JsonpDeserializable +public class AdaptiveAllocationsSettings implements JsonpSerializable { + private final boolean enabled; + + @Nullable + private final Integer minNumberOfAllocations; + + @Nullable + private final Integer maxNumberOfAllocations; + + // --------------------------------------------------------------------------------------------- + + private AdaptiveAllocationsSettings(Builder builder) { + + this.enabled = ApiTypeHelper.requireNonNull(builder.enabled, this, "enabled"); + this.minNumberOfAllocations = builder.minNumberOfAllocations; + this.maxNumberOfAllocations = builder.maxNumberOfAllocations; + + } + + public static AdaptiveAllocationsSettings of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * Required - API name: {@code enabled} + */ + public final boolean enabled() { + return this.enabled; + } + + /** + * API name: {@code min_number_of_allocations} + */ + @Nullable + public final Integer minNumberOfAllocations() { + return this.minNumberOfAllocations; + } + + /** + * API name: {@code max_number_of_allocations} + */ + @Nullable + public final Integer maxNumberOfAllocations() { + return this.maxNumberOfAllocations; + } + + /** + * Serialize this object to JSON. 
+ */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + generator.writeKey("enabled"); + generator.write(this.enabled); + + if (this.minNumberOfAllocations != null) { + generator.writeKey("min_number_of_allocations"); + generator.write(this.minNumberOfAllocations); + + } + if (this.maxNumberOfAllocations != null) { + generator.writeKey("max_number_of_allocations"); + generator.write(this.maxNumberOfAllocations); + + } + + } + + @Override + public String toString() { + return JsonpUtils.toString(this); + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link AdaptiveAllocationsSettings}. + */ + + public static class Builder extends WithJsonObjectBuilderBase + implements + ObjectBuilder { + private Boolean enabled; + + @Nullable + private Integer minNumberOfAllocations; + + @Nullable + private Integer maxNumberOfAllocations; + + /** + * Required - API name: {@code enabled} + */ + public final Builder enabled(boolean value) { + this.enabled = value; + return this; + } + + /** + * API name: {@code min_number_of_allocations} + */ + public final Builder minNumberOfAllocations(@Nullable Integer value) { + this.minNumberOfAllocations = value; + return this; + } + + /** + * API name: {@code max_number_of_allocations} + */ + public final Builder maxNumberOfAllocations(@Nullable Integer value) { + this.maxNumberOfAllocations = value; + return this; + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link AdaptiveAllocationsSettings}. + * + * @throws NullPointerException + * if some of the required fields are null. 
+ */ + public AdaptiveAllocationsSettings build() { + _checkSingleUse(); + + return new AdaptiveAllocationsSettings(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link AdaptiveAllocationsSettings} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, AdaptiveAllocationsSettings::setupAdaptiveAllocationsSettingsDeserializer); + + protected static void setupAdaptiveAllocationsSettingsDeserializer( + ObjectDeserializer op) { + + op.add(Builder::enabled, JsonpDeserializer.booleanDeserializer(), "enabled"); + op.add(Builder::minNumberOfAllocations, JsonpDeserializer.integerDeserializer(), "min_number_of_allocations"); + op.add(Builder::maxNumberOfAllocations, JsonpDeserializer.integerDeserializer(), "max_number_of_allocations"); + + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/CommonTokenizationConfig.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/CommonTokenizationConfig.java new file mode 100644 index 000000000..65dca426c --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/CommonTokenizationConfig.java @@ -0,0 +1,270 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package co.elastic.clients.elasticsearch.ml; + +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.JsonpUtils; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ObjectBuilder; +import co.elastic.clients.util.WithJsonObjectBuilderBase; +import jakarta.json.stream.JsonGenerator; +import java.lang.Boolean; +import java.lang.Integer; +import java.util.Objects; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. 
+// +//---------------------------------------------------------------- + +// typedef: ml._types.CommonTokenizationConfig + +/** + * + * @see API + * specification + */ + +public abstract class CommonTokenizationConfig implements JsonpSerializable { + @Nullable + private final Boolean doLowerCase; + + @Nullable + private final Integer maxSequenceLength; + + @Nullable + private final Integer span; + + @Nullable + private final TokenizationTruncate truncate; + + @Nullable + private final Boolean withSpecialTokens; + + // --------------------------------------------------------------------------------------------- + + protected CommonTokenizationConfig(AbstractBuilder builder) { + + this.doLowerCase = builder.doLowerCase; + this.maxSequenceLength = builder.maxSequenceLength; + this.span = builder.span; + this.truncate = builder.truncate; + this.withSpecialTokens = builder.withSpecialTokens; + + } + + /** + * Should the tokenizer lower case the text + *

+ * API name: {@code do_lower_case} + */ + @Nullable + public final Boolean doLowerCase() { + return this.doLowerCase; + } + + /** + * Maximum input sequence length for the model + *

+ * API name: {@code max_sequence_length} + */ + @Nullable + public final Integer maxSequenceLength() { + return this.maxSequenceLength; + } + + /** + * Tokenization spanning options. Special value of -1 indicates no spanning + * takes place + *

+ * API name: {@code span} + */ + @Nullable + public final Integer span() { + return this.span; + } + + /** + * Should tokenization input be automatically truncated before sending to the + * model for inference + *

+ * API name: {@code truncate} + */ + @Nullable + public final TokenizationTruncate truncate() { + return this.truncate; + } + + /** + * Is tokenization completed with special tokens + *

+ * API name: {@code with_special_tokens} + */ + @Nullable + public final Boolean withSpecialTokens() { + return this.withSpecialTokens; + } + + /** + * Serialize this object to JSON. + */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + if (this.doLowerCase != null) { + generator.writeKey("do_lower_case"); + generator.write(this.doLowerCase); + + } + if (this.maxSequenceLength != null) { + generator.writeKey("max_sequence_length"); + generator.write(this.maxSequenceLength); + + } + if (this.span != null) { + generator.writeKey("span"); + generator.write(this.span); + + } + if (this.truncate != null) { + generator.writeKey("truncate"); + this.truncate.serialize(generator, mapper); + } + if (this.withSpecialTokens != null) { + generator.writeKey("with_special_tokens"); + generator.write(this.withSpecialTokens); + + } + + } + + @Override + public String toString() { + return JsonpUtils.toString(this); + } + + public abstract static class AbstractBuilder> + extends + WithJsonObjectBuilderBase { + @Nullable + private Boolean doLowerCase; + + @Nullable + private Integer maxSequenceLength; + + @Nullable + private Integer span; + + @Nullable + private TokenizationTruncate truncate; + + @Nullable + private Boolean withSpecialTokens; + + /** + * Should the tokenizer lower case the text + *

+ * API name: {@code do_lower_case} + */ + public final BuilderT doLowerCase(@Nullable Boolean value) { + this.doLowerCase = value; + return self(); + } + + /** + * Maximum input sequence length for the model + *

+ * API name: {@code max_sequence_length} + */ + public final BuilderT maxSequenceLength(@Nullable Integer value) { + this.maxSequenceLength = value; + return self(); + } + + /** + * Tokenization spanning options. Special value of -1 indicates no spanning + * takes place + *

+ * API name: {@code span} + */ + public final BuilderT span(@Nullable Integer value) { + this.span = value; + return self(); + } + + /** + * Should tokenization input be automatically truncated before sending to the + * model for inference + *

+ * API name: {@code truncate} + */ + public final BuilderT truncate(@Nullable TokenizationTruncate value) { + this.truncate = value; + return self(); + } + + /** + * Is tokenization completed with special tokens + *

+ * API name: {@code with_special_tokens} + */ + public final BuilderT withSpecialTokens(@Nullable Boolean value) { + this.withSpecialTokens = value; + return self(); + } + + protected abstract BuilderT self(); + + } + + // --------------------------------------------------------------------------------------------- + protected static > void setupCommonTokenizationConfigDeserializer( + ObjectDeserializer op) { + + op.add(AbstractBuilder::doLowerCase, JsonpDeserializer.booleanDeserializer(), "do_lower_case"); + op.add(AbstractBuilder::maxSequenceLength, JsonpDeserializer.integerDeserializer(), "max_sequence_length"); + op.add(AbstractBuilder::span, JsonpDeserializer.integerDeserializer(), "span"); + op.add(AbstractBuilder::truncate, TokenizationTruncate._DESERIALIZER, "truncate"); + op.add(AbstractBuilder::withSpecialTokens, JsonpDeserializer.booleanDeserializer(), "with_special_tokens"); + + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/DatafeedStats.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/DatafeedStats.java index 8b2eec816..176d1d804 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/DatafeedStats.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/DatafeedStats.java @@ -65,10 +65,11 @@ public class DatafeedStats implements JsonpSerializable { private final String datafeedId; @Nullable - private final DiscoveryNode node; + private final DiscoveryNodeCompact node; private final DatafeedState state; + @Nullable private final DatafeedTimingStats timingStats; @Nullable @@ -82,7 +83,7 @@ private DatafeedStats(Builder builder) { this.datafeedId = ApiTypeHelper.requireNonNull(builder.datafeedId, this, "datafeedId"); this.node = builder.node; this.state = ApiTypeHelper.requireNonNull(builder.state, this, "state"); - this.timingStats = ApiTypeHelper.requireNonNull(builder.timingStats, this, "timingStats"); + this.timingStats = builder.timingStats; this.runningState 
= builder.runningState; } @@ -121,7 +122,7 @@ public final String datafeedId() { * API name: {@code node} */ @Nullable - public final DiscoveryNode node() { + public final DiscoveryNodeCompact node() { return this.node; } @@ -137,11 +138,12 @@ public final DatafeedState state() { } /** - * Required - An object that provides statistical information about timing - * aspect of this datafeed. + * An object that provides statistical information about timing aspect of this + * datafeed. *

* API name: {@code timing_stats} */ + @Nullable public final DatafeedTimingStats timingStats() { return this.timingStats; } @@ -183,9 +185,11 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { } generator.writeKey("state"); this.state.serialize(generator, mapper); - generator.writeKey("timing_stats"); - this.timingStats.serialize(generator, mapper); + if (this.timingStats != null) { + generator.writeKey("timing_stats"); + this.timingStats.serialize(generator, mapper); + } if (this.runningState != null) { generator.writeKey("running_state"); this.runningState.serialize(generator, mapper); @@ -212,10 +216,11 @@ public static class Builder extends WithJsonObjectBuilderBase implement private String datafeedId; @Nullable - private DiscoveryNode node; + private DiscoveryNodeCompact node; private DatafeedState state; + @Nullable private DatafeedTimingStats timingStats; @Nullable @@ -251,7 +256,7 @@ public final Builder datafeedId(String value) { *

* API name: {@code node} */ - public final Builder node(@Nullable DiscoveryNode value) { + public final Builder node(@Nullable DiscoveryNodeCompact value) { this.node = value; return this; } @@ -262,8 +267,8 @@ public final Builder node(@Nullable DiscoveryNode value) { *

* API name: {@code node} */ - public final Builder node(Function> fn) { - return this.node(fn.apply(new DiscoveryNode.Builder()).build()); + public final Builder node(Function> fn) { + return this.node(fn.apply(new DiscoveryNodeCompact.Builder()).build()); } /** @@ -279,19 +284,19 @@ public final Builder state(DatafeedState value) { } /** - * Required - An object that provides statistical information about timing - * aspect of this datafeed. + * An object that provides statistical information about timing aspect of this + * datafeed. *

* API name: {@code timing_stats} */ - public final Builder timingStats(DatafeedTimingStats value) { + public final Builder timingStats(@Nullable DatafeedTimingStats value) { this.timingStats = value; return this; } /** - * Required - An object that provides statistical information about timing - * aspect of this datafeed. + * An object that provides statistical information about timing aspect of this + * datafeed. *

* API name: {@code timing_stats} */ @@ -351,7 +356,7 @@ protected static void setupDatafeedStatsDeserializer(ObjectDeserializer @@ -164,6 +176,11 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { generator.writeKey("exponential_average_search_time_per_hour_ms"); generator.write(this.exponentialAverageSearchTimePerHourMs); + if (this.exponentialAverageCalculationContext != null) { + generator.writeKey("exponential_average_calculation_context"); + this.exponentialAverageCalculationContext.serialize(generator, mapper); + + } generator.writeKey("job_id"); generator.write(this.jobId); @@ -199,6 +216,9 @@ public static class Builder extends WithJsonObjectBuilderBase private Double exponentialAverageSearchTimePerHourMs; + @Nullable + private ExponentialAverageCalculationContext exponentialAverageCalculationContext; + private String jobId; private Long searchCount; @@ -228,6 +248,24 @@ public final Builder exponentialAverageSearchTimePerHourMs(double value) { return this; } + /** + * API name: {@code exponential_average_calculation_context} + */ + public final Builder exponentialAverageCalculationContext( + @Nullable ExponentialAverageCalculationContext value) { + this.exponentialAverageCalculationContext = value; + return this; + } + + /** + * API name: {@code exponential_average_calculation_context} + */ + public final Builder exponentialAverageCalculationContext( + Function> fn) { + return this.exponentialAverageCalculationContext( + fn.apply(new ExponentialAverageCalculationContext.Builder()).build()); + } + /** * Required - Identifier for the anomaly detection job. *

@@ -299,6 +337,8 @@ protected static void setupDatafeedTimingStatsDeserializer(ObjectDeserializer meta; + // --------------------------------------------------------------------------------------------- private DataframeAnalyticsSummary(Builder builder) { @@ -111,6 +115,7 @@ private DataframeAnalyticsSummary(Builder builder) { this.modelMemoryLimit = builder.modelMemoryLimit; this.source = ApiTypeHelper.requireNonNull(builder.source, this, "source"); this.version = builder.version; + this.meta = ApiTypeHelper.unmodifiable(builder.meta); } @@ -214,6 +219,13 @@ public final String version() { return this.version; } + /** + * API name: {@code _meta} + */ + public final Map meta() { + return this.meta; + } + /** * Serialize this object to JSON. */ @@ -277,6 +289,17 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { generator.write(this.version); } + if (ApiTypeHelper.isDefined(this.meta)) { + generator.writeKey("_meta"); + generator.writeStartObject(); + for (Map.Entry item0 : this.meta.entrySet()) { + generator.writeKey(item0.getKey()); + item0.getValue().serialize(generator, mapper); + + } + generator.writeEnd(); + + } } @@ -326,6 +349,9 @@ public static class Builder extends WithJsonObjectBuilderBase @Nullable private String version; + @Nullable + private Map meta; + /** * API name: {@code allow_lazy_start} */ @@ -469,6 +495,26 @@ public final Builder version(@Nullable String value) { return this; } + /** + * API name: {@code _meta} + *

+ * Adds all entries of map to meta. + */ + public final Builder meta(Map map) { + this.meta = _mapPutAll(this.meta, map); + return this; + } + + /** + * API name: {@code _meta} + *

+ * Adds an entry to meta. + */ + public final Builder meta(String key, JsonData value) { + this.meta = _mapPut(this.meta, key, value); + return this; + } + @Override protected Builder self() { return this; @@ -510,6 +556,7 @@ protected static void setupDataframeAnalyticsSummaryDeserializer( op.add(Builder::modelMemoryLimit, JsonpDeserializer.stringDeserializer(), "model_memory_limit"); op.add(Builder::source, DataframeAnalyticsSource._DESERIALIZER, "source"); op.add(Builder::version, JsonpDeserializer.stringDeserializer(), "version"); + op.add(Builder::meta, JsonpDeserializer.stringMapDeserializer(JsonData._DESERIALIZER), "_meta"); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/DetectorUpdate.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/DetectorUpdate.java new file mode 100644 index 000000000..a99721189 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/DetectorUpdate.java @@ -0,0 +1,270 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.elasticsearch.ml; + +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.JsonpUtils; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import co.elastic.clients.util.WithJsonObjectBuilderBase; +import jakarta.json.stream.JsonGenerator; +import java.lang.Integer; +import java.lang.String; +import java.util.List; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. 
+// +//---------------------------------------------------------------- + +// typedef: ml._types.DetectorUpdate + +/** + * + * @see API + * specification + */ +@JsonpDeserializable +public class DetectorUpdate implements JsonpSerializable { + private final int detectorIndex; + + @Nullable + private final String description; + + private final List customRules; + + // --------------------------------------------------------------------------------------------- + + private DetectorUpdate(Builder builder) { + + this.detectorIndex = ApiTypeHelper.requireNonNull(builder.detectorIndex, this, "detectorIndex"); + this.description = builder.description; + this.customRules = ApiTypeHelper.unmodifiable(builder.customRules); + + } + + public static DetectorUpdate of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * Required - A unique identifier for the detector. This identifier is based on + * the order of the detectors in the analysis_config, starting at + * zero. + *

+ * API name: {@code detector_index} + */ + public final int detectorIndex() { + return this.detectorIndex; + } + + /** + * A description of the detector. + *

+ * API name: {@code description} + */ + @Nullable + public final String description() { + return this.description; + } + + /** + * An array of custom rule objects, which enable you to customize the way + * detectors operate. For example, a rule may dictate to the detector conditions + * under which results should be skipped. Kibana refers to custom rules as job + * rules. + *

+ * API name: {@code custom_rules} + */ + public final List customRules() { + return this.customRules; + } + + /** + * Serialize this object to JSON. + */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + generator.writeKey("detector_index"); + generator.write(this.detectorIndex); + + if (this.description != null) { + generator.writeKey("description"); + generator.write(this.description); + + } + if (ApiTypeHelper.isDefined(this.customRules)) { + generator.writeKey("custom_rules"); + generator.writeStartArray(); + for (DetectionRule item0 : this.customRules) { + item0.serialize(generator, mapper); + + } + generator.writeEnd(); + + } + + } + + @Override + public String toString() { + return JsonpUtils.toString(this); + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link DetectorUpdate}. + */ + + public static class Builder extends WithJsonObjectBuilderBase implements ObjectBuilder { + private Integer detectorIndex; + + @Nullable + private String description; + + @Nullable + private List customRules; + + /** + * Required - A unique identifier for the detector. This identifier is based on + * the order of the detectors in the analysis_config, starting at + * zero. + *

+ * API name: {@code detector_index} + */ + public final Builder detectorIndex(int value) { + this.detectorIndex = value; + return this; + } + + /** + * A description of the detector. + *

+ * API name: {@code description} + */ + public final Builder description(@Nullable String value) { + this.description = value; + return this; + } + + /** + * An array of custom rule objects, which enable you to customize the way + * detectors operate. For example, a rule may dictate to the detector conditions + * under which results should be skipped. Kibana refers to custom rules as job + * rules. + *

+ * API name: {@code custom_rules} + *

+ * Adds all elements of list to customRules. + */ + public final Builder customRules(List list) { + this.customRules = _listAddAll(this.customRules, list); + return this; + } + + /** + * An array of custom rule objects, which enable you to customize the way + * detectors operate. For example, a rule may dictate to the detector conditions + * under which results should be skipped. Kibana refers to custom rules as job + * rules. + *

+ * API name: {@code custom_rules} + *

+ * Adds one or more values to customRules. + */ + public final Builder customRules(DetectionRule value, DetectionRule... values) { + this.customRules = _listAdd(this.customRules, value, values); + return this; + } + + /** + * An array of custom rule objects, which enable you to customize the way + * detectors operate. For example, a rule may dictate to the detector conditions + * under which results should be skipped. Kibana refers to custom rules as job + * rules. + *

+ * API name: {@code custom_rules} + *

+ * Adds a value to customRules using a builder lambda. + */ + public final Builder customRules(Function> fn) { + return customRules(fn.apply(new DetectionRule.Builder()).build()); + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link DetectorUpdate}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public DetectorUpdate build() { + _checkSingleUse(); + + return new DetectorUpdate(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link DetectorUpdate} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer.lazy(Builder::new, + DetectorUpdate::setupDetectorUpdateDeserializer); + + protected static void setupDetectorUpdateDeserializer(ObjectDeserializer op) { + + op.add(Builder::detectorIndex, JsonpDeserializer.integerDeserializer(), "detector_index"); + op.add(Builder::description, JsonpDeserializer.stringDeserializer(), "description"); + op.add(Builder::customRules, JsonpDeserializer.arrayDeserializer(DetectionRule._DESERIALIZER), "custom_rules"); + + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/DiscoveryNode.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/DiscoveryNodeCompact.java similarity index 87% rename from java-client/src/main/java/co/elastic/clients/elasticsearch/ml/DiscoveryNode.java rename to java-client/src/main/java/co/elastic/clients/elasticsearch/ml/DiscoveryNodeCompact.java index b48216889..fd5083c99 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/DiscoveryNode.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/DiscoveryNodeCompact.java @@ -51,46 +51,48 @@ // //---------------------------------------------------------------- -// typedef: ml._types.DiscoveryNode +// typedef: ml._types.DiscoveryNodeCompact /** - * - * @see API + * 
Alternative representation of DiscoveryNode used in ml.get_job_stats and + * ml.get_datafeed_stats + * + * @see API * specification */ @JsonpDeserializable -public class DiscoveryNode implements JsonpSerializable { - private final Map attributes; +public class DiscoveryNodeCompact implements JsonpSerializable { + private final String name; private final String ephemeralId; private final String id; - private final String name; - private final String transportAddress; + private final Map attributes; + // --------------------------------------------------------------------------------------------- - private DiscoveryNode(Builder builder) { + private DiscoveryNodeCompact(Builder builder) { - this.attributes = ApiTypeHelper.unmodifiableRequired(builder.attributes, this, "attributes"); + this.name = ApiTypeHelper.requireNonNull(builder.name, this, "name"); this.ephemeralId = ApiTypeHelper.requireNonNull(builder.ephemeralId, this, "ephemeralId"); this.id = ApiTypeHelper.requireNonNull(builder.id, this, "id"); - this.name = ApiTypeHelper.requireNonNull(builder.name, this, "name"); this.transportAddress = ApiTypeHelper.requireNonNull(builder.transportAddress, this, "transportAddress"); + this.attributes = ApiTypeHelper.unmodifiableRequired(builder.attributes, this, "attributes"); } - public static DiscoveryNode of(Function> fn) { + public static DiscoveryNodeCompact of(Function> fn) { return fn.apply(new Builder()).build(); } /** - * Required - API name: {@code attributes} + * Required - API name: {@code name} */ - public final Map attributes() { - return this.attributes; + public final String name() { + return this.name; } /** @@ -108,17 +110,17 @@ public final String id() { } /** - * Required - API name: {@code name} + * Required - API name: {@code transport_address} */ - public final String name() { - return this.name; + public final String transportAddress() { + return this.transportAddress; } /** - * Required - API name: {@code transport_address} + * Required - API 
name: {@code attributes} */ - public final String transportAddress() { - return this.transportAddress; + public final Map attributes() { + return this.attributes; } /** @@ -132,6 +134,18 @@ public void serialize(JsonGenerator generator, JsonpMapper mapper) { protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + generator.writeKey("name"); + generator.write(this.name); + + generator.writeKey("ephemeral_id"); + generator.write(this.ephemeralId); + + generator.writeKey("id"); + generator.write(this.id); + + generator.writeKey("transport_address"); + generator.write(this.transportAddress); + if (ApiTypeHelper.isDefined(this.attributes)) { generator.writeKey("attributes"); generator.writeStartObject(); @@ -143,17 +157,6 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { generator.writeEnd(); } - generator.writeKey("ephemeral_id"); - generator.write(this.ephemeralId); - - generator.writeKey("id"); - generator.write(this.id); - - generator.writeKey("name"); - generator.write(this.name); - - generator.writeKey("transport_address"); - generator.write(this.transportAddress); } @@ -165,37 +168,27 @@ public String toString() { // --------------------------------------------------------------------------------------------- /** - * Builder for {@link DiscoveryNode}. + * Builder for {@link DiscoveryNodeCompact}. */ - public static class Builder extends WithJsonObjectBuilderBase implements ObjectBuilder { - private Map attributes; + public static class Builder extends WithJsonObjectBuilderBase + implements + ObjectBuilder { + private String name; private String ephemeralId; private String id; - private String name; - private String transportAddress; - /** - * Required - API name: {@code attributes} - *

- * Adds all entries of map to attributes. - */ - public final Builder attributes(Map map) { - this.attributes = _mapPutAll(this.attributes, map); - return this; - } + private Map attributes; /** - * Required - API name: {@code attributes} - *

- * Adds an entry to attributes. + * Required - API name: {@code name} */ - public final Builder attributes(String key, String value) { - this.attributes = _mapPut(this.attributes, key, value); + public final Builder name(String value) { + this.name = value; return this; } @@ -216,18 +209,30 @@ public final Builder id(String value) { } /** - * Required - API name: {@code name} + * Required - API name: {@code transport_address} */ - public final Builder name(String value) { - this.name = value; + public final Builder transportAddress(String value) { + this.transportAddress = value; return this; } /** - * Required - API name: {@code transport_address} + * Required - API name: {@code attributes} + *

+ * Adds all entries of map to attributes. */ - public final Builder transportAddress(String value) { - this.transportAddress = value; + public final Builder attributes(Map map) { + this.attributes = _mapPutAll(this.attributes, map); + return this; + } + + /** + * Required - API name: {@code attributes} + *

+ * Adds an entry to attributes. + */ + public final Builder attributes(String key, String value) { + this.attributes = _mapPut(this.attributes, key, value); return this; } @@ -237,34 +242,34 @@ protected Builder self() { } /** - * Builds a {@link DiscoveryNode}. + * Builds a {@link DiscoveryNodeCompact}. * * @throws NullPointerException * if some of the required fields are null. */ - public DiscoveryNode build() { + public DiscoveryNodeCompact build() { _checkSingleUse(); - return new DiscoveryNode(this); + return new DiscoveryNodeCompact(this); } } // --------------------------------------------------------------------------------------------- /** - * Json deserializer for {@link DiscoveryNode} + * Json deserializer for {@link DiscoveryNodeCompact} */ - public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer.lazy(Builder::new, - DiscoveryNode::setupDiscoveryNodeDeserializer); + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, DiscoveryNodeCompact::setupDiscoveryNodeCompactDeserializer); - protected static void setupDiscoveryNodeDeserializer(ObjectDeserializer op) { + protected static void setupDiscoveryNodeCompactDeserializer(ObjectDeserializer op) { - op.add(Builder::attributes, JsonpDeserializer.stringMapDeserializer(JsonpDeserializer.stringDeserializer()), - "attributes"); + op.add(Builder::name, JsonpDeserializer.stringDeserializer(), "name"); op.add(Builder::ephemeralId, JsonpDeserializer.stringDeserializer(), "ephemeral_id"); op.add(Builder::id, JsonpDeserializer.stringDeserializer(), "id"); - op.add(Builder::name, JsonpDeserializer.stringDeserializer(), "name"); op.add(Builder::transportAddress, JsonpDeserializer.stringDeserializer(), "transport_address"); + op.add(Builder::attributes, JsonpDeserializer.stringMapDeserializer(JsonpDeserializer.stringDeserializer()), + "attributes"); } diff --git 
a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/DiscoveryNodeContent.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/DiscoveryNodeContent.java new file mode 100644 index 000000000..634858284 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/DiscoveryNodeContent.java @@ -0,0 +1,422 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.elasticsearch.ml; + +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.JsonpUtils; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import co.elastic.clients.util.WithJsonObjectBuilderBase; +import jakarta.json.stream.JsonGenerator; +import java.lang.Integer; +import java.lang.String; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. 
+// +//---------------------------------------------------------------- + +// typedef: ml._types.DiscoveryNodeContent + +/** + * + * @see API + * specification + */ +@JsonpDeserializable +public class DiscoveryNodeContent implements JsonpSerializable { + // Single key dictionary + private final String id; + + @Nullable + private final String name; + + private final String ephemeralId; + + private final String transportAddress; + + private final String externalId; + + private final Map attributes; + + private final List roles; + + private final String version; + + private final int minIndexVersion; + + private final int maxIndexVersion; + + // --------------------------------------------------------------------------------------------- + + private DiscoveryNodeContent(Builder builder) { + + this.id = ApiTypeHelper.requireNonNull(builder.id, this, "id"); + + this.name = builder.name; + this.ephemeralId = ApiTypeHelper.requireNonNull(builder.ephemeralId, this, "ephemeralId"); + this.transportAddress = ApiTypeHelper.requireNonNull(builder.transportAddress, this, "transportAddress"); + this.externalId = ApiTypeHelper.requireNonNull(builder.externalId, this, "externalId"); + this.attributes = ApiTypeHelper.unmodifiableRequired(builder.attributes, this, "attributes"); + this.roles = ApiTypeHelper.unmodifiableRequired(builder.roles, this, "roles"); + this.version = ApiTypeHelper.requireNonNull(builder.version, this, "version"); + this.minIndexVersion = ApiTypeHelper.requireNonNull(builder.minIndexVersion, this, "minIndexVersion"); + this.maxIndexVersion = ApiTypeHelper.requireNonNull(builder.maxIndexVersion, this, "maxIndexVersion"); + + } + + public static DiscoveryNodeContent of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * Required - + */ + public final String id() { + return this.id; + } + + /** + * API name: {@code name} + */ + @Nullable + public final String name() { + return this.name; + } + + /** + * Required - API name: {@code 
ephemeral_id} + */ + public final String ephemeralId() { + return this.ephemeralId; + } + + /** + * Required - API name: {@code transport_address} + */ + public final String transportAddress() { + return this.transportAddress; + } + + /** + * Required - API name: {@code external_id} + */ + public final String externalId() { + return this.externalId; + } + + /** + * Required - API name: {@code attributes} + */ + public final Map attributes() { + return this.attributes; + } + + /** + * Required - API name: {@code roles} + */ + public final List roles() { + return this.roles; + } + + /** + * Required - API name: {@code version} + */ + public final String version() { + return this.version; + } + + /** + * Required - API name: {@code min_index_version} + */ + public final int minIndexVersion() { + return this.minIndexVersion; + } + + /** + * Required - API name: {@code max_index_version} + */ + public final int maxIndexVersion() { + return this.maxIndexVersion; + } + + /** + * Serialize this object to JSON. 
+ */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(this.id); + + if (this.name != null) { + generator.writeKey("name"); + generator.write(this.name); + + } + generator.writeKey("ephemeral_id"); + generator.write(this.ephemeralId); + + generator.writeKey("transport_address"); + generator.write(this.transportAddress); + + generator.writeKey("external_id"); + generator.write(this.externalId); + + if (ApiTypeHelper.isDefined(this.attributes)) { + generator.writeKey("attributes"); + generator.writeStartObject(); + for (Map.Entry item0 : this.attributes.entrySet()) { + generator.writeKey(item0.getKey()); + generator.write(item0.getValue()); + + } + generator.writeEnd(); + + } + if (ApiTypeHelper.isDefined(this.roles)) { + generator.writeKey("roles"); + generator.writeStartArray(); + for (String item0 : this.roles) { + generator.write(item0); + + } + generator.writeEnd(); + + } + generator.writeKey("version"); + generator.write(this.version); + + generator.writeKey("min_index_version"); + generator.write(this.minIndexVersion); + + generator.writeKey("max_index_version"); + generator.write(this.maxIndexVersion); + + generator.writeEnd(); + + } + + @Override + public String toString() { + return JsonpUtils.toString(this); + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link DiscoveryNodeContent}. 
+ */ + + public static class Builder extends WithJsonObjectBuilderBase + implements + ObjectBuilder { + private String id; + + /** + * Required - + */ + public final Builder id(String value) { + this.id = value; + return this; + } + + @Nullable + private String name; + + private String ephemeralId; + + private String transportAddress; + + private String externalId; + + private Map attributes; + + private List roles; + + private String version; + + private Integer minIndexVersion; + + private Integer maxIndexVersion; + + /** + * API name: {@code name} + */ + public final Builder name(@Nullable String value) { + this.name = value; + return this; + } + + /** + * Required - API name: {@code ephemeral_id} + */ + public final Builder ephemeralId(String value) { + this.ephemeralId = value; + return this; + } + + /** + * Required - API name: {@code transport_address} + */ + public final Builder transportAddress(String value) { + this.transportAddress = value; + return this; + } + + /** + * Required - API name: {@code external_id} + */ + public final Builder externalId(String value) { + this.externalId = value; + return this; + } + + /** + * Required - API name: {@code attributes} + *

+ * Adds all entries of map to attributes. + */ + public final Builder attributes(Map map) { + this.attributes = _mapPutAll(this.attributes, map); + return this; + } + + /** + * Required - API name: {@code attributes} + *

+ * Adds an entry to attributes. + */ + public final Builder attributes(String key, String value) { + this.attributes = _mapPut(this.attributes, key, value); + return this; + } + + /** + * Required - API name: {@code roles} + *

+ * Adds all elements of list to roles. + */ + public final Builder roles(List list) { + this.roles = _listAddAll(this.roles, list); + return this; + } + + /** + * Required - API name: {@code roles} + *

+ * Adds one or more values to roles. + */ + public final Builder roles(String value, String... values) { + this.roles = _listAdd(this.roles, value, values); + return this; + } + + /** + * Required - API name: {@code version} + */ + public final Builder version(String value) { + this.version = value; + return this; + } + + /** + * Required - API name: {@code min_index_version} + */ + public final Builder minIndexVersion(int value) { + this.minIndexVersion = value; + return this; + } + + /** + * Required - API name: {@code max_index_version} + */ + public final Builder maxIndexVersion(int value) { + this.maxIndexVersion = value; + return this; + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link DiscoveryNodeContent}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public DiscoveryNodeContent build() { + _checkSingleUse(); + + return new DiscoveryNodeContent(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link DiscoveryNodeContent} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, DiscoveryNodeContent::setupDiscoveryNodeContentDeserializer); + + protected static void setupDiscoveryNodeContentDeserializer(ObjectDeserializer op) { + + op.add(Builder::name, JsonpDeserializer.stringDeserializer(), "name"); + op.add(Builder::ephemeralId, JsonpDeserializer.stringDeserializer(), "ephemeral_id"); + op.add(Builder::transportAddress, JsonpDeserializer.stringDeserializer(), "transport_address"); + op.add(Builder::externalId, JsonpDeserializer.stringDeserializer(), "external_id"); + op.add(Builder::attributes, JsonpDeserializer.stringMapDeserializer(JsonpDeserializer.stringDeserializer()), + "attributes"); + op.add(Builder::roles, JsonpDeserializer.arrayDeserializer(JsonpDeserializer.stringDeserializer()), "roles"); + 
op.add(Builder::version, JsonpDeserializer.stringDeserializer(), "version"); + op.add(Builder::minIndexVersion, JsonpDeserializer.integerDeserializer(), "min_index_version"); + op.add(Builder::maxIndexVersion, JsonpDeserializer.integerDeserializer(), "max_index_version"); + + op.setKey(Builder::id, JsonpDeserializer.stringDeserializer()); + + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/ElasticsearchMlAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/ElasticsearchMlAsyncClient.java index 376f521b2..84113a71c 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/ElasticsearchMlAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/ElasticsearchMlAsyncClient.java @@ -1832,7 +1832,7 @@ public final CompletableFuture inferTrainedModel( // ----- Endpoint: ml.info /** - * Return ML defaults and limits. Returns defaults and limits used by machine + * Get machine learning information. Get defaults and limits used by machine * learning. This endpoint is designed to be used by a user interface that needs * to fully understand machine learning configurations where some options are * not specified, meaning that the defaults should be used. This endpoint may be @@ -3256,7 +3256,7 @@ public CompletableFuture validate() { // ----- Endpoint: ml.validate_detector /** - * Validates an anomaly detection detector. + * Validate an anomaly detection job. * * @see Documentation @@ -3271,7 +3271,7 @@ public CompletableFuture validateDetector(ValidateDete } /** - * Validates an anomaly detection detector. + * Validate an anomaly detection job. * * @param fn * a function that initializes a builder to create the @@ -3287,7 +3287,7 @@ public final CompletableFuture validateDetector( } /** - * Validates an anomaly detection detector. + * Validate an anomaly detection job. 
* * @see Documentation diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/ElasticsearchMlClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/ElasticsearchMlClient.java index a7d3862fe..bdfc22fe7 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/ElasticsearchMlClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/ElasticsearchMlClient.java @@ -1883,7 +1883,7 @@ public final InferTrainedModelResponse inferTrainedModel( // ----- Endpoint: ml.info /** - * Return ML defaults and limits. Returns defaults and limits used by machine + * Get machine learning information. Get defaults and limits used by machine * learning. This endpoint is designed to be used by a user interface that needs * to fully understand machine learning configurations where some options are * not specified, meaning that the defaults should be used. This endpoint may be @@ -3343,7 +3343,7 @@ public ValidateResponse validate() throws IOException, ElasticsearchException { // ----- Endpoint: ml.validate_detector /** - * Validates an anomaly detection detector. + * Validate an anomaly detection job. * * @see Documentation @@ -3359,7 +3359,7 @@ public ValidateDetectorResponse validateDetector(ValidateDetectorRequest request } /** - * Validates an anomaly detection detector. + * Validate an anomaly detection job. * * @param fn * a function that initializes a builder to create the @@ -3376,7 +3376,7 @@ public final ValidateDetectorResponse validateDetector( } /** - * Validates an anomaly detection detector. + * Validate an anomaly detection job. 
* * @see Documentation diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/ExponentialAverageCalculationContext.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/ExponentialAverageCalculationContext.java new file mode 100644 index 000000000..fb22297b4 --- /dev/null +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/ExponentialAverageCalculationContext.java @@ -0,0 +1,222 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package co.elastic.clients.elasticsearch.ml; + +import co.elastic.clients.json.JsonpDeserializable; +import co.elastic.clients.json.JsonpDeserializer; +import co.elastic.clients.json.JsonpMapper; +import co.elastic.clients.json.JsonpSerializable; +import co.elastic.clients.json.JsonpUtils; +import co.elastic.clients.json.ObjectBuilderDeserializer; +import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ApiTypeHelper; +import co.elastic.clients.util.ObjectBuilder; +import co.elastic.clients.util.WithJsonObjectBuilderBase; +import jakarta.json.stream.JsonGenerator; +import java.lang.Double; +import java.lang.Long; +import java.util.Objects; +import java.util.function.Function; +import javax.annotation.Nullable; + +//---------------------------------------------------------------- +// THIS CODE IS GENERATED. MANUAL EDITS WILL BE LOST. +//---------------------------------------------------------------- +// +// This code is generated from the Elasticsearch API specification +// at https://github.com/elastic/elasticsearch-specification +// +// Manual updates to this file will be lost when the code is +// re-generated. +// +// If you find a property that is missing or wrongly typed, please +// open an issue or a PR on the API specification repository. 
+// +//---------------------------------------------------------------- + +// typedef: ml._types.ExponentialAverageCalculationContext + +/** + * + * @see API + * specification + */ +@JsonpDeserializable +public class ExponentialAverageCalculationContext implements JsonpSerializable { + private final double incrementalMetricValueMs; + + @Nullable + private final Long latestTimestamp; + + @Nullable + private final Double previousExponentialAverageMs; + + // --------------------------------------------------------------------------------------------- + + private ExponentialAverageCalculationContext(Builder builder) { + + this.incrementalMetricValueMs = ApiTypeHelper.requireNonNull(builder.incrementalMetricValueMs, this, + "incrementalMetricValueMs"); + this.latestTimestamp = builder.latestTimestamp; + this.previousExponentialAverageMs = builder.previousExponentialAverageMs; + + } + + public static ExponentialAverageCalculationContext of( + Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * Required - API name: {@code incremental_metric_value_ms} + */ + public final double incrementalMetricValueMs() { + return this.incrementalMetricValueMs; + } + + /** + * API name: {@code latest_timestamp} + */ + @Nullable + public final Long latestTimestamp() { + return this.latestTimestamp; + } + + /** + * API name: {@code previous_exponential_average_ms} + */ + @Nullable + public final Double previousExponentialAverageMs() { + return this.previousExponentialAverageMs; + } + + /** + * Serialize this object to JSON. 
+ */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + generator.writeKey("incremental_metric_value_ms"); + generator.write(this.incrementalMetricValueMs); + + if (this.latestTimestamp != null) { + generator.writeKey("latest_timestamp"); + generator.write(this.latestTimestamp); + + } + if (this.previousExponentialAverageMs != null) { + generator.writeKey("previous_exponential_average_ms"); + generator.write(this.previousExponentialAverageMs); + + } + + } + + @Override + public String toString() { + return JsonpUtils.toString(this); + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link ExponentialAverageCalculationContext}. + */ + + public static class Builder extends WithJsonObjectBuilderBase + implements + ObjectBuilder { + private Double incrementalMetricValueMs; + + @Nullable + private Long latestTimestamp; + + @Nullable + private Double previousExponentialAverageMs; + + /** + * Required - API name: {@code incremental_metric_value_ms} + */ + public final Builder incrementalMetricValueMs(double value) { + this.incrementalMetricValueMs = value; + return this; + } + + /** + * API name: {@code latest_timestamp} + */ + public final Builder latestTimestamp(@Nullable Long value) { + this.latestTimestamp = value; + return this; + } + + /** + * API name: {@code previous_exponential_average_ms} + */ + public final Builder previousExponentialAverageMs(@Nullable Double value) { + this.previousExponentialAverageMs = value; + return this; + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link ExponentialAverageCalculationContext}. + * + * @throws NullPointerException + * if some of the required fields are null. 
+ */ + public ExponentialAverageCalculationContext build() { + _checkSingleUse(); + + return new ExponentialAverageCalculationContext(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link ExponentialAverageCalculationContext} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, + ExponentialAverageCalculationContext::setupExponentialAverageCalculationContextDeserializer); + + protected static void setupExponentialAverageCalculationContextDeserializer( + ObjectDeserializer op) { + + op.add(Builder::incrementalMetricValueMs, JsonpDeserializer.doubleDeserializer(), + "incremental_metric_value_ms"); + op.add(Builder::latestTimestamp, JsonpDeserializer.longDeserializer(), "latest_timestamp"); + op.add(Builder::previousExponentialAverageMs, JsonpDeserializer.doubleDeserializer(), + "previous_exponential_average_ms"); + + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/FillMaskInferenceOptions.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/FillMaskInferenceOptions.java index f62697910..76ddbdb1d 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/FillMaskInferenceOptions.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/FillMaskInferenceOptions.java @@ -26,6 +26,7 @@ import co.elastic.clients.json.JsonpUtils; import co.elastic.clients.json.ObjectBuilderDeserializer; import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ApiTypeHelper; import co.elastic.clients.util.ObjectBuilder; import co.elastic.clients.util.WithJsonObjectBuilderBase; import jakarta.json.stream.JsonGenerator; @@ -73,6 +74,8 @@ public class FillMaskInferenceOptions implements InferenceConfigCreateVariant, J @Nullable private final String resultsField; + private final Vocabulary vocabulary; + // 
--------------------------------------------------------------------------------------------- private FillMaskInferenceOptions(Builder builder) { @@ -81,6 +84,7 @@ private FillMaskInferenceOptions(Builder builder) { this.numTopClasses = builder.numTopClasses; this.tokenization = builder.tokenization; this.resultsField = builder.resultsField; + this.vocabulary = ApiTypeHelper.requireNonNull(builder.vocabulary, this, "vocabulary"); } @@ -143,6 +147,13 @@ public final String resultsField() { return this.resultsField; } + /** + * Required - API name: {@code vocabulary} + */ + public final Vocabulary vocabulary() { + return this.vocabulary; + } + /** * Serialize this object to JSON. */ @@ -174,6 +185,8 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { generator.write(this.resultsField); } + generator.writeKey("vocabulary"); + this.vocabulary.serialize(generator, mapper); } @@ -203,6 +216,8 @@ public static class Builder extends WithJsonObjectBuilderBase @Nullable private String resultsField; + private Vocabulary vocabulary; + /** * The string/token which will be removed from incoming documents and replaced * with the inference prediction(s). 
In a response, this field contains the mask @@ -259,6 +274,21 @@ public final Builder resultsField(@Nullable String value) { return this; } + /** + * Required - API name: {@code vocabulary} + */ + public final Builder vocabulary(Vocabulary value) { + this.vocabulary = value; + return this; + } + + /** + * Required - API name: {@code vocabulary} + */ + public final Builder vocabulary(Function> fn) { + return this.vocabulary(fn.apply(new Vocabulary.Builder()).build()); + } + @Override protected Builder self() { return this; @@ -292,6 +322,7 @@ protected static void setupFillMaskInferenceOptionsDeserializer( op.add(Builder::numTopClasses, JsonpDeserializer.integerDeserializer(), "num_top_classes"); op.add(Builder::tokenization, TokenizationConfig._DESERIALIZER, "tokenization"); op.add(Builder::resultsField, JsonpDeserializer.stringDeserializer(), "results_field"); + op.add(Builder::vocabulary, Vocabulary._DESERIALIZER, "vocabulary"); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/GetTrainedModelsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/GetTrainedModelsRequest.java index 31d96413d..bd0070355 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/GetTrainedModelsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/GetTrainedModelsRequest.java @@ -81,6 +81,9 @@ public class GetTrainedModelsRequest extends RequestBase { @Nullable private final Include include; + @Nullable + private final Boolean includeModelDefinition; + private final List modelId; @Nullable @@ -97,6 +100,7 @@ private GetTrainedModelsRequest(Builder builder) { this.excludeGenerated = builder.excludeGenerated; this.from = builder.from; this.include = builder.include; + this.includeModelDefinition = builder.includeModelDefinition; this.modelId = ApiTypeHelper.unmodifiable(builder.modelId); this.size = builder.size; this.tags = ApiTypeHelper.unmodifiable(builder.tags); @@ -168,6 +172,19 @@ public 
final Include include() { return this.include; } + /** + * parameter is deprecated! Use [include=definition] instead + *

+ * API name: {@code include_model_definition} + * + * @deprecated 7.10.0 + */ + @Deprecated + @Nullable + public final Boolean includeModelDefinition() { + return this.includeModelDefinition; + } + /** * The unique identifier of the trained model or a model alias. *

@@ -225,6 +242,9 @@ public static class Builder extends RequestBase.AbstractBuilder @Nullable private Include include; + @Nullable + private Boolean includeModelDefinition; + @Nullable private List modelId; @@ -295,6 +315,19 @@ public final Builder include(@Nullable Include value) { return this; } + /** + * parameter is deprecated! Use [include=definition] instead + *

+ * API name: {@code include_model_definition} + * + * @deprecated 7.10.0 + */ + @Deprecated + public final Builder includeModelDefinition(@Nullable Boolean value) { + this.includeModelDefinition = value; + return this; + } + /** * The unique identifier of the trained model or a model alias. *

@@ -459,6 +492,9 @@ public GetTrainedModelsRequest build() { if (request.from != null) { params.put("from", String.valueOf(request.from)); } + if (request.includeModelDefinition != null) { + params.put("include_model_definition", String.valueOf(request.includeModelDefinition)); + } if (request.allowNoMatch != null) { params.put("allow_no_match", String.valueOf(request.allowNoMatch)); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/JobStats.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/JobStats.java index 5c2e622c0..e13134d08 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/JobStats.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/JobStats.java @@ -73,7 +73,7 @@ public class JobStats implements JsonpSerializable { private final ModelSizeStats modelSizeStats; @Nullable - private final DiscoveryNode node; + private final DiscoveryNodeCompact node; @Nullable private final DateTime openTime; @@ -166,7 +166,7 @@ public final ModelSizeStats modelSizeStats() { * API name: {@code node} */ @Nullable - public final DiscoveryNode node() { + public final DiscoveryNodeCompact node() { return this.node; } @@ -286,7 +286,7 @@ public static class Builder extends WithJsonObjectBuilderBase implement private ModelSizeStats modelSizeStats; @Nullable - private DiscoveryNode node; + private DiscoveryNodeCompact node; @Nullable private DateTime openTime; @@ -395,7 +395,7 @@ public final Builder modelSizeStats(Function * API name: {@code node} */ - public final Builder node(@Nullable DiscoveryNode value) { + public final Builder node(@Nullable DiscoveryNodeCompact value) { this.node = value; return this; } @@ -406,8 +406,8 @@ public final Builder node(@Nullable DiscoveryNode value) { *

* API name: {@code node} */ - public final Builder node(Function> fn) { - return this.node(fn.apply(new DiscoveryNode.Builder()).build()); + public final Builder node(Function> fn) { + return this.node(fn.apply(new DiscoveryNodeCompact.Builder()).build()); } /** @@ -497,7 +497,7 @@ protected static void setupJobStatsDeserializer(ObjectDeserializerAPI + * specification + */ +@JsonpDeserializable +public class ModelPackageConfig implements JsonpSerializable { + @Nullable + private final Long createTime; + + @Nullable + private final String description; + + private final Map inferenceConfig; + + private final Map metadata; + + @Nullable + private final String minimumVersion; + + @Nullable + private final String modelRepository; + + @Nullable + private final String modelType; + + private final String packagedModelId; + + @Nullable + private final String platformArchitecture; + + @Nullable + private final TrainedModelPrefixStrings prefixStrings; + + @Nullable + private final String size; + + @Nullable + private final String sha256; + + private final List tags; + + @Nullable + private final String vocabularyFile; + + // --------------------------------------------------------------------------------------------- + + private ModelPackageConfig(Builder builder) { + + this.createTime = builder.createTime; + this.description = builder.description; + this.inferenceConfig = ApiTypeHelper.unmodifiable(builder.inferenceConfig); + this.metadata = ApiTypeHelper.unmodifiable(builder.metadata); + this.minimumVersion = builder.minimumVersion; + this.modelRepository = builder.modelRepository; + this.modelType = builder.modelType; + this.packagedModelId = ApiTypeHelper.requireNonNull(builder.packagedModelId, this, "packagedModelId"); + this.platformArchitecture = builder.platformArchitecture; + this.prefixStrings = builder.prefixStrings; + this.size = builder.size; + this.sha256 = builder.sha256; + this.tags = ApiTypeHelper.unmodifiable(builder.tags); + this.vocabularyFile = 
builder.vocabularyFile; + + } + + public static ModelPackageConfig of(Function> fn) { + return fn.apply(new Builder()).build(); + } + + /** + * API name: {@code create_time} + */ + @Nullable + public final Long createTime() { + return this.createTime; + } + + /** + * API name: {@code description} + */ + @Nullable + public final String description() { + return this.description; + } + + /** + * API name: {@code inference_config} + */ + public final Map inferenceConfig() { + return this.inferenceConfig; + } + + /** + * API name: {@code metadata} + */ + public final Map metadata() { + return this.metadata; + } + + /** + * API name: {@code minimum_version} + */ + @Nullable + public final String minimumVersion() { + return this.minimumVersion; + } + + /** + * API name: {@code model_repository} + */ + @Nullable + public final String modelRepository() { + return this.modelRepository; + } + + /** + * API name: {@code model_type} + */ + @Nullable + public final String modelType() { + return this.modelType; + } + + /** + * Required - API name: {@code packaged_model_id} + */ + public final String packagedModelId() { + return this.packagedModelId; + } + + /** + * API name: {@code platform_architecture} + */ + @Nullable + public final String platformArchitecture() { + return this.platformArchitecture; + } + + /** + * API name: {@code prefix_strings} + */ + @Nullable + public final TrainedModelPrefixStrings prefixStrings() { + return this.prefixStrings; + } + + /** + * API name: {@code size} + */ + @Nullable + public final String size() { + return this.size; + } + + /** + * API name: {@code sha256} + */ + @Nullable + public final String sha256() { + return this.sha256; + } + + /** + * API name: {@code tags} + */ + public final List tags() { + return this.tags; + } + + /** + * API name: {@code vocabulary_file} + */ + @Nullable + public final String vocabularyFile() { + return this.vocabularyFile; + } + + /** + * Serialize this object to JSON. 
+ */ + public void serialize(JsonGenerator generator, JsonpMapper mapper) { + generator.writeStartObject(); + serializeInternal(generator, mapper); + generator.writeEnd(); + } + + protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + + if (this.createTime != null) { + generator.writeKey("create_time"); + generator.write(this.createTime); + + } + if (this.description != null) { + generator.writeKey("description"); + generator.write(this.description); + + } + if (ApiTypeHelper.isDefined(this.inferenceConfig)) { + generator.writeKey("inference_config"); + generator.writeStartObject(); + for (Map.Entry item0 : this.inferenceConfig.entrySet()) { + generator.writeKey(item0.getKey()); + item0.getValue().serialize(generator, mapper); + + } + generator.writeEnd(); + + } + if (ApiTypeHelper.isDefined(this.metadata)) { + generator.writeKey("metadata"); + generator.writeStartObject(); + for (Map.Entry item0 : this.metadata.entrySet()) { + generator.writeKey(item0.getKey()); + item0.getValue().serialize(generator, mapper); + + } + generator.writeEnd(); + + } + if (this.minimumVersion != null) { + generator.writeKey("minimum_version"); + generator.write(this.minimumVersion); + + } + if (this.modelRepository != null) { + generator.writeKey("model_repository"); + generator.write(this.modelRepository); + + } + if (this.modelType != null) { + generator.writeKey("model_type"); + generator.write(this.modelType); + + } + generator.writeKey("packaged_model_id"); + generator.write(this.packagedModelId); + + if (this.platformArchitecture != null) { + generator.writeKey("platform_architecture"); + generator.write(this.platformArchitecture); + + } + if (this.prefixStrings != null) { + generator.writeKey("prefix_strings"); + this.prefixStrings.serialize(generator, mapper); + + } + if (this.size != null) { + generator.writeKey("size"); + generator.write(this.size); + + } + if (this.sha256 != null) { + generator.writeKey("sha256"); + generator.write(this.sha256); + 
+ } + if (ApiTypeHelper.isDefined(this.tags)) { + generator.writeKey("tags"); + generator.writeStartArray(); + for (String item0 : this.tags) { + generator.write(item0); + + } + generator.writeEnd(); + + } + if (this.vocabularyFile != null) { + generator.writeKey("vocabulary_file"); + generator.write(this.vocabularyFile); + + } + + } + + @Override + public String toString() { + return JsonpUtils.toString(this); + } + + // --------------------------------------------------------------------------------------------- + + /** + * Builder for {@link ModelPackageConfig}. + */ + + public static class Builder extends WithJsonObjectBuilderBase + implements + ObjectBuilder { + @Nullable + private Long createTime; + + @Nullable + private String description; + + @Nullable + private Map inferenceConfig; + + @Nullable + private Map metadata; + + @Nullable + private String minimumVersion; + + @Nullable + private String modelRepository; + + @Nullable + private String modelType; + + private String packagedModelId; + + @Nullable + private String platformArchitecture; + + @Nullable + private TrainedModelPrefixStrings prefixStrings; + + @Nullable + private String size; + + @Nullable + private String sha256; + + @Nullable + private List tags; + + @Nullable + private String vocabularyFile; + + /** + * API name: {@code create_time} + */ + public final Builder createTime(@Nullable Long value) { + this.createTime = value; + return this; + } + + /** + * API name: {@code description} + */ + public final Builder description(@Nullable String value) { + this.description = value; + return this; + } + + /** + * API name: {@code inference_config} + *

+ * Adds all entries of map to inferenceConfig. + */ + public final Builder inferenceConfig(Map map) { + this.inferenceConfig = _mapPutAll(this.inferenceConfig, map); + return this; + } + + /** + * API name: {@code inference_config} + *

+ * Adds an entry to inferenceConfig. + */ + public final Builder inferenceConfig(String key, JsonData value) { + this.inferenceConfig = _mapPut(this.inferenceConfig, key, value); + return this; + } + + /** + * API name: {@code metadata} + *

+ * Adds all entries of map to metadata. + */ + public final Builder metadata(Map map) { + this.metadata = _mapPutAll(this.metadata, map); + return this; + } + + /** + * API name: {@code metadata} + *

+ * Adds an entry to metadata. + */ + public final Builder metadata(String key, JsonData value) { + this.metadata = _mapPut(this.metadata, key, value); + return this; + } + + /** + * API name: {@code minimum_version} + */ + public final Builder minimumVersion(@Nullable String value) { + this.minimumVersion = value; + return this; + } + + /** + * API name: {@code model_repository} + */ + public final Builder modelRepository(@Nullable String value) { + this.modelRepository = value; + return this; + } + + /** + * API name: {@code model_type} + */ + public final Builder modelType(@Nullable String value) { + this.modelType = value; + return this; + } + + /** + * Required - API name: {@code packaged_model_id} + */ + public final Builder packagedModelId(String value) { + this.packagedModelId = value; + return this; + } + + /** + * API name: {@code platform_architecture} + */ + public final Builder platformArchitecture(@Nullable String value) { + this.platformArchitecture = value; + return this; + } + + /** + * API name: {@code prefix_strings} + */ + public final Builder prefixStrings(@Nullable TrainedModelPrefixStrings value) { + this.prefixStrings = value; + return this; + } + + /** + * API name: {@code prefix_strings} + */ + public final Builder prefixStrings( + Function> fn) { + return this.prefixStrings(fn.apply(new TrainedModelPrefixStrings.Builder()).build()); + } + + /** + * API name: {@code size} + */ + public final Builder size(@Nullable String value) { + this.size = value; + return this; + } + + /** + * API name: {@code sha256} + */ + public final Builder sha256(@Nullable String value) { + this.sha256 = value; + return this; + } + + /** + * API name: {@code tags} + *

+ * Adds all elements of list to tags. + */ + public final Builder tags(List list) { + this.tags = _listAddAll(this.tags, list); + return this; + } + + /** + * API name: {@code tags} + *

+ * Adds one or more values to tags. + */ + public final Builder tags(String value, String... values) { + this.tags = _listAdd(this.tags, value, values); + return this; + } + + /** + * API name: {@code vocabulary_file} + */ + public final Builder vocabularyFile(@Nullable String value) { + this.vocabularyFile = value; + return this; + } + + @Override + protected Builder self() { + return this; + } + + /** + * Builds a {@link ModelPackageConfig}. + * + * @throws NullPointerException + * if some of the required fields are null. + */ + public ModelPackageConfig build() { + _checkSingleUse(); + + return new ModelPackageConfig(this); + } + } + + // --------------------------------------------------------------------------------------------- + + /** + * Json deserializer for {@link ModelPackageConfig} + */ + public static final JsonpDeserializer _DESERIALIZER = ObjectBuilderDeserializer + .lazy(Builder::new, ModelPackageConfig::setupModelPackageConfigDeserializer); + + protected static void setupModelPackageConfigDeserializer(ObjectDeserializer op) { + + op.add(Builder::createTime, JsonpDeserializer.longDeserializer(), "create_time"); + op.add(Builder::description, JsonpDeserializer.stringDeserializer(), "description"); + op.add(Builder::inferenceConfig, JsonpDeserializer.stringMapDeserializer(JsonData._DESERIALIZER), + "inference_config"); + op.add(Builder::metadata, JsonpDeserializer.stringMapDeserializer(JsonData._DESERIALIZER), "metadata"); + op.add(Builder::minimumVersion, JsonpDeserializer.stringDeserializer(), "minimum_version"); + op.add(Builder::modelRepository, JsonpDeserializer.stringDeserializer(), "model_repository"); + op.add(Builder::modelType, JsonpDeserializer.stringDeserializer(), "model_type"); + op.add(Builder::packagedModelId, JsonpDeserializer.stringDeserializer(), "packaged_model_id"); + op.add(Builder::platformArchitecture, JsonpDeserializer.stringDeserializer(), "platform_architecture"); + op.add(Builder::prefixStrings, 
TrainedModelPrefixStrings._DESERIALIZER, "prefix_strings"); + op.add(Builder::size, JsonpDeserializer.stringDeserializer(), "size"); + op.add(Builder::sha256, JsonpDeserializer.stringDeserializer(), "sha256"); + op.add(Builder::tags, JsonpDeserializer.arrayDeserializer(JsonpDeserializer.stringDeserializer()), "tags"); + op.add(Builder::vocabularyFile, JsonpDeserializer.stringDeserializer(), "vocabulary_file"); + + } + +} diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/ModelSizeStats.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/ModelSizeStats.java index 06e4bee89..326be2765 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/ModelSizeStats.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/ModelSizeStats.java @@ -78,6 +78,9 @@ public class ModelSizeStats implements JsonpSerializable { @Nullable private final String modelBytesMemoryLimit; + @Nullable + private final String outputMemoryAllocatorBytes; + @Nullable private final String peakModelBytes; @@ -121,6 +124,7 @@ private ModelSizeStats(Builder builder) { this.modelBytes = ApiTypeHelper.requireNonNull(builder.modelBytes, this, "modelBytes"); this.modelBytesExceeded = builder.modelBytesExceeded; this.modelBytesMemoryLimit = builder.modelBytesMemoryLimit; + this.outputMemoryAllocatorBytes = builder.outputMemoryAllocatorBytes; this.peakModelBytes = builder.peakModelBytes; this.assignmentMemoryBasis = builder.assignmentMemoryBasis; this.resultType = ApiTypeHelper.requireNonNull(builder.resultType, this, "resultType"); @@ -199,6 +203,14 @@ public final String modelBytesMemoryLimit() { return this.modelBytesMemoryLimit; } + /** + * API name: {@code output_memory_allocator_bytes} + */ + @Nullable + public final String outputMemoryAllocatorBytes() { + return this.outputMemoryAllocatorBytes; + } + /** * API name: {@code peak_model_bytes} */ @@ -333,6 +345,11 @@ protected void serializeInternal(JsonGenerator generator, 
JsonpMapper mapper) { generator.writeKey("model_bytes_memory_limit"); generator.write(this.modelBytesMemoryLimit); + } + if (this.outputMemoryAllocatorBytes != null) { + generator.writeKey("output_memory_allocator_bytes"); + generator.write(this.outputMemoryAllocatorBytes); + } if (this.peakModelBytes != null) { generator.writeKey("peak_model_bytes"); @@ -412,6 +429,9 @@ public static class Builder extends WithJsonObjectBuilderBase implement @Nullable private String modelBytesMemoryLimit; + @Nullable + private String outputMemoryAllocatorBytes; + @Nullable private String peakModelBytes; @@ -499,6 +519,14 @@ public final Builder modelBytesMemoryLimit(@Nullable String value) { return this; } + /** + * API name: {@code output_memory_allocator_bytes} + */ + public final Builder outputMemoryAllocatorBytes(@Nullable String value) { + this.outputMemoryAllocatorBytes = value; + return this; + } + /** * API name: {@code peak_model_bytes} */ @@ -647,6 +675,8 @@ protected static void setupModelSizeStatsDeserializer(ObjectDeserializer private SnapshotUpgradeState state; - private DiscoveryNode node; + private DiscoveryNodeContent node; private String assignmentExplanation; @@ -199,7 +199,7 @@ public final Builder state(SnapshotUpgradeState value) { /** * Required - API name: {@code node} */ - public final Builder node(DiscoveryNode value) { + public final Builder node(DiscoveryNodeContent value) { this.node = value; return this; } @@ -207,8 +207,8 @@ public final Builder node(DiscoveryNode value) { /** * Required - API name: {@code node} */ - public final Builder node(Function> fn) { - return this.node(fn.apply(new DiscoveryNode.Builder()).build()); + public final Builder node(Function> fn) { + return this.node(fn.apply(new DiscoveryNodeContent.Builder()).build()); } /** @@ -250,7 +250,7 @@ protected static void setupModelSnapshotUpgradeDeserializer(ObjectDeserializer */ @JsonpDeserializable -public class NlpBertTokenizationConfig implements TokenizationConfigVariant, 
JsonpSerializable { - @Nullable - private final Boolean doLowerCase; - - @Nullable - private final Boolean withSpecialTokens; - - @Nullable - private final Integer maxSequenceLength; - - @Nullable - private final TokenizationTruncate truncate; - - @Nullable - private final Integer span; - +public class NlpBertTokenizationConfig extends CommonTokenizationConfig implements TokenizationConfigVariant { // --------------------------------------------------------------------------------------------- private NlpBertTokenizationConfig(Builder builder) { - - this.doLowerCase = builder.doLowerCase; - this.withSpecialTokens = builder.withSpecialTokens; - this.maxSequenceLength = builder.maxSequenceLength; - this.truncate = builder.truncate; - this.span = builder.span; + super(builder); } @@ -100,177 +73,15 @@ public TokenizationConfig.Kind _tokenizationConfigKind() { return TokenizationConfig.Kind.Mpnet; } - /** - * Should the tokenizer lower case the text - *

- * API name: {@code do_lower_case} - */ - @Nullable - public final Boolean doLowerCase() { - return this.doLowerCase; - } - - /** - * Is tokenization completed with special tokens - *

- * API name: {@code with_special_tokens} - */ - @Nullable - public final Boolean withSpecialTokens() { - return this.withSpecialTokens; - } - - /** - * Maximum input sequence length for the model - *

- * API name: {@code max_sequence_length} - */ - @Nullable - public final Integer maxSequenceLength() { - return this.maxSequenceLength; - } - - /** - * Should tokenization input be automatically truncated before sending to the - * model for inference - *

- * API name: {@code truncate} - */ - @Nullable - public final TokenizationTruncate truncate() { - return this.truncate; - } - - /** - * Tokenization spanning options. Special value of -1 indicates no spanning - * takes place - *

- * API name: {@code span} - */ - @Nullable - public final Integer span() { - return this.span; - } - - /** - * Serialize this object to JSON. - */ - public void serialize(JsonGenerator generator, JsonpMapper mapper) { - generator.writeStartObject(); - serializeInternal(generator, mapper); - generator.writeEnd(); - } - - protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { - - if (this.doLowerCase != null) { - generator.writeKey("do_lower_case"); - generator.write(this.doLowerCase); - - } - if (this.withSpecialTokens != null) { - generator.writeKey("with_special_tokens"); - generator.write(this.withSpecialTokens); - - } - if (this.maxSequenceLength != null) { - generator.writeKey("max_sequence_length"); - generator.write(this.maxSequenceLength); - - } - if (this.truncate != null) { - generator.writeKey("truncate"); - this.truncate.serialize(generator, mapper); - } - if (this.span != null) { - generator.writeKey("span"); - generator.write(this.span); - - } - - } - - @Override - public String toString() { - return JsonpUtils.toString(this); - } - // --------------------------------------------------------------------------------------------- /** * Builder for {@link NlpBertTokenizationConfig}. */ - public static class Builder extends WithJsonObjectBuilderBase + public static class Builder extends CommonTokenizationConfig.AbstractBuilder implements ObjectBuilder { - @Nullable - private Boolean doLowerCase; - - @Nullable - private Boolean withSpecialTokens; - - @Nullable - private Integer maxSequenceLength; - - @Nullable - private TokenizationTruncate truncate; - - @Nullable - private Integer span; - - /** - * Should the tokenizer lower case the text - *

- * API name: {@code do_lower_case} - */ - public final Builder doLowerCase(@Nullable Boolean value) { - this.doLowerCase = value; - return this; - } - - /** - * Is tokenization completed with special tokens - *

- * API name: {@code with_special_tokens} - */ - public final Builder withSpecialTokens(@Nullable Boolean value) { - this.withSpecialTokens = value; - return this; - } - - /** - * Maximum input sequence length for the model - *

- * API name: {@code max_sequence_length} - */ - public final Builder maxSequenceLength(@Nullable Integer value) { - this.maxSequenceLength = value; - return this; - } - - /** - * Should tokenization input be automatically truncated before sending to the - * model for inference - *

- * API name: {@code truncate} - */ - public final Builder truncate(@Nullable TokenizationTruncate value) { - this.truncate = value; - return this; - } - - /** - * Tokenization spanning options. Special value of -1 indicates no spanning - * takes place - *

- * API name: {@code span} - */ - public final Builder span(@Nullable Integer value) { - this.span = value; - return this; - } - @Override protected Builder self() { return this; @@ -299,12 +110,7 @@ public NlpBertTokenizationConfig build() { protected static void setupNlpBertTokenizationConfigDeserializer( ObjectDeserializer op) { - - op.add(Builder::doLowerCase, JsonpDeserializer.booleanDeserializer(), "do_lower_case"); - op.add(Builder::withSpecialTokens, JsonpDeserializer.booleanDeserializer(), "with_special_tokens"); - op.add(Builder::maxSequenceLength, JsonpDeserializer.integerDeserializer(), "max_sequence_length"); - op.add(Builder::truncate, TokenizationTruncate._DESERIALIZER, "truncate"); - op.add(Builder::span, JsonpDeserializer.integerDeserializer(), "span"); + CommonTokenizationConfig.setupCommonTokenizationConfigDeserializer(op); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/NlpRobertaTokenizationConfig.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/NlpRobertaTokenizationConfig.java index e877c4f54..574d0dd1c 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/NlpRobertaTokenizationConfig.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/NlpRobertaTokenizationConfig.java @@ -22,15 +22,11 @@ import co.elastic.clients.json.JsonpDeserializable; import co.elastic.clients.json.JsonpDeserializer; import co.elastic.clients.json.JsonpMapper; -import co.elastic.clients.json.JsonpSerializable; -import co.elastic.clients.json.JsonpUtils; import co.elastic.clients.json.ObjectBuilderDeserializer; import co.elastic.clients.json.ObjectDeserializer; import co.elastic.clients.util.ObjectBuilder; -import co.elastic.clients.util.WithJsonObjectBuilderBase; import jakarta.json.stream.JsonGenerator; import java.lang.Boolean; -import java.lang.Integer; import java.util.Objects; import java.util.function.Function; import javax.annotation.Nullable; @@ -60,31 +56,16 @@ * 
specification */ @JsonpDeserializable -public class NlpRobertaTokenizationConfig implements TokenizationConfigVariant, JsonpSerializable { +public class NlpRobertaTokenizationConfig extends CommonTokenizationConfig implements TokenizationConfigVariant { @Nullable private final Boolean addPrefixSpace; - @Nullable - private final Boolean withSpecialTokens; - - @Nullable - private final Integer maxSequenceLength; - - @Nullable - private final TokenizationTruncate truncate; - - @Nullable - private final Integer span; - // --------------------------------------------------------------------------------------------- private NlpRobertaTokenizationConfig(Builder builder) { + super(builder); this.addPrefixSpace = builder.addPrefixSpace; - this.withSpecialTokens = builder.withSpecialTokens; - this.maxSequenceLength = builder.maxSequenceLength; - this.truncate = builder.truncate; - this.span = builder.span; } @@ -110,89 +91,15 @@ public final Boolean addPrefixSpace() { return this.addPrefixSpace; } - /** - * Is tokenization completed with special tokens - *

- * API name: {@code with_special_tokens} - */ - @Nullable - public final Boolean withSpecialTokens() { - return this.withSpecialTokens; - } - - /** - * Maximum input sequence length for the model - *

- * API name: {@code max_sequence_length} - */ - @Nullable - public final Integer maxSequenceLength() { - return this.maxSequenceLength; - } - - /** - * Should tokenization input be automatically truncated before sending to the - * model for inference - *

- * API name: {@code truncate} - */ - @Nullable - public final TokenizationTruncate truncate() { - return this.truncate; - } - - /** - * Tokenization spanning options. Special value of -1 indicates no spanning - * takes place - *

- * API name: {@code span} - */ - @Nullable - public final Integer span() { - return this.span; - } - - /** - * Serialize this object to JSON. - */ - public void serialize(JsonGenerator generator, JsonpMapper mapper) { - generator.writeStartObject(); - serializeInternal(generator, mapper); - generator.writeEnd(); - } - protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + super.serializeInternal(generator, mapper); if (this.addPrefixSpace != null) { generator.writeKey("add_prefix_space"); generator.write(this.addPrefixSpace); } - if (this.withSpecialTokens != null) { - generator.writeKey("with_special_tokens"); - generator.write(this.withSpecialTokens); - - } - if (this.maxSequenceLength != null) { - generator.writeKey("max_sequence_length"); - generator.write(this.maxSequenceLength); - } - if (this.truncate != null) { - generator.writeKey("truncate"); - this.truncate.serialize(generator, mapper); - } - if (this.span != null) { - generator.writeKey("span"); - generator.write(this.span); - - } - - } - - @Override - public String toString() { - return JsonpUtils.toString(this); } // --------------------------------------------------------------------------------------------- @@ -201,24 +108,12 @@ public String toString() { * Builder for {@link NlpRobertaTokenizationConfig}. */ - public static class Builder extends WithJsonObjectBuilderBase + public static class Builder extends CommonTokenizationConfig.AbstractBuilder implements ObjectBuilder { @Nullable private Boolean addPrefixSpace; - @Nullable - private Boolean withSpecialTokens; - - @Nullable - private Integer maxSequenceLength; - - @Nullable - private TokenizationTruncate truncate; - - @Nullable - private Integer span; - /** * Should the tokenizer prefix input with a space character *

@@ -229,48 +124,6 @@ public final Builder addPrefixSpace(@Nullable Boolean value) { return this; } - /** - * Is tokenization completed with special tokens - *

- * API name: {@code with_special_tokens} - */ - public final Builder withSpecialTokens(@Nullable Boolean value) { - this.withSpecialTokens = value; - return this; - } - - /** - * Maximum input sequence length for the model - *

- * API name: {@code max_sequence_length} - */ - public final Builder maxSequenceLength(@Nullable Integer value) { - this.maxSequenceLength = value; - return this; - } - - /** - * Should tokenization input be automatically truncated before sending to the - * model for inference - *

- * API name: {@code truncate} - */ - public final Builder truncate(@Nullable TokenizationTruncate value) { - this.truncate = value; - return this; - } - - /** - * Tokenization spanning options. Special value of -1 indicates no spanning - * takes place - *

- * API name: {@code span} - */ - public final Builder span(@Nullable Integer value) { - this.span = value; - return this; - } - @Override protected Builder self() { return this; @@ -299,12 +152,8 @@ public NlpRobertaTokenizationConfig build() { protected static void setupNlpRobertaTokenizationConfigDeserializer( ObjectDeserializer op) { - + CommonTokenizationConfig.setupCommonTokenizationConfigDeserializer(op); op.add(Builder::addPrefixSpace, JsonpDeserializer.booleanDeserializer(), "add_prefix_space"); - op.add(Builder::withSpecialTokens, JsonpDeserializer.booleanDeserializer(), "with_special_tokens"); - op.add(Builder::maxSequenceLength, JsonpDeserializer.integerDeserializer(), "max_sequence_length"); - op.add(Builder::truncate, TokenizationTruncate._DESERIALIZER, "truncate"); - op.add(Builder::span, JsonpDeserializer.integerDeserializer(), "span"); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/OverallBucket.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/OverallBucket.java index 7b8a68683..4e7b29b5f 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/OverallBucket.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/OverallBucket.java @@ -76,6 +76,7 @@ public class OverallBucket implements JsonpSerializable { private final long timestamp; + @Nullable private final DateTime timestampString; // --------------------------------------------------------------------------------------------- @@ -88,7 +89,7 @@ private OverallBucket(Builder builder) { this.overallScore = ApiTypeHelper.requireNonNull(builder.overallScore, this, "overallScore"); this.resultType = ApiTypeHelper.requireNonNull(builder.resultType, this, "resultType"); this.timestamp = ApiTypeHelper.requireNonNull(builder.timestamp, this, "timestamp"); - this.timestampString = ApiTypeHelper.requireNonNull(builder.timestampString, this, "timestampString"); + this.timestampString = builder.timestampString; } @@ 
-154,11 +155,11 @@ public final long timestamp() { } /** - * Required - The start time of the bucket for which these results were - * calculated. + * The start time of the bucket for which these results were calculated. *

* API name: {@code timestamp_string} */ + @Nullable public final DateTime timestampString() { return this.timestampString; } @@ -199,8 +200,10 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { generator.writeKey("timestamp"); generator.write(this.timestamp); - generator.writeKey("timestamp_string"); - this.timestampString.serialize(generator, mapper); + if (this.timestampString != null) { + generator.writeKey("timestamp_string"); + this.timestampString.serialize(generator, mapper); + } } @@ -228,6 +231,7 @@ public static class Builder extends WithJsonObjectBuilderBase implement private Long timestamp; + @Nullable private DateTime timestampString; /** @@ -319,12 +323,11 @@ public final Builder timestamp(long value) { } /** - * Required - The start time of the bucket for which these results were - * calculated. + * The start time of the bucket for which these results were calculated. *

* API name: {@code timestamp_string} */ - public final Builder timestampString(DateTime value) { + public final Builder timestampString(@Nullable DateTime value) { this.timestampString = value; return this; } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/PostDataResponse.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/PostDataResponse.java index 9547d9da5..6380bf5b5 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/PostDataResponse.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/PostDataResponse.java @@ -30,7 +30,6 @@ import co.elastic.clients.util.ObjectBuilder; import co.elastic.clients.util.WithJsonObjectBuilderBase; import jakarta.json.stream.JsonGenerator; -import java.lang.Integer; import java.lang.Long; import java.lang.String; import java.util.Objects; @@ -61,60 +60,73 @@ */ @JsonpDeserializable public class PostDataResponse implements JsonpSerializable { - private final long bucketCount; + private final String jobId; - private final long earliestRecordTimestamp; + private final long processedRecordCount; - private final long emptyBucketCount; + private final long processedFieldCount; private final long inputBytes; private final long inputFieldCount; - private final long inputRecordCount; - private final long invalidDateCount; - private final String jobId; + private final long missingFieldCount; - private final int lastDataTime; + private final long outOfOrderTimestampCount; - private final long latestRecordTimestamp; + private final long emptyBucketCount; - private final long missingFieldCount; + private final long sparseBucketCount; - private final long outOfOrderTimestampCount; + private final long bucketCount; - private final long processedFieldCount; + @Nullable + private final Long earliestRecordTimestamp; - private final long processedRecordCount; + @Nullable + private final Long latestRecordTimestamp; - private final long sparseBucketCount; + 
@Nullable + private final Long lastDataTime; + + @Nullable + private final Long latestEmptyBucketTimestamp; + + @Nullable + private final Long latestSparseBucketTimestamp; + + private final long inputRecordCount; + + @Nullable + private final Long logTime; // --------------------------------------------------------------------------------------------- private PostDataResponse(Builder builder) { - this.bucketCount = ApiTypeHelper.requireNonNull(builder.bucketCount, this, "bucketCount"); - this.earliestRecordTimestamp = ApiTypeHelper.requireNonNull(builder.earliestRecordTimestamp, this, - "earliestRecordTimestamp"); - this.emptyBucketCount = ApiTypeHelper.requireNonNull(builder.emptyBucketCount, this, "emptyBucketCount"); + this.jobId = ApiTypeHelper.requireNonNull(builder.jobId, this, "jobId"); + this.processedRecordCount = ApiTypeHelper.requireNonNull(builder.processedRecordCount, this, + "processedRecordCount"); + this.processedFieldCount = ApiTypeHelper.requireNonNull(builder.processedFieldCount, this, + "processedFieldCount"); this.inputBytes = ApiTypeHelper.requireNonNull(builder.inputBytes, this, "inputBytes"); this.inputFieldCount = ApiTypeHelper.requireNonNull(builder.inputFieldCount, this, "inputFieldCount"); - this.inputRecordCount = ApiTypeHelper.requireNonNull(builder.inputRecordCount, this, "inputRecordCount"); this.invalidDateCount = ApiTypeHelper.requireNonNull(builder.invalidDateCount, this, "invalidDateCount"); - this.jobId = ApiTypeHelper.requireNonNull(builder.jobId, this, "jobId"); - this.lastDataTime = ApiTypeHelper.requireNonNull(builder.lastDataTime, this, "lastDataTime"); - this.latestRecordTimestamp = ApiTypeHelper.requireNonNull(builder.latestRecordTimestamp, this, - "latestRecordTimestamp"); this.missingFieldCount = ApiTypeHelper.requireNonNull(builder.missingFieldCount, this, "missingFieldCount"); this.outOfOrderTimestampCount = ApiTypeHelper.requireNonNull(builder.outOfOrderTimestampCount, this, "outOfOrderTimestampCount"); - 
this.processedFieldCount = ApiTypeHelper.requireNonNull(builder.processedFieldCount, this, - "processedFieldCount"); - this.processedRecordCount = ApiTypeHelper.requireNonNull(builder.processedRecordCount, this, - "processedRecordCount"); + this.emptyBucketCount = ApiTypeHelper.requireNonNull(builder.emptyBucketCount, this, "emptyBucketCount"); this.sparseBucketCount = ApiTypeHelper.requireNonNull(builder.sparseBucketCount, this, "sparseBucketCount"); + this.bucketCount = ApiTypeHelper.requireNonNull(builder.bucketCount, this, "bucketCount"); + this.earliestRecordTimestamp = builder.earliestRecordTimestamp; + this.latestRecordTimestamp = builder.latestRecordTimestamp; + this.lastDataTime = builder.lastDataTime; + this.latestEmptyBucketTimestamp = builder.latestEmptyBucketTimestamp; + this.latestSparseBucketTimestamp = builder.latestSparseBucketTimestamp; + this.inputRecordCount = ApiTypeHelper.requireNonNull(builder.inputRecordCount, this, "inputRecordCount"); + this.logTime = builder.logTime; } @@ -123,24 +135,24 @@ public static PostDataResponse of(Function implements ObjectBuilder { - private Long bucketCount; + private String jobId; - private Long earliestRecordTimestamp; + private Long processedRecordCount; - private Long emptyBucketCount; + private Long processedFieldCount; private Long inputBytes; private Long inputFieldCount; - private Long inputRecordCount; - private Long invalidDateCount; - private String jobId; + private Long missingFieldCount; + + private Long outOfOrderTimestampCount; + + private Long emptyBucketCount; + + private Long sparseBucketCount; - private Integer lastDataTime; + private Long bucketCount; + + @Nullable + private Long earliestRecordTimestamp; + @Nullable private Long latestRecordTimestamp; - private Long missingFieldCount; + @Nullable + private Long lastDataTime; - private Long outOfOrderTimestampCount; + @Nullable + private Long latestEmptyBucketTimestamp; - private Long processedFieldCount; + @Nullable + private Long 
latestSparseBucketTimestamp; - private Long processedRecordCount; + private Long inputRecordCount; - private Long sparseBucketCount; + @Nullable + private Long logTime; /** - * Required - API name: {@code bucket_count} + * Required - API name: {@code job_id} */ - public final Builder bucketCount(long value) { - this.bucketCount = value; + public final Builder jobId(String value) { + this.jobId = value; return this; } /** - * Required - API name: {@code earliest_record_timestamp} + * Required - API name: {@code processed_record_count} */ - public final Builder earliestRecordTimestamp(long value) { - this.earliestRecordTimestamp = value; + public final Builder processedRecordCount(long value) { + this.processedRecordCount = value; return this; } /** - * Required - API name: {@code empty_bucket_count} + * Required - API name: {@code processed_field_count} */ - public final Builder emptyBucketCount(long value) { - this.emptyBucketCount = value; + public final Builder processedFieldCount(long value) { + this.processedFieldCount = value; return this; } @@ -368,82 +441,106 @@ public final Builder inputFieldCount(long value) { } /** - * Required - API name: {@code input_record_count} + * Required - API name: {@code invalid_date_count} */ - public final Builder inputRecordCount(long value) { - this.inputRecordCount = value; + public final Builder invalidDateCount(long value) { + this.invalidDateCount = value; return this; } /** - * Required - API name: {@code invalid_date_count} + * Required - API name: {@code missing_field_count} */ - public final Builder invalidDateCount(long value) { - this.invalidDateCount = value; + public final Builder missingFieldCount(long value) { + this.missingFieldCount = value; return this; } /** - * Required - API name: {@code job_id} + * Required - API name: {@code out_of_order_timestamp_count} */ - public final Builder jobId(String value) { - this.jobId = value; + public final Builder outOfOrderTimestampCount(long value) { + 
this.outOfOrderTimestampCount = value; return this; } /** - * Required - API name: {@code last_data_time} + * Required - API name: {@code empty_bucket_count} */ - public final Builder lastDataTime(int value) { - this.lastDataTime = value; + public final Builder emptyBucketCount(long value) { + this.emptyBucketCount = value; return this; } /** - * Required - API name: {@code latest_record_timestamp} + * Required - API name: {@code sparse_bucket_count} */ - public final Builder latestRecordTimestamp(long value) { + public final Builder sparseBucketCount(long value) { + this.sparseBucketCount = value; + return this; + } + + /** + * Required - API name: {@code bucket_count} + */ + public final Builder bucketCount(long value) { + this.bucketCount = value; + return this; + } + + /** + * API name: {@code earliest_record_timestamp} + */ + public final Builder earliestRecordTimestamp(@Nullable Long value) { + this.earliestRecordTimestamp = value; + return this; + } + + /** + * API name: {@code latest_record_timestamp} + */ + public final Builder latestRecordTimestamp(@Nullable Long value) { this.latestRecordTimestamp = value; return this; } /** - * Required - API name: {@code missing_field_count} + * API name: {@code last_data_time} */ - public final Builder missingFieldCount(long value) { - this.missingFieldCount = value; + public final Builder lastDataTime(@Nullable Long value) { + this.lastDataTime = value; return this; } /** - * Required - API name: {@code out_of_order_timestamp_count} + * API name: {@code latest_empty_bucket_timestamp} */ - public final Builder outOfOrderTimestampCount(long value) { - this.outOfOrderTimestampCount = value; + public final Builder latestEmptyBucketTimestamp(@Nullable Long value) { + this.latestEmptyBucketTimestamp = value; return this; } /** - * Required - API name: {@code processed_field_count} + * API name: {@code latest_sparse_bucket_timestamp} */ - public final Builder processedFieldCount(long value) { - this.processedFieldCount = 
value; + public final Builder latestSparseBucketTimestamp(@Nullable Long value) { + this.latestSparseBucketTimestamp = value; return this; } /** - * Required - API name: {@code processed_record_count} + * Required - API name: {@code input_record_count} */ - public final Builder processedRecordCount(long value) { - this.processedRecordCount = value; + public final Builder inputRecordCount(long value) { + this.inputRecordCount = value; return this; } /** - * Required - API name: {@code sparse_bucket_count} + * API name: {@code log_time} */ - public final Builder sparseBucketCount(long value) { - this.sparseBucketCount = value; + public final Builder logTime(@Nullable Long value) { + this.logTime = value; return this; } @@ -475,21 +572,26 @@ public PostDataResponse build() { protected static void setupPostDataResponseDeserializer(ObjectDeserializer op) { - op.add(Builder::bucketCount, JsonpDeserializer.longDeserializer(), "bucket_count"); - op.add(Builder::earliestRecordTimestamp, JsonpDeserializer.longDeserializer(), "earliest_record_timestamp"); - op.add(Builder::emptyBucketCount, JsonpDeserializer.longDeserializer(), "empty_bucket_count"); + op.add(Builder::jobId, JsonpDeserializer.stringDeserializer(), "job_id"); + op.add(Builder::processedRecordCount, JsonpDeserializer.longDeserializer(), "processed_record_count"); + op.add(Builder::processedFieldCount, JsonpDeserializer.longDeserializer(), "processed_field_count"); op.add(Builder::inputBytes, JsonpDeserializer.longDeserializer(), "input_bytes"); op.add(Builder::inputFieldCount, JsonpDeserializer.longDeserializer(), "input_field_count"); - op.add(Builder::inputRecordCount, JsonpDeserializer.longDeserializer(), "input_record_count"); op.add(Builder::invalidDateCount, JsonpDeserializer.longDeserializer(), "invalid_date_count"); - op.add(Builder::jobId, JsonpDeserializer.stringDeserializer(), "job_id"); - op.add(Builder::lastDataTime, JsonpDeserializer.integerDeserializer(), "last_data_time"); - 
op.add(Builder::latestRecordTimestamp, JsonpDeserializer.longDeserializer(), "latest_record_timestamp"); op.add(Builder::missingFieldCount, JsonpDeserializer.longDeserializer(), "missing_field_count"); op.add(Builder::outOfOrderTimestampCount, JsonpDeserializer.longDeserializer(), "out_of_order_timestamp_count"); - op.add(Builder::processedFieldCount, JsonpDeserializer.longDeserializer(), "processed_field_count"); - op.add(Builder::processedRecordCount, JsonpDeserializer.longDeserializer(), "processed_record_count"); + op.add(Builder::emptyBucketCount, JsonpDeserializer.longDeserializer(), "empty_bucket_count"); op.add(Builder::sparseBucketCount, JsonpDeserializer.longDeserializer(), "sparse_bucket_count"); + op.add(Builder::bucketCount, JsonpDeserializer.longDeserializer(), "bucket_count"); + op.add(Builder::earliestRecordTimestamp, JsonpDeserializer.longDeserializer(), "earliest_record_timestamp"); + op.add(Builder::latestRecordTimestamp, JsonpDeserializer.longDeserializer(), "latest_record_timestamp"); + op.add(Builder::lastDataTime, JsonpDeserializer.longDeserializer(), "last_data_time"); + op.add(Builder::latestEmptyBucketTimestamp, JsonpDeserializer.longDeserializer(), + "latest_empty_bucket_timestamp"); + op.add(Builder::latestSparseBucketTimestamp, JsonpDeserializer.longDeserializer(), + "latest_sparse_bucket_timestamp"); + op.add(Builder::inputRecordCount, JsonpDeserializer.longDeserializer(), "input_record_count"); + op.add(Builder::logTime, JsonpDeserializer.longDeserializer(), "log_time"); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/PutDataFrameAnalyticsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/PutDataFrameAnalyticsRequest.java index 850e50251..39f72296c 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/PutDataFrameAnalyticsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/PutDataFrameAnalyticsRequest.java @@ -21,6 +21,7 @@ import 
co.elastic.clients.elasticsearch._types.ErrorResponse; import co.elastic.clients.elasticsearch._types.RequestBase; +import co.elastic.clients.json.JsonData; import co.elastic.clients.json.JsonpDeserializable; import co.elastic.clients.json.JsonpDeserializer; import co.elastic.clients.json.JsonpMapper; @@ -71,6 +72,8 @@ */ @JsonpDeserializable public class PutDataFrameAnalyticsRequest extends RequestBase implements JsonpSerializable { + private final Map meta; + @Nullable private final Boolean allowLazyStart; @@ -103,6 +106,7 @@ public class PutDataFrameAnalyticsRequest extends RequestBase implements JsonpSe private PutDataFrameAnalyticsRequest(Builder builder) { + this.meta = ApiTypeHelper.unmodifiable(builder.meta); this.allowLazyStart = builder.allowLazyStart; this.analysis = ApiTypeHelper.requireNonNull(builder.analysis, this, "analysis"); this.analyzedFields = builder.analyzedFields; @@ -121,6 +125,13 @@ public static PutDataFrameAnalyticsRequest of(Function meta() { + return this.meta; + } + /** * Specifies whether this job can start when there is insufficient machine * learning node capacity for it to be immediately assigned to a node. 
If set to @@ -280,6 +291,17 @@ public void serialize(JsonGenerator generator, JsonpMapper mapper) { protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + if (ApiTypeHelper.isDefined(this.meta)) { + generator.writeKey("_meta"); + generator.writeStartObject(); + for (Map.Entry item0 : this.meta.entrySet()) { + generator.writeKey(item0.getKey()); + item0.getValue().serialize(generator, mapper); + + } + generator.writeEnd(); + + } if (this.allowLazyStart != null) { generator.writeKey("allow_lazy_start"); generator.write(this.allowLazyStart); @@ -349,6 +371,9 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { public static class Builder extends RequestBase.AbstractBuilder implements ObjectBuilder { + @Nullable + private Map meta; + @Nullable private Boolean allowLazyStart; @@ -378,6 +403,26 @@ public static class Builder extends RequestBase.AbstractBuilder @Nullable private String version; + /** + * API name: {@code _meta} + *

+ * Adds all entries of map to meta. + */ + public final Builder meta(Map map) { + this.meta = _mapPutAll(this.meta, map); + return this; + } + + /** + * API name: {@code _meta} + *

+ * Adds an entry to meta. + */ + public final Builder meta(String key, JsonData value) { + this.meta = _mapPut(this.meta, key, value); + return this; + } + /** * Specifies whether this job can start when there is insufficient machine * learning node capacity for it to be immediately assigned to a node. If set to @@ -642,6 +687,7 @@ public PutDataFrameAnalyticsRequest build() { protected static void setupPutDataFrameAnalyticsRequestDeserializer( ObjectDeserializer op) { + op.add(Builder::meta, JsonpDeserializer.stringMapDeserializer(JsonData._DESERIALIZER), "_meta"); op.add(Builder::allowLazyStart, JsonpDeserializer.booleanDeserializer(), "allow_lazy_start"); op.add(Builder::analysis, DataframeAnalysis._DESERIALIZER, "analysis"); op.add(Builder::analyzedFields, DataframeAnalysisAnalyzedFields._DESERIALIZER, "analyzed_fields"); diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/PutDataFrameAnalyticsResponse.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/PutDataFrameAnalyticsResponse.java index 54b7fb42a..3d2d1ef31 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/PutDataFrameAnalyticsResponse.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/PutDataFrameAnalyticsResponse.java @@ -19,6 +19,7 @@ package co.elastic.clients.elasticsearch.ml; +import co.elastic.clients.json.JsonData; import co.elastic.clients.json.JsonpDeserializable; import co.elastic.clients.json.JsonpDeserializer; import co.elastic.clients.json.JsonpMapper; @@ -34,6 +35,7 @@ import java.lang.Integer; import java.lang.Long; import java.lang.String; +import java.util.Map; import java.util.Objects; import java.util.function.Function; import javax.annotation.Nullable; @@ -84,6 +86,8 @@ public class PutDataFrameAnalyticsResponse implements JsonpSerializable { private final int maxNumThreads; + private final Map meta; + private final String modelMemoryLimit; private final DataframeAnalyticsSource source; @@ -103,6 
+107,7 @@ private PutDataFrameAnalyticsResponse(Builder builder) { this.dest = ApiTypeHelper.requireNonNull(builder.dest, this, "dest"); this.id = ApiTypeHelper.requireNonNull(builder.id, this, "id"); this.maxNumThreads = ApiTypeHelper.requireNonNull(builder.maxNumThreads, this, "maxNumThreads"); + this.meta = ApiTypeHelper.unmodifiable(builder.meta); this.modelMemoryLimit = ApiTypeHelper.requireNonNull(builder.modelMemoryLimit, this, "modelMemoryLimit"); this.source = ApiTypeHelper.requireNonNull(builder.source, this, "source"); this.version = ApiTypeHelper.requireNonNull(builder.version, this, "version"); @@ -179,6 +184,13 @@ public final int maxNumThreads() { return this.maxNumThreads; } + /** + * API name: {@code _meta} + */ + public final Map meta() { + return this.meta; + } + /** * Required - API name: {@code model_memory_limit} */ @@ -244,6 +256,17 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { generator.writeKey("max_num_threads"); generator.write(this.maxNumThreads); + if (ApiTypeHelper.isDefined(this.meta)) { + generator.writeKey("_meta"); + generator.writeStartObject(); + for (Map.Entry item0 : this.meta.entrySet()) { + generator.writeKey(item0.getKey()); + item0.getValue().serialize(generator, mapper); + + } + generator.writeEnd(); + + } generator.writeKey("model_memory_limit"); generator.write(this.modelMemoryLimit); @@ -290,6 +313,9 @@ public static class Builder extends WithJsonObjectBuilderBase private Integer maxNumThreads; + @Nullable + private Map meta; + private String modelMemoryLimit; private DataframeAnalyticsSource source; @@ -399,6 +425,26 @@ public final Builder maxNumThreads(int value) { return this; } + /** + * API name: {@code _meta} + *

+ * Adds all entries of map to meta. + */ + public final Builder meta(Map map) { + this.meta = _mapPutAll(this.meta, map); + return this; + } + + /** + * API name: {@code _meta} + *

+ * Adds an entry to meta. + */ + public final Builder meta(String key, JsonData value) { + this.meta = _mapPut(this.meta, key, value); + return this; + } + /** * Required - API name: {@code model_memory_limit} */ @@ -469,6 +515,7 @@ protected static void setupPutDataFrameAnalyticsResponseDeserializer( op.add(Builder::dest, DataframeAnalyticsDestination._DESERIALIZER, "dest"); op.add(Builder::id, JsonpDeserializer.stringDeserializer(), "id"); op.add(Builder::maxNumThreads, JsonpDeserializer.integerDeserializer(), "max_num_threads"); + op.add(Builder::meta, JsonpDeserializer.stringMapDeserializer(JsonData._DESERIALIZER), "_meta"); op.add(Builder::modelMemoryLimit, JsonpDeserializer.stringDeserializer(), "model_memory_limit"); op.add(Builder::source, DataframeAnalyticsSource._DESERIALIZER, "source"); op.add(Builder::version, JsonpDeserializer.stringDeserializer(), "version"); diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/PutDatafeedRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/PutDatafeedRequest.java index 686bc9c5c..959991750 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/PutDatafeedRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/PutDatafeedRequest.java @@ -1048,7 +1048,7 @@ public PutDatafeedRequest build() { protected static void setupPutDatafeedRequestDeserializer(ObjectDeserializer op) { op.add(Builder::aggregations, JsonpDeserializer.stringMapDeserializer(Aggregation._DESERIALIZER), - "aggregations"); + "aggregations", "aggs"); op.add(Builder::chunkingConfig, ChunkingConfig._DESERIALIZER, "chunking_config"); op.add(Builder::delayedDataCheckConfig, DelayedDataCheckConfig._DESERIALIZER, "delayed_data_check_config"); op.add(Builder::frequency, Time._DESERIALIZER, "frequency"); diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/PutJobRequest.java 
b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/PutJobRequest.java index ab043a7a2..462bb653d 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/PutJobRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/PutJobRequest.java @@ -20,6 +20,7 @@ package co.elastic.clients.elasticsearch.ml; import co.elastic.clients.elasticsearch._types.ErrorResponse; +import co.elastic.clients.elasticsearch._types.ExpandWildcard; import co.elastic.clients.elasticsearch._types.RequestBase; import co.elastic.clients.elasticsearch._types.Time; import co.elastic.clients.json.JsonData; @@ -37,12 +38,12 @@ import java.lang.Boolean; import java.lang.Long; import java.lang.String; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.function.Function; +import java.util.stream.Collectors; import javax.annotation.Nullable; //---------------------------------------------------------------- @@ -75,6 +76,9 @@ public class PutJobRequest extends RequestBase implements JsonpSerializable { @Nullable private final Boolean allowLazyOpen; + @Nullable + private final Boolean allowNoIndices; + private final AnalysisConfig analysisConfig; @Nullable @@ -97,8 +101,16 @@ public class PutJobRequest extends RequestBase implements JsonpSerializable { @Nullable private final String description; + private final List expandWildcards; + private final List groups; + @Nullable + private final Boolean ignoreThrottled; + + @Nullable + private final Boolean ignoreUnavailable; + private final String jobId; @Nullable @@ -121,6 +133,7 @@ public class PutJobRequest extends RequestBase implements JsonpSerializable { private PutJobRequest(Builder builder) { this.allowLazyOpen = builder.allowLazyOpen; + this.allowNoIndices = builder.allowNoIndices; this.analysisConfig = ApiTypeHelper.requireNonNull(builder.analysisConfig, this, "analysisConfig"); this.analysisLimits = 
builder.analysisLimits; this.backgroundPersistInterval = builder.backgroundPersistInterval; @@ -129,7 +142,10 @@ private PutJobRequest(Builder builder) { this.dataDescription = ApiTypeHelper.requireNonNull(builder.dataDescription, this, "dataDescription"); this.datafeedConfig = builder.datafeedConfig; this.description = builder.description; + this.expandWildcards = ApiTypeHelper.unmodifiable(builder.expandWildcards); this.groups = ApiTypeHelper.unmodifiable(builder.groups); + this.ignoreThrottled = builder.ignoreThrottled; + this.ignoreUnavailable = builder.ignoreUnavailable; this.jobId = ApiTypeHelper.requireNonNull(builder.jobId, this, "jobId"); this.modelPlotConfig = builder.modelPlotConfig; this.modelSnapshotRetentionDays = builder.modelSnapshotRetentionDays; @@ -161,6 +177,18 @@ public final Boolean allowLazyOpen() { return this.allowLazyOpen; } + /** + * If true, wildcard indices expressions that resolve into no + * concrete indices are ignored. This includes the _all string or + * when no indices are specified. + *

+ * API name: {@code allow_no_indices} + */ + @Nullable + public final Boolean allowNoIndices() { + return this.allowNoIndices; + } + /** * Required - Specifies how to analyze the data. After you create a job, you * cannot change the analysis configuration; all the properties are @@ -261,6 +289,28 @@ public final String description() { return this.description; } + /** + * Type of index that wildcard patterns can match. If the request can target + * data streams, this argument determines whether wildcard expressions match + * hidden data streams. Supports comma-separated values. Valid values are: + *

    + *
  • all: Match any data stream or index, including hidden + * ones.
  • + *
  • closed: Match closed, non-hidden indices. Also matches any + * non-hidden data stream. Data streams cannot be closed.
  • + *
  • hidden: Match hidden data streams and hidden indices. Must + * be combined with open, closed, or both.
  • + *
  • none: Wildcard patterns are not accepted.
  • + *
  • open: Match open, non-hidden indices. Also matches any + * non-hidden data stream.
  • + *
+ *

+ * API name: {@code expand_wildcards} + */ + public final List expandWildcards() { + return this.expandWildcards; + } + /** * A list of job groups. A job can belong to no groups or many. *

@@ -270,6 +320,30 @@ public final List groups() { return this.groups; } + /** + * If true, concrete, expanded or aliased indices are ignored when + * frozen. + *

+ * API name: {@code ignore_throttled} + * + * @deprecated 7.16.0 + */ + @Deprecated + @Nullable + public final Boolean ignoreThrottled() { + return this.ignoreThrottled; + } + + /** + * If true, unavailable indices (missing or closed) are ignored. + *

+ * API name: {@code ignore_unavailable} + */ + @Nullable + public final Boolean ignoreUnavailable() { + return this.ignoreUnavailable; + } + /** * Required - The identifier for the anomaly detection job. This identifier can * contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and @@ -453,6 +527,9 @@ public static class Builder extends RequestBase.AbstractBuilder impleme @Nullable private Boolean allowLazyOpen; + @Nullable + private Boolean allowNoIndices; + private AnalysisConfig analysisConfig; @Nullable @@ -475,9 +552,18 @@ public static class Builder extends RequestBase.AbstractBuilder impleme @Nullable private String description; + @Nullable + private List expandWildcards; + @Nullable private List groups; + @Nullable + private Boolean ignoreThrottled; + + @Nullable + private Boolean ignoreUnavailable; + private String jobId; @Nullable @@ -513,6 +599,18 @@ public final Builder allowLazyOpen(@Nullable Boolean value) { return this; } + /** + * If true, wildcard indices expressions that resolve into no + * concrete indices are ignored. This includes the _all string or + * when no indices are specified. + *

+ * API name: {@code allow_no_indices} + */ + public final Builder allowNoIndices(@Nullable Boolean value) { + this.allowNoIndices = value; + return this; + } + /** * Required - Specifies how to analyze the data. After you create a job, you * cannot change the analysis configuration; all the properties are @@ -678,6 +776,56 @@ public final Builder description(@Nullable String value) { return this; } + /** + * Type of index that wildcard patterns can match. If the request can target + * data streams, this argument determines whether wildcard expressions match + * hidden data streams. Supports comma-separated values. Valid values are: + *

    + *
  • all: Match any data stream or index, including hidden + * ones.
  • + *
  • closed: Match closed, non-hidden indices. Also matches any + * non-hidden data stream. Data streams cannot be closed.
  • + *
  • hidden: Match hidden data streams and hidden indices. Must + * be combined with open, closed, or both.
  • + *
  • none: Wildcard patterns are not accepted.
  • + *
  • open: Match open, non-hidden indices. Also matches any + * non-hidden data stream.
  • + *
+ *

+ * API name: {@code expand_wildcards} + *

+ * Adds all elements of list to expandWildcards. + */ + public final Builder expandWildcards(List list) { + this.expandWildcards = _listAddAll(this.expandWildcards, list); + return this; + } + + /** + * Type of index that wildcard patterns can match. If the request can target + * data streams, this argument determines whether wildcard expressions match + * hidden data streams. Supports comma-separated values. Valid values are: + *

    + *
  • all: Match any data stream or index, including hidden + * ones.
  • + *
  • closed: Match closed, non-hidden indices. Also matches any + * non-hidden data stream. Data streams cannot be closed.
  • + *
  • hidden: Match hidden data streams and hidden indices. Must + * be combined with open, closed, or both.
  • + *
  • none: Wildcard patterns are not accepted.
  • + *
  • open: Match open, non-hidden indices. Also matches any + * non-hidden data stream.
  • + *
+ *

+ * API name: {@code expand_wildcards} + *

+ * Adds one or more values to expandWildcards. + */ + public final Builder expandWildcards(ExpandWildcard value, ExpandWildcard... values) { + this.expandWildcards = _listAdd(this.expandWildcards, value, values); + return this; + } + /** * A list of job groups. A job can belong to no groups or many. *

@@ -702,6 +850,30 @@ public final Builder groups(String value, String... values) { return this; } + /** + * If true, concrete, expanded or aliased indices are ignored when + * frozen. + *

+ * API name: {@code ignore_throttled} + * + * @deprecated 7.16.0 + */ + @Deprecated + public final Builder ignoreThrottled(@Nullable Boolean value) { + this.ignoreThrottled = value; + return this; + } + + /** + * If true, unavailable indices (missing or closed) are ignored. + *

+ * API name: {@code ignore_unavailable} + */ + public final Builder ignoreUnavailable(@Nullable Boolean value) { + this.ignoreUnavailable = value; + return this; + } + /** * Required - The identifier for the anomaly detection job. This identifier can * contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and @@ -902,7 +1074,21 @@ protected static void setupPutJobRequestDeserializer(ObjectDeserializer { - return Collections.emptyMap(); + Map params = new HashMap<>(); + if (ApiTypeHelper.isDefined(request.expandWildcards)) { + params.put("expand_wildcards", + request.expandWildcards.stream().map(v -> v.jsonValue()).collect(Collectors.joining(","))); + } + if (request.ignoreUnavailable != null) { + params.put("ignore_unavailable", String.valueOf(request.ignoreUnavailable)); + } + if (request.allowNoIndices != null) { + params.put("allow_no_indices", String.valueOf(request.allowNoIndices)); + } + if (request.ignoreThrottled != null) { + params.put("ignore_throttled", String.valueOf(request.ignoreThrottled)); + } + return params; }, SimpleEndpoint.emptyMap(), true, PutJobResponse._DESERIALIZER); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/TextEmbeddingInferenceOptions.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/TextEmbeddingInferenceOptions.java index 4c852b865..c5a827d6b 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/TextEmbeddingInferenceOptions.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/TextEmbeddingInferenceOptions.java @@ -26,6 +26,7 @@ import co.elastic.clients.json.JsonpUtils; import co.elastic.clients.json.ObjectBuilderDeserializer; import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ApiTypeHelper; import co.elastic.clients.util.ObjectBuilder; import co.elastic.clients.util.WithJsonObjectBuilderBase; import jakarta.json.stream.JsonGenerator; @@ -70,6 +71,8 @@ public class TextEmbeddingInferenceOptions 
implements InferenceConfigCreateVaria @Nullable private final String resultsField; + private final Vocabulary vocabulary; + // --------------------------------------------------------------------------------------------- private TextEmbeddingInferenceOptions(Builder builder) { @@ -77,6 +80,7 @@ private TextEmbeddingInferenceOptions(Builder builder) { this.embeddingSize = builder.embeddingSize; this.tokenization = builder.tokenization; this.resultsField = builder.resultsField; + this.vocabulary = ApiTypeHelper.requireNonNull(builder.vocabulary, this, "vocabulary"); } @@ -123,6 +127,13 @@ public final String resultsField() { return this.resultsField; } + /** + * Required - API name: {@code vocabulary} + */ + public final Vocabulary vocabulary() { + return this.vocabulary; + } + /** * Serialize this object to JSON. */ @@ -149,6 +160,8 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { generator.write(this.resultsField); } + generator.writeKey("vocabulary"); + this.vocabulary.serialize(generator, mapper); } @@ -175,6 +188,8 @@ public static class Builder extends WithJsonObjectBuilderBase @Nullable private String resultsField; + private Vocabulary vocabulary; + /** * The number of dimensions in the embedding output *

@@ -215,6 +230,21 @@ public final Builder resultsField(@Nullable String value) { return this; } + /** + * Required - API name: {@code vocabulary} + */ + public final Builder vocabulary(Vocabulary value) { + this.vocabulary = value; + return this; + } + + /** + * Required - API name: {@code vocabulary} + */ + public final Builder vocabulary(Function> fn) { + return this.vocabulary(fn.apply(new Vocabulary.Builder()).build()); + } + @Override protected Builder self() { return this; @@ -247,6 +277,7 @@ protected static void setupTextEmbeddingInferenceOptionsDeserializer( op.add(Builder::embeddingSize, JsonpDeserializer.integerDeserializer(), "embedding_size"); op.add(Builder::tokenization, TokenizationConfig._DESERIALIZER, "tokenization"); op.add(Builder::resultsField, JsonpDeserializer.stringDeserializer(), "results_field"); + op.add(Builder::vocabulary, Vocabulary._DESERIALIZER, "vocabulary"); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/TextExpansionInferenceOptions.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/TextExpansionInferenceOptions.java index bb434cefd..797e8a13c 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/TextExpansionInferenceOptions.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/TextExpansionInferenceOptions.java @@ -26,6 +26,7 @@ import co.elastic.clients.json.JsonpUtils; import co.elastic.clients.json.ObjectBuilderDeserializer; import co.elastic.clients.json.ObjectDeserializer; +import co.elastic.clients.util.ApiTypeHelper; import co.elastic.clients.util.ObjectBuilder; import co.elastic.clients.util.WithJsonObjectBuilderBase; import jakarta.json.stream.JsonGenerator; @@ -66,12 +67,15 @@ public class TextExpansionInferenceOptions implements InferenceConfigCreateVaria @Nullable private final String resultsField; + private final Vocabulary vocabulary; + // 
--------------------------------------------------------------------------------------------- private TextExpansionInferenceOptions(Builder builder) { this.tokenization = builder.tokenization; this.resultsField = builder.resultsField; + this.vocabulary = ApiTypeHelper.requireNonNull(builder.vocabulary, this, "vocabulary"); } @@ -108,6 +112,13 @@ public final String resultsField() { return this.resultsField; } + /** + * Required - API name: {@code vocabulary} + */ + public final Vocabulary vocabulary() { + return this.vocabulary; + } + /** * Serialize this object to JSON. */ @@ -129,6 +140,8 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { generator.write(this.resultsField); } + generator.writeKey("vocabulary"); + this.vocabulary.serialize(generator, mapper); } @@ -152,6 +165,8 @@ public static class Builder extends WithJsonObjectBuilderBase @Nullable private String resultsField; + private Vocabulary vocabulary; + /** * The tokenization options *

@@ -182,6 +197,21 @@ public final Builder resultsField(@Nullable String value) { return this; } + /** + * Required - API name: {@code vocabulary} + */ + public final Builder vocabulary(Vocabulary value) { + this.vocabulary = value; + return this; + } + + /** + * Required - API name: {@code vocabulary} + */ + public final Builder vocabulary(Function> fn) { + return this.vocabulary(fn.apply(new Vocabulary.Builder()).build()); + } + @Override protected Builder self() { return this; @@ -213,6 +243,7 @@ protected static void setupTextExpansionInferenceOptionsDeserializer( op.add(Builder::tokenization, TokenizationConfig._DESERIALIZER, "tokenization"); op.add(Builder::resultsField, JsonpDeserializer.stringDeserializer(), "results_field"); + op.add(Builder::vocabulary, Vocabulary._DESERIALIZER, "vocabulary"); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/TokenizationConfig.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/TokenizationConfig.java index 3307d59fa..854c0c3f0 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/TokenizationConfig.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/TokenizationConfig.java @@ -76,6 +76,8 @@ public class TokenizationConfig implements TaggedUnion bert( return this.bert(fn.apply(new NlpBertTokenizationConfig.Builder()).build()); } + public ObjectBuilder bertJa(NlpBertTokenizationConfig v) { + this._kind = Kind.BertJa; + this._value = v; + return this; + } + + public ObjectBuilder bertJa( + Function> fn) { + return this.bertJa(fn.apply(new NlpBertTokenizationConfig.Builder()).build()); + } + public ObjectBuilder mpnet(NlpBertTokenizationConfig v) { this._kind = Kind.Mpnet; this._value = v; @@ -249,6 +279,7 @@ public TokenizationConfig build() { protected static void setupTokenizationConfigDeserializer(ObjectDeserializer op) { op.add(Builder::bert, NlpBertTokenizationConfig._DESERIALIZER, "bert"); + op.add(Builder::bertJa, 
NlpBertTokenizationConfig._DESERIALIZER, "bert_ja"); op.add(Builder::mpnet, NlpBertTokenizationConfig._DESERIALIZER, "mpnet"); op.add(Builder::roberta, NlpRobertaTokenizationConfig._DESERIALIZER, "roberta"); diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/TokenizationConfigBuilders.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/TokenizationConfigBuilders.java index b9259153e..9c50e0beb 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/TokenizationConfigBuilders.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/TokenizationConfigBuilders.java @@ -63,6 +63,25 @@ public static TokenizationConfig bert( return builder.build(); } + /** + * Creates a builder for the {@link NlpBertTokenizationConfig bert_ja} + * {@code TokenizationConfig} variant. + */ + public static NlpBertTokenizationConfig.Builder bertJa() { + return new NlpBertTokenizationConfig.Builder(); + } + + /** + * Creates a TokenizationConfig of the {@link NlpBertTokenizationConfig bert_ja} + * {@code TokenizationConfig} variant. + */ + public static TokenizationConfig bertJa( + Function> fn) { + TokenizationConfig.Builder builder = new TokenizationConfig.Builder(); + builder.bertJa(fn.apply(new NlpBertTokenizationConfig.Builder()).build()); + return builder.build(); + } + /** * Creates a builder for the {@link NlpBertTokenizationConfig mpnet} * {@code TokenizationConfig} variant. 
diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/TrainedModelAssignment.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/TrainedModelAssignment.java index 3d2416596..d0f4aa016 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/TrainedModelAssignment.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/TrainedModelAssignment.java @@ -63,11 +63,17 @@ */ @JsonpDeserializable public class TrainedModelAssignment implements JsonpSerializable { + @Nullable + private final AdaptiveAllocationsSettings adaptiveAllocations; + private final DeploymentAssignmentState assignmentState; @Nullable private final Integer maxAssignedAllocations; + @Nullable + private final String reason; + private final Map routingTable; private final DateTime startTime; @@ -78,8 +84,10 @@ public class TrainedModelAssignment implements JsonpSerializable { private TrainedModelAssignment(Builder builder) { + this.adaptiveAllocations = builder.adaptiveAllocations; this.assignmentState = ApiTypeHelper.requireNonNull(builder.assignmentState, this, "assignmentState"); this.maxAssignedAllocations = builder.maxAssignedAllocations; + this.reason = builder.reason; this.routingTable = ApiTypeHelper.unmodifiableRequired(builder.routingTable, this, "routingTable"); this.startTime = ApiTypeHelper.requireNonNull(builder.startTime, this, "startTime"); this.taskParameters = ApiTypeHelper.requireNonNull(builder.taskParameters, this, "taskParameters"); @@ -90,6 +98,14 @@ public static TrainedModelAssignment of(Function @@ -107,6 +123,14 @@ public final Integer maxAssignedAllocations() { return this.maxAssignedAllocations; } + /** + * API name: {@code reason} + */ + @Nullable + public final String reason() { + return this.reason; + } + /** * Required - The allocation state for each node. *

@@ -143,12 +167,22 @@ public void serialize(JsonGenerator generator, JsonpMapper mapper) { protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + if (this.adaptiveAllocations != null) { + generator.writeKey("adaptive_allocations"); + this.adaptiveAllocations.serialize(generator, mapper); + + } generator.writeKey("assignment_state"); this.assignmentState.serialize(generator, mapper); if (this.maxAssignedAllocations != null) { generator.writeKey("max_assigned_allocations"); generator.write(this.maxAssignedAllocations); + } + if (this.reason != null) { + generator.writeKey("reason"); + generator.write(this.reason); + } if (ApiTypeHelper.isDefined(this.routingTable)) { generator.writeKey("routing_table"); @@ -182,17 +216,39 @@ public String toString() { public static class Builder extends WithJsonObjectBuilderBase implements ObjectBuilder { + @Nullable + private AdaptiveAllocationsSettings adaptiveAllocations; + private DeploymentAssignmentState assignmentState; @Nullable private Integer maxAssignedAllocations; + @Nullable + private String reason; + private Map routingTable; private DateTime startTime; private TrainedModelAssignmentTaskParameters taskParameters; + /** + * API name: {@code adaptive_allocations} + */ + public final Builder adaptiveAllocations(@Nullable AdaptiveAllocationsSettings value) { + this.adaptiveAllocations = value; + return this; + } + + /** + * API name: {@code adaptive_allocations} + */ + public final Builder adaptiveAllocations( + Function> fn) { + return this.adaptiveAllocations(fn.apply(new AdaptiveAllocationsSettings.Builder()).build()); + } + /** * Required - The overall assignment state. *

@@ -211,6 +267,14 @@ public final Builder maxAssignedAllocations(@Nullable Integer value) { return this; } + /** + * API name: {@code reason} + */ + public final Builder reason(@Nullable String value) { + this.reason = value; + return this; + } + /** * Required - The allocation state for each node. *

@@ -302,8 +366,10 @@ public TrainedModelAssignment build() { protected static void setupTrainedModelAssignmentDeserializer( ObjectDeserializer op) { + op.add(Builder::adaptiveAllocations, AdaptiveAllocationsSettings._DESERIALIZER, "adaptive_allocations"); op.add(Builder::assignmentState, DeploymentAssignmentState._DESERIALIZER, "assignment_state"); op.add(Builder::maxAssignedAllocations, JsonpDeserializer.integerDeserializer(), "max_assigned_allocations"); + op.add(Builder::reason, JsonpDeserializer.stringDeserializer(), "reason"); op.add(Builder::routingTable, JsonpDeserializer.stringMapDeserializer(TrainedModelAssignmentRoutingTable._DESERIALIZER), "routing_table"); diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/TrainedModelAssignmentRoutingTable.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/TrainedModelAssignmentRoutingTable.java index 9baf48b0e..b2b08082b 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/TrainedModelAssignmentRoutingTable.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/TrainedModelAssignmentRoutingTable.java @@ -61,6 +61,7 @@ */ @JsonpDeserializable public class TrainedModelAssignmentRoutingTable implements JsonpSerializable { + @Nullable private final String reason; private final RoutingState routingState; @@ -73,7 +74,7 @@ public class TrainedModelAssignmentRoutingTable implements JsonpSerializable { private TrainedModelAssignmentRoutingTable(Builder builder) { - this.reason = ApiTypeHelper.requireNonNull(builder.reason, this, "reason"); + this.reason = builder.reason; this.routingState = ApiTypeHelper.requireNonNull(builder.routingState, this, "routingState"); this.currentAllocations = ApiTypeHelper.requireNonNull(builder.currentAllocations, this, "currentAllocations"); this.targetAllocations = ApiTypeHelper.requireNonNull(builder.targetAllocations, this, "targetAllocations"); @@ -86,11 +87,12 @@ public static 
TrainedModelAssignmentRoutingTable of( } /** - * Required - The reason for the current state. It is usually populated only - * when the routing_state is failed. + * The reason for the current state. It is usually populated only when the + * routing_state is failed. *

* API name: {@code reason} */ + @Nullable public final String reason() { return this.reason; } @@ -133,9 +135,11 @@ public void serialize(JsonGenerator generator, JsonpMapper mapper) { protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { - generator.writeKey("reason"); - generator.write(this.reason); + if (this.reason != null) { + generator.writeKey("reason"); + generator.write(this.reason); + } generator.writeKey("routing_state"); this.routingState.serialize(generator, mapper); generator.writeKey("current_allocations"); @@ -160,6 +164,7 @@ public String toString() { public static class Builder extends WithJsonObjectBuilderBase implements ObjectBuilder { + @Nullable private String reason; private RoutingState routingState; @@ -169,12 +174,12 @@ public static class Builder extends WithJsonObjectBuilderBase private Integer targetAllocations; /** - * Required - The reason for the current state. It is usually populated only - * when the routing_state is failed. + * The reason for the current state. It is usually populated only when the + * routing_state is failed. *

* API name: {@code reason} */ - public final Builder reason(String value) { + public final Builder reason(@Nullable String value) { this.reason = value; return this; } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/TrainedModelAssignmentTaskParameters.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/TrainedModelAssignmentTaskParameters.java index 7d6adc82b..2dbac95d5 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/TrainedModelAssignmentTaskParameters.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/TrainedModelAssignmentTaskParameters.java @@ -61,18 +61,23 @@ */ @JsonpDeserializable public class TrainedModelAssignmentTaskParameters implements JsonpSerializable { - private final int modelBytes; + private final String modelBytes; private final String modelId; private final String deploymentId; + @Nullable private final String cacheSize; private final int numberOfAllocations; private final TrainingPriority priority; + private final String perDeploymentMemoryBytes; + + private final String perAllocationMemoryBytes; + private final int queueCapacity; private final int threadsPerAllocation; @@ -84,10 +89,14 @@ private TrainedModelAssignmentTaskParameters(Builder builder) { this.modelBytes = ApiTypeHelper.requireNonNull(builder.modelBytes, this, "modelBytes"); this.modelId = ApiTypeHelper.requireNonNull(builder.modelId, this, "modelId"); this.deploymentId = ApiTypeHelper.requireNonNull(builder.deploymentId, this, "deploymentId"); - this.cacheSize = ApiTypeHelper.requireNonNull(builder.cacheSize, this, "cacheSize"); + this.cacheSize = builder.cacheSize; this.numberOfAllocations = ApiTypeHelper.requireNonNull(builder.numberOfAllocations, this, "numberOfAllocations"); this.priority = ApiTypeHelper.requireNonNull(builder.priority, this, "priority"); + this.perDeploymentMemoryBytes = ApiTypeHelper.requireNonNull(builder.perDeploymentMemoryBytes, this, + 
"perDeploymentMemoryBytes"); + this.perAllocationMemoryBytes = ApiTypeHelper.requireNonNull(builder.perAllocationMemoryBytes, this, + "perAllocationMemoryBytes"); this.queueCapacity = ApiTypeHelper.requireNonNull(builder.queueCapacity, this, "queueCapacity"); this.threadsPerAllocation = ApiTypeHelper.requireNonNull(builder.threadsPerAllocation, this, "threadsPerAllocation"); @@ -104,7 +113,7 @@ public static TrainedModelAssignmentTaskParameters of( *

* API name: {@code model_bytes} */ - public final int modelBytes() { + public final String modelBytes() { return this.modelBytes; } @@ -127,10 +136,11 @@ public final String deploymentId() { } /** - * Required - The size of the trained model cache. + * The size of the trained model cache. *

* API name: {@code cache_size} */ + @Nullable public final String cacheSize() { return this.cacheSize; } @@ -152,6 +162,20 @@ public final TrainingPriority priority() { return this.priority; } + /** + * Required - API name: {@code per_deployment_memory_bytes} + */ + public final String perDeploymentMemoryBytes() { + return this.perDeploymentMemoryBytes; + } + + /** + * Required - API name: {@code per_allocation_memory_bytes} + */ + public final String perAllocationMemoryBytes() { + return this.perAllocationMemoryBytes; + } + /** * Required - Number of inference requests are allowed in the queue at a time. *

@@ -190,14 +214,22 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { generator.writeKey("deployment_id"); generator.write(this.deploymentId); - generator.writeKey("cache_size"); - generator.write(this.cacheSize); + if (this.cacheSize != null) { + generator.writeKey("cache_size"); + generator.write(this.cacheSize); + } generator.writeKey("number_of_allocations"); generator.write(this.numberOfAllocations); generator.writeKey("priority"); this.priority.serialize(generator, mapper); + generator.writeKey("per_deployment_memory_bytes"); + generator.write(this.perDeploymentMemoryBytes); + + generator.writeKey("per_allocation_memory_bytes"); + generator.write(this.perAllocationMemoryBytes); + generator.writeKey("queue_capacity"); generator.write(this.queueCapacity); @@ -220,18 +252,23 @@ public String toString() { public static class Builder extends WithJsonObjectBuilderBase implements ObjectBuilder { - private Integer modelBytes; + private String modelBytes; private String modelId; private String deploymentId; + @Nullable private String cacheSize; private Integer numberOfAllocations; private TrainingPriority priority; + private String perDeploymentMemoryBytes; + + private String perAllocationMemoryBytes; + private Integer queueCapacity; private Integer threadsPerAllocation; @@ -241,7 +278,7 @@ public static class Builder extends WithJsonObjectBuilderBase *

* API name: {@code model_bytes} */ - public final Builder modelBytes(int value) { + public final Builder modelBytes(String value) { this.modelBytes = value; return this; } @@ -267,11 +304,11 @@ public final Builder deploymentId(String value) { } /** - * Required - The size of the trained model cache. + * The size of the trained model cache. *

* API name: {@code cache_size} */ - public final Builder cacheSize(String value) { + public final Builder cacheSize(@Nullable String value) { this.cacheSize = value; return this; } @@ -295,6 +332,22 @@ public final Builder priority(TrainingPriority value) { return this; } + /** + * Required - API name: {@code per_deployment_memory_bytes} + */ + public final Builder perDeploymentMemoryBytes(String value) { + this.perDeploymentMemoryBytes = value; + return this; + } + + /** + * Required - API name: {@code per_allocation_memory_bytes} + */ + public final Builder perAllocationMemoryBytes(String value) { + this.perAllocationMemoryBytes = value; + return this; + } + /** * Required - Number of inference requests are allowed in the queue at a time. *

@@ -345,12 +398,16 @@ public TrainedModelAssignmentTaskParameters build() { protected static void setupTrainedModelAssignmentTaskParametersDeserializer( ObjectDeserializer op) { - op.add(Builder::modelBytes, JsonpDeserializer.integerDeserializer(), "model_bytes"); + op.add(Builder::modelBytes, JsonpDeserializer.stringDeserializer(), "model_bytes"); op.add(Builder::modelId, JsonpDeserializer.stringDeserializer(), "model_id"); op.add(Builder::deploymentId, JsonpDeserializer.stringDeserializer(), "deployment_id"); op.add(Builder::cacheSize, JsonpDeserializer.stringDeserializer(), "cache_size"); op.add(Builder::numberOfAllocations, JsonpDeserializer.integerDeserializer(), "number_of_allocations"); op.add(Builder::priority, TrainingPriority._DESERIALIZER, "priority"); + op.add(Builder::perDeploymentMemoryBytes, JsonpDeserializer.stringDeserializer(), + "per_deployment_memory_bytes"); + op.add(Builder::perAllocationMemoryBytes, JsonpDeserializer.stringDeserializer(), + "per_allocation_memory_bytes"); op.add(Builder::queueCapacity, JsonpDeserializer.integerDeserializer(), "queue_capacity"); op.add(Builder::threadsPerAllocation, JsonpDeserializer.integerDeserializer(), "threads_per_allocation"); diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/TrainedModelConfig.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/TrainedModelConfig.java index 7340a8e2f..e0a7518d4 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/TrainedModelConfig.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/TrainedModelConfig.java @@ -111,6 +111,9 @@ public class TrainedModelConfig implements JsonpSerializable { @Nullable private final String modelSizeBytes; + @Nullable + private final ModelPackageConfig modelPackage; + @Nullable private final TrainedModelLocation location; @@ -138,6 +141,7 @@ protected TrainedModelConfig(AbstractBuilder builder) { this.licenseLevel = builder.licenseLevel; this.metadata = 
builder.metadata; this.modelSizeBytes = builder.modelSizeBytes; + this.modelPackage = builder.modelPackage; this.location = builder.location; this.prefixStrings = builder.prefixStrings; @@ -314,6 +318,14 @@ public final String modelSizeBytes() { return this.modelSizeBytes; } + /** + * API name: {@code model_package} + */ + @Nullable + public final ModelPackageConfig modelPackage() { + return this.modelPackage; + } + /** * API name: {@code location} */ @@ -430,6 +442,11 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { generator.writeKey("model_size_bytes"); generator.write(this.modelSizeBytes); + } + if (this.modelPackage != null) { + generator.writeKey("model_package"); + this.modelPackage.serialize(generator, mapper); + } if (this.location != null) { generator.writeKey("location"); @@ -527,6 +544,9 @@ public abstract static class AbstractBuilder> fn) { + return this.modelPackage(fn.apply(new ModelPackageConfig.Builder()).build()); + } + /** * API name: {@code location} */ @@ -833,6 +868,7 @@ protected static > void setupTrainedM op.add(AbstractBuilder::licenseLevel, JsonpDeserializer.stringDeserializer(), "license_level"); op.add(AbstractBuilder::metadata, TrainedModelConfigMetadata._DESERIALIZER, "metadata"); op.add(AbstractBuilder::modelSizeBytes, JsonpDeserializer.stringDeserializer(), "model_size_bytes"); + op.add(AbstractBuilder::modelPackage, ModelPackageConfig._DESERIALIZER, "model_package"); op.add(AbstractBuilder::location, TrainedModelLocation._DESERIALIZER, "location"); op.add(AbstractBuilder::prefixStrings, TrainedModelPrefixStrings._DESERIALIZER, "prefix_strings"); diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/TrainedModelDeploymentNodesStats.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/TrainedModelDeploymentNodesStats.java index 75b18cdbf..b105d3127 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/TrainedModelDeploymentNodesStats.java +++ 
b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/TrainedModelDeploymentNodesStats.java @@ -62,51 +62,81 @@ */ @JsonpDeserializable public class TrainedModelDeploymentNodesStats implements JsonpSerializable { - private final double averageInferenceTimeMs; + @Nullable + private final Double averageInferenceTimeMs; - private final int errorCount; + @Nullable + private final Double averageInferenceTimeMsLastMinute; - private final int inferenceCount; + @Nullable + private final Double averageInferenceTimeMsExcludingCacheHits; - private final long lastAccess; + @Nullable + private final Integer errorCount; - private final DiscoveryNode node; + @Nullable + private final Long inferenceCount; - private final int numberOfAllocations; + @Nullable + private final Long inferenceCacheHitCount; - private final int numberOfPendingRequests; + @Nullable + private final Long inferenceCacheHitCountLastMinute; - private final int rejectionExecutionCount; + @Nullable + private final Long lastAccess; + + @Nullable + private final DiscoveryNodeContent node; + + @Nullable + private final Integer numberOfAllocations; + + @Nullable + private final Integer numberOfPendingRequests; + + private final long peakThroughputPerMinute; + + @Nullable + private final Integer rejectionExecutionCount; private final TrainedModelAssignmentRoutingTable routingState; - private final long startTime; + @Nullable + private final Long startTime; + + @Nullable + private final Integer threadsPerAllocation; - private final int threadsPerAllocation; + private final int throughputLastMinute; - private final int timeoutCount; + @Nullable + private final Integer timeoutCount; // --------------------------------------------------------------------------------------------- private TrainedModelDeploymentNodesStats(Builder builder) { - this.averageInferenceTimeMs = ApiTypeHelper.requireNonNull(builder.averageInferenceTimeMs, this, - "averageInferenceTimeMs"); - this.errorCount = 
ApiTypeHelper.requireNonNull(builder.errorCount, this, "errorCount"); - this.inferenceCount = ApiTypeHelper.requireNonNull(builder.inferenceCount, this, "inferenceCount"); - this.lastAccess = ApiTypeHelper.requireNonNull(builder.lastAccess, this, "lastAccess"); - this.node = ApiTypeHelper.requireNonNull(builder.node, this, "node"); - this.numberOfAllocations = ApiTypeHelper.requireNonNull(builder.numberOfAllocations, this, - "numberOfAllocations"); - this.numberOfPendingRequests = ApiTypeHelper.requireNonNull(builder.numberOfPendingRequests, this, - "numberOfPendingRequests"); - this.rejectionExecutionCount = ApiTypeHelper.requireNonNull(builder.rejectionExecutionCount, this, - "rejectionExecutionCount"); + this.averageInferenceTimeMs = builder.averageInferenceTimeMs; + this.averageInferenceTimeMsLastMinute = builder.averageInferenceTimeMsLastMinute; + this.averageInferenceTimeMsExcludingCacheHits = builder.averageInferenceTimeMsExcludingCacheHits; + this.errorCount = builder.errorCount; + this.inferenceCount = builder.inferenceCount; + this.inferenceCacheHitCount = builder.inferenceCacheHitCount; + this.inferenceCacheHitCountLastMinute = builder.inferenceCacheHitCountLastMinute; + this.lastAccess = builder.lastAccess; + this.node = builder.node; + this.numberOfAllocations = builder.numberOfAllocations; + this.numberOfPendingRequests = builder.numberOfPendingRequests; + this.peakThroughputPerMinute = ApiTypeHelper.requireNonNull(builder.peakThroughputPerMinute, this, + "peakThroughputPerMinute"); + this.rejectionExecutionCount = builder.rejectionExecutionCount; this.routingState = ApiTypeHelper.requireNonNull(builder.routingState, this, "routingState"); - this.startTime = ApiTypeHelper.requireNonNull(builder.startTime, this, "startTime"); - this.threadsPerAllocation = ApiTypeHelper.requireNonNull(builder.threadsPerAllocation, this, - "threadsPerAllocation"); - this.timeoutCount = ApiTypeHelper.requireNonNull(builder.timeoutCount, this, "timeoutCount"); + 
this.startTime = builder.startTime; + this.threadsPerAllocation = builder.threadsPerAllocation; + this.throughputLastMinute = ApiTypeHelper.requireNonNull(builder.throughputLastMinute, this, + "throughputLastMinute"); + this.timeoutCount = builder.timeoutCount; } @@ -116,77 +146,125 @@ public static TrainedModelDeploymentNodesStats of( } /** - * Required - The average time for each inference call to complete on this node. + * The average time for each inference call to complete on this node. *

* API name: {@code average_inference_time_ms} */ - public final double averageInferenceTimeMs() { + @Nullable + public final Double averageInferenceTimeMs() { return this.averageInferenceTimeMs; } /** - * Required - The number of errors when evaluating the trained model. + * API name: {@code average_inference_time_ms_last_minute} + */ + @Nullable + public final Double averageInferenceTimeMsLastMinute() { + return this.averageInferenceTimeMsLastMinute; + } + + /** + * The average time for each inference call to complete on this node, excluding + * cache + *

+ * API name: {@code average_inference_time_ms_excluding_cache_hits} + */ + @Nullable + public final Double averageInferenceTimeMsExcludingCacheHits() { + return this.averageInferenceTimeMsExcludingCacheHits; + } + + /** + * The number of errors when evaluating the trained model. *

* API name: {@code error_count} */ - public final int errorCount() { + @Nullable + public final Integer errorCount() { return this.errorCount; } /** - * Required - The total number of inference calls made against this node for - * this model. + * The total number of inference calls made against this node for this model. *

* API name: {@code inference_count} */ - public final int inferenceCount() { + @Nullable + public final Long inferenceCount() { return this.inferenceCount; } /** - * Required - The epoch time stamp of the last inference call for the model on - * this node. + * API name: {@code inference_cache_hit_count} + */ + @Nullable + public final Long inferenceCacheHitCount() { + return this.inferenceCacheHitCount; + } + + /** + * API name: {@code inference_cache_hit_count_last_minute} + */ + @Nullable + public final Long inferenceCacheHitCountLastMinute() { + return this.inferenceCacheHitCountLastMinute; + } + + /** + * The epoch time stamp of the last inference call for the model on this node. *

* API name: {@code last_access} */ - public final long lastAccess() { + @Nullable + public final Long lastAccess() { return this.lastAccess; } /** - * Required - Information pertaining to the node. + * Information pertaining to the node. *

* API name: {@code node} */ - public final DiscoveryNode node() { + @Nullable + public final DiscoveryNodeContent node() { return this.node; } /** - * Required - The number of allocations assigned to this node. + * The number of allocations assigned to this node. *

* API name: {@code number_of_allocations} */ - public final int numberOfAllocations() { + @Nullable + public final Integer numberOfAllocations() { return this.numberOfAllocations; } /** - * Required - The number of inference requests queued to be processed. + * The number of inference requests queued to be processed. *

* API name: {@code number_of_pending_requests} */ - public final int numberOfPendingRequests() { + @Nullable + public final Integer numberOfPendingRequests() { return this.numberOfPendingRequests; } /** - * Required - The number of inference requests that were not processed because - * the queue was full. + * Required - API name: {@code peak_throughput_per_minute} + */ + public final long peakThroughputPerMinute() { + return this.peakThroughputPerMinute; + } + + /** + * The number of inference requests that were not processed because the queue + * was full. *

* API name: {@code rejection_execution_count} */ - public final int rejectionExecutionCount() { + @Nullable + public final Integer rejectionExecutionCount() { return this.rejectionExecutionCount; } @@ -201,30 +279,39 @@ public final TrainedModelAssignmentRoutingTable routingState() { } /** - * Required - The epoch timestamp when the allocation started. + * The epoch timestamp when the allocation started. *

* API name: {@code start_time} */ - public final long startTime() { + @Nullable + public final Long startTime() { return this.startTime; } /** - * Required - The number of threads used by each allocation during inference. + * The number of threads used by each allocation during inference. *

* API name: {@code threads_per_allocation} */ - public final int threadsPerAllocation() { + @Nullable + public final Integer threadsPerAllocation() { return this.threadsPerAllocation; } /** - * Required - The number of inference requests that timed out before being - * processed. + * Required - API name: {@code throughput_last_minute} + */ + public final int throughputLastMinute() { + return this.throughputLastMinute; + } + + /** + * The number of inference requests that timed out before being processed. *

* API name: {@code timeout_count} */ - public final int timeoutCount() { + @Nullable + public final Integer timeoutCount() { return this.timeoutCount; } @@ -239,41 +326,90 @@ public void serialize(JsonGenerator generator, JsonpMapper mapper) { protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { - generator.writeKey("average_inference_time_ms"); - generator.write(this.averageInferenceTimeMs); + if (this.averageInferenceTimeMs != null) { + generator.writeKey("average_inference_time_ms"); + generator.write(this.averageInferenceTimeMs); + + } + if (this.averageInferenceTimeMsLastMinute != null) { + generator.writeKey("average_inference_time_ms_last_minute"); + generator.write(this.averageInferenceTimeMsLastMinute); + + } + if (this.averageInferenceTimeMsExcludingCacheHits != null) { + generator.writeKey("average_inference_time_ms_excluding_cache_hits"); + generator.write(this.averageInferenceTimeMsExcludingCacheHits); + + } + if (this.errorCount != null) { + generator.writeKey("error_count"); + generator.write(this.errorCount); - generator.writeKey("error_count"); - generator.write(this.errorCount); + } + if (this.inferenceCount != null) { + generator.writeKey("inference_count"); + generator.write(this.inferenceCount); - generator.writeKey("inference_count"); - generator.write(this.inferenceCount); + } + if (this.inferenceCacheHitCount != null) { + generator.writeKey("inference_cache_hit_count"); + generator.write(this.inferenceCacheHitCount); - generator.writeKey("last_access"); - generator.write(this.lastAccess); + } + if (this.inferenceCacheHitCountLastMinute != null) { + generator.writeKey("inference_cache_hit_count_last_minute"); + generator.write(this.inferenceCacheHitCountLastMinute); - generator.writeKey("node"); - this.node.serialize(generator, mapper); + } + if (this.lastAccess != null) { + generator.writeKey("last_access"); + generator.write(this.lastAccess); + + } + if (this.node != null) { + generator.writeKey("node"); + 
this.node.serialize(generator, mapper); + + } + if (this.numberOfAllocations != null) { + generator.writeKey("number_of_allocations"); + generator.write(this.numberOfAllocations); - generator.writeKey("number_of_allocations"); - generator.write(this.numberOfAllocations); + } + if (this.numberOfPendingRequests != null) { + generator.writeKey("number_of_pending_requests"); + generator.write(this.numberOfPendingRequests); - generator.writeKey("number_of_pending_requests"); - generator.write(this.numberOfPendingRequests); + } + generator.writeKey("peak_throughput_per_minute"); + generator.write(this.peakThroughputPerMinute); - generator.writeKey("rejection_execution_count"); - generator.write(this.rejectionExecutionCount); + if (this.rejectionExecutionCount != null) { + generator.writeKey("rejection_execution_count"); + generator.write(this.rejectionExecutionCount); + } generator.writeKey("routing_state"); this.routingState.serialize(generator, mapper); - generator.writeKey("start_time"); - generator.write(this.startTime); + if (this.startTime != null) { + generator.writeKey("start_time"); + generator.write(this.startTime); + + } + if (this.threadsPerAllocation != null) { + generator.writeKey("threads_per_allocation"); + generator.write(this.threadsPerAllocation); + + } + generator.writeKey("throughput_last_minute"); + generator.write(this.throughputLastMinute); - generator.writeKey("threads_per_allocation"); - generator.write(this.threadsPerAllocation); + if (this.timeoutCount != null) { + generator.writeKey("timeout_count"); + generator.write(this.timeoutCount); - generator.writeKey("timeout_count"); - generator.write(this.timeoutCount); + } } @@ -291,118 +427,186 @@ public String toString() { public static class Builder extends WithJsonObjectBuilderBase implements ObjectBuilder { + @Nullable private Double averageInferenceTimeMs; + @Nullable + private Double averageInferenceTimeMsLastMinute; + + @Nullable + private Double averageInferenceTimeMsExcludingCacheHits; + 
+ @Nullable private Integer errorCount; - private Integer inferenceCount; + @Nullable + private Long inferenceCount; + + @Nullable + private Long inferenceCacheHitCount; + + @Nullable + private Long inferenceCacheHitCountLastMinute; + @Nullable private Long lastAccess; - private DiscoveryNode node; + @Nullable + private DiscoveryNodeContent node; + @Nullable private Integer numberOfAllocations; + @Nullable private Integer numberOfPendingRequests; + private Long peakThroughputPerMinute; + + @Nullable private Integer rejectionExecutionCount; private TrainedModelAssignmentRoutingTable routingState; + @Nullable private Long startTime; + @Nullable private Integer threadsPerAllocation; + private Integer throughputLastMinute; + + @Nullable private Integer timeoutCount; /** - * Required - The average time for each inference call to complete on this node. + * The average time for each inference call to complete on this node. *

* API name: {@code average_inference_time_ms} */ - public final Builder averageInferenceTimeMs(double value) { + public final Builder averageInferenceTimeMs(@Nullable Double value) { this.averageInferenceTimeMs = value; return this; } /** - * Required - The number of errors when evaluating the trained model. + * API name: {@code average_inference_time_ms_last_minute} */ + public final Builder averageInferenceTimeMsLastMinute(@Nullable Double value) { + this.averageInferenceTimeMsLastMinute = value; + return this; + } + + /** + * The average time for each inference call to complete on this node, excluding + * cache hits + *

+ * API name: {@code average_inference_time_ms_excluding_cache_hits} + */ + public final Builder averageInferenceTimeMsExcludingCacheHits(@Nullable Double value) { + this.averageInferenceTimeMsExcludingCacheHits = value; + return this; + } + + /** + * The number of errors when evaluating the trained model. *

* API name: {@code error_count} */ - public final Builder errorCount(int value) { + public final Builder errorCount(@Nullable Integer value) { this.errorCount = value; return this; } /** - * Required - The total number of inference calls made against this node for - * this model. + * The total number of inference calls made against this node for this model. *

* API name: {@code inference_count} */ - public final Builder inferenceCount(int value) { + public final Builder inferenceCount(@Nullable Long value) { this.inferenceCount = value; return this; } /** - * Required - The epoch time stamp of the last inference call for the model on - * this node. + * API name: {@code inference_cache_hit_count} + */ + public final Builder inferenceCacheHitCount(@Nullable Long value) { + this.inferenceCacheHitCount = value; + return this; + } + + /** + * API name: {@code inference_cache_hit_count_last_minute} + */ + public final Builder inferenceCacheHitCountLastMinute(@Nullable Long value) { + this.inferenceCacheHitCountLastMinute = value; + return this; + } + + /** + * The epoch time stamp of the last inference call for the model on this node. *

* API name: {@code last_access} */ - public final Builder lastAccess(long value) { + public final Builder lastAccess(@Nullable Long value) { this.lastAccess = value; return this; } /** - * Required - Information pertaining to the node. + * Information pertaining to the node. *

* API name: {@code node} */ - public final Builder node(DiscoveryNode value) { + public final Builder node(@Nullable DiscoveryNodeContent value) { this.node = value; return this; } /** - * Required - Information pertaining to the node. + * Information pertaining to the node. *

* API name: {@code node} */ - public final Builder node(Function> fn) { - return this.node(fn.apply(new DiscoveryNode.Builder()).build()); + public final Builder node(Function> fn) { + return this.node(fn.apply(new DiscoveryNodeContent.Builder()).build()); } /** - * Required - The number of allocations assigned to this node. + * The number of allocations assigned to this node. *

* API name: {@code number_of_allocations} */ - public final Builder numberOfAllocations(int value) { + public final Builder numberOfAllocations(@Nullable Integer value) { this.numberOfAllocations = value; return this; } /** - * Required - The number of inference requests queued to be processed. + * The number of inference requests queued to be processed. *

* API name: {@code number_of_pending_requests} */ - public final Builder numberOfPendingRequests(int value) { + public final Builder numberOfPendingRequests(@Nullable Integer value) { this.numberOfPendingRequests = value; return this; } /** - * Required - The number of inference requests that were not processed because - * the queue was full. + * Required - API name: {@code peak_throughput_per_minute} + */ + public final Builder peakThroughputPerMinute(long value) { + this.peakThroughputPerMinute = value; + return this; + } + + /** + * The number of inference requests that were not processed because the queue + * was full. *

* API name: {@code rejection_execution_count} */ - public final Builder rejectionExecutionCount(int value) { + public final Builder rejectionExecutionCount(@Nullable Integer value) { this.rejectionExecutionCount = value; return this; } @@ -430,32 +634,39 @@ public final Builder routingState( } /** - * Required - The epoch timestamp when the allocation started. + * The epoch timestamp when the allocation started. *

* API name: {@code start_time} */ - public final Builder startTime(long value) { + public final Builder startTime(@Nullable Long value) { this.startTime = value; return this; } /** - * Required - The number of threads used by each allocation during inference. + * The number of threads used by each allocation during inference. *

* API name: {@code threads_per_allocation} */ - public final Builder threadsPerAllocation(int value) { + public final Builder threadsPerAllocation(@Nullable Integer value) { this.threadsPerAllocation = value; return this; } /** - * Required - The number of inference requests that timed out before being - * processed. + * Required - API name: {@code throughput_last_minute} + */ + public final Builder throughputLastMinute(int value) { + this.throughputLastMinute = value; + return this; + } + + /** + * The number of inference requests that timed out before being processed. *

* API name: {@code timeout_count} */ - public final Builder timeoutCount(int value) { + public final Builder timeoutCount(@Nullable Integer value) { this.timeoutCount = value; return this; } @@ -490,16 +701,25 @@ protected static void setupTrainedModelDeploymentNodesStatsDeserializer( ObjectDeserializer op) { op.add(Builder::averageInferenceTimeMs, JsonpDeserializer.doubleDeserializer(), "average_inference_time_ms"); + op.add(Builder::averageInferenceTimeMsLastMinute, JsonpDeserializer.doubleDeserializer(), + "average_inference_time_ms_last_minute"); + op.add(Builder::averageInferenceTimeMsExcludingCacheHits, JsonpDeserializer.doubleDeserializer(), + "average_inference_time_ms_excluding_cache_hits"); op.add(Builder::errorCount, JsonpDeserializer.integerDeserializer(), "error_count"); - op.add(Builder::inferenceCount, JsonpDeserializer.integerDeserializer(), "inference_count"); + op.add(Builder::inferenceCount, JsonpDeserializer.longDeserializer(), "inference_count"); + op.add(Builder::inferenceCacheHitCount, JsonpDeserializer.longDeserializer(), "inference_cache_hit_count"); + op.add(Builder::inferenceCacheHitCountLastMinute, JsonpDeserializer.longDeserializer(), + "inference_cache_hit_count_last_minute"); op.add(Builder::lastAccess, JsonpDeserializer.longDeserializer(), "last_access"); - op.add(Builder::node, DiscoveryNode._DESERIALIZER, "node"); + op.add(Builder::node, DiscoveryNodeContent._DESERIALIZER, "node"); op.add(Builder::numberOfAllocations, JsonpDeserializer.integerDeserializer(), "number_of_allocations"); op.add(Builder::numberOfPendingRequests, JsonpDeserializer.integerDeserializer(), "number_of_pending_requests"); + op.add(Builder::peakThroughputPerMinute, JsonpDeserializer.longDeserializer(), "peak_throughput_per_minute"); op.add(Builder::rejectionExecutionCount, JsonpDeserializer.integerDeserializer(), "rejection_execution_count"); op.add(Builder::routingState, TrainedModelAssignmentRoutingTable._DESERIALIZER, "routing_state"); 
op.add(Builder::startTime, JsonpDeserializer.longDeserializer(), "start_time"); op.add(Builder::threadsPerAllocation, JsonpDeserializer.integerDeserializer(), "threads_per_allocation"); + op.add(Builder::throughputLastMinute, JsonpDeserializer.integerDeserializer(), "throughput_last_minute"); op.add(Builder::timeoutCount, JsonpDeserializer.integerDeserializer(), "timeout_count"); } diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/TrainedModelDeploymentStats.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/TrainedModelDeploymentStats.java index 4c39c2806..4d3779f31 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/TrainedModelDeploymentStats.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/TrainedModelDeploymentStats.java @@ -63,6 +63,10 @@ */ @JsonpDeserializable public class TrainedModelDeploymentStats implements JsonpSerializable { + @Nullable + private final AdaptiveAllocationsSettings adaptiveAllocations; + + @Nullable private final TrainedModelDeploymentAllocationStatus allocationStatus; @Nullable @@ -70,52 +74,66 @@ public class TrainedModelDeploymentStats implements JsonpSerializable { private final String deploymentId; - private final int errorCount; + @Nullable + private final Integer errorCount; - private final int inferenceCount; + @Nullable + private final Integer inferenceCount; private final String modelId; private final List nodes; - private final int numberOfAllocations; + @Nullable + private final Integer numberOfAllocations; - private final int queueCapacity; + private final long peakThroughputPerMinute; - private final int rejectedExecutionCount; + private final TrainingPriority priority; + @Nullable + private final Integer queueCapacity; + + @Nullable + private final Integer rejectedExecutionCount; + + @Nullable private final String reason; private final long startTime; + @Nullable private final DeploymentAssignmentState state; - private final int 
threadsPerAllocation; + @Nullable + private final Integer threadsPerAllocation; - private final int timeoutCount; + @Nullable + private final Integer timeoutCount; // --------------------------------------------------------------------------------------------- private TrainedModelDeploymentStats(Builder builder) { - this.allocationStatus = ApiTypeHelper.requireNonNull(builder.allocationStatus, this, "allocationStatus"); + this.adaptiveAllocations = builder.adaptiveAllocations; + this.allocationStatus = builder.allocationStatus; this.cacheSize = builder.cacheSize; this.deploymentId = ApiTypeHelper.requireNonNull(builder.deploymentId, this, "deploymentId"); - this.errorCount = ApiTypeHelper.requireNonNull(builder.errorCount, this, "errorCount"); - this.inferenceCount = ApiTypeHelper.requireNonNull(builder.inferenceCount, this, "inferenceCount"); + this.errorCount = builder.errorCount; + this.inferenceCount = builder.inferenceCount; this.modelId = ApiTypeHelper.requireNonNull(builder.modelId, this, "modelId"); this.nodes = ApiTypeHelper.unmodifiableRequired(builder.nodes, this, "nodes"); - this.numberOfAllocations = ApiTypeHelper.requireNonNull(builder.numberOfAllocations, this, - "numberOfAllocations"); - this.queueCapacity = ApiTypeHelper.requireNonNull(builder.queueCapacity, this, "queueCapacity"); - this.rejectedExecutionCount = ApiTypeHelper.requireNonNull(builder.rejectedExecutionCount, this, - "rejectedExecutionCount"); - this.reason = ApiTypeHelper.requireNonNull(builder.reason, this, "reason"); + this.numberOfAllocations = builder.numberOfAllocations; + this.peakThroughputPerMinute = ApiTypeHelper.requireNonNull(builder.peakThroughputPerMinute, this, + "peakThroughputPerMinute"); + this.priority = ApiTypeHelper.requireNonNull(builder.priority, this, "priority"); + this.queueCapacity = builder.queueCapacity; + this.rejectedExecutionCount = builder.rejectedExecutionCount; + this.reason = builder.reason; this.startTime = 
ApiTypeHelper.requireNonNull(builder.startTime, this, "startTime"); - this.state = ApiTypeHelper.requireNonNull(builder.state, this, "state"); - this.threadsPerAllocation = ApiTypeHelper.requireNonNull(builder.threadsPerAllocation, this, - "threadsPerAllocation"); - this.timeoutCount = ApiTypeHelper.requireNonNull(builder.timeoutCount, this, "timeoutCount"); + this.state = builder.state; + this.threadsPerAllocation = builder.threadsPerAllocation; + this.timeoutCount = builder.timeoutCount; } @@ -124,10 +142,19 @@ public static TrainedModelDeploymentStats of(Function * API name: {@code allocation_status} */ + @Nullable public final TrainedModelDeploymentAllocationStatus allocationStatus() { return this.allocationStatus; } @@ -150,22 +177,22 @@ public final String deploymentId() { } /** - * Required - The sum of error_count for all nodes in the - * deployment. + * The sum of error_count for all nodes in the deployment. *

* API name: {@code error_count} */ - public final int errorCount() { + @Nullable + public final Integer errorCount() { return this.errorCount; } /** - * Required - The sum of inference_count for all nodes in the - * deployment. + * The sum of inference_count for all nodes in the deployment. *

* API name: {@code inference_count} */ - public final int inferenceCount() { + @Nullable + public final Integer inferenceCount() { return this.inferenceCount; } @@ -190,43 +217,61 @@ public final List nodes() { } /** - * Required - The number of allocations requested. + * The number of allocations requested. *

* API name: {@code number_of_allocations} */ - public final int numberOfAllocations() { + @Nullable + public final Integer numberOfAllocations() { return this.numberOfAllocations; } /** - * Required - The number of inference requests that can be queued before new - * requests are rejected. + * Required - API name: {@code peak_throughput_per_minute} + */ + public final long peakThroughputPerMinute() { + return this.peakThroughputPerMinute; + } + + /** + * Required - API name: {@code priority} + */ + public final TrainingPriority priority() { + return this.priority; + } + + /** + * The number of inference requests that can be queued before new requests are + * rejected. *

* API name: {@code queue_capacity} */ - public final int queueCapacity() { + @Nullable + public final Integer queueCapacity() { return this.queueCapacity; } /** - * Required - The sum of rejected_execution_count for all nodes in - * the deployment. Individual nodes reject an inference request if the inference + * The sum of rejected_execution_count for all nodes in the + * deployment. Individual nodes reject an inference request if the inference * queue is full. The queue size is controlled by the * queue_capacity setting in the start trained model deployment * API. *

* API name: {@code rejected_execution_count} */ - public final int rejectedExecutionCount() { + @Nullable + public final Integer rejectedExecutionCount() { return this.rejectedExecutionCount; } /** - * Required - The reason for the current deployment state. Usually only - * populated when the model is not deployed to a node. + * The reason for the current deployment state. Usually only populated when the + * model is not deployed to a node. *

* API name: {@code reason} */ + @Nullable public final String reason() { return this.reason; } @@ -241,30 +286,32 @@ public final long startTime() { } /** - * Required - The overall state of the deployment. + * The overall state of the deployment. *

* API name: {@code state} */ + @Nullable public final DeploymentAssignmentState state() { return this.state; } /** - * Required - The number of threads used be each allocation during inference. + * The number of threads used by each allocation during inference. *

* API name: {@code threads_per_allocation} */ - public final int threadsPerAllocation() { + @Nullable + public final Integer threadsPerAllocation() { return this.threadsPerAllocation; } /** - * Required - The sum of timeout_count for all nodes in the - * deployment. + * The sum of timeout_count for all nodes in the deployment. *

* API name: {@code timeout_count} */ - public final int timeoutCount() { + @Nullable + public final Integer timeoutCount() { return this.timeoutCount; } @@ -279,9 +326,16 @@ public void serialize(JsonGenerator generator, JsonpMapper mapper) { protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { - generator.writeKey("allocation_status"); - this.allocationStatus.serialize(generator, mapper); + if (this.adaptiveAllocations != null) { + generator.writeKey("adaptive_allocations"); + this.adaptiveAllocations.serialize(generator, mapper); + } + if (this.allocationStatus != null) { + generator.writeKey("allocation_status"); + this.allocationStatus.serialize(generator, mapper); + + } if (this.cacheSize != null) { generator.writeKey("cache_size"); generator.write(this.cacheSize); @@ -290,12 +344,16 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { generator.writeKey("deployment_id"); generator.write(this.deploymentId); - generator.writeKey("error_count"); - generator.write(this.errorCount); + if (this.errorCount != null) { + generator.writeKey("error_count"); + generator.write(this.errorCount); - generator.writeKey("inference_count"); - generator.write(this.inferenceCount); + } + if (this.inferenceCount != null) { + generator.writeKey("inference_count"); + generator.write(this.inferenceCount); + } generator.writeKey("model_id"); generator.write(this.modelId); @@ -309,28 +367,48 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { generator.writeEnd(); } - generator.writeKey("number_of_allocations"); - generator.write(this.numberOfAllocations); + if (this.numberOfAllocations != null) { + generator.writeKey("number_of_allocations"); + generator.write(this.numberOfAllocations); - generator.writeKey("queue_capacity"); - generator.write(this.queueCapacity); + } + generator.writeKey("peak_throughput_per_minute"); + generator.write(this.peakThroughputPerMinute); + + 
generator.writeKey("priority"); + this.priority.serialize(generator, mapper); + if (this.queueCapacity != null) { + generator.writeKey("queue_capacity"); + generator.write(this.queueCapacity); - generator.writeKey("rejected_execution_count"); - generator.write(this.rejectedExecutionCount); + } + if (this.rejectedExecutionCount != null) { + generator.writeKey("rejected_execution_count"); + generator.write(this.rejectedExecutionCount); - generator.writeKey("reason"); - generator.write(this.reason); + } + if (this.reason != null) { + generator.writeKey("reason"); + generator.write(this.reason); + } generator.writeKey("start_time"); generator.write(this.startTime); - generator.writeKey("state"); - this.state.serialize(generator, mapper); - generator.writeKey("threads_per_allocation"); - generator.write(this.threadsPerAllocation); + if (this.state != null) { + generator.writeKey("state"); + this.state.serialize(generator, mapper); + } + if (this.threadsPerAllocation != null) { + generator.writeKey("threads_per_allocation"); + generator.write(this.threadsPerAllocation); - generator.writeKey("timeout_count"); - generator.write(this.timeoutCount); + } + if (this.timeoutCount != null) { + generator.writeKey("timeout_count"); + generator.write(this.timeoutCount); + + } } @@ -348,6 +426,10 @@ public String toString() { public static class Builder extends WithJsonObjectBuilderBase implements ObjectBuilder { + @Nullable + private AdaptiveAllocationsSettings adaptiveAllocations; + + @Nullable private TrainedModelDeploymentAllocationStatus allocationStatus; @Nullable @@ -355,42 +437,71 @@ public static class Builder extends WithJsonObjectBuilderBase private String deploymentId; + @Nullable private Integer errorCount; + @Nullable private Integer inferenceCount; private String modelId; private List nodes; + @Nullable private Integer numberOfAllocations; + private Long peakThroughputPerMinute; + + private TrainingPriority priority; + + @Nullable private Integer queueCapacity; + 
@Nullable private Integer rejectedExecutionCount; + @Nullable private String reason; private Long startTime; + @Nullable private DeploymentAssignmentState state; + @Nullable private Integer threadsPerAllocation; + @Nullable private Integer timeoutCount; /** - * Required - The detailed allocation status for the deployment. + * API name: {@code adaptive_allocations} + */ + public final Builder adaptiveAllocations(@Nullable AdaptiveAllocationsSettings value) { + this.adaptiveAllocations = value; + return this; + } + + /** + * API name: {@code adaptive_allocations} + */ + public final Builder adaptiveAllocations( + Function> fn) { + return this.adaptiveAllocations(fn.apply(new AdaptiveAllocationsSettings.Builder()).build()); + } + + /** + * The detailed allocation status for the deployment. *

* API name: {@code allocation_status} */ - public final Builder allocationStatus(TrainedModelDeploymentAllocationStatus value) { + public final Builder allocationStatus(@Nullable TrainedModelDeploymentAllocationStatus value) { this.allocationStatus = value; return this; } /** - * Required - The detailed allocation status for the deployment. + * The detailed allocation status for the deployment. *

* API name: {@code allocation_status} */ @@ -418,23 +529,21 @@ public final Builder deploymentId(String value) { } /** - * Required - The sum of error_count for all nodes in the - * deployment. + * The sum of error_count for all nodes in the deployment. *

* API name: {@code error_count} */ - public final Builder errorCount(int value) { + public final Builder errorCount(@Nullable Integer value) { this.errorCount = value; return this; } /** - * Required - The sum of inference_count for all nodes in the - * deployment. + * The sum of inference_count for all nodes in the deployment. *

* API name: {@code inference_count} */ - public final Builder inferenceCount(int value) { + public final Builder inferenceCount(@Nullable Integer value) { this.inferenceCount = value; return this; } @@ -492,47 +601,63 @@ public final Builder nodes( } /** - * Required - The number of allocations requested. + * The number of allocations requested. *

* API name: {@code number_of_allocations} */ - public final Builder numberOfAllocations(int value) { + public final Builder numberOfAllocations(@Nullable Integer value) { this.numberOfAllocations = value; return this; } /** - * Required - The number of inference requests that can be queued before new - * requests are rejected. + * Required - API name: {@code peak_throughput_per_minute} + */ + public final Builder peakThroughputPerMinute(long value) { + this.peakThroughputPerMinute = value; + return this; + } + + /** + * Required - API name: {@code priority} + */ + public final Builder priority(TrainingPriority value) { + this.priority = value; + return this; + } + + /** + * The number of inference requests that can be queued before new requests are + * rejected. *

* API name: {@code queue_capacity} */ - public final Builder queueCapacity(int value) { + public final Builder queueCapacity(@Nullable Integer value) { this.queueCapacity = value; return this; } /** - * Required - The sum of rejected_execution_count for all nodes in - * the deployment. Individual nodes reject an inference request if the inference + * The sum of rejected_execution_count for all nodes in the + * deployment. Individual nodes reject an inference request if the inference * queue is full. The queue size is controlled by the * queue_capacity setting in the start trained model deployment * API. *

* API name: {@code rejected_execution_count} */ - public final Builder rejectedExecutionCount(int value) { + public final Builder rejectedExecutionCount(@Nullable Integer value) { this.rejectedExecutionCount = value; return this; } /** - * Required - The reason for the current deployment state. Usually only - * populated when the model is not deployed to a node. + * The reason for the current deployment state. Usually only populated when the + * model is not deployed to a node. *

* API name: {@code reason} */ - public final Builder reason(String value) { + public final Builder reason(@Nullable String value) { this.reason = value; return this; } @@ -548,32 +673,31 @@ public final Builder startTime(long value) { } /** - * Required - The overall state of the deployment. + * The overall state of the deployment. *

* API name: {@code state} */ - public final Builder state(DeploymentAssignmentState value) { + public final Builder state(@Nullable DeploymentAssignmentState value) { this.state = value; return this; } /** - * Required - The number of threads used be each allocation during inference. + * The number of threads used by each allocation during inference. *

* API name: {@code threads_per_allocation} */ - public final Builder threadsPerAllocation(int value) { + public final Builder threadsPerAllocation(@Nullable Integer value) { this.threadsPerAllocation = value; return this; } /** - * Required - The sum of timeout_count for all nodes in the - * deployment. + * The sum of timeout_count for all nodes in the deployment. *

* API name: {@code timeout_count} */ - public final Builder timeoutCount(int value) { + public final Builder timeoutCount(@Nullable Integer value) { this.timeoutCount = value; return this; } @@ -607,6 +731,7 @@ public TrainedModelDeploymentStats build() { protected static void setupTrainedModelDeploymentStatsDeserializer( ObjectDeserializer op) { + op.add(Builder::adaptiveAllocations, AdaptiveAllocationsSettings._DESERIALIZER, "adaptive_allocations"); op.add(Builder::allocationStatus, TrainedModelDeploymentAllocationStatus._DESERIALIZER, "allocation_status"); op.add(Builder::cacheSize, JsonpDeserializer.stringDeserializer(), "cache_size"); op.add(Builder::deploymentId, JsonpDeserializer.stringDeserializer(), "deployment_id"); @@ -616,6 +741,8 @@ protected static void setupTrainedModelDeploymentStatsDeserializer( op.add(Builder::nodes, JsonpDeserializer.arrayDeserializer(TrainedModelDeploymentNodesStats._DESERIALIZER), "nodes"); op.add(Builder::numberOfAllocations, JsonpDeserializer.integerDeserializer(), "number_of_allocations"); + op.add(Builder::peakThroughputPerMinute, JsonpDeserializer.longDeserializer(), "peak_throughput_per_minute"); + op.add(Builder::priority, TrainingPriority._DESERIALIZER, "priority"); op.add(Builder::queueCapacity, JsonpDeserializer.integerDeserializer(), "queue_capacity"); op.add(Builder::rejectedExecutionCount, JsonpDeserializer.integerDeserializer(), "rejected_execution_count"); op.add(Builder::reason, JsonpDeserializer.stringDeserializer(), "reason"); diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/UpdateJobRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/UpdateJobRequest.java index 1c0f308c0..0bab8e116 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/UpdateJobRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/UpdateJobRequest.java @@ -90,7 +90,7 @@ public class UpdateJobRequest extends RequestBase implements 
JsonpSerializable { @Nullable private final String description; - private final List detectors; + private final List detectors; private final List groups; @@ -232,7 +232,7 @@ public final String description() { *

* API name: {@code detectors} */ - public final List detectors() { + public final List detectors() { return this.detectors; } @@ -379,7 +379,7 @@ protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { if (ApiTypeHelper.isDefined(this.detectors)) { generator.writeKey("detectors"); generator.writeStartArray(); - for (Detector item0 : this.detectors) { + for (DetectorUpdate item0 : this.detectors) { item0.serialize(generator, mapper); } @@ -460,7 +460,7 @@ public static class Builder extends RequestBase.AbstractBuilder private String description; @Nullable - private List detectors; + private List detectors; @Nullable private List groups; @@ -631,7 +631,7 @@ public final Builder description(@Nullable String value) { *

* Adds all elements of list to detectors. */ - public final Builder detectors(List list) { + public final Builder detectors(List list) { this.detectors = _listAddAll(this.detectors, list); return this; } @@ -643,7 +643,7 @@ public final Builder detectors(List list) { *

* Adds one or more values to detectors. */ - public final Builder detectors(Detector value, Detector... values) { + public final Builder detectors(DetectorUpdate value, DetectorUpdate... values) { this.detectors = _listAdd(this.detectors, value, values); return this; } @@ -655,8 +655,8 @@ public final Builder detectors(Detector value, Detector... values) { *

* Adds a value to detectors using a builder lambda. */ - public final Builder detectors(Function> fn) { - return detectors(fn.apply(new Detector.Builder()).build()); + public final Builder detectors(Function> fn) { + return detectors(fn.apply(new DetectorUpdate.Builder()).build()); } /** @@ -820,7 +820,7 @@ protected static void setupUpdateJobRequestDeserializer(ObjectDeserializerAPI * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/info/Limits.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/info/Limits.java index 882dc9ade..b5832c1cd 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/info/Limits.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/ml/info/Limits.java @@ -30,6 +30,7 @@ import co.elastic.clients.util.ObjectBuilder; import co.elastic.clients.util.WithJsonObjectBuilderBase; import jakarta.json.stream.JsonGenerator; +import java.lang.Integer; import java.lang.String; import java.util.Objects; import java.util.function.Function; @@ -59,9 +60,16 @@ */ @JsonpDeserializable public class Limits implements JsonpSerializable { + @Nullable + private final Integer maxSingleMlNodeProcessors; + + @Nullable + private final Integer totalMlProcessors; + @Nullable private final String maxModelMemoryLimit; + @Nullable private final String effectiveMaxModelMemoryLimit; private final String totalMlMemory; @@ -70,9 +78,10 @@ public class Limits implements JsonpSerializable { private Limits(Builder builder) { + this.maxSingleMlNodeProcessors = builder.maxSingleMlNodeProcessors; + this.totalMlProcessors = builder.totalMlProcessors; this.maxModelMemoryLimit = builder.maxModelMemoryLimit; - this.effectiveMaxModelMemoryLimit = ApiTypeHelper.requireNonNull(builder.effectiveMaxModelMemoryLimit, this, - "effectiveMaxModelMemoryLimit"); + this.effectiveMaxModelMemoryLimit = builder.effectiveMaxModelMemoryLimit; this.totalMlMemory = 
ApiTypeHelper.requireNonNull(builder.totalMlMemory, this, "totalMlMemory"); } @@ -81,6 +90,22 @@ public static Limits of(Function> fn) { return fn.apply(new Builder()).build(); } + /** + * API name: {@code max_single_ml_node_processors} + */ + @Nullable + public final Integer maxSingleMlNodeProcessors() { + return this.maxSingleMlNodeProcessors; + } + + /** + * API name: {@code total_ml_processors} + */ + @Nullable + public final Integer totalMlProcessors() { + return this.totalMlProcessors; + } + /** * API name: {@code max_model_memory_limit} */ @@ -90,8 +115,9 @@ public final String maxModelMemoryLimit() { } /** - * Required - API name: {@code effective_max_model_memory_limit} + * API name: {@code effective_max_model_memory_limit} */ + @Nullable public final String effectiveMaxModelMemoryLimit() { return this.effectiveMaxModelMemoryLimit; } @@ -114,14 +140,26 @@ public void serialize(JsonGenerator generator, JsonpMapper mapper) { protected void serializeInternal(JsonGenerator generator, JsonpMapper mapper) { + if (this.maxSingleMlNodeProcessors != null) { + generator.writeKey("max_single_ml_node_processors"); + generator.write(this.maxSingleMlNodeProcessors); + + } + if (this.totalMlProcessors != null) { + generator.writeKey("total_ml_processors"); + generator.write(this.totalMlProcessors); + + } if (this.maxModelMemoryLimit != null) { generator.writeKey("max_model_memory_limit"); generator.write(this.maxModelMemoryLimit); } - generator.writeKey("effective_max_model_memory_limit"); - generator.write(this.effectiveMaxModelMemoryLimit); + if (this.effectiveMaxModelMemoryLimit != null) { + generator.writeKey("effective_max_model_memory_limit"); + generator.write(this.effectiveMaxModelMemoryLimit); + } generator.writeKey("total_ml_memory"); generator.write(this.totalMlMemory); @@ -139,13 +177,36 @@ public String toString() { */ public static class Builder extends WithJsonObjectBuilderBase implements ObjectBuilder { + @Nullable + private Integer 
maxSingleMlNodeProcessors; + + @Nullable + private Integer totalMlProcessors; + @Nullable private String maxModelMemoryLimit; + @Nullable private String effectiveMaxModelMemoryLimit; private String totalMlMemory; + /** + * API name: {@code max_single_ml_node_processors} + */ + public final Builder maxSingleMlNodeProcessors(@Nullable Integer value) { + this.maxSingleMlNodeProcessors = value; + return this; + } + + /** + * API name: {@code total_ml_processors} + */ + public final Builder totalMlProcessors(@Nullable Integer value) { + this.totalMlProcessors = value; + return this; + } + /** * API name: {@code max_model_memory_limit} */ @@ -155,9 +216,9 @@ public final Builder maxModelMemoryLimit(@Nullable String value) { } /** - * Required - API name: {@code effective_max_model_memory_limit} + * API name: {@code effective_max_model_memory_limit} */ - public final Builder effectiveMaxModelMemoryLimit(String value) { + public final Builder effectiveMaxModelMemoryLimit(@Nullable String value) { this.effectiveMaxModelMemoryLimit = value; return this; } @@ -198,6 +259,9 @@ public Limits build() { protected static void setupLimitsDeserializer(ObjectDeserializer op) { + op.add(Builder::maxSingleMlNodeProcessors, JsonpDeserializer.integerDeserializer(), + "max_single_ml_node_processors"); + op.add(Builder::totalMlProcessors, JsonpDeserializer.integerDeserializer(), "total_ml_processors"); op.add(Builder::maxModelMemoryLimit, JsonpDeserializer.stringDeserializer(), "max_model_memory_limit"); op.add(Builder::effectiveMaxModelMemoryLimit, JsonpDeserializer.stringDeserializer(), "effective_max_model_memory_limit"); diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/monitoring/BulkRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/monitoring/BulkRequest.java index 768360106..dbb680041 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/monitoring/BulkRequest.java +++ 
b/java-client/src/main/java/co/elastic/clients/elasticsearch/monitoring/BulkRequest.java @@ -62,7 +62,8 @@ // typedef: monitoring.bulk.Request /** - * Used by the monitoring features to send monitoring data. + * Send monitoring data. This API is used by the monitoring features to send + * monitoring data. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/monitoring/ElasticsearchMonitoringAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/monitoring/ElasticsearchMonitoringAsyncClient.java index aad05d73a..9dc19d082 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/monitoring/ElasticsearchMonitoringAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/monitoring/ElasticsearchMonitoringAsyncClient.java @@ -70,7 +70,8 @@ public ElasticsearchMonitoringAsyncClient withTransportOptions(@Nullable Transpo // ----- Endpoint: monitoring.bulk /** - * Used by the monitoring features to send monitoring data. + * Send monitoring data. This API is used by the monitoring features to send + * monitoring data. * * @see Documentation @@ -85,7 +86,8 @@ public CompletableFuture bulk(BulkRequest request) { } /** - * Used by the monitoring features to send monitoring data. + * Send monitoring data. This API is used by the monitoring features to send + * monitoring data. 
* * @param fn * a function that initializes a builder to create the diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/monitoring/ElasticsearchMonitoringClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/monitoring/ElasticsearchMonitoringClient.java index 0c0fb8752..1ad60de63 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/monitoring/ElasticsearchMonitoringClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/monitoring/ElasticsearchMonitoringClient.java @@ -69,7 +69,8 @@ public ElasticsearchMonitoringClient withTransportOptions(@Nullable TransportOpt // ----- Endpoint: monitoring.bulk /** - * Used by the monitoring features to send monitoring data. + * Send monitoring data. This API is used by the monitoring features to send + * monitoring data. * * @see Documentation @@ -84,7 +85,8 @@ public BulkResponse bulk(BulkRequest request) throws IOException, ElasticsearchE } /** - * Used by the monitoring features to send monitoring data. + * Send monitoring data. This API is used by the monitoring features to send + * monitoring data. * * @param fn * a function that initializes a builder to create the diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/ClearRepositoriesMeteringArchiveRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/ClearRepositoriesMeteringArchiveRequest.java index 24a760458..2f6e7b98f 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/ClearRepositoriesMeteringArchiveRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/ClearRepositoriesMeteringArchiveRequest.java @@ -59,8 +59,8 @@ // typedef: nodes.clear_repositories_metering_archive.Request /** - * You can use this API to clear the archived repositories metering information - * in the cluster. + * Clear the archived repositories metering. 
Clear the archived repositories + * metering information in the cluster. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/ElasticsearchNodesAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/ElasticsearchNodesAsyncClient.java index 22290e128..701d12b15 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/ElasticsearchNodesAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/ElasticsearchNodesAsyncClient.java @@ -68,8 +68,8 @@ public ElasticsearchNodesAsyncClient withTransportOptions(@Nullable TransportOpt // ----- Endpoint: nodes.clear_repositories_metering_archive /** - * You can use this API to clear the archived repositories metering information - * in the cluster. + * Clear the archived repositories metering. Clear the archived repositories + * metering information in the cluster. * * @see Documentation @@ -85,8 +85,8 @@ public CompletableFuture clearReposito } /** - * You can use this API to clear the archived repositories metering information - * in the cluster. + * Clear the archived repositories metering. Clear the archived repositories + * metering information in the cluster. * * @param fn * a function that initializes a builder to create the @@ -105,12 +105,12 @@ public final CompletableFuture clearRe // ----- Endpoint: nodes.get_repositories_metering_info /** - * You can use the cluster repositories metering API to retrieve repositories - * metering information in a cluster. This API exposes monotonically - * non-decreasing counters and it’s expected that clients would durably store - * the information needed to compute aggregations over a period of time. - * Additionally, the information exposed by this API is volatile, meaning that - * it won’t be present after node restarts. + * Get cluster repositories metering. Get repositories metering information for + * a cluster. 
This API exposes monotonically non-decreasing counters and it is + * expected that clients would durably store the information needed to compute + * aggregations over a period of time. Additionally, the information exposed by + * this API is volatile, meaning that it will not be present after node + * restarts. * * @see Documentation @@ -126,12 +126,12 @@ public CompletableFuture getRepositoriesMet } /** - * You can use the cluster repositories metering API to retrieve repositories - * metering information in a cluster. This API exposes monotonically - * non-decreasing counters and it’s expected that clients would durably store - * the information needed to compute aggregations over a period of time. - * Additionally, the information exposed by this API is volatile, meaning that - * it won’t be present after node restarts. + * Get cluster repositories metering. Get repositories metering information for + * a cluster. This API exposes monotonically non-decreasing counters and it is + * expected that clients would durably store the information needed to compute + * aggregations over a period of time. Additionally, the information exposed by + * this API is volatile, meaning that it will not be present after node + * restarts. * * @param fn * a function that initializes a builder to create the @@ -149,9 +149,9 @@ public final CompletableFuture getRepositor // ----- Endpoint: nodes.hot_threads /** - * This API yields a breakdown of the hot threads on each selected node in the - * cluster. The output is plain text with a breakdown of each node’s top hot - * threads. + * Get the hot threads for nodes. Get a breakdown of the hot threads on each + * selected node in the cluster. The output is plain text with a breakdown of + * the top hot threads for each node. * * @see Documentation @@ -166,9 +166,9 @@ public CompletableFuture hotThreads(HotThreadsRequest reques } /** - * This API yields a breakdown of the hot threads on each selected node in the - * cluster. 
The output is plain text with a breakdown of each node’s top hot - * threads. + * Get the hot threads for nodes. Get a breakdown of the hot threads on each + * selected node in the cluster. The output is plain text with a breakdown of + * the top hot threads for each node. * * @param fn * a function that initializes a builder to create the @@ -184,9 +184,9 @@ public final CompletableFuture hotThreads( } /** - * This API yields a breakdown of the hot threads on each selected node in the - * cluster. The output is plain text with a breakdown of each node’s top hot - * threads. + * Get the hot threads for nodes. Get a breakdown of the hot threads on each + * selected node in the cluster. The output is plain text with a breakdown of + * the top hot threads for each node. * * @see Documentation @@ -201,7 +201,8 @@ public CompletableFuture hotThreads() { // ----- Endpoint: nodes.info /** - * Returns cluster nodes information. + * Get node information. By default, the API returns all attributes and core + * settings for cluster nodes. * * @see Documentation @@ -216,7 +217,8 @@ public CompletableFuture info(NodesInfoRequest request) { } /** - * Returns cluster nodes information. + * Get node information. By default, the API returns all attributes and core + * settings for cluster nodes. * * @param fn * a function that initializes a builder to create the @@ -232,7 +234,8 @@ public final CompletableFuture info( } /** - * Returns cluster nodes information. + * Get node information. By default, the API returns all attributes and core + * settings for cluster nodes. * * @see Documentation @@ -247,7 +250,22 @@ public CompletableFuture info() { // ----- Endpoint: nodes.reload_secure_settings /** - * Reloads the keystore on nodes in the cluster. + * Reload the keystore on nodes in the cluster. + *

+ * Secure settings are stored in an on-disk keystore. Certain of these settings + * are reloadable. That is, you can change them on disk and reload them without + * restarting any nodes in the cluster. When you have updated reloadable secure + * settings in your keystore, you can use this API to reload those settings on + * each node. + *

+ * When the Elasticsearch keystore is password protected and not simply + * obfuscated, you must provide the password for the keystore when you reload + * the secure settings. Reloading the settings for the whole cluster assumes + * that the keystores for all nodes are protected with the same password; this + * method is allowed only when inter-node communications are encrypted. + * Alternatively, you can reload the secure settings on each node by locally + * accessing the API and passing the node-specific Elasticsearch keystore + * password. * * @see Documentation @@ -262,7 +280,22 @@ public CompletableFuture reloadSecureSettings(Relo } /** - * Reloads the keystore on nodes in the cluster. + * Reload the keystore on nodes in the cluster. + *

+ * Secure settings are stored in an on-disk keystore. Certain of these settings + * are reloadable. That is, you can change them on disk and reload them without + * restarting any nodes in the cluster. When you have updated reloadable secure + * settings in your keystore, you can use this API to reload those settings on + * each node. + *

+ * When the Elasticsearch keystore is password protected and not simply + * obfuscated, you must provide the password for the keystore when you reload + * the secure settings. Reloading the settings for the whole cluster assumes + * that the keystores for all nodes are protected with the same password; this + * method is allowed only when inter-node communications are encrypted. + * Alternatively, you can reload the secure settings on each node by locally + * accessing the API and passing the node-specific Elasticsearch keystore + * password. * * @param fn * a function that initializes a builder to create the @@ -278,7 +311,22 @@ public final CompletableFuture reloadSecureSetting } /** - * Reloads the keystore on nodes in the cluster. + * Reload the keystore on nodes in the cluster. + *

+ * Secure settings are stored in an on-disk keystore. Certain of these settings + * are reloadable. That is, you can change them on disk and reload them without + * restarting any nodes in the cluster. When you have updated reloadable secure + * settings in your keystore, you can use this API to reload those settings on + * each node. + *

+ * When the Elasticsearch keystore is password protected and not simply + * obfuscated, you must provide the password for the keystore when you reload + * the secure settings. Reloading the settings for the whole cluster assumes + * that the keystores for all nodes are protected with the same password; this + * method is allowed only when inter-node communications are encrypted. + * Alternatively, you can reload the secure settings on each node by locally + * accessing the API and passing the node-specific Elasticsearch keystore + * password. * * @see Documentation @@ -293,7 +341,8 @@ public CompletableFuture reloadSecureSettings() { // ----- Endpoint: nodes.stats /** - * Returns cluster nodes statistics. + * Get node statistics. Get statistics for nodes in a cluster. By default, all + * stats are returned. You can limit the returned information by using metrics. * * @see Documentation @@ -308,7 +357,8 @@ public CompletableFuture stats(NodesStatsRequest request) { } /** - * Returns cluster nodes statistics. + * Get node statistics. Get statistics for nodes in a cluster. By default, all + * stats are returned. You can limit the returned information by using metrics. * * @param fn * a function that initializes a builder to create the @@ -324,7 +374,8 @@ public final CompletableFuture stats( } /** - * Returns cluster nodes statistics. + * Get node statistics. Get statistics for nodes in a cluster. By default, all + * stats are returned. You can limit the returned information by using metrics. * * @see Documentation @@ -339,7 +390,7 @@ public CompletableFuture stats() { // ----- Endpoint: nodes.usage /** - * Returns information on the usage of features. + * Get feature usage information. * * @see Documentation @@ -354,7 +405,7 @@ public CompletableFuture usage(NodesUsageRequest request) { } /** - * Returns information on the usage of features. + * Get feature usage information. 
* * @param fn * a function that initializes a builder to create the @@ -370,7 +421,7 @@ public final CompletableFuture usage( } /** - * Returns information on the usage of features. + * Get feature usage information. * * @see Documentation diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/ElasticsearchNodesClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/ElasticsearchNodesClient.java index a32df2954..d0481fe85 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/ElasticsearchNodesClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/ElasticsearchNodesClient.java @@ -68,8 +68,8 @@ public ElasticsearchNodesClient withTransportOptions(@Nullable TransportOptions // ----- Endpoint: nodes.clear_repositories_metering_archive /** - * You can use this API to clear the archived repositories metering information - * in the cluster. + * Clear the archived repositories metering. Clear the archived repositories + * metering information in the cluster. * * @see Documentation @@ -85,8 +85,8 @@ public ClearRepositoriesMeteringArchiveResponse clearRepositoriesMeteringArchive } /** - * You can use this API to clear the archived repositories metering information - * in the cluster. + * Clear the archived repositories metering. Clear the archived repositories + * metering information in the cluster. * * @param fn * a function that initializes a builder to create the @@ -106,12 +106,12 @@ public final ClearRepositoriesMeteringArchiveResponse clearRepositoriesMeteringA // ----- Endpoint: nodes.get_repositories_metering_info /** - * You can use the cluster repositories metering API to retrieve repositories - * metering information in a cluster. This API exposes monotonically - * non-decreasing counters and it’s expected that clients would durably store - * the information needed to compute aggregations over a period of time. 
- * Additionally, the information exposed by this API is volatile, meaning that - * it won’t be present after node restarts. + * Get cluster repositories metering. Get repositories metering information for + * a cluster. This API exposes monotonically non-decreasing counters and it is + * expected that clients would durably store the information needed to compute + * aggregations over a period of time. Additionally, the information exposed by + * this API is volatile, meaning that it will not be present after node + * restarts. * * @see Documentation @@ -127,12 +127,12 @@ public GetRepositoriesMeteringInfoResponse getRepositoriesMeteringInfo(GetReposi } /** - * You can use the cluster repositories metering API to retrieve repositories - * metering information in a cluster. This API exposes monotonically - * non-decreasing counters and it’s expected that clients would durably store - * the information needed to compute aggregations over a period of time. - * Additionally, the information exposed by this API is volatile, meaning that - * it won’t be present after node restarts. + * Get cluster repositories metering. Get repositories metering information for + * a cluster. This API exposes monotonically non-decreasing counters and it is + * expected that clients would durably store the information needed to compute + * aggregations over a period of time. Additionally, the information exposed by + * this API is volatile, meaning that it will not be present after node + * restarts. * * @param fn * a function that initializes a builder to create the @@ -151,9 +151,9 @@ public final GetRepositoriesMeteringInfoResponse getRepositoriesMeteringInfo( // ----- Endpoint: nodes.hot_threads /** - * This API yields a breakdown of the hot threads on each selected node in the - * cluster. The output is plain text with a breakdown of each node’s top hot - * threads. + * Get the hot threads for nodes. Get a breakdown of the hot threads on each + * selected node in the cluster. 
The output is plain text with a breakdown of + * the top hot threads for each node. * * @see Documentation @@ -168,9 +168,9 @@ public HotThreadsResponse hotThreads(HotThreadsRequest request) throws IOExcepti } /** - * This API yields a breakdown of the hot threads on each selected node in the - * cluster. The output is plain text with a breakdown of each node’s top hot - * threads. + * Get the hot threads for nodes. Get a breakdown of the hot threads on each + * selected node in the cluster. The output is plain text with a breakdown of + * the top hot threads for each node. * * @param fn * a function that initializes a builder to create the @@ -186,9 +186,9 @@ public final HotThreadsResponse hotThreads(FunctionDocumentation @@ -203,7 +203,8 @@ public HotThreadsResponse hotThreads() throws IOException, ElasticsearchExceptio // ----- Endpoint: nodes.info /** - * Returns cluster nodes information. + * Get node information. By default, the API returns all attributes and core + * settings for cluster nodes. * * @see Documentation @@ -218,7 +219,8 @@ public NodesInfoResponse info(NodesInfoRequest request) throws IOException, Elas } /** - * Returns cluster nodes information. + * Get node information. By default, the API returns all attributes and core + * settings for cluster nodes. * * @param fn * a function that initializes a builder to create the @@ -234,7 +236,8 @@ public final NodesInfoResponse info(FunctionDocumentation @@ -249,7 +252,22 @@ public NodesInfoResponse info() throws IOException, ElasticsearchException { // ----- Endpoint: nodes.reload_secure_settings /** - * Reloads the keystore on nodes in the cluster. + * Reload the keystore on nodes in the cluster. + *

+ * Secure settings are stored in an on-disk keystore. Certain of these settings + * are reloadable. That is, you can change them on disk and reload them without + * restarting any nodes in the cluster. When you have updated reloadable secure + * settings in your keystore, you can use this API to reload those settings on + * each node. + *

+ * When the Elasticsearch keystore is password protected and not simply + * obfuscated, you must provide the password for the keystore when you reload + * the secure settings. Reloading the settings for the whole cluster assumes + * that the keystores for all nodes are protected with the same password; this + * method is allowed only when inter-node communications are encrypted. + * Alternatively, you can reload the secure settings on each node by locally + * accessing the API and passing the node-specific Elasticsearch keystore + * password. * * @see Documentation @@ -265,7 +283,22 @@ public ReloadSecureSettingsResponse reloadSecureSettings(ReloadSecureSettingsReq } /** - * Reloads the keystore on nodes in the cluster. + * Reload the keystore on nodes in the cluster. + *

+ * Secure settings are stored in an on-disk keystore. Certain of these settings + * are reloadable. That is, you can change them on disk and reload them without + * restarting any nodes in the cluster. When you have updated reloadable secure + * settings in your keystore, you can use this API to reload those settings on + * each node. + *

+ * When the Elasticsearch keystore is password protected and not simply + * obfuscated, you must provide the password for the keystore when you reload + * the secure settings. Reloading the settings for the whole cluster assumes + * that the keystores for all nodes are protected with the same password; this + * method is allowed only when inter-node communications are encrypted. + * Alternatively, you can reload the secure settings on each node by locally + * accessing the API and passing the node-specific Elasticsearch keystore + * password. * * @param fn * a function that initializes a builder to create the @@ -282,7 +315,22 @@ public final ReloadSecureSettingsResponse reloadSecureSettings( } /** - * Reloads the keystore on nodes in the cluster. + * Reload the keystore on nodes in the cluster. + *

+ * Secure settings are stored in an on-disk keystore. Certain of these settings + * are reloadable. That is, you can change them on disk and reload them without + * restarting any nodes in the cluster. When you have updated reloadable secure + * settings in your keystore, you can use this API to reload those settings on + * each node. + *

+ * When the Elasticsearch keystore is password protected and not simply + * obfuscated, you must provide the password for the keystore when you reload + * the secure settings. Reloading the settings for the whole cluster assumes + * that the keystores for all nodes are protected with the same password; this + * method is allowed only when inter-node communications are encrypted. + * Alternatively, you can reload the secure settings on each node by locally + * accessing the API and passing the node-specific Elasticsearch keystore + * password. * * @see Documentation @@ -297,7 +345,8 @@ public ReloadSecureSettingsResponse reloadSecureSettings() throws IOException, E // ----- Endpoint: nodes.stats /** - * Returns cluster nodes statistics. + * Get node statistics. Get statistics for nodes in a cluster. By default, all + * stats are returned. You can limit the returned information by using metrics. * * @see Documentation @@ -312,7 +361,8 @@ public NodesStatsResponse stats(NodesStatsRequest request) throws IOException, E } /** - * Returns cluster nodes statistics. + * Get node statistics. Get statistics for nodes in a cluster. By default, all + * stats are returned. You can limit the returned information by using metrics. * * @param fn * a function that initializes a builder to create the @@ -328,7 +378,8 @@ public final NodesStatsResponse stats(FunctionDocumentation @@ -343,7 +394,7 @@ public NodesStatsResponse stats() throws IOException, ElasticsearchException { // ----- Endpoint: nodes.usage /** - * Returns information on the usage of features. + * Get feature usage information. * * @see Documentation @@ -358,7 +409,7 @@ public NodesUsageResponse usage(NodesUsageRequest request) throws IOException, E } /** - * Returns information on the usage of features. + * Get feature usage information. 
* * @param fn * a function that initializes a builder to create the @@ -374,7 +425,7 @@ public final NodesUsageResponse usage(FunctionDocumentation diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/GetRepositoriesMeteringInfoRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/GetRepositoriesMeteringInfoRequest.java index 73977ee82..c5a7b5c38 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/GetRepositoriesMeteringInfoRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/GetRepositoriesMeteringInfoRequest.java @@ -58,12 +58,12 @@ // typedef: nodes.get_repositories_metering_info.Request /** - * You can use the cluster repositories metering API to retrieve repositories - * metering information in a cluster. This API exposes monotonically - * non-decreasing counters and it’s expected that clients would durably store - * the information needed to compute aggregations over a period of time. - * Additionally, the information exposed by this API is volatile, meaning that - * it won’t be present after node restarts. + * Get cluster repositories metering. Get repositories metering information for + * a cluster. This API exposes monotonically non-decreasing counters and it is + * expected that clients would durably store the information needed to compute + * aggregations over a period of time. Additionally, the information exposed by + * this API is volatile, meaning that it will not be present after node + * restarts. 
* * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/HotThreadsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/HotThreadsRequest.java index f73e22cf0..56a4e66b7 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/HotThreadsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/HotThreadsRequest.java @@ -61,9 +61,9 @@ // typedef: nodes.hot_threads.Request /** - * This API yields a breakdown of the hot threads on each selected node in the - * cluster. The output is plain text with a breakdown of each node’s top hot - * threads. + * Get the hot threads for nodes. Get a breakdown of the hot threads on each + * selected node in the cluster. The output is plain text with a breakdown of + * the top hot threads for each node. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/NodesInfoRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/NodesInfoRequest.java index c703eafdc..b330f26ae 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/NodesInfoRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/NodesInfoRequest.java @@ -59,7 +59,8 @@ // typedef: nodes.info.Request /** - * Returns cluster nodes information. + * Get node information. By default, the API returns all attributes and core + * settings for cluster nodes. 
* * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/NodesStatsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/NodesStatsRequest.java index 810133836..4045f9698 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/NodesStatsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/NodesStatsRequest.java @@ -60,7 +60,8 @@ // typedef: nodes.stats.Request /** - * Returns cluster nodes statistics. + * Get node statistics. Get statistics for nodes in a cluster. By default, all + * stats are returned. You can limit the returned information by using metrics. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/NodesUsageRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/NodesUsageRequest.java index e422d85f2..8b0127991 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/NodesUsageRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/NodesUsageRequest.java @@ -58,7 +58,7 @@ // typedef: nodes.usage.Request /** - * Returns information on the usage of features. + * Get feature usage information. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/ReloadSecureSettingsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/ReloadSecureSettingsRequest.java index b7cfd67fa..b97d633cc 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/ReloadSecureSettingsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/nodes/ReloadSecureSettingsRequest.java @@ -60,7 +60,22 @@ // typedef: nodes.reload_secure_settings.Request /** - * Reloads the keystore on nodes in the cluster. + * Reload the keystore on nodes in the cluster. + *

+ * Secure settings are stored in an on-disk keystore. Certain of these settings + * are reloadable. That is, you can change them on disk and reload them without + * restarting any nodes in the cluster. When you have updated reloadable secure + * settings in your keystore, you can use this API to reload those settings on + * each node. + *

+ * When the Elasticsearch keystore is password protected and not simply + * obfuscated, you must provide the password for the keystore when you reload + * the secure settings. Reloading the settings for the whole cluster assumes + * that the keystores for all nodes are protected with the same password; this + * method is allowed only when inter-node communications are encrypted. + * Alternatively, you can reload the secure settings on each node by locally + * accessing the API and passing the node-specific Elasticsearch keystore + * password. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/DeleteJobRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/DeleteJobRequest.java index 8abae242c..bf077086d 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/DeleteJobRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/DeleteJobRequest.java @@ -56,8 +56,36 @@ // typedef: rollup.delete_job.Request /** - * Deletes an existing rollup job. + * Delete a rollup job. + *

+ * A job must be stopped before it can be deleted. If you attempt to delete a + * started job, an error occurs. Similarly, if you attempt to delete a + * nonexistent job, an exception occurs. + *

+ * IMPORTANT: When you delete a job, you remove only the process that is + * actively monitoring and rolling up data. The API does not delete any + * previously rolled up data. This is by design; a user may wish to roll up a + * static data set. Because the data set is static, after it has been fully + * rolled up there is no need to keep the indexing rollup job around (as there + * will be no new data). Thus the job can be deleted, leaving behind the rolled + * up data for analysis. If you wish to also remove the rollup data and the + * rollup index contains the data for only a single job, you can delete the + * whole rollup index. If the rollup index stores data from several jobs, you + * must issue a delete-by-query that targets the rollup job's identifier in the + * rollup index. For example: * + *

+ * POST my_rollup_index/_delete_by_query
+ * {
+ *   "query": {
+ *     "term": {
+ *       "_rollup.id": "the_rollup_job_id"
+ *     }
+ *   }
+ * }
+ * 
+ * 
+ * * @see API * specification */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/ElasticsearchRollupAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/ElasticsearchRollupAsyncClient.java index f564b55ba..1c5b12922 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/ElasticsearchRollupAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/ElasticsearchRollupAsyncClient.java @@ -70,8 +70,36 @@ public ElasticsearchRollupAsyncClient withTransportOptions(@Nullable TransportOp // ----- Endpoint: rollup.delete_job /** - * Deletes an existing rollup job. + * Delete a rollup job. + *

+ * A job must be stopped before it can be deleted. If you attempt to delete a + * started job, an error occurs. Similarly, if you attempt to delete a + * nonexistent job, an exception occurs. + *

+ * IMPORTANT: When you delete a job, you remove only the process that is + * actively monitoring and rolling up data. The API does not delete any + * previously rolled up data. This is by design; a user may wish to roll up a + * static data set. Because the data set is static, after it has been fully + * rolled up there is no need to keep the indexing rollup job around (as there + * will be no new data). Thus the job can be deleted, leaving behind the rolled + * up data for analysis. If you wish to also remove the rollup data and the + * rollup index contains the data for only a single job, you can delete the + * whole rollup index. If the rollup index stores data from several jobs, you + * must issue a delete-by-query that targets the rollup job's identifier in the + * rollup index. For example: * + *

+	 * POST my_rollup_index/_delete_by_query
+	 * {
+	 *   "query": {
+	 *     "term": {
+	 *       "_rollup.id": "the_rollup_job_id"
+	 *     }
+	 *   }
+	 * }
+	 * 
+	 * 
+ * * @see Documentation * on elastic.co @@ -85,8 +113,36 @@ public CompletableFuture deleteJob(DeleteJobRequest request) } /** - * Deletes an existing rollup job. + * Delete a rollup job. + *

+ * A job must be stopped before it can be deleted. If you attempt to delete a + * started job, an error occurs. Similarly, if you attempt to delete a + * nonexistent job, an exception occurs. + *

+ * IMPORTANT: When you delete a job, you remove only the process that is + * actively monitoring and rolling up data. The API does not delete any + * previously rolled up data. This is by design; a user may wish to roll up a + * static data set. Because the data set is static, after it has been fully + * rolled up there is no need to keep the indexing rollup job around (as there + * will be no new data). Thus the job can be deleted, leaving behind the rolled + * up data for analysis. If you wish to also remove the rollup data and the + * rollup index contains the data for only a single job, you can delete the + * whole rollup index. If the rollup index stores data from several jobs, you + * must issue a delete-by-query that targets the rollup job's identifier in the + * rollup index. For example: * + *

+	 * POST my_rollup_index/_delete_by_query
+	 * {
+	 *   "query": {
+	 *     "term": {
+	 *       "_rollup.id": "the_rollup_job_id"
+	 *     }
+	 *   }
+	 * }
+	 * 
+	 * 
+ * * @param fn * a function that initializes a builder to create the * {@link DeleteJobRequest} @@ -103,7 +159,13 @@ public final CompletableFuture deleteJob( // ----- Endpoint: rollup.get_jobs /** - * Retrieves the configuration, stats, and status of rollup jobs. + * Get rollup job information. Get the configuration, stats, and status of + * rollup jobs. + *

+ * NOTE: This API returns only active (both STARTED and + * STOPPED) jobs. If a job was created, ran for a while, then was + * deleted, the API does not return any details about it. For details about a + * historical rollup job, the rollup capabilities API may be more useful. * * @see Documentation @@ -118,7 +180,13 @@ public CompletableFuture getJobs(GetJobsRequest request) { } /** - * Retrieves the configuration, stats, and status of rollup jobs. + * Get rollup job information. Get the configuration, stats, and status of + * rollup jobs. + *

+ * NOTE: This API returns only active (both STARTED and + * STOPPED) jobs. If a job was created, ran for a while, then was + * deleted, the API does not return any details about it. For details about a + * historical rollup job, the rollup capabilities API may be more useful. * * @param fn * a function that initializes a builder to create the @@ -134,7 +202,13 @@ public final CompletableFuture getJobs( } /** - * Retrieves the configuration, stats, and status of rollup jobs. + * Get rollup job information. Get the configuration, stats, and status of + * rollup jobs. + *

+ * NOTE: This API returns only active (both STARTED and + * STOPPED) jobs. If a job was created, ran for a while, then was + * deleted, the API does not return any details about it. For details about a + * historical rollup job, the rollup capabilities API may be more useful. * * @see Documentation @@ -149,9 +223,21 @@ public CompletableFuture getJobs() { // ----- Endpoint: rollup.get_rollup_caps /** - * Returns the capabilities of any rollup jobs that have been configured for a - * specific index or index pattern. - * + * Get the rollup job capabilities. Get the capabilities of any rollup jobs that + * have been configured for a specific index or index pattern. + *

+ * This API is useful because a rollup job is often configured to rollup only a + * subset of fields from the source index. Furthermore, only certain + * aggregations can be configured for various fields, leading to a limited + * subset of functionality depending on that configuration. This API enables you + * to inspect an index and determine: + *

+ * <ol>
+ * <li>Does this index have associated rollup data somewhere in the
+ * cluster?</li>
+ * <li>If yes to the first question, what fields were rolled up, what
+ * aggregations can be performed, and where does the data live?</li>
+ * </ol>
+ * * @see Documentation * on elastic.co @@ -165,9 +251,21 @@ public CompletableFuture getRollupCaps(GetRollupCapsReque } /** - * Returns the capabilities of any rollup jobs that have been configured for a - * specific index or index pattern. - * + * Get the rollup job capabilities. Get the capabilities of any rollup jobs that + * have been configured for a specific index or index pattern. + *

+ * This API is useful because a rollup job is often configured to rollup only a + * subset of fields from the source index. Furthermore, only certain + * aggregations can be configured for various fields, leading to a limited + * subset of functionality depending on that configuration. This API enables you + * to inspect an index and determine: + *

+ * <ol>
+ * <li>Does this index have associated rollup data somewhere in the
+ * cluster?</li>
+ * <li>If yes to the first question, what fields were rolled up, what
+ * aggregations can be performed, and where does the data live?</li>
+ * </ol>
+ * * @param fn * a function that initializes a builder to create the * {@link GetRollupCapsRequest} @@ -182,9 +280,21 @@ public final CompletableFuture getRollupCaps( } /** - * Returns the capabilities of any rollup jobs that have been configured for a - * specific index or index pattern. - * + * Get the rollup job capabilities. Get the capabilities of any rollup jobs that + * have been configured for a specific index or index pattern. + *

+ * This API is useful because a rollup job is often configured to rollup only a + * subset of fields from the source index. Furthermore, only certain + * aggregations can be configured for various fields, leading to a limited + * subset of functionality depending on that configuration. This API enables you + * to inspect an index and determine: + *

+ * <ol>
+ * <li>Does this index have associated rollup data somewhere in the
+ * cluster?</li>
+ * <li>If yes to the first question, what fields were rolled up, what
+ * aggregations can be performed, and where does the data live?</li>
+ * </ol>
+ * * @see Documentation * on elastic.co @@ -198,9 +308,17 @@ public CompletableFuture getRollupCaps() { // ----- Endpoint: rollup.get_rollup_index_caps /** - * Returns the rollup capabilities of all jobs inside of a rollup index (for - * example, the index where rollup data is stored). - * + * Get the rollup index capabilities. Get the rollup capabilities of all jobs + * inside of a rollup index. A single rollup index may store the data for + * multiple rollup jobs and may have a variety of capabilities depending on + * those jobs. This API enables you to determine: + *
+ * <ul>
+ * <li>What jobs are stored in an index (or indices specified via a
+ * pattern)?</li>
+ * <li>What target indices were rolled up, what fields were used in those
+ * rollups, and what aggregations can be performed on each job?</li>
+ * </ul>
+ * * @see Documentation * on elastic.co @@ -214,9 +332,17 @@ public CompletableFuture getRollupIndexCaps(GetRollu } /** - * Returns the rollup capabilities of all jobs inside of a rollup index (for - * example, the index where rollup data is stored). - * + * Get the rollup index capabilities. Get the rollup capabilities of all jobs + * inside of a rollup index. A single rollup index may store the data for + * multiple rollup jobs and may have a variety of capabilities depending on + * those jobs. This API enables you to determine: + *
+ * <ul>
+ * <li>What jobs are stored in an index (or indices specified via a
+ * pattern)?</li>
+ * <li>What target indices were rolled up, what fields were used in those
+ * rollups, and what aggregations can be performed on each job?</li>
+ * </ul>
+ * * @param fn * a function that initializes a builder to create the * {@link GetRollupIndexCapsRequest} @@ -233,7 +359,23 @@ public final CompletableFuture getRollupIndexCaps( // ----- Endpoint: rollup.put_job /** - * Creates a rollup job. + * Create a rollup job. + *

+ * WARNING: From 8.15.0, calling this API in a cluster with no rollup usage will + * fail with a message about the deprecation and planned removal of rollup + * features. A cluster needs to contain either a rollup job or a rollup index in + * order for this API to be allowed to run. + *

+ * The rollup job configuration contains all the details about how the job + * should run, when it indexes documents, and what future queries will be able + * to run against the rollup index. + *

+ * There are three main sections to the job configuration: the logistical + * details about the job (for example, the cron schedule), the fields that are + * used for grouping, and what metrics to collect for each group. + *

+ * Jobs are created in a STOPPED state. You can start them with the + * start rollup jobs API. * * @see Documentation @@ -248,7 +390,23 @@ public CompletableFuture putJob(PutJobRequest request) { } /** - * Creates a rollup job. + * Create a rollup job. + *

+ * WARNING: From 8.15.0, calling this API in a cluster with no rollup usage will + * fail with a message about the deprecation and planned removal of rollup + * features. A cluster needs to contain either a rollup job or a rollup index in + * order for this API to be allowed to run. + *

+ * The rollup job configuration contains all the details about how the job + * should run, when it indexes documents, and what future queries will be able + * to run against the rollup index. + *

+ * There are three main sections to the job configuration: the logistical + * details about the job (for example, the cron schedule), the fields that are + * used for grouping, and what metrics to collect for each group. + *

+ * Jobs are created in a STOPPED state. You can start them with the + * start rollup jobs API. * * @param fn * a function that initializes a builder to create the @@ -266,7 +424,11 @@ public final CompletableFuture putJob( // ----- Endpoint: rollup.rollup_search /** - * Enables searching rolled-up data using the standard Query DSL. + * Search rolled-up data. The rollup search endpoint is needed because, + * internally, rolled-up documents utilize a different document structure than + * the original data. It rewrites standard Query DSL into a format that matches + * the rollup documents then takes the response and rewrites it back to what a + * client would expect given the original query. * * @see Documentation @@ -285,7 +447,11 @@ public CompletableFuture> rollupSear } /** - * Enables searching rolled-up data using the standard Query DSL. + * Search rolled-up data. The rollup search endpoint is needed because, + * internally, rolled-up documents utilize a different document structure than + * the original data. It rewrites standard Query DSL into a format that matches + * the rollup documents then takes the response and rewrites it back to what a + * client would expect given the original query. * * @param fn * a function that initializes a builder to create the @@ -302,7 +468,11 @@ public final CompletableFuture> roll } /** - * Enables searching rolled-up data using the standard Query DSL. + * Search rolled-up data. The rollup search endpoint is needed because, + * internally, rolled-up documents utilize a different document structure than + * the original data. It rewrites standard Query DSL into a format that matches + * the rollup documents then takes the response and rewrites it back to what a + * client would expect given the original query. * * @see Documentation @@ -321,7 +491,11 @@ public CompletableFuture> rollupSear } /** - * Enables searching rolled-up data using the standard Query DSL. + * Search rolled-up data. 
The rollup search endpoint is needed because, + * internally, rolled-up documents utilize a different document structure than + * the original data. It rewrites standard Query DSL into a format that matches + * the rollup documents then takes the response and rewrites it back to what a + * client would expect given the original query. * * @param fn * a function that initializes a builder to create the @@ -339,7 +513,9 @@ public final CompletableFuture> roll // ----- Endpoint: rollup.start_job /** - * Starts an existing, stopped rollup job. + * Start rollup jobs. If you try to start a job that does not exist, an + * exception occurs. If you try to start a job that is already started, nothing + * happens. * * @see Documentation @@ -354,7 +530,9 @@ public CompletableFuture startJob(StartJobRequest request) { } /** - * Starts an existing, stopped rollup job. + * Start rollup jobs. If you try to start a job that does not exist, an + * exception occurs. If you try to start a job that is already started, nothing + * happens. * * @param fn * a function that initializes a builder to create the @@ -372,7 +550,8 @@ public final CompletableFuture startJob( // ----- Endpoint: rollup.stop_job /** - * Stops an existing, started rollup job. + * Stop rollup jobs. If you try to stop a job that does not exist, an exception + * occurs. If you try to stop a job that is already stopped, nothing happens. * * @see Documentation @@ -387,7 +566,8 @@ public CompletableFuture stopJob(StopJobRequest request) { } /** - * Stops an existing, started rollup job. + * Stop rollup jobs. If you try to stop a job that does not exist, an exception + * occurs. If you try to stop a job that is already stopped, nothing happens. 
* * @param fn * a function that initializes a builder to create the diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/ElasticsearchRollupClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/ElasticsearchRollupClient.java index f8b4584cc..021e3f572 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/ElasticsearchRollupClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/ElasticsearchRollupClient.java @@ -70,8 +70,36 @@ public ElasticsearchRollupClient withTransportOptions(@Nullable TransportOptions // ----- Endpoint: rollup.delete_job /** - * Deletes an existing rollup job. + * Delete a rollup job. + *

+ * A job must be stopped before it can be deleted. If you attempt to delete a + * started job, an error occurs. Similarly, if you attempt to delete a + * nonexistent job, an exception occurs. + *

+ * IMPORTANT: When you delete a job, you remove only the process that is + * actively monitoring and rolling up data. The API does not delete any + * previously rolled up data. This is by design; a user may wish to roll up a + * static data set. Because the data set is static, after it has been fully + * rolled up there is no need to keep the indexing rollup job around (as there + * will be no new data). Thus the job can be deleted, leaving behind the rolled + * up data for analysis. If you wish to also remove the rollup data and the + * rollup index contains the data for only a single job, you can delete the + * whole rollup index. If the rollup index stores data from several jobs, you + * must issue a delete-by-query that targets the rollup job's identifier in the + * rollup index. For example: * + *

+	 * POST my_rollup_index/_delete_by_query
+	 * {
+	 *   "query": {
+	 *     "term": {
+	 *       "_rollup.id": "the_rollup_job_id"
+	 *     }
+	 *   }
+	 * }
+	 * 
+	 * 
+ * * @see Documentation * on elastic.co @@ -85,8 +113,36 @@ public DeleteJobResponse deleteJob(DeleteJobRequest request) throws IOException, } /** - * Deletes an existing rollup job. + * Delete a rollup job. + *

+ * A job must be stopped before it can be deleted. If you attempt to delete a + * started job, an error occurs. Similarly, if you attempt to delete a + * nonexistent job, an exception occurs. + *

+ * IMPORTANT: When you delete a job, you remove only the process that is + * actively monitoring and rolling up data. The API does not delete any + * previously rolled up data. This is by design; a user may wish to roll up a + * static data set. Because the data set is static, after it has been fully + * rolled up there is no need to keep the indexing rollup job around (as there + * will be no new data). Thus the job can be deleted, leaving behind the rolled + * up data for analysis. If you wish to also remove the rollup data and the + * rollup index contains the data for only a single job, you can delete the + * whole rollup index. If the rollup index stores data from several jobs, you + * must issue a delete-by-query that targets the rollup job's identifier in the + * rollup index. For example: * + *

+	 * POST my_rollup_index/_delete_by_query
+	 * {
+	 *   "query": {
+	 *     "term": {
+	 *       "_rollup.id": "the_rollup_job_id"
+	 *     }
+	 *   }
+	 * }
+	 * 
+	 * 
+ * * @param fn * a function that initializes a builder to create the * {@link DeleteJobRequest} @@ -103,7 +159,13 @@ public final DeleteJobResponse deleteJob(Function + * NOTE: This API returns only active (both STARTED and + * STOPPED) jobs. If a job was created, ran for a while, then was + * deleted, the API does not return any details about it. For details about a + * historical rollup job, the rollup capabilities API may be more useful. * * @see Documentation @@ -118,7 +180,13 @@ public GetJobsResponse getJobs(GetJobsRequest request) throws IOException, Elast } /** - * Retrieves the configuration, stats, and status of rollup jobs. + * Get rollup job information. Get the configuration, stats, and status of + * rollup jobs. + *

+ * NOTE: This API returns only active (both STARTED and + * STOPPED) jobs. If a job was created, ran for a while, then was + * deleted, the API does not return any details about it. For details about a + * historical rollup job, the rollup capabilities API may be more useful. * * @param fn * a function that initializes a builder to create the @@ -134,7 +202,13 @@ public final GetJobsResponse getJobs(Function + * NOTE: This API returns only active (both STARTED and + * STOPPED) jobs. If a job was created, ran for a while, then was + * deleted, the API does not return any details about it. For details about a + * historical rollup job, the rollup capabilities API may be more useful. * * @see Documentation @@ -149,9 +223,21 @@ public GetJobsResponse getJobs() throws IOException, ElasticsearchException { // ----- Endpoint: rollup.get_rollup_caps /** - * Returns the capabilities of any rollup jobs that have been configured for a - * specific index or index pattern. - * + * Get the rollup job capabilities. Get the capabilities of any rollup jobs that + * have been configured for a specific index or index pattern. + *

+ * This API is useful because a rollup job is often configured to rollup only a + * subset of fields from the source index. Furthermore, only certain + * aggregations can be configured for various fields, leading to a limited + * subset of functionality depending on that configuration. This API enables you + * to inspect an index and determine: + *

+ * <ol>
+ * <li>Does this index have associated rollup data somewhere in the
+ * cluster?</li>
+ * <li>If yes to the first question, what fields were rolled up, what
+ * aggregations can be performed, and where does the data live?</li>
+ * </ol>
+ * * @see Documentation * on elastic.co @@ -166,9 +252,21 @@ public GetRollupCapsResponse getRollupCaps(GetRollupCapsRequest request) } /** - * Returns the capabilities of any rollup jobs that have been configured for a - * specific index or index pattern. - * + * Get the rollup job capabilities. Get the capabilities of any rollup jobs that + * have been configured for a specific index or index pattern. + *

+ * This API is useful because a rollup job is often configured to rollup only a + * subset of fields from the source index. Furthermore, only certain + * aggregations can be configured for various fields, leading to a limited + * subset of functionality depending on that configuration. This API enables you + * to inspect an index and determine: + *

+ * <ol>
+ * <li>Does this index have associated rollup data somewhere in the
+ * cluster?</li>
+ * <li>If yes to the first question, what fields were rolled up, what
+ * aggregations can be performed, and where does the data live?</li>
+ * </ol>
+ * * @param fn * a function that initializes a builder to create the * {@link GetRollupCapsRequest} @@ -184,9 +282,21 @@ public final GetRollupCapsResponse getRollupCaps( } /** - * Returns the capabilities of any rollup jobs that have been configured for a - * specific index or index pattern. - * + * Get the rollup job capabilities. Get the capabilities of any rollup jobs that + * have been configured for a specific index or index pattern. + *

+ * This API is useful because a rollup job is often configured to rollup only a + * subset of fields from the source index. Furthermore, only certain + * aggregations can be configured for various fields, leading to a limited + * subset of functionality depending on that configuration. This API enables you + * to inspect an index and determine: + *

+ * <ol>
+ * <li>Does this index have associated rollup data somewhere in the
+ * cluster?</li>
+ * <li>If yes to the first question, what fields were rolled up, what
+ * aggregations can be performed, and where does the data live?</li>
+ * </ol>
+ * * @see Documentation * on elastic.co @@ -200,9 +310,17 @@ public GetRollupCapsResponse getRollupCaps() throws IOException, ElasticsearchEx // ----- Endpoint: rollup.get_rollup_index_caps /** - * Returns the rollup capabilities of all jobs inside of a rollup index (for - * example, the index where rollup data is stored). - * + * Get the rollup index capabilities. Get the rollup capabilities of all jobs + * inside of a rollup index. A single rollup index may store the data for + * multiple rollup jobs and may have a variety of capabilities depending on + * those jobs. This API enables you to determine: + *
+ * <ul>
+ * <li>What jobs are stored in an index (or indices specified via a
+ * pattern)?</li>
+ * <li>What target indices were rolled up, what fields were used in those
+ * rollups, and what aggregations can be performed on each job?</li>
+ * </ul>
+ * * @see Documentation * on elastic.co @@ -217,9 +335,17 @@ public GetRollupIndexCapsResponse getRollupIndexCaps(GetRollupIndexCapsRequest r } /** - * Returns the rollup capabilities of all jobs inside of a rollup index (for - * example, the index where rollup data is stored). - * + * Get the rollup index capabilities. Get the rollup capabilities of all jobs + * inside of a rollup index. A single rollup index may store the data for + * multiple rollup jobs and may have a variety of capabilities depending on + * those jobs. This API enables you to determine: + *
+ * <ul>
+ * <li>What jobs are stored in an index (or indices specified via a
+ * pattern)?</li>
+ * <li>What target indices were rolled up, what fields were used in those
+ * rollups, and what aggregations can be performed on each job?</li>
+ * </ul>
+ * * @param fn * a function that initializes a builder to create the * {@link GetRollupIndexCapsRequest} @@ -237,7 +363,23 @@ public final GetRollupIndexCapsResponse getRollupIndexCaps( // ----- Endpoint: rollup.put_job /** - * Creates a rollup job. + * Create a rollup job. + *

+ * WARNING: From 8.15.0, calling this API in a cluster with no rollup usage will + * fail with a message about the deprecation and planned removal of rollup + * features. A cluster needs to contain either a rollup job or a rollup index in + * order for this API to be allowed to run. + *

+ * The rollup job configuration contains all the details about how the job + * should run, when it indexes documents, and what future queries will be able + * to run against the rollup index. + *

+ * There are three main sections to the job configuration: the logistical + * details about the job (for example, the cron schedule), the fields that are + * used for grouping, and what metrics to collect for each group. + *

+ * Jobs are created in a STOPPED state. You can start them with the + * start rollup jobs API. * * @see Documentation @@ -252,7 +394,23 @@ public PutJobResponse putJob(PutJobRequest request) throws IOException, Elastics } /** - * Creates a rollup job. + * Create a rollup job. + *

+ * WARNING: From 8.15.0, calling this API in a cluster with no rollup usage will + * fail with a message about the deprecation and planned removal of rollup + * features. A cluster needs to contain either a rollup job or a rollup index in + * order for this API to be allowed to run. + *

+ * The rollup job configuration contains all the details about how the job + * should run, when it indexes documents, and what future queries will be able + * to run against the rollup index. + *

+ * There are three main sections to the job configuration: the logistical + * details about the job (for example, the cron schedule), the fields that are + * used for grouping, and what metrics to collect for each group. + *

+ * Jobs are created in a STOPPED state. You can start them with the + * start rollup jobs API. * * @param fn * a function that initializes a builder to create the @@ -270,7 +428,11 @@ public final PutJobResponse putJob(FunctionDocumentation @@ -289,7 +451,11 @@ public RollupSearchResponse rollupSearch(RollupSearchRequ } /** - * Enables searching rolled-up data using the standard Query DSL. + * Search rolled-up data. The rollup search endpoint is needed because, + * internally, rolled-up documents utilize a different document structure than + * the original data. It rewrites standard Query DSL into a format that matches + * the rollup documents then takes the response and rewrites it back to what a + * client would expect given the original query. * * @param fn * a function that initializes a builder to create the @@ -306,7 +472,11 @@ public final RollupSearchResponse rollupSearch( } /** - * Enables searching rolled-up data using the standard Query DSL. + * Search rolled-up data. The rollup search endpoint is needed because, + * internally, rolled-up documents utilize a different document structure than + * the original data. It rewrites standard Query DSL into a format that matches + * the rollup documents then takes the response and rewrites it back to what a + * client would expect given the original query. * * @see Documentation @@ -325,7 +495,11 @@ public RollupSearchResponse rollupSearch(RollupSearchRequ } /** - * Enables searching rolled-up data using the standard Query DSL. + * Search rolled-up data. The rollup search endpoint is needed because, + * internally, rolled-up documents utilize a different document structure than + * the original data. It rewrites standard Query DSL into a format that matches + * the rollup documents then takes the response and rewrites it back to what a + * client would expect given the original query. 
* * @param fn * a function that initializes a builder to create the @@ -344,7 +518,9 @@ public final RollupSearchResponse rollupSearch( // ----- Endpoint: rollup.start_job /** - * Starts an existing, stopped rollup job. + * Start rollup jobs. If you try to start a job that does not exist, an + * exception occurs. If you try to start a job that is already started, nothing + * happens. * * @see Documentation @@ -359,7 +535,9 @@ public StartJobResponse startJob(StartJobRequest request) throws IOException, El } /** - * Starts an existing, stopped rollup job. + * Start rollup jobs. If you try to start a job that does not exist, an + * exception occurs. If you try to start a job that is already started, nothing + * happens. * * @param fn * a function that initializes a builder to create the @@ -377,7 +555,8 @@ public final StartJobResponse startJob(FunctionDocumentation @@ -392,7 +571,8 @@ public StopJobResponse stopJob(StopJobRequest request) throws IOException, Elast } /** - * Stops an existing, started rollup job. + * Stop rollup jobs. If you try to stop a job that does not exist, an exception + * occurs. If you try to stop a job that is already stopped, nothing happens. * * @param fn * a function that initializes a builder to create the diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/GetJobsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/GetJobsRequest.java index 29f9fe583..79ed80012 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/GetJobsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/GetJobsRequest.java @@ -55,7 +55,13 @@ // typedef: rollup.get_jobs.Request /** - * Retrieves the configuration, stats, and status of rollup jobs. + * Get rollup job information. Get the configuration, stats, and status of + * rollup jobs. + *

+ * NOTE: This API returns only active (both STARTED and + * STOPPED) jobs. If a job was created, ran for a while, then was + * deleted, the API does not return any details about it. For details about a + * historical rollup job, the rollup capabilities API may be more useful. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/GetRollupCapsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/GetRollupCapsRequest.java index 7f1a32ac5..43f2c701d 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/GetRollupCapsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/GetRollupCapsRequest.java @@ -55,9 +55,21 @@ // typedef: rollup.get_rollup_caps.Request /** - * Returns the capabilities of any rollup jobs that have been configured for a - * specific index or index pattern. - * + * Get the rollup job capabilities. Get the capabilities of any rollup jobs that + * have been configured for a specific index or index pattern. + *

+ * This API is useful because a rollup job is often configured to rollup only a + * subset of fields from the source index. Furthermore, only certain + * aggregations can be configured for various fields, leading to a limited + * subset of functionality depending on that configuration. This API enables you + * to inspect an index and determine: + *

+ * <ol>
+ * <li>Does this index have associated rollup data somewhere in the
+ * cluster?</li>
+ * <li>If yes to the first question, what fields were rolled up, what
+ * aggregations can be performed, and where does the data live?</li>
+ * </ol>
+ * * @see API * specification */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/GetRollupIndexCapsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/GetRollupIndexCapsRequest.java index d4f711672..72aef2df2 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/GetRollupIndexCapsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/GetRollupIndexCapsRequest.java @@ -58,9 +58,17 @@ // typedef: rollup.get_rollup_index_caps.Request /** - * Returns the rollup capabilities of all jobs inside of a rollup index (for - * example, the index where rollup data is stored). - * + * Get the rollup index capabilities. Get the rollup capabilities of all jobs + * inside of a rollup index. A single rollup index may store the data for + * multiple rollup jobs and may have a variety of capabilities depending on + * those jobs. This API enables you to determine: + *
+ * <ul>
+ * <li>What jobs are stored in an index (or indices specified via a
+ * pattern)?</li>
+ * <li>What target indices were rolled up, what fields were used in those
+ * rollups, and what aggregations can be performed on each job?</li>
+ * </ul>
+ * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/PutJobRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/PutJobRequest.java index f556d0ce9..5e059d002 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/PutJobRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/PutJobRequest.java @@ -61,7 +61,23 @@ // typedef: rollup.put_job.Request /** - * Creates a rollup job. + * Create a rollup job. + *

+ * WARNING: From 8.15.0, calling this API in a cluster with no rollup usage will + * fail with a message about the deprecation and planned removal of rollup + * features. A cluster needs to contain either a rollup job or a rollup index in + * order for this API to be allowed to run. + *

+ * The rollup job configuration contains all the details about how the job + * should run, when it indexes documents, and what future queries will be able + * to run against the rollup index. + *

+ * There are three main sections to the job configuration: the logistical + * details about the job (for example, the cron schedule), the fields that are + * used for grouping, and what metrics to collect for each group. + *

+ * Jobs are created in a STOPPED state. You can start them with the + * start rollup jobs API. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/RollupSearchRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/RollupSearchRequest.java index 0eab9af1c..0d9cc55f8 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/RollupSearchRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/RollupSearchRequest.java @@ -63,7 +63,11 @@ // typedef: rollup.rollup_search.Request /** - * Enables searching rolled-up data using the standard Query DSL. + * Search rolled-up data. The rollup search endpoint is needed because, + * internally, rolled-up documents utilize a different document structure than + * the original data. It rewrites standard Query DSL into a format that matches + * the rollup documents then takes the response and rewrites it back to what a + * client would expect given the original query. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/StartJobRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/StartJobRequest.java index dc63e9189..4d44c46ff 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/StartJobRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/StartJobRequest.java @@ -56,7 +56,9 @@ // typedef: rollup.start_job.Request /** - * Starts an existing, stopped rollup job. + * Start rollup jobs. If you try to start a job that does not exist, an + * exception occurs. If you try to start a job that is already started, nothing + * happens. 
* * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/StopJobRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/StopJobRequest.java index 08eb7d861..8df95af37 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/StopJobRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/rollup/StopJobRequest.java @@ -57,7 +57,8 @@ // typedef: rollup.stop_job.Request /** - * Stops an existing, started rollup job. + * Stop rollup jobs. If you try to stop a job that does not exist, an exception + * occurs. If you try to stop a job that is already stopped, nothing happens. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/CacheStatsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/CacheStatsRequest.java index c807e9642..0d8d16b7c 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/CacheStatsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/CacheStatsRequest.java @@ -58,7 +58,8 @@ // typedef: searchable_snapshots.cache_stats.Request /** - * Retrieve node-level cache statistics about searchable snapshots. + * Get cache statistics. Get statistics about the shared cache for partially + * mounted indices. 
* * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/ClearCacheRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/ClearCacheRequest.java index 771e420e7..d5362ffb4 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/ClearCacheRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/ClearCacheRequest.java @@ -59,7 +59,8 @@ // typedef: searchable_snapshots.clear_cache.Request /** - * Clear the cache of searchable snapshots. + * Clear the cache. Clear indices and data streams from the shared cache for + * partially mounted indices. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/ElasticsearchSearchableSnapshotsAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/ElasticsearchSearchableSnapshotsAsyncClient.java index 175f5f222..338f1d307 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/ElasticsearchSearchableSnapshotsAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/ElasticsearchSearchableSnapshotsAsyncClient.java @@ -71,7 +71,8 @@ public ElasticsearchSearchableSnapshotsAsyncClient withTransportOptions( // ----- Endpoint: searchable_snapshots.cache_stats /** - * Retrieve node-level cache statistics about searchable snapshots. + * Get cache statistics. Get statistics about the shared cache for partially + * mounted indices. * * @see Documentation @@ -86,7 +87,8 @@ public CompletableFuture cacheStats(CacheStatsRequest reques } /** - * Retrieve node-level cache statistics about searchable snapshots. + * Get cache statistics. Get statistics about the shared cache for partially + * mounted indices. 
* * @param fn * a function that initializes a builder to create the @@ -102,7 +104,8 @@ public final CompletableFuture cacheStats( } /** - * Retrieve node-level cache statistics about searchable snapshots. + * Get cache statistics. Get statistics about the shared cache for partially + * mounted indices. * * @see Documentation @@ -117,7 +120,8 @@ public CompletableFuture cacheStats() { // ----- Endpoint: searchable_snapshots.clear_cache /** - * Clear the cache of searchable snapshots. + * Clear the cache. Clear indices and data streams from the shared cache for + * partially mounted indices. * * @see Documentation @@ -132,7 +136,8 @@ public CompletableFuture clearCache(ClearCacheRequest reques } /** - * Clear the cache of searchable snapshots. + * Clear the cache. Clear indices and data streams from the shared cache for + * partially mounted indices. * * @param fn * a function that initializes a builder to create the @@ -148,7 +153,8 @@ public final CompletableFuture clearCache( } /** - * Clear the cache of searchable snapshots. + * Clear the cache. Clear indices and data streams from the shared cache for + * partially mounted indices. * * @see Documentation @@ -163,7 +169,9 @@ public CompletableFuture clearCache() { // ----- Endpoint: searchable_snapshots.mount /** - * Mount a snapshot as a searchable index. + * Mount a snapshot. Mount a snapshot as a searchable snapshot index. Do not use + * this API for snapshots managed by index lifecycle management (ILM). Manually + * mounting ILM-managed snapshots can interfere with ILM processes. * * @see Documentation @@ -178,7 +186,9 @@ public CompletableFuture mount(MountRequest request) { } /** - * Mount a snapshot as a searchable index. + * Mount a snapshot. Mount a snapshot as a searchable snapshot index. Do not use + * this API for snapshots managed by index lifecycle management (ILM). Manually + * mounting ILM-managed snapshots can interfere with ILM processes. 
* * @param fn * a function that initializes a builder to create the @@ -196,7 +206,7 @@ public final CompletableFuture mount( // ----- Endpoint: searchable_snapshots.stats /** - * Retrieve shard-level statistics about searchable snapshots. + * Get searchable snapshot statistics. * * @see Documentation @@ -211,7 +221,7 @@ public CompletableFuture stats(SearchableSnaps } /** - * Retrieve shard-level statistics about searchable snapshots. + * Get searchable snapshot statistics. * * @param fn * a function that initializes a builder to create the @@ -227,7 +237,7 @@ public final CompletableFuture stats( } /** - * Retrieve shard-level statistics about searchable snapshots. + * Get searchable snapshot statistics. * * @see Documentation diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/ElasticsearchSearchableSnapshotsClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/ElasticsearchSearchableSnapshotsClient.java index 3280a28f0..6f57e841e 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/ElasticsearchSearchableSnapshotsClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/ElasticsearchSearchableSnapshotsClient.java @@ -71,7 +71,8 @@ public ElasticsearchSearchableSnapshotsClient withTransportOptions(@Nullable Tra // ----- Endpoint: searchable_snapshots.cache_stats /** - * Retrieve node-level cache statistics about searchable snapshots. + * Get cache statistics. Get statistics about the shared cache for partially + * mounted indices. * * @see Documentation @@ -86,7 +87,8 @@ public CacheStatsResponse cacheStats(CacheStatsRequest request) throws IOExcepti } /** - * Retrieve node-level cache statistics about searchable snapshots. + * Get cache statistics. Get statistics about the shared cache for partially + * mounted indices. 
* * @param fn * a function that initializes a builder to create the @@ -102,7 +104,8 @@ public final CacheStatsResponse cacheStats(FunctionDocumentation @@ -117,7 +120,8 @@ public CacheStatsResponse cacheStats() throws IOException, ElasticsearchExceptio // ----- Endpoint: searchable_snapshots.clear_cache /** - * Clear the cache of searchable snapshots. + * Clear the cache. Clear indices and data streams from the shared cache for + * partially mounted indices. * * @see Documentation @@ -132,7 +136,8 @@ public ClearCacheResponse clearCache(ClearCacheRequest request) throws IOExcepti } /** - * Clear the cache of searchable snapshots. + * Clear the cache. Clear indices and data streams from the shared cache for + * partially mounted indices. * * @param fn * a function that initializes a builder to create the @@ -148,7 +153,8 @@ public final ClearCacheResponse clearCache(FunctionDocumentation @@ -163,7 +169,9 @@ public ClearCacheResponse clearCache() throws IOException, ElasticsearchExceptio // ----- Endpoint: searchable_snapshots.mount /** - * Mount a snapshot as a searchable index. + * Mount a snapshot. Mount a snapshot as a searchable snapshot index. Do not use + * this API for snapshots managed by index lifecycle management (ILM). Manually + * mounting ILM-managed snapshots can interfere with ILM processes. * * @see Documentation @@ -178,7 +186,9 @@ public MountResponse mount(MountRequest request) throws IOException, Elasticsear } /** - * Mount a snapshot as a searchable index. + * Mount a snapshot. Mount a snapshot as a searchable snapshot index. Do not use + * this API for snapshots managed by index lifecycle management (ILM). Manually + * mounting ILM-managed snapshots can interfere with ILM processes. 
* * @param fn * a function that initializes a builder to create the @@ -196,7 +206,7 @@ public final MountResponse mount(FunctionDocumentation @@ -212,7 +222,7 @@ public SearchableSnapshotsStatsResponse stats(SearchableSnapshotsStatsRequest re } /** - * Retrieve shard-level statistics about searchable snapshots. + * Get searchable snapshot statistics. * * @param fn * a function that initializes a builder to create the @@ -229,7 +239,7 @@ public final SearchableSnapshotsStatsResponse stats( } /** - * Retrieve shard-level statistics about searchable snapshots. + * Get searchable snapshot statistics. * * @see Documentation diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/MountRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/MountRequest.java index aa2964a80..1cd7ce354 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/MountRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/MountRequest.java @@ -61,7 +61,9 @@ // typedef: searchable_snapshots.mount.Request /** - * Mount a snapshot as a searchable index. + * Mount a snapshot. Mount a snapshot as a searchable snapshot index. Do not use + * this API for snapshots managed by index lifecycle management (ILM). Manually + * mounting ILM-managed snapshots can interfere with ILM processes. 
* * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/SearchableSnapshotsStatsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/SearchableSnapshotsStatsRequest.java index 33630a60d..7b37b9e90 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/SearchableSnapshotsStatsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/searchable_snapshots/SearchableSnapshotsStatsRequest.java @@ -57,7 +57,7 @@ // typedef: searchable_snapshots.stats.Request /** - * Retrieve shard-level statistics about searchable snapshots. + * Get searchable snapshot statistics. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/DeleteNodeRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/DeleteNodeRequest.java index cadea1b06..33686353f 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/DeleteNodeRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/DeleteNodeRequest.java @@ -56,8 +56,17 @@ // typedef: shutdown.delete_node.Request /** - * Removes a node from the shutdown list. Designed for indirect use by ECE/ESS - * and ECK. Direct use is not supported. + * Cancel node shutdown preparations. Remove a node from the shutdown list so it + * can resume normal operations. You must explicitly clear the shutdown request + * when a node rejoins the cluster or when a node has permanently left the + * cluster. Shutdown requests are never removed automatically by Elasticsearch. + *

+ * NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic + * Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not + * supported. + *

+ * If the operator privileges feature is enabled, you must be an operator to use + * this API. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/ElasticsearchShutdownAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/ElasticsearchShutdownAsyncClient.java index e32836ca0..c9c33f601 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/ElasticsearchShutdownAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/ElasticsearchShutdownAsyncClient.java @@ -70,8 +70,17 @@ public ElasticsearchShutdownAsyncClient withTransportOptions(@Nullable Transport // ----- Endpoint: shutdown.delete_node /** - * Removes a node from the shutdown list. Designed for indirect use by ECE/ESS - * and ECK. Direct use is not supported. + * Cancel node shutdown preparations. Remove a node from the shutdown list so it + * can resume normal operations. You must explicitly clear the shutdown request + * when a node rejoins the cluster or when a node has permanently left the + * cluster. Shutdown requests are never removed automatically by Elasticsearch. + *

+ * NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic + * Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not + * supported. + *

+ * If the operator privileges feature is enabled, you must be an operator to use + * this API. * * @see Documentation @@ -86,8 +95,17 @@ public CompletableFuture deleteNode(DeleteNodeRequest reques } /** - * Removes a node from the shutdown list. Designed for indirect use by ECE/ESS - * and ECK. Direct use is not supported. + * Cancel node shutdown preparations. Remove a node from the shutdown list so it + * can resume normal operations. You must explicitly clear the shutdown request + * when a node rejoins the cluster or when a node has permanently left the + * cluster. Shutdown requests are never removed automatically by Elasticsearch. + *

+ * NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic + * Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not + * supported. + *

+ * If the operator privileges feature is enabled, you must be an operator to use + * this API. * * @param fn * a function that initializes a builder to create the @@ -105,9 +123,18 @@ public final CompletableFuture deleteNode( // ----- Endpoint: shutdown.get_node /** - * Retrieve status of a node or nodes that are currently marked as shutting - * down. Designed for indirect use by ECE/ESS and ECK. Direct use is not + * Get the shutdown status. + *

+ * Get information about nodes that are ready to be shut down, have shut down + * preparations still in progress, or have stalled. The API returns status + * information for each part of the shut down process. + *

+ * NOTE: This feature is designed for indirect use by Elasticsearch Service, + * Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not * supported. + *

+ * If the operator privileges feature is enabled, you must be an operator to use + * this API. * * @see Documentation @@ -122,9 +149,18 @@ public CompletableFuture getNode(GetNodeRequest request) { } /** - * Retrieve status of a node or nodes that are currently marked as shutting - * down. Designed for indirect use by ECE/ESS and ECK. Direct use is not + * Get the shutdown status. + *

+ * Get information about nodes that are ready to be shut down, have shut down + * preparations still in progress, or have stalled. The API returns status + * information for each part of the shut down process. + *

+ * NOTE: This feature is designed for indirect use by Elasticsearch Service, + * Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not * supported. + *

+ * If the operator privileges feature is enabled, you must be an operator to use + * this API. * * @param fn * a function that initializes a builder to create the @@ -140,9 +176,18 @@ public final CompletableFuture getNode( } /** - * Retrieve status of a node or nodes that are currently marked as shutting - * down. Designed for indirect use by ECE/ESS and ECK. Direct use is not + * Get the shutdown status. + *

+ * Get information about nodes that are ready to be shut down, have shut down + * preparations still in progress, or have stalled. The API returns status + * information for each part of the shut down process. + *

+ * NOTE: This feature is designed for indirect use by Elasticsearch Service, + * Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not * supported. + *

+ * If the operator privileges feature is enabled, you must be an operator to use + * this API. * * @see Documentation @@ -157,8 +202,26 @@ public CompletableFuture getNode() { // ----- Endpoint: shutdown.put_node /** - * Adds a node to be shut down. Designed for indirect use by ECE/ESS and ECK. - * Direct use is not supported. + * Prepare a node to be shut down. + *

+ * NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic + * Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not + * supported. + *

+ * If the operator privileges feature is enabled, you must be an operator to use + * this API. + *

+ * The API migrates ongoing tasks and index shards to other nodes as needed to + * prepare a node to be restarted or shut down and removed from the cluster. + * This ensures that Elasticsearch can be stopped safely with minimal disruption + * to the cluster. + *

+ * You must specify the type of shutdown: restart, + * remove, or replace. If a node is already being + * prepared for shutdown, you can use this API to change the shutdown type. + *

+ * IMPORTANT: This API does NOT terminate the Elasticsearch process. Monitor the + * node shutdown status to determine when it is safe to stop Elasticsearch. * * @see Documentation @@ -173,8 +236,26 @@ public CompletableFuture putNode(PutNodeRequest request) { } /** - * Adds a node to be shut down. Designed for indirect use by ECE/ESS and ECK. - * Direct use is not supported. + * Prepare a node to be shut down. + *

+ * NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic + * Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not + * supported. + *

+ * If the operator privileges feature is enabled, you must be an operator to use + * this API. + *

+ * The API migrates ongoing tasks and index shards to other nodes as needed to + * prepare a node to be restarted or shut down and removed from the cluster. + * This ensures that Elasticsearch can be stopped safely with minimal disruption + * to the cluster. + *

+ * You must specify the type of shutdown: restart, + * remove, or replace. If a node is already being + * prepared for shutdown, you can use this API to change the shutdown type. + *

+ * IMPORTANT: This API does NOT terminate the Elasticsearch process. Monitor the + * node shutdown status to determine when it is safe to stop Elasticsearch. * * @param fn * a function that initializes a builder to create the diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/ElasticsearchShutdownClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/ElasticsearchShutdownClient.java index d697c22ee..840334b97 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/ElasticsearchShutdownClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/ElasticsearchShutdownClient.java @@ -68,8 +68,17 @@ public ElasticsearchShutdownClient withTransportOptions(@Nullable TransportOptio // ----- Endpoint: shutdown.delete_node /** - * Removes a node from the shutdown list. Designed for indirect use by ECE/ESS - * and ECK. Direct use is not supported. + * Cancel node shutdown preparations. Remove a node from the shutdown list so it + * can resume normal operations. You must explicitly clear the shutdown request + * when a node rejoins the cluster or when a node has permanently left the + * cluster. Shutdown requests are never removed automatically by Elasticsearch. + *

+ * NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic + * Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not + * supported. + *

+ * If the operator privileges feature is enabled, you must be an operator to use + * this API. * * @see Documentation @@ -84,8 +93,17 @@ public DeleteNodeResponse deleteNode(DeleteNodeRequest request) throws IOExcepti } /** - * Removes a node from the shutdown list. Designed for indirect use by ECE/ESS - * and ECK. Direct use is not supported. + * Cancel node shutdown preparations. Remove a node from the shutdown list so it + * can resume normal operations. You must explicitly clear the shutdown request + * when a node rejoins the cluster or when a node has permanently left the + * cluster. Shutdown requests are never removed automatically by Elasticsearch. + *

+ * NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic + * Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not + * supported. + *

+ * If the operator privileges feature is enabled, you must be an operator to use + * this API. * * @param fn * a function that initializes a builder to create the @@ -103,9 +121,18 @@ public final DeleteNodeResponse deleteNode(Function + * Get information about nodes that are ready to be shut down, have shut down + * preparations still in progress, or have stalled. The API returns status + * information for each part of the shut down process. + *

+ * NOTE: This feature is designed for indirect use by Elasticsearch Service, + * Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not * supported. + *

+ * If the operator privileges feature is enabled, you must be an operator to use + * this API. * * @see Documentation @@ -120,9 +147,18 @@ public GetNodeResponse getNode(GetNodeRequest request) throws IOException, Elast } /** - * Retrieve status of a node or nodes that are currently marked as shutting - * down. Designed for indirect use by ECE/ESS and ECK. Direct use is not + * Get the shutdown status. + *

+ * Get information about nodes that are ready to be shut down, have shut down + * preparations still in progress, or have stalled. The API returns status + * information for each part of the shut down process. + *

+ * NOTE: This feature is designed for indirect use by Elasticsearch Service, + * Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not * supported. + *

+ * If the operator privileges feature is enabled, you must be an operator to use + * this API. * * @param fn * a function that initializes a builder to create the @@ -138,9 +174,18 @@ public final GetNodeResponse getNode(Function + * Get information about nodes that are ready to be shut down, have shut down + * preparations still in progress, or have stalled. The API returns status + * information for each part of the shut down process. + *

+ * NOTE: This feature is designed for indirect use by Elasticsearch Service, + * Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not * supported. + *

+ * If the operator privileges feature is enabled, you must be an operator to use + * this API. * * @see Documentation @@ -155,8 +200,26 @@ public GetNodeResponse getNode() throws IOException, ElasticsearchException { // ----- Endpoint: shutdown.put_node /** - * Adds a node to be shut down. Designed for indirect use by ECE/ESS and ECK. - * Direct use is not supported. + * Prepare a node to be shut down. + *

+ * NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic + * Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not + * supported. + *

+ * If the operator privileges feature is enabled, you must be an operator to use + * this API. + *

+ * The API migrates ongoing tasks and index shards to other nodes as needed to + * prepare a node to be restarted or shut down and removed from the cluster. + * This ensures that Elasticsearch can be stopped safely with minimal disruption + * to the cluster. + *

+ * You must specify the type of shutdown: restart, + * remove, or replace. If a node is already being + * prepared for shutdown, you can use this API to change the shutdown type. + *

+ * IMPORTANT: This API does NOT terminate the Elasticsearch process. Monitor the + * node shutdown status to determine when it is safe to stop Elasticsearch. * * @see Documentation @@ -171,8 +234,26 @@ public PutNodeResponse putNode(PutNodeRequest request) throws IOException, Elast } /** - * Adds a node to be shut down. Designed for indirect use by ECE/ESS and ECK. - * Direct use is not supported. + * Prepare a node to be shut down. + *

+ * NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic + * Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not + * supported. + *

+ * If the operator privileges feature is enabled, you must be an operator to use + * this API. + *

+ * The API migrates ongoing tasks and index shards to other nodes as needed to + * prepare a node to be restarted or shut down and removed from the cluster. + * This ensures that Elasticsearch can be stopped safely with minimal disruption + * to the cluster. + *

+ * You must specify the type of shutdown: restart, + * remove, or replace. If a node is already being + * prepared for shutdown, you can use this API to change the shutdown type. + *

+ * IMPORTANT: This API does NOT terminate the Elasticsearch process. Monitor the + * node shutdown status to determine when it is safe to stop Elasticsearch. * * @param fn * a function that initializes a builder to create the diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/GetNodeRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/GetNodeRequest.java index 42ee075c2..1c3dee599 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/GetNodeRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/GetNodeRequest.java @@ -58,9 +58,18 @@ // typedef: shutdown.get_node.Request /** - * Retrieve status of a node or nodes that are currently marked as shutting - * down. Designed for indirect use by ECE/ESS and ECK. Direct use is not + * Get the shutdown status. + *

+ * Get information about nodes that are ready to be shut down, have shut down + * preparations still in progress, or have stalled. The API returns status + * information for each part of the shut down process. + *

+ * NOTE: This feature is designed for indirect use by Elasticsearch Service, + * Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not * supported. + *

+ * If the operator privileges feature is enabled, you must be an operator to use + * this API. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/PutNodeRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/PutNodeRequest.java index 0eaa0a97b..af64a69ab 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/PutNodeRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/shutdown/PutNodeRequest.java @@ -58,8 +58,26 @@ // typedef: shutdown.put_node.Request /** - * Adds a node to be shut down. Designed for indirect use by ECE/ESS and ECK. - * Direct use is not supported. + * Prepare a node to be shut down. + *

+ * NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic + * Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not + * supported. + *

+ * If the operator privileges feature is enabled, you must be an operator to use + * this API. + *

+ * The API migrates ongoing tasks and index shards to other nodes as needed to + * prepare a node to be restarted or shut down and removed from the cluster. + * This ensures that Elasticsearch can be stopped safely with minimal disruption + * to the cluster. + *

+ * You must specify the type of shutdown: restart, + * remove, or replace. If a node is already being + * prepared for shutdown, you can use this API to change the shutdown type. + *

+ * IMPORTANT: This API does NOT terminate the Elasticsearch process. Monitor the + * node shutdown status to determine when it is safe to stop Elasticsearch. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/DeleteLifecycleRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/DeleteLifecycleRequest.java index ef16269fb..c49fcccfc 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/DeleteLifecycleRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/DeleteLifecycleRequest.java @@ -56,7 +56,9 @@ // typedef: slm.delete_lifecycle.Request /** - * Deletes an existing snapshot lifecycle policy. + * Delete a policy. Delete a snapshot lifecycle policy definition. This + * operation prevents any future snapshots from being taken but does not cancel + * in-progress snapshots or remove previously-taken snapshots. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ElasticsearchSlmAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ElasticsearchSlmAsyncClient.java index 43a51bc51..85e081f42 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ElasticsearchSlmAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ElasticsearchSlmAsyncClient.java @@ -67,7 +67,9 @@ public ElasticsearchSlmAsyncClient withTransportOptions(@Nullable TransportOptio // ----- Endpoint: slm.delete_lifecycle /** - * Deletes an existing snapshot lifecycle policy. + * Delete a policy. Delete a snapshot lifecycle policy definition. This + * operation prevents any future snapshots from being taken but does not cancel + * in-progress snapshots or remove previously-taken snapshots. * * @see Documentation @@ -82,7 +84,9 @@ public CompletableFuture deleteLifecycle(DeleteLifecycl } /** - * Deletes an existing snapshot lifecycle policy. 
+ * Delete a policy. Delete a snapshot lifecycle policy definition. This + * operation prevents any future snapshots from being taken but does not cancel + * in-progress snapshots or remove previously-taken snapshots. * * @param fn * a function that initializes a builder to create the @@ -100,8 +104,10 @@ public final CompletableFuture deleteLifecycle( // ----- Endpoint: slm.execute_lifecycle /** - * Immediately creates a snapshot according to the lifecycle policy, without - * waiting for the scheduled time. + * Run a policy. Immediately create a snapshot according to the snapshot + * lifecycle policy without waiting for the scheduled time. The snapshot policy + * is normally applied according to its schedule, but you might want to manually + * run a policy before performing an upgrade or other maintenance. * * @see Documentation @@ -116,8 +122,10 @@ public CompletableFuture executeLifecycle(ExecuteLifec } /** - * Immediately creates a snapshot according to the lifecycle policy, without - * waiting for the scheduled time. + * Run a policy. Immediately create a snapshot according to the snapshot + * lifecycle policy without waiting for the scheduled time. The snapshot policy + * is normally applied according to its schedule, but you might want to manually + * run a policy before performing an upgrade or other maintenance. * * @param fn * a function that initializes a builder to create the @@ -135,8 +143,10 @@ public final CompletableFuture executeLifecycle( // ----- Endpoint: slm.execute_retention /** - * Deletes any snapshots that are expired according to the policy's retention - * rules. + * Run a retention policy. Manually apply the retention policy to force + * immediate removal of snapshots that are expired according to the snapshot + * lifecycle policy retention rules. The retention policy is normally applied + * according to its schedule. 
* * @see Documentation @@ -150,8 +160,8 @@ public CompletableFuture executeRetention() { // ----- Endpoint: slm.get_lifecycle /** - * Retrieves one or more snapshot lifecycle policy definitions and information - * about the latest snapshot attempts. + * Get policy information. Get snapshot lifecycle policy definitions and + * information about the latest snapshot attempts. * * @see Documentation @@ -166,8 +176,8 @@ public CompletableFuture getLifecycle(GetLifecycleRequest } /** - * Retrieves one or more snapshot lifecycle policy definitions and information - * about the latest snapshot attempts. + * Get policy information. Get snapshot lifecycle policy definitions and + * information about the latest snapshot attempts. * * @param fn * a function that initializes a builder to create the @@ -183,8 +193,8 @@ public final CompletableFuture getLifecycle( } /** - * Retrieves one or more snapshot lifecycle policy definitions and information - * about the latest snapshot attempts. + * Get policy information. Get snapshot lifecycle policy definitions and + * information about the latest snapshot attempts. * * @see Documentation @@ -199,8 +209,8 @@ public CompletableFuture getLifecycle() { // ----- Endpoint: slm.get_stats /** - * Returns global and policy-level statistics about actions taken by snapshot - * lifecycle management. + * Get snapshot lifecycle management statistics. Get global and policy-level + * statistics about actions taken by snapshot lifecycle management. * * @see Documentation @@ -214,7 +224,7 @@ public CompletableFuture getStats() { // ----- Endpoint: slm.get_status /** - * Retrieves the status of snapshot lifecycle management (SLM). + * Get the snapshot lifecycle management status. * * @see Documentation @@ -228,7 +238,9 @@ public CompletableFuture getStatus() { // ----- Endpoint: slm.put_lifecycle /** - * Creates or updates a snapshot lifecycle policy. + * Create or update a policy. Create or update a snapshot lifecycle policy. 
If + * the policy already exists, this request increments the policy version. Only + * the latest version of a policy is stored. * * @see Documentation @@ -243,7 +255,9 @@ public CompletableFuture putLifecycle(PutLifecycleRequest } /** - * Creates or updates a snapshot lifecycle policy. + * Create or update a policy. Create or update a snapshot lifecycle policy. If + * the policy already exists, this request increments the policy version. Only + * the latest version of a policy is stored. * * @param fn * a function that initializes a builder to create the @@ -261,7 +275,9 @@ public final CompletableFuture putLifecycle( // ----- Endpoint: slm.start /** - * Turns on snapshot lifecycle management (SLM). + * Start snapshot lifecycle management. Snapshot lifecycle management (SLM) + * starts automatically when a cluster is formed. Manually starting SLM is + * necessary only if it has been stopped using the stop SLM API. * * @see Documentation @@ -275,7 +291,17 @@ public CompletableFuture start() { // ----- Endpoint: slm.stop /** - * Turns off snapshot lifecycle management (SLM). + * Stop snapshot lifecycle management. Stop all snapshot lifecycle management + * (SLM) operations and the SLM plugin. This API is useful when you are + * performing maintenance on a cluster and need to prevent SLM from performing + * any actions on your data streams or indices. Stopping SLM does not stop any + * snapshots that are in progress. You can manually trigger snapshots with the + * run snapshot lifecycle policy API even if SLM is stopped. + *

+ * The API returns a response as soon as the request is acknowledged, but the + * plugin might continue to run until in-progress operations complete and it can + * be safely stopped. Use the get snapshot lifecycle management status API to + * see if SLM is running. * * @see Documentation diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ElasticsearchSlmClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ElasticsearchSlmClient.java index a6cd5188e..ca233fa6d 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ElasticsearchSlmClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ElasticsearchSlmClient.java @@ -68,7 +68,9 @@ public ElasticsearchSlmClient withTransportOptions(@Nullable TransportOptions tr // ----- Endpoint: slm.delete_lifecycle /** - * Deletes an existing snapshot lifecycle policy. + * Delete a policy. Delete a snapshot lifecycle policy definition. This + * operation prevents any future snapshots from being taken but does not cancel + * in-progress snapshots or remove previously-taken snapshots. * * @see Documentation @@ -84,7 +86,9 @@ public DeleteLifecycleResponse deleteLifecycle(DeleteLifecycleRequest request) } /** - * Deletes an existing snapshot lifecycle policy. + * Delete a policy. Delete a snapshot lifecycle policy definition. This + * operation prevents any future snapshots from being taken but does not cancel + * in-progress snapshots or remove previously-taken snapshots. * * @param fn * a function that initializes a builder to create the @@ -103,8 +107,10 @@ public final DeleteLifecycleResponse deleteLifecycle( // ----- Endpoint: slm.execute_lifecycle /** - * Immediately creates a snapshot according to the lifecycle policy, without - * waiting for the scheduled time. + * Run a policy. Immediately create a snapshot according to the snapshot + * lifecycle policy without waiting for the scheduled time. 
The snapshot policy + * is normally applied according to its schedule, but you might want to manually + * run a policy before performing an upgrade or other maintenance. * * @see Documentation @@ -120,8 +126,10 @@ public ExecuteLifecycleResponse executeLifecycle(ExecuteLifecycleRequest request } /** - * Immediately creates a snapshot according to the lifecycle policy, without - * waiting for the scheduled time. + * Run a policy. Immediately create a snapshot according to the snapshot + * lifecycle policy without waiting for the scheduled time. The snapshot policy + * is normally applied according to its schedule, but you might want to manually + * run a policy before performing an upgrade or other maintenance. * * @param fn * a function that initializes a builder to create the @@ -140,8 +148,10 @@ public final ExecuteLifecycleResponse executeLifecycle( // ----- Endpoint: slm.execute_retention /** - * Deletes any snapshots that are expired according to the policy's retention - * rules. + * Run a retention policy. Manually apply the retention policy to force + * immediate removal of snapshots that are expired according to the snapshot + * lifecycle policy retention rules. The retention policy is normally applied + * according to its schedule. * * @see Documentation @@ -155,8 +165,8 @@ public ExecuteRetentionResponse executeRetention() throws IOException, Elasticse // ----- Endpoint: slm.get_lifecycle /** - * Retrieves one or more snapshot lifecycle policy definitions and information - * about the latest snapshot attempts. + * Get policy information. Get snapshot lifecycle policy definitions and + * information about the latest snapshot attempts. * * @see Documentation @@ -171,8 +181,8 @@ public GetLifecycleResponse getLifecycle(GetLifecycleRequest request) throws IOE } /** - * Retrieves one or more snapshot lifecycle policy definitions and information - * about the latest snapshot attempts. + * Get policy information. 
Get snapshot lifecycle policy definitions and + * information about the latest snapshot attempts. * * @param fn * a function that initializes a builder to create the @@ -189,8 +199,8 @@ public final GetLifecycleResponse getLifecycle( } /** - * Retrieves one or more snapshot lifecycle policy definitions and information - * about the latest snapshot attempts. + * Get policy information. Get snapshot lifecycle policy definitions and + * information about the latest snapshot attempts. * * @see Documentation @@ -205,8 +215,8 @@ public GetLifecycleResponse getLifecycle() throws IOException, ElasticsearchExce // ----- Endpoint: slm.get_stats /** - * Returns global and policy-level statistics about actions taken by snapshot - * lifecycle management. + * Get snapshot lifecycle management statistics. Get global and policy-level + * statistics about actions taken by snapshot lifecycle management. * * @see Documentation @@ -220,7 +230,7 @@ public GetStatsResponse getStats() throws IOException, ElasticsearchException { // ----- Endpoint: slm.get_status /** - * Retrieves the status of snapshot lifecycle management (SLM). + * Get the snapshot lifecycle management status. * * @see Documentation @@ -234,7 +244,9 @@ public GetSlmStatusResponse getStatus() throws IOException, ElasticsearchExcepti // ----- Endpoint: slm.put_lifecycle /** - * Creates or updates a snapshot lifecycle policy. + * Create or update a policy. Create or update a snapshot lifecycle policy. If + * the policy already exists, this request increments the policy version. Only + * the latest version of a policy is stored. * * @see Documentation @@ -249,7 +261,9 @@ public PutLifecycleResponse putLifecycle(PutLifecycleRequest request) throws IOE } /** - * Creates or updates a snapshot lifecycle policy. + * Create or update a policy. Create or update a snapshot lifecycle policy. If + * the policy already exists, this request increments the policy version. Only + * the latest version of a policy is stored. 
* * @param fn * a function that initializes a builder to create the @@ -268,7 +282,9 @@ public final PutLifecycleResponse putLifecycle( // ----- Endpoint: slm.start /** - * Turns on snapshot lifecycle management (SLM). + * Start snapshot lifecycle management. Snapshot lifecycle management (SLM) + * starts automatically when a cluster is formed. Manually starting SLM is + * necessary only if it has been stopped using the stop SLM API. * * @see Documentation @@ -282,7 +298,17 @@ public StartSlmResponse start() throws IOException, ElasticsearchException { // ----- Endpoint: slm.stop /** - * Turns off snapshot lifecycle management (SLM). + * Stop snapshot lifecycle management. Stop all snapshot lifecycle management + * (SLM) operations and the SLM plugin. This API is useful when you are + * performing maintenance on a cluster and need to prevent SLM from performing + * any actions on your data streams or indices. Stopping SLM does not stop any + * snapshots that are in progress. You can manually trigger snapshots with the + * run snapshot lifecycle policy API even if SLM is stopped. + *

+ * The API returns a response as soon as the request is acknowledged, but the + * plugin might continue to run until in-progress operations complete and it can + * be safely stopped. Use the get snapshot lifecycle management status API to + * see if SLM is running. * * @see Documentation diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ExecuteLifecycleRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ExecuteLifecycleRequest.java index fd0983d7e..abda3524d 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ExecuteLifecycleRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ExecuteLifecycleRequest.java @@ -56,8 +56,10 @@ // typedef: slm.execute_lifecycle.Request /** - * Immediately creates a snapshot according to the lifecycle policy, without - * waiting for the scheduled time. + * Run a policy. Immediately create a snapshot according to the snapshot + * lifecycle policy without waiting for the scheduled time. The snapshot policy + * is normally applied according to its schedule, but you might want to manually + * run a policy before performing an upgrade or other maintenance. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ExecuteRetentionRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ExecuteRetentionRequest.java index 57ec5c733..9755c009d 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ExecuteRetentionRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/ExecuteRetentionRequest.java @@ -50,8 +50,10 @@ // typedef: slm.execute_retention.Request /** - * Deletes any snapshots that are expired according to the policy's retention - * rules. + * Run a retention policy. 
Manually apply the retention policy to force + * immediate removal of snapshots that are expired according to the snapshot + * lifecycle policy retention rules. The retention policy is normally applied + * according to its schedule. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/GetLifecycleRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/GetLifecycleRequest.java index 6df555883..f7b866993 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/GetLifecycleRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/GetLifecycleRequest.java @@ -58,8 +58,8 @@ // typedef: slm.get_lifecycle.Request /** - * Retrieves one or more snapshot lifecycle policy definitions and information - * about the latest snapshot attempts. + * Get policy information. Get snapshot lifecycle policy definitions and + * information about the latest snapshot attempts. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/GetSlmStatusRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/GetSlmStatusRequest.java index d575fbbe7..fe707abbf 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/GetSlmStatusRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/GetSlmStatusRequest.java @@ -50,7 +50,7 @@ // typedef: slm.get_status.Request /** - * Retrieves the status of snapshot lifecycle management (SLM). + * Get the snapshot lifecycle management status. 
* * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/GetStatsRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/GetStatsRequest.java index 9fd0ed656..bd7b30c49 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/GetStatsRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/GetStatsRequest.java @@ -50,8 +50,8 @@ // typedef: slm.get_stats.Request /** - * Returns global and policy-level statistics about actions taken by snapshot - * lifecycle management. + * Get snapshot lifecycle management statistics. Get global and policy-level + * statistics about actions taken by snapshot lifecycle management. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/PutLifecycleRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/PutLifecycleRequest.java index ad68597ae..5f1803804 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/PutLifecycleRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/PutLifecycleRequest.java @@ -58,7 +58,9 @@ // typedef: slm.put_lifecycle.Request /** - * Creates or updates a snapshot lifecycle policy. + * Create or update a policy. Create or update a snapshot lifecycle policy. If + * the policy already exists, this request increments the policy version. Only + * the latest version of a policy is stored. * * @see API * specification @@ -141,7 +143,8 @@ public final String name() { } /** - * Required - ID for the snapshot lifecycle policy you want to create or update. + * Required - The identifier for the snapshot lifecycle policy you want to + * create or update. *

* API name: {@code policy_id} */ @@ -317,7 +320,8 @@ public final Builder name(@Nullable String value) { } /** - * Required - ID for the snapshot lifecycle policy you want to create or update. + * Required - The identifier for the snapshot lifecycle policy you want to + * create or update. *

* API name: {@code policy_id} */ diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/StartSlmRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/StartSlmRequest.java index 2be9ce3f7..c6ae7dcd9 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/StartSlmRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/StartSlmRequest.java @@ -50,7 +50,9 @@ // typedef: slm.start.Request /** - * Turns on snapshot lifecycle management (SLM). + * Start snapshot lifecycle management. Snapshot lifecycle management (SLM) + * starts automatically when a cluster is formed. Manually starting SLM is + * necessary only if it has been stopped using the stop SLM API. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/StopSlmRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/StopSlmRequest.java index bc7a32f73..89925a268 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/StopSlmRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/slm/StopSlmRequest.java @@ -50,7 +50,17 @@ // typedef: slm.stop.Request /** - * Turns off snapshot lifecycle management (SLM). + * Stop snapshot lifecycle management. Stop all snapshot lifecycle management + * (SLM) operations and the SLM plugin. This API is useful when you are + * performing maintenance on a cluster and need to prevent SLM from performing + * any actions on your data streams or indices. Stopping SLM does not stop any + * snapshots that are in progress. You can manually trigger snapshots with the + * run snapshot lifecycle policy API even if SLM is stopped. + *

+ * The API returns a response as soon as the request is acknowledged, but the + * plugin might continue to run until in-progress operations complete and it can + * be safely stopped. Use the get snapshot lifecycle management status API to + * see if SLM is running. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CleanupRepositoryRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CleanupRepositoryRequest.java index 5626be8bd..759d3157a 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CleanupRepositoryRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CleanupRepositoryRequest.java @@ -56,8 +56,9 @@ // typedef: snapshot.cleanup_repository.Request /** - * Triggers the review of a snapshot repository’s contents and deletes any stale - * data not referenced by existing snapshots. + * Clean up the snapshot repository. Trigger the review of the contents of a + * snapshot repository and delete any stale data not referenced by existing + * snapshots. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CloneSnapshotRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CloneSnapshotRequest.java index d23514cb8..ab5a8ea0b 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CloneSnapshotRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CloneSnapshotRequest.java @@ -58,8 +58,8 @@ // typedef: snapshot.clone.Request /** - * Clones indices from one snapshot into another snapshot in the same - * repository. + * Clone a snapshot. Clone part or all of a snapshot into another snapshot in + * the same repository.
* * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CreateRepositoryRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CreateRepositoryRequest.java index 0bb69501c..86eeca000 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CreateRepositoryRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CreateRepositoryRequest.java @@ -60,7 +60,13 @@ // typedef: snapshot.create_repository.Request /** - * Creates a repository. + * Create or update a snapshot repository. IMPORTANT: If you are migrating + * searchable snapshots, the repository name must be identical in the source and + * destination clusters. To register a snapshot repository, the cluster's global + * metadata must be writeable. Ensure there are no cluster blocks (for example, + * cluster.blocks.read_only and + * cluster.blocks.read_only_allow_delete settings) that prevent + * write access. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CreateSnapshotRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CreateSnapshotRequest.java index 5a415b198..fc6be7a48 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CreateSnapshotRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/CreateSnapshotRequest.java @@ -61,7 +61,8 @@ // typedef: snapshot.create.Request /** - * Creates a snapshot in a repository. + * Create a snapshot. Take a snapshot of a cluster or of data streams and + * indices.
* * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/DeleteRepositoryRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/DeleteRepositoryRequest.java index c8f2587ea..7521c6549 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/DeleteRepositoryRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/DeleteRepositoryRequest.java @@ -58,7 +58,10 @@ // typedef: snapshot.delete_repository.Request /** - * Deletes a repository. + * Delete snapshot repositories. When a repository is unregistered, + * Elasticsearch removes only the reference to the location where the repository + * is storing the snapshots. The snapshots themselves are left untouched and in + * place. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/DeleteSnapshotRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/DeleteSnapshotRequest.java index fc29ad635..cfd12fe6b 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/DeleteSnapshotRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/DeleteSnapshotRequest.java @@ -56,7 +56,7 @@ // typedef: snapshot.delete.Request /** - * Deletes one or more snapshots. + * Delete snapshots. 
* * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/ElasticsearchSnapshotAsyncClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/ElasticsearchSnapshotAsyncClient.java index 0b5bdfaca..5d1708b5f 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/ElasticsearchSnapshotAsyncClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/ElasticsearchSnapshotAsyncClient.java @@ -70,8 +70,9 @@ public ElasticsearchSnapshotAsyncClient withTransportOptions(@Nullable Transport // ----- Endpoint: snapshot.cleanup_repository /** - * Triggers the review of a snapshot repository’s contents and deletes any stale - * data not referenced by existing snapshots. + * Clean up the snapshot repository. Trigger the review of the contents of a + * snapshot repository and delete any stale data not referenced by existing + * snapshots. * * @see Documentation @@ -86,8 +87,9 @@ public CompletableFuture cleanupRepository(CleanupRep } /** - * Triggers the review of a snapshot repository’s contents and deletes any stale - * data not referenced by existing snapshots. + * Clean up the snapshot repository. Trigger the review of the contents of a + * snapshot repository and delete any stale data not referenced by existing + * snapshots. * * @param fn * a function that initializes a builder to create the @@ -105,8 +107,8 @@ public final CompletableFuture cleanupRepository( // ----- Endpoint: snapshot.clone /** - * Clones indices from one snapshot into another snapshot in the same - * repository. + * Clone a snapshot. Clone part or all of a snapshot into another snapshot in + * the same repository. * * @see Documentation @@ -121,8 +123,8 @@ public CompletableFuture clone(CloneSnapshotRequest reque } /** - * Clones indices from one snapshot into another snapshot in the same - * repository. + * Clone a snapshot.
Clone part of all of a snapshot into another snapshot in + * the same repository. * * @param fn * a function that initializes a builder to create the @@ -140,7 +142,8 @@ public final CompletableFuture clone( // ----- Endpoint: snapshot.create /** - * Creates a snapshot in a repository. + * Create a snapshot. Take a snapshot of a cluster or of data streams and + * indices. * * @see Documentation @@ -155,7 +158,8 @@ public CompletableFuture create(CreateSnapshotRequest re } /** - * Creates a snapshot in a repository. + * Create a snapshot. Take a snapshot of a cluster or of data streams and + * indices. * * @param fn * a function that initializes a builder to create the @@ -173,7 +177,13 @@ public final CompletableFuture create( // ----- Endpoint: snapshot.create_repository /** - * Creates a repository. + * Create or update a snapshot repository. IMPORTANT: If you are migrating + * searchable snapshots, the repository name must be identical in the source and + * destination clusters. To register a snapshot repository, the cluster's global + * metadata must be writeable. Ensure there are no cluster blocks (for example, + * cluster.blocks.read_only and + * clsuter.blocks.read_only_allow_delete settings) that prevent + * write access. * * @see Documentation @@ -188,7 +198,13 @@ public CompletableFuture createRepository(CreateReposi } /** - * Creates a repository. + * Create or update a snapshot repository. IMPORTANT: If you are migrating + * searchable snapshots, the repository name must be identical in the source and + * destination clusters. To register a snapshot repository, the cluster's global + * metadata must be writeable. Ensure there are no cluster blocks (for example, + * cluster.blocks.read_only and + * clsuter.blocks.read_only_allow_delete settings) that prevent + * write access. 
* * @param fn * a function that initializes a builder to create the @@ -206,7 +222,7 @@ public final CompletableFuture createRepository( // ----- Endpoint: snapshot.delete /** - * Deletes one or more snapshots. + * Delete snapshots. * * @see Documentation @@ -221,7 +237,7 @@ public CompletableFuture delete(DeleteSnapshotRequest re } /** - * Deletes one or more snapshots. + * Delete snapshots. * * @param fn * a function that initializes a builder to create the @@ -239,7 +255,10 @@ public final CompletableFuture delete( // ----- Endpoint: snapshot.delete_repository /** - * Deletes a repository. + * Delete snapshot repositories. When a repository is unregistered, + * Elasticsearch removes only the reference to the location where the repository + * is storing the snapshots. The snapshots themselves are left untouched and in + * place. * * @see Documentation @@ -254,7 +273,10 @@ public CompletableFuture deleteRepository(DeleteReposi } /** - * Deletes a repository. + * Delete snapshot repositories. When a repository is unregistered, + * Elasticsearch removes only the reference to the location where the repository + * is storing the snapshots. The snapshots themselves are left untouched and in + * place. * * @param fn * a function that initializes a builder to create the @@ -272,7 +294,7 @@ public final CompletableFuture deleteRepository( // ----- Endpoint: snapshot.get /** - * Returns information about a snapshot. + * Get snapshot information. * * @see Documentation @@ -287,7 +309,7 @@ public CompletableFuture get(GetSnapshotRequest request) { } /** - * Returns information about a snapshot. + * Get snapshot information. * * @param fn * a function that initializes a builder to create the @@ -305,7 +327,7 @@ public final CompletableFuture get( // ----- Endpoint: snapshot.get_repository /** - * Returns information about a repository. + * Get snapshot repository information. 
* * @see Documentation @@ -320,7 +342,7 @@ public CompletableFuture getRepository(GetRepositoryReque } /** - * Returns information about a repository. + * Get snapshot repository information. * * @param fn * a function that initializes a builder to create the @@ -336,7 +358,7 @@ public final CompletableFuture getRepository( } /** - * Returns information about a repository. + * Get snapshot repository information. * * @see Documentation @@ -351,7 +373,56 @@ public CompletableFuture getRepository() { // ----- Endpoint: snapshot.repository_verify_integrity /** - * Verifies the integrity of the contents of a snapshot repository + * Verify the repository integrity. Verify the integrity of the contents of a + * snapshot repository. + *

+ * This API enables you to perform a comprehensive check of the contents of a + * repository, looking for any anomalies in its data or metadata which might + * prevent you from restoring snapshots from the repository or which might cause + * future snapshot create or delete operations to fail. + *

+ * If you suspect the integrity of the contents of one of your snapshot + * repositories, cease all write activity to this repository immediately, set + * its read_only option to true, and use this API to + * verify its integrity. Until you do so: + *

+ *

+ * If the API finds any problems with the integrity of the contents of your + * repository, Elasticsearch will not be able to repair the damage. The only way + * to bring the repository back into a fully working state after its contents + * have been damaged is by restoring its contents from a repository backup which + * was taken before the damage occurred. You must also identify what caused the + * damage and take action to prevent it from happening again. + *

+ * If you cannot restore a repository backup, register a new repository and use + * this for all future snapshot operations. In some cases it may be possible to + * recover some of the contents of a damaged repository, either by restoring as + * many of its snapshots as needed and taking new snapshots of the restored + * data, or by using the reindex API to copy data from any searchable snapshots + * mounted from the damaged repository. + *

+ * Avoid all operations which write to the repository while the verify + * repository integrity API is running. If something changes the repository + * contents while an integrity verification is running then Elasticsearch may + * incorrectly report having detected some anomalies in its contents due to the + * concurrent writes. It may also incorrectly fail to report some anomalies that + * the concurrent writes prevented it from detecting. + *

+ * NOTE: This API is intended for exploratory use by humans. You should expect + * the request parameters and the response format to vary in future versions. + *

+ * NOTE: This API may not work correctly in a mixed-version cluster. * * @see Documentation @@ -367,7 +438,56 @@ public CompletableFuture repositoryVerifyInte } /** - * Verifies the integrity of the contents of a snapshot repository + * Verify the repository integrity. Verify the integrity of the contents of a + * snapshot repository. + *

+ * This API enables you to perform a comprehensive check of the contents of a + * repository, looking for any anomalies in its data or metadata which might + * prevent you from restoring snapshots from the repository or which might cause + * future snapshot create or delete operations to fail. + *

+ * If you suspect the integrity of the contents of one of your snapshot + * repositories, cease all write activity to this repository immediately, set + * its read_only option to true, and use this API to + * verify its integrity. Until you do so: + *

+ *

+ * If the API finds any problems with the integrity of the contents of your + * repository, Elasticsearch will not be able to repair the damage. The only way + * to bring the repository back into a fully working state after its contents + * have been damaged is by restoring its contents from a repository backup which + * was taken before the damage occurred. You must also identify what caused the + * damage and take action to prevent it from happening again. + *

+ * If you cannot restore a repository backup, register a new repository and use + * this for all future snapshot operations. In some cases it may be possible to + * recover some of the contents of a damaged repository, either by restoring as + * many of its snapshots as needed and taking new snapshots of the restored + * data, or by using the reindex API to copy data from any searchable snapshots + * mounted from the damaged repository. + *

+ * Avoid all operations which write to the repository while the verify + * repository integrity API is running. If something changes the repository + * contents while an integrity verification is running then Elasticsearch may + * incorrectly report having detected some anomalies in its contents due to the + * concurrent writes. It may also incorrectly fail to report some anomalies that + * the concurrent writes prevented it from detecting. + *

+ * NOTE: This API is intended for exploratory use by humans. You should expect + * the request parameters and the response format to vary in future versions. + *

+ * NOTE: This API may not work correctly in a mixed-version cluster. * * @param fn * a function that initializes a builder to create the @@ -385,7 +505,32 @@ public final CompletableFuture repositoryVeri // ----- Endpoint: snapshot.restore /** - * Restores a snapshot. + * Restore a snapshot. Restore a snapshot of a cluster or data streams and + * indices. + *

+ * You can restore a snapshot only to a running cluster with an elected master + * node. The snapshot repository must be registered and available to the + * cluster. The snapshot and cluster versions must be compatible. + *

+ * To restore a snapshot, the cluster's global metadata must be writable. Ensure + * there aren't any cluster blocks that prevent writes. The restore operation + * ignores index blocks. + *

+ * Before you restore a data stream, ensure the cluster contains a matching + * index template with data streams enabled. To check, use the index management + * feature in Kibana or the get index template API: + * + *

+	 * GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream
+	 * 
+	 * 
+ *

+ * If no such template exists, you can create one or restore a cluster state + * that contains one. Without a matching index template, a data stream can't + * roll over or create backing indices. + *

+ * If your snapshot contains data from App Search or Workplace Search, you must + * restore the Enterprise Search encryption key before you restore the snapshot. * * @see Documentation @@ -400,7 +545,32 @@ public CompletableFuture restore(RestoreRequest request) { } /** - * Restores a snapshot. + * Restore a snapshot. Restore a snapshot of a cluster or data streams and + * indices. + *

+ * You can restore a snapshot only to a running cluster with an elected master + * node. The snapshot repository must be registered and available to the + * cluster. The snapshot and cluster versions must be compatible. + *

+ * To restore a snapshot, the cluster's global metadata must be writable. Ensure + * there aren't any cluster blocks that prevent writes. The restore operation + * ignores index blocks. + *

+ * Before you restore a data stream, ensure the cluster contains a matching + * index template with data streams enabled. To check, use the index management + * feature in Kibana or the get index template API: + * + *

+	 * GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream
+	 * 
+	 * 
+ *

+ * If no such template exists, you can create one or restore a cluster state + * that contains one. Without a matching index template, a data stream can't + * roll over or create backing indices. + *

+ * If your snapshot contains data from App Search or Workplace Search, you must + * restore the Enterprise Search encryption key before you restore the snapshot. * * @param fn * a function that initializes a builder to create the @@ -418,7 +588,21 @@ public final CompletableFuture restore( // ----- Endpoint: snapshot.status /** - * Returns information about the status of a snapshot. + * Get the snapshot status. Get a detailed description of the current state for + * each shard participating in the snapshot. Note that this API should be used + * only to obtain detailed shard-level information for ongoing snapshots. If + * this detail is not needed or you want to obtain information about one or more + * existing snapshots, use the get snapshot API. + *

+ * WARNING: Using the API to return the status of any snapshots other than + * currently running snapshots can be expensive. The API requires a read from + * the repository for each shard in each snapshot. For example, if you have 100 + * snapshots with 1,000 shards each, an API request that includes all snapshots + * will require 100,000 reads (100 snapshots x 1,000 shards). + *

+ * Depending on the latency of your storage, such requests can take an extremely + * long time to return results. These requests can also tax machine resources + * and, when using cloud storage, incur high processing costs. * * @see Documentation @@ -433,7 +617,21 @@ public CompletableFuture status(SnapshotStatusRequest re } /** - * Returns information about the status of a snapshot. + * Get the snapshot status. Get a detailed description of the current state for + * each shard participating in the snapshot. Note that this API should be used + * only to obtain detailed shard-level information for ongoing snapshots. If + * this detail is not needed or you want to obtain information about one or more + * existing snapshots, use the get snapshot API. + *

+ * WARNING: Using the API to return the status of any snapshots other than + * currently running snapshots can be expensive. The API requires a read from + * the repository for each shard in each snapshot. For example, if you have 100 + * snapshots with 1,000 shards each, an API request that includes all snapshots + * will require 100,000 reads (100 snapshots x 1,000 shards). + *

+ * Depending on the latency of your storage, such requests can take an extremely + * long time to return results. These requests can also tax machine resources + * and, when using cloud storage, incur high processing costs. * * @param fn * a function that initializes a builder to create the @@ -449,7 +647,21 @@ public final CompletableFuture status( } /** - * Returns information about the status of a snapshot. + * Get the snapshot status. Get a detailed description of the current state for + * each shard participating in the snapshot. Note that this API should be used + * only to obtain detailed shard-level information for ongoing snapshots. If + * this detail is not needed or you want to obtain information about one or more + * existing snapshots, use the get snapshot API. + *

+ * WARNING: Using the API to return the status of any snapshots other than + * currently running snapshots can be expensive. The API requires a read from + * the repository for each shard in each snapshot. For example, if you have 100 + * snapshots with 1,000 shards each, an API request that includes all snapshots + * will require 100,000 reads (100 snapshots x 1,000 shards). + *

+ * Depending on the latency of your storage, such requests can take an extremely + * long time to return results. These requests can also tax machine resources + * and, when using cloud storage, incur high processing costs. * * @see Documentation @@ -464,7 +676,8 @@ public CompletableFuture status() { // ----- Endpoint: snapshot.verify_repository /** - * Verifies a repository. + * Verify a snapshot repository. Check for common misconfigurations in a + * snapshot repository. * * @see Documentation @@ -479,7 +692,8 @@ public CompletableFuture verifyRepository(VerifyReposi } /** - * Verifies a repository. + * Verify a snapshot repository. Check for common misconfigurations in a + * snapshot repository. * * @param fn * a function that initializes a builder to create the diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/ElasticsearchSnapshotClient.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/ElasticsearchSnapshotClient.java index 8e0b2dee8..2866e1630 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/ElasticsearchSnapshotClient.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/ElasticsearchSnapshotClient.java @@ -68,8 +68,9 @@ public ElasticsearchSnapshotClient withTransportOptions(@Nullable TransportOptio // ----- Endpoint: snapshot.cleanup_repository /** - * Triggers the review of a snapshot repository’s contents and deletes any stale - * data not referenced by existing snapshots. + * Clean up the snapshot repository. Trigger the review of the contents of a + * snapshot repository and delete any stale data not referenced by existing + * snapshots. * * @see Documentation @@ -85,8 +86,9 @@ public CleanupRepositoryResponse cleanupRepository(CleanupRepositoryRequest requ } /** - * Triggers the review of a snapshot repository’s contents and deletes any stale - * data not referenced by existing snapshots. + * Clean up the snapshot repository. 
Trigger the review of the contents of a + * snapshot repository and delete any stale data not referenced by existing + * snapshots. * * @param fn * a function that initializes a builder to create the @@ -105,8 +107,8 @@ public final CleanupRepositoryResponse cleanupRepository( // ----- Endpoint: snapshot.clone /** - * Clones indices from one snapshot into another snapshot in the same - * repository. + * Clone a snapshot. Clone part of all of a snapshot into another snapshot in + * the same repository. * * @see Documentation @@ -121,8 +123,8 @@ public CloneSnapshotResponse clone(CloneSnapshotRequest request) throws IOExcept } /** - * Clones indices from one snapshot into another snapshot in the same - * repository. + * Clone a snapshot. Clone part of all of a snapshot into another snapshot in + * the same repository. * * @param fn * a function that initializes a builder to create the @@ -141,7 +143,8 @@ public final CloneSnapshotResponse clone( // ----- Endpoint: snapshot.create /** - * Creates a snapshot in a repository. + * Create a snapshot. Take a snapshot of a cluster or of data streams and + * indices. * * @see Documentation @@ -156,7 +159,8 @@ public CreateSnapshotResponse create(CreateSnapshotRequest request) throws IOExc } /** - * Creates a snapshot in a repository. + * Create a snapshot. Take a snapshot of a cluster or of data streams and + * indices. * * @param fn * a function that initializes a builder to create the @@ -175,7 +179,13 @@ public final CreateSnapshotResponse create( // ----- Endpoint: snapshot.create_repository /** - * Creates a repository. + * Create or update a snapshot repository. IMPORTANT: If you are migrating + * searchable snapshots, the repository name must be identical in the source and + * destination clusters. To register a snapshot repository, the cluster's global + * metadata must be writeable. 
Ensure there are no cluster blocks (for example, + * cluster.blocks.read_only and + * clsuter.blocks.read_only_allow_delete settings) that prevent + * write access. * * @see Documentation @@ -191,7 +201,13 @@ public CreateRepositoryResponse createRepository(CreateRepositoryRequest request } /** - * Creates a repository. + * Create or update a snapshot repository. IMPORTANT: If you are migrating + * searchable snapshots, the repository name must be identical in the source and + * destination clusters. To register a snapshot repository, the cluster's global + * metadata must be writeable. Ensure there are no cluster blocks (for example, + * cluster.blocks.read_only and + * clsuter.blocks.read_only_allow_delete settings) that prevent + * write access. * * @param fn * a function that initializes a builder to create the @@ -210,7 +226,7 @@ public final CreateRepositoryResponse createRepository( // ----- Endpoint: snapshot.delete /** - * Deletes one or more snapshots. + * Delete snapshots. * * @see Documentation @@ -225,7 +241,7 @@ public DeleteSnapshotResponse delete(DeleteSnapshotRequest request) throws IOExc } /** - * Deletes one or more snapshots. + * Delete snapshots. * * @param fn * a function that initializes a builder to create the @@ -244,7 +260,10 @@ public final DeleteSnapshotResponse delete( // ----- Endpoint: snapshot.delete_repository /** - * Deletes a repository. + * Delete snapshot repositories. When a repository is unregistered, + * Elasticsearch removes only the reference to the location where the repository + * is storing the snapshots. The snapshots themselves are left untouched and in + * place. * * @see Documentation @@ -260,7 +279,10 @@ public DeleteRepositoryResponse deleteRepository(DeleteRepositoryRequest request } /** - * Deletes a repository. + * Delete snapshot repositories. When a repository is unregistered, + * Elasticsearch removes only the reference to the location where the repository + * is storing the snapshots. 
The snapshots themselves are left untouched and in + * place. * * @param fn * a function that initializes a builder to create the @@ -279,7 +301,7 @@ public final DeleteRepositoryResponse deleteRepository( // ----- Endpoint: snapshot.get /** - * Returns information about a snapshot. + * Get snapshot information. * * @see Documentation @@ -294,7 +316,7 @@ public GetSnapshotResponse get(GetSnapshotRequest request) throws IOException, E } /** - * Returns information about a snapshot. + * Get snapshot information. * * @param fn * a function that initializes a builder to create the @@ -312,7 +334,7 @@ public final GetSnapshotResponse get(FunctionDocumentation @@ -328,7 +350,7 @@ public GetRepositoryResponse getRepository(GetRepositoryRequest request) } /** - * Returns information about a repository. + * Get snapshot repository information. * * @param fn * a function that initializes a builder to create the @@ -345,7 +367,7 @@ public final GetRepositoryResponse getRepository( } /** - * Returns information about a repository. + * Get snapshot repository information. * * @see Documentation @@ -360,7 +382,56 @@ public GetRepositoryResponse getRepository() throws IOException, ElasticsearchEx // ----- Endpoint: snapshot.repository_verify_integrity /** - * Verifies the integrity of the contents of a snapshot repository + * Verify the repository integrity. Verify the integrity of the contents of a + * snapshot repository. + *

+ * This API enables you to perform a comprehensive check of the contents of a + * repository, looking for any anomalies in its data or metadata which might + * prevent you from restoring snapshots from the repository or which might cause + * future snapshot create or delete operations to fail. + *

+ * If you suspect the integrity of the contents of one of your snapshot + * repositories, cease all write activity to this repository immediately, set + * its read_only option to true, and use this API to + * verify its integrity. Until you do so: + *

+ *

+ * If the API finds any problems with the integrity of the contents of your + * repository, Elasticsearch will not be able to repair the damage. The only way + * to bring the repository back into a fully working state after its contents + * have been damaged is by restoring its contents from a repository backup which + * was taken before the damage occurred. You must also identify what caused the + * damage and take action to prevent it from happening again. + *

+ * If you cannot restore a repository backup, register a new repository and use + * this for all future snapshot operations. In some cases it may be possible to + * recover some of the contents of a damaged repository, either by restoring as + * many of its snapshots as needed and taking new snapshots of the restored + * data, or by using the reindex API to copy data from any searchable snapshots + * mounted from the damaged repository. + *

+ * Avoid all operations which write to the repository while the verify + * repository integrity API is running. If something changes the repository + * contents while an integrity verification is running then Elasticsearch may + * incorrectly report having detected some anomalies in its contents due to the + * concurrent writes. It may also incorrectly fail to report some anomalies that + * the concurrent writes prevented it from detecting. + *

+ * NOTE: This API is intended for exploratory use by humans. You should expect + * the request parameters and the response format to vary in future versions. + *

+ * NOTE: This API may not work correctly in a mixed-version cluster. * * @see Documentation @@ -376,7 +447,56 @@ public RepositoryVerifyIntegrityResponse repositoryVerifyIntegrity(RepositoryVer } /** - * Verifies the integrity of the contents of a snapshot repository + * Verify the repository integrity. Verify the integrity of the contents of a + * snapshot repository. + *

+ * This API enables you to perform a comprehensive check of the contents of a + * repository, looking for any anomalies in its data or metadata which might + * prevent you from restoring snapshots from the repository or which might cause + * future snapshot create or delete operations to fail. + *

+ * If you suspect the integrity of the contents of one of your snapshot + * repositories, cease all write activity to this repository immediately, set + * its read_only option to true, and use this API to + * verify its integrity. Until you do so: + *

+ *

+ * If the API finds any problems with the integrity of the contents of your + * repository, Elasticsearch will not be able to repair the damage. The only way + * to bring the repository back into a fully working state after its contents + * have been damaged is by restoring its contents from a repository backup which + * was taken before the damage occurred. You must also identify what caused the + * damage and take action to prevent it from happening again. + *

+ * If you cannot restore a repository backup, register a new repository and use + * this for all future snapshot operations. In some cases it may be possible to + * recover some of the contents of a damaged repository, either by restoring as + * many of its snapshots as needed and taking new snapshots of the restored + * data, or by using the reindex API to copy data from any searchable snapshots + * mounted from the damaged repository. + *

+ * Avoid all operations which write to the repository while the verify + * repository integrity API is running. If something changes the repository + * contents while an integrity verification is running then Elasticsearch may + * incorrectly report having detected some anomalies in its contents due to the + * concurrent writes. It may also incorrectly fail to report some anomalies that + * the concurrent writes prevented it from detecting. + *

+ * NOTE: This API is intended for exploratory use by humans. You should expect + * the request parameters and the response format to vary in future versions. + *

+ * NOTE: This API may not work correctly in a mixed-version cluster. * * @param fn * a function that initializes a builder to create the @@ -395,7 +515,32 @@ public final RepositoryVerifyIntegrityResponse repositoryVerifyIntegrity( // ----- Endpoint: snapshot.restore /** - * Restores a snapshot. + * Restore a snapshot. Restore a snapshot of a cluster or data streams and + * indices. + *

+ * You can restore a snapshot only to a running cluster with an elected master + * node. The snapshot repository must be registered and available to the + * cluster. The snapshot and cluster versions must be compatible. + *

+ * To restore a snapshot, the cluster's global metadata must be writable. Ensure + * there aren't any cluster blocks that prevent writes. The restore operation + * ignores index blocks. + *

+ * Before you restore a data stream, ensure the cluster contains a matching + * index template with data streams enabled. To check, use the index management + * feature in Kibana or the get index template API: + * + *

+	 * GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream
+	 * 
+	 * 
+ *

+ * If no such template exists, you can create one or restore a cluster state + * that contains one. Without a matching index template, a data stream can't + * roll over or create backing indices. + *

+ * If your snapshot contains data from App Search or Workplace Search, you must + * restore the Enterprise Search encryption key before you restore the snapshot. * * @see Documentation @@ -410,7 +555,32 @@ public RestoreResponse restore(RestoreRequest request) throws IOException, Elast } /** - * Restores a snapshot. + * Restore a snapshot. Restore a snapshot of a cluster or data streams and + * indices. + *

+ * You can restore a snapshot only to a running cluster with an elected master + * node. The snapshot repository must be registered and available to the + * cluster. The snapshot and cluster versions must be compatible. + *

+ * To restore a snapshot, the cluster's global metadata must be writable. Ensure + * there aren't any cluster blocks that prevent writes. The restore operation + * ignores index blocks. + *

+ * Before you restore a data stream, ensure the cluster contains a matching + * index template with data streams enabled. To check, use the index management + * feature in Kibana or the get index template API: + * + *

+	 * GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream
+	 * 
+	 * 
+ *

+ * If no such template exists, you can create one or restore a cluster state + * that contains one. Without a matching index template, a data stream can't + * roll over or create backing indices. + *

+ * If your snapshot contains data from App Search or Workplace Search, you must + * restore the Enterprise Search encryption key before you restore the snapshot. * * @param fn * a function that initializes a builder to create the @@ -428,7 +598,21 @@ public final RestoreResponse restore(Function + * WARNING: Using the API to return the status of any snapshots other than + * currently running snapshots can be expensive. The API requires a read from + * the repository for each shard in each snapshot. For example, if you have 100 + * snapshots with 1,000 shards each, an API request that includes all snapshots + * will require 100,000 reads (100 snapshots x 1,000 shards). + *

+ * Depending on the latency of your storage, such requests can take an extremely + * long time to return results. These requests can also tax machine resources + * and, when using cloud storage, incur high processing costs. * * @see Documentation @@ -443,7 +627,21 @@ public SnapshotStatusResponse status(SnapshotStatusRequest request) throws IOExc } /** - * Returns information about the status of a snapshot. + * Get the snapshot status. Get a detailed description of the current state for + * each shard participating in the snapshot. Note that this API should be used + * only to obtain detailed shard-level information for ongoing snapshots. If + * this detail is not needed or you want to obtain information about one or more + * existing snapshots, use the get snapshot API. + *

+ * WARNING: Using the API to return the status of any snapshots other than + * currently running snapshots can be expensive. The API requires a read from + * the repository for each shard in each snapshot. For example, if you have 100 + * snapshots with 1,000 shards each, an API request that includes all snapshots + * will require 100,000 reads (100 snapshots x 1,000 shards). + *

+ * Depending on the latency of your storage, such requests can take an extremely + * long time to return results. These requests can also tax machine resources + * and, when using cloud storage, incur high processing costs. * * @param fn * a function that initializes a builder to create the @@ -460,7 +658,21 @@ public final SnapshotStatusResponse status( } /** - * Returns information about the status of a snapshot. + * Get the snapshot status. Get a detailed description of the current state for + * each shard participating in the snapshot. Note that this API should be used + * only to obtain detailed shard-level information for ongoing snapshots. If + * this detail is not needed or you want to obtain information about one or more + * existing snapshots, use the get snapshot API. + *

+ * WARNING: Using the API to return the status of any snapshots other than + * currently running snapshots can be expensive. The API requires a read from + * the repository for each shard in each snapshot. For example, if you have 100 + * snapshots with 1,000 shards each, an API request that includes all snapshots + * will require 100,000 reads (100 snapshots x 1,000 shards). + *

+ * Depending on the latency of your storage, such requests can take an extremely + * long time to return results. These requests can also tax machine resources + * and, when using cloud storage, incur high processing costs. * * @see Documentation @@ -475,7 +687,8 @@ public SnapshotStatusResponse status() throws IOException, ElasticsearchExceptio // ----- Endpoint: snapshot.verify_repository /** - * Verifies a repository. + * Verify a snapshot repository. Check for common misconfigurations in a + * snapshot repository. * * @see Documentation @@ -491,7 +704,8 @@ public VerifyRepositoryResponse verifyRepository(VerifyRepositoryRequest request } /** - * Verifies a repository. + * Verify a snapshot repository. Check for common misconfigurations in a + * snapshot repository. * * @param fn * a function that initializes a builder to create the diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/GetRepositoryRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/GetRepositoryRequest.java index 55a08eeba..305661c71 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/GetRepositoryRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/GetRepositoryRequest.java @@ -59,7 +59,7 @@ // typedef: snapshot.get_repository.Request /** - * Returns information about a repository. + * Get snapshot repository information. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/GetSnapshotRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/GetSnapshotRequest.java index 90544d289..b62955a32 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/GetSnapshotRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/GetSnapshotRequest.java @@ -61,7 +61,7 @@ // typedef: snapshot.get.Request /** - * Returns information about a snapshot. 
+ * Get snapshot information. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/RepositoryVerifyIntegrityRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/RepositoryVerifyIntegrityRequest.java index a71c3ea66..632463a4e 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/RepositoryVerifyIntegrityRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/RepositoryVerifyIntegrityRequest.java @@ -59,7 +59,56 @@ // typedef: snapshot.repository_verify_integrity.Request /** - * Verifies the integrity of the contents of a snapshot repository + * Verify the repository integrity. Verify the integrity of the contents of a + * snapshot repository. + *

+ * This API enables you to perform a comprehensive check of the contents of a + * repository, looking for any anomalies in its data or metadata which might + * prevent you from restoring snapshots from the repository or which might cause + * future snapshot create or delete operations to fail. + *

+ * If you suspect the integrity of the contents of one of your snapshot + * repositories, cease all write activity to this repository immediately, set + * its read_only option to true, and use this API to + * verify its integrity. Until you do so: + *

    + *
  • It may not be possible to restore some snapshots from this + * repository.
  • + *
  • Searchable snapshots may report errors when searched or may have + * unassigned shards.
  • + *
  • Taking snapshots into this repository may fail or may appear to succeed + * but have created a snapshot which cannot be restored.
  • + *
  • Deleting snapshots from this repository may fail or may appear to succeed + * but leave the underlying data on disk.
  • + *
  • Continuing to write to the repository while it is in an invalid state may + * cause additional damage to its contents.</li>
  • + *
+ *

+ * If the API finds any problems with the integrity of the contents of your + * repository, Elasticsearch will not be able to repair the damage. The only way + * to bring the repository back into a fully working state after its contents + * have been damaged is by restoring its contents from a repository backup which + * was taken before the damage occurred. You must also identify what caused the + * damage and take action to prevent it from happening again. + *

+ * If you cannot restore a repository backup, register a new repository and use + * this for all future snapshot operations. In some cases it may be possible to + * recover some of the contents of a damaged repository, either by restoring as + * many of its snapshots as needed and taking new snapshots of the restored + * data, or by using the reindex API to copy data from any searchable snapshots + * mounted from the damaged repository. + *

+ * Avoid all operations which write to the repository while the verify + * repository integrity API is running. If something changes the repository + * contents while an integrity verification is running then Elasticsearch may + * incorrectly report having detected some anomalies in its contents due to the + * concurrent writes. It may also incorrectly fail to report some anomalies that + * the concurrent writes prevented it from detecting. + *

+ * NOTE: This API is intended for exploratory use by humans. You should expect + * the request parameters and the response format to vary in future versions. + *

+ * NOTE: This API may not work correctly in a mixed-version cluster. * * @see API diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/RestoreRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/RestoreRequest.java index 1176d6dac..509e7f8a9 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/RestoreRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/RestoreRequest.java @@ -61,7 +61,32 @@ // typedef: snapshot.restore.Request /** - * Restores a snapshot. + * Restore a snapshot. Restore a snapshot of a cluster or data streams and + * indices. + *

+ * You can restore a snapshot only to a running cluster with an elected master + * node. The snapshot repository must be registered and available to the + * cluster. The snapshot and cluster versions must be compatible. + *

+ * To restore a snapshot, the cluster's global metadata must be writable. Ensure + * there aren't any cluster blocks that prevent writes. The restore operation + * ignores index blocks. + *

+ * Before you restore a data stream, ensure the cluster contains a matching + * index template with data streams enabled. To check, use the index management + * feature in Kibana or the get index template API: + * + *

+ * GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream
+ * 
+ * 
+ *

+ * If no such template exists, you can create one or restore a cluster state + * that contains one. Without a matching index template, a data stream can't + * roll over or create backing indices. + *

+ * If your snapshot contains data from App Search or Workplace Search, you must + * restore the Enterprise Search encryption key before you restore the snapshot. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/SnapshotStatusRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/SnapshotStatusRequest.java index c9da0634a..45641441c 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/SnapshotStatusRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/SnapshotStatusRequest.java @@ -59,7 +59,21 @@ // typedef: snapshot.status.Request /** - * Returns information about the status of a snapshot. + * Get the snapshot status. Get a detailed description of the current state for + * each shard participating in the snapshot. Note that this API should be used + * only to obtain detailed shard-level information for ongoing snapshots. If + * this detail is not needed or you want to obtain information about one or more + * existing snapshots, use the get snapshot API. + *

+ * WARNING: Using the API to return the status of any snapshots other than + * currently running snapshots can be expensive. The API requires a read from + * the repository for each shard in each snapshot. For example, if you have 100 + * snapshots with 1,000 shards each, an API request that includes all snapshots + * will require 100,000 reads (100 snapshots x 1,000 shards). + *

+ * Depending on the latency of your storage, such requests can take an extremely + * long time to return results. These requests can also tax machine resources + * and, when using cloud storage, incur high processing costs. * * @see API * specification diff --git a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/VerifyRepositoryRequest.java b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/VerifyRepositoryRequest.java index fffc7bc18..2371c4f0f 100644 --- a/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/VerifyRepositoryRequest.java +++ b/java-client/src/main/java/co/elastic/clients/elasticsearch/snapshot/VerifyRepositoryRequest.java @@ -56,7 +56,8 @@ // typedef: snapshot.verify_repository.Request /** - * Verifies a repository. + * Verify a snapshot repository. Check for common misconfigurations in a + * snapshot repository. * * @see API