From c082b85542509c13e963189cc6d819aee0155945 Mon Sep 17 00:00:00 2001
From: Elastic Machine <elasticmachine@users.noreply.github.com>
Date: Mon, 16 Dec 2024 06:03:06 +0000
Subject: [PATCH] Auto-generated API code

---
 .../_async/client/__init__.py                 | 10 ++++
 .../_async/client/indices.py                  |  4 +-
 .../_async/client/license.py                  |  8 +--
 elasticsearch_serverless/_async/client/ml.py  | 52 +++++++++++++++++++
 .../_sync/client/__init__.py                  | 10 ++++
 .../_sync/client/indices.py                   |  4 +-
 .../_sync/client/license.py                   |  8 +--
 elasticsearch_serverless/_sync/client/ml.py   | 52 +++++++++++++++++++
 8 files changed, 138 insertions(+), 10 deletions(-)

diff --git a/elasticsearch_serverless/_async/client/__init__.py b/elasticsearch_serverless/_async/client/__init__.py
index f39b516..8eb78fa 100644
--- a/elasticsearch_serverless/_async/client/__init__.py
+++ b/elasticsearch_serverless/_async/client/__init__.py
@@ -456,12 +456,14 @@ async def bulk(
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        list_executed_pipelines: t.Optional[bool] = None,
         pipeline: t.Optional[str] = None,
         pretty: t.Optional[bool] = None,
         refresh: t.Optional[
             t.Union[bool, str, t.Literal["false", "true", "wait_for"]]
         ] = None,
         require_alias: t.Optional[bool] = None,
+        require_data_stream: t.Optional[bool] = None,
         routing: t.Optional[str] = None,
         source: t.Optional[t.Union[bool, t.Union[str, t.Sequence[str]]]] = None,
         source_excludes: t.Optional[t.Union[str, t.Sequence[str]]] = None,
@@ -481,6 +483,8 @@ async def bulk(
         :param operations:
         :param index: Name of the data stream, index, or index alias to perform bulk
             actions on.
+        :param list_executed_pipelines: If `true`, the response will include the ingest
+            pipelines that were executed for each index or create.
         :param pipeline: ID of the pipeline to use to preprocess incoming documents.
             If the index has a default ingest pipeline specified, then setting the value
             to `_none` disables the default ingest pipeline for this request. If a final
@@ -491,6 +495,8 @@ async def bulk(
             make this operation visible to search, if `false` do nothing with refreshes.
             Valid values: `true`, `false`, `wait_for`.
         :param require_alias: If `true`, the request’s actions must target an index alias.
+        :param require_data_stream: If `true`, the request's actions must target a data
+            stream (existing or to-be-created).
         :param routing: Custom value used to route operations to a specific shard.
         :param source: `true` or `false` to return the `_source` field or not, or a list
             of fields to return.
@@ -524,6 +530,8 @@ async def bulk(
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if list_executed_pipelines is not None:
+            __query["list_executed_pipelines"] = list_executed_pipelines
         if pipeline is not None:
             __query["pipeline"] = pipeline
         if pretty is not None:
@@ -532,6 +540,8 @@ async def bulk(
             __query["refresh"] = refresh
         if require_alias is not None:
             __query["require_alias"] = require_alias
+        if require_data_stream is not None:
+            __query["require_data_stream"] = require_data_stream
         if routing is not None:
             __query["routing"] = routing
         if source is not None:
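
Below is a minimal sketch of how the two new bulk flags could be used from the
async client; the endpoint URL, API key, and data stream name are placeholders.

    import asyncio

    from elasticsearch_serverless import AsyncElasticsearch

    async def main() -> None:
        # Placeholder endpoint and credentials.
        client = AsyncElasticsearch(
            "https://my-project.es.example.io:443",
            api_key="...",
        )
        # require_data_stream makes the request fail if an action would not
        # target a data stream, and list_executed_pipelines asks the response
        # to report the ingest pipelines that ran for each create action.
        resp = await client.bulk(
            operations=[
                {"create": {"_index": "logs-app-default"}},
                {"@timestamp": "2024-12-16T06:03:06Z", "message": "hello"},
            ],
            require_data_stream=True,
            list_executed_pipelines=True,
        )
        print(resp["errors"])
        await client.close()

    asyncio.run(main())
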
diff --git a/elasticsearch_serverless/_async/client/indices.py b/elasticsearch_serverless/_async/client/indices.py
index 2954c63..218c5ab 100644
--- a/elasticsearch_serverless/_async/client/indices.py
+++ b/elasticsearch_serverless/_async/client/indices.py
@@ -824,8 +824,8 @@ async def explain_data_lifecycle(
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
-        Get the status for a data stream lifecycle. Retrieves information about an index
-        or data stream’s current data stream lifecycle status, such as time since index
+        Get the status for a data stream lifecycle. Get information about an index or
+        data stream's current data stream lifecycle status, such as time since index
         creation, time since rollover, the lifecycle configuration managing the index,
         or any errors encountered during lifecycle execution.
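
A minimal call to this API might look like the following; the backing-index
name is a placeholder and `client` is assumed to be an `AsyncElasticsearch`
instance such as the one constructed in the bulk sketch above.

    from elasticsearch_serverless import AsyncElasticsearch

    async def show_lifecycle_status(client: AsyncElasticsearch) -> None:
        # Placeholder backing-index name; the response reports lifecycle
        # details such as time since creation, time since rollover, the
        # managing lifecycle configuration, and any lifecycle errors.
        resp = await client.indices.explain_data_lifecycle(
            index=".ds-logs-app-default-2024.12.16-000001"
        )
        print(resp)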
 
diff --git a/elasticsearch_serverless/_async/client/license.py b/elasticsearch_serverless/_async/client/license.py
index 70bb65c..41f7bc8 100644
--- a/elasticsearch_serverless/_async/client/license.py
+++ b/elasticsearch_serverless/_async/client/license.py
@@ -37,9 +37,11 @@ async def get(
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
-        Get license information. Returns information about your Elastic license, including
-        its type, its status, when it was issued, and when it expires. For more information
-        about the different types of licenses, refer to [Elastic Stack subscriptions](https://www.elastic.co/subscriptions).
+        Get license information. Get information about your Elastic license including
+        its type, its status, when it was issued, and when it expires. NOTE: If the master
+        node is generating a new cluster state, the get license API may return a `404
+        Not Found` response. If you receive an unexpected 404 response after cluster
+        startup, wait a short period and retry the request.
 
         `<https://www.elastic.co/guide/en/elasticsearch/reference/master/get-license.html>`_
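
The new NOTE suggests retrying a 404 shortly after cluster startup. A hedged
sketch, assuming the package re-exports `NotFoundError` at the top level the
way elasticsearch-py does:

    import asyncio

    from elasticsearch_serverless import AsyncElasticsearch, NotFoundError

    async def get_license_with_retry(
        client: AsyncElasticsearch, attempts: int = 3, delay: float = 1.0
    ):
        # The master node may still be generating a new cluster state right
        # after startup, in which case the API can answer 404 Not Found.
        for attempt in range(attempts):
            try:
                return await client.license.get()
            except NotFoundError:
                if attempt == attempts - 1:
                    raise
                await asyncio.sleep(delay)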
 
diff --git a/elasticsearch_serverless/_async/client/ml.py b/elasticsearch_serverless/_async/client/ml.py
index 51af55c..e4dfb5b 100644
--- a/elasticsearch_serverless/_async/client/ml.py
+++ b/elasticsearch_serverless/_async/client/ml.py
@@ -1464,6 +1464,7 @@ async def get_trained_models(
                 ],
             ]
         ] = None,
+        include_model_definition: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
         size: t.Optional[int] = None,
         tags: t.Optional[t.Union[str, t.Sequence[str]]] = None,
@@ -1490,6 +1491,8 @@ async def get_trained_models(
         :param from_: Skips the specified number of models.
         :param include: A comma delimited string of optional fields to include in the
             response body.
+        :param include_model_definition: This parameter is deprecated. Use
+            `include=definition` instead.
         :param size: Specifies the maximum number of models to obtain.
         :param tags: A comma delimited string of tags. A trained model can have many
             tags, or none. When supplied, only trained models that contain all the supplied
@@ -1519,6 +1522,8 @@ async def get_trained_models(
             __query["human"] = human
         if include is not None:
             __query["include"] = include
+        if include_model_definition is not None:
+            __query["include_model_definition"] = include_model_definition
         if pretty is not None:
             __query["pretty"] = pretty
         if size is not None:
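
Since `include_model_definition` is deprecated, a sketch of the suggested
replacement (the model id is a placeholder):

    from elasticsearch_serverless import AsyncElasticsearch

    async def fetch_model_with_definition(
        client: AsyncElasticsearch, model_id: str = "my-trained-model"
    ):
        # Prefer include="definition" over the deprecated
        # include_model_definition flag.
        return await client.ml.get_trained_models(
            model_id=model_id, include="definition"
        )
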
@@ -2038,9 +2043,11 @@ async def put_calendar_job(
             "description",
             "headers",
             "max_num_threads",
+            "meta",
             "model_memory_limit",
             "version",
         ),
+        parameter_aliases={"_meta": "meta"},
         ignore_deprecated_options={"headers"},
     )
     async def put_data_frame_analytics(
@@ -2058,6 +2065,7 @@ async def put_data_frame_analytics(
         headers: t.Optional[t.Mapping[str, t.Union[str, t.Sequence[str]]]] = None,
         human: t.Optional[bool] = None,
         max_num_threads: t.Optional[int] = None,
+        meta: t.Optional[t.Mapping[str, t.Any]] = None,
         model_memory_limit: t.Optional[str] = None,
         pretty: t.Optional[bool] = None,
         version: t.Optional[str] = None,
@@ -2118,6 +2126,7 @@ async def put_data_frame_analytics(
             Using more threads may decrease the time necessary to complete the analysis
             at the cost of using more CPU. Note that the process may use additional threads
             for operational functionality other than the analysis itself.
+        :param meta:
         :param model_memory_limit: The approximate maximum amount of memory resources
             that are permitted for analytical processing. If your `elasticsearch.yml`
             file contains an `xpack.ml.max_model_memory_limit` setting, an error occurs
@@ -2162,6 +2171,8 @@ async def put_data_frame_analytics(
                 __body["headers"] = headers
             if max_num_threads is not None:
                 __body["max_num_threads"] = max_num_threads
+            if meta is not None:
+                __body["_meta"] = meta
             if model_memory_limit is not None:
                 __body["model_memory_limit"] = model_memory_limit
             if version is not None:
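
The new `meta` parameter is serialized into the request body as `_meta` via the
parameter alias above. A minimal sketch with placeholder ids and configuration:

    from elasticsearch_serverless import AsyncElasticsearch

    async def create_analytics_job(client: AsyncElasticsearch) -> None:
        # Placeholder job id, indices, and analysis; `meta` ends up in the
        # body as `_meta`.
        await client.ml.put_data_frame_analytics(
            id="demo-outliers",
            source={"index": "demo-source"},
            dest={"index": "demo-dest"},
            analysis={"outlier_detection": {}},
            meta={"owner": "analytics-team"},
        )
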
@@ -2180,6 +2191,7 @@ async def put_data_frame_analytics(
     @_rewrite_parameters(
         body_fields=(
             "aggregations",
+            "aggs",
             "chunking_config",
             "delayed_data_check_config",
             "frequency",
@@ -2202,6 +2214,7 @@ async def put_datafeed(
         *,
         datafeed_id: str,
         aggregations: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None,
+        aggs: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None,
         allow_no_indices: t.Optional[bool] = None,
         chunking_config: t.Optional[t.Mapping[str, t.Any]] = None,
         delayed_data_check_config: t.Optional[t.Mapping[str, t.Any]] = None,
@@ -2255,6 +2268,8 @@ async def put_datafeed(
         :param aggregations: If set, the datafeed performs aggregation searches. Support
             for aggregations is limited and should be used only with low cardinality
             data.
+        :param aggs: If set, the datafeed performs aggregation searches. Support for
+            aggregations is limited and should be used only with low cardinality data.
         :param allow_no_indices: If true, wildcard indices expressions that resolve into
             no concrete indices are ignored. This includes the `_all` string or when
             no indices are specified.
@@ -2342,6 +2357,8 @@ async def put_datafeed(
         if not __body:
             if aggregations is not None:
                 __body["aggregations"] = aggregations
+            if aggs is not None:
+                __body["aggs"] = aggs
             if chunking_config is not None:
                 __body["chunking_config"] = chunking_config
             if delayed_data_check_config is not None:
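
`aggs` is now accepted as an alternative spelling of `aggregations` in the
request body. A sketch with placeholder ids, index, and a typical date
histogram aggregation:

    from elasticsearch_serverless import AsyncElasticsearch

    async def create_datafeed(client: AsyncElasticsearch) -> None:
        await client.ml.put_datafeed(
            datafeed_id="datafeed-demo",
            job_id="demo-job",
            indices=["demo-metrics"],
            # Sent as "aggs" in the body, mirroring the "aggregations" field.
            aggs={
                "buckets": {
                    "date_histogram": {
                        "field": "@timestamp",
                        "fixed_interval": "5m",
                    },
                    "aggregations": {
                        "@timestamp": {"max": {"field": "@timestamp"}},
                    },
                }
            },
        )
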
@@ -2464,6 +2481,7 @@ async def put_job(
         analysis_config: t.Optional[t.Mapping[str, t.Any]] = None,
         data_description: t.Optional[t.Mapping[str, t.Any]] = None,
         allow_lazy_open: t.Optional[bool] = None,
+        allow_no_indices: t.Optional[bool] = None,
         analysis_limits: t.Optional[t.Mapping[str, t.Any]] = None,
         background_persist_interval: t.Optional[
             t.Union[str, t.Literal[-1], t.Literal[0]]
@@ -2473,9 +2491,19 @@ async def put_job(
         datafeed_config: t.Optional[t.Mapping[str, t.Any]] = None,
         description: t.Optional[str] = None,
         error_trace: t.Optional[bool] = None,
+        expand_wildcards: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]]
+                ],
+                t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]],
+            ]
+        ] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         groups: t.Optional[t.Sequence[str]] = None,
         human: t.Optional[bool] = None,
+        ignore_throttled: t.Optional[bool] = None,
+        ignore_unavailable: t.Optional[bool] = None,
         model_plot_config: t.Optional[t.Mapping[str, t.Any]] = None,
         model_snapshot_retention_days: t.Optional[int] = None,
         pretty: t.Optional[bool] = None,
@@ -2510,6 +2538,9 @@ async def put_job(
             to true, the open anomaly detection jobs API does not return an error and
             the job waits in the opening state until sufficient machine learning node
             capacity is available.
+        :param allow_no_indices: If `true`, wildcard indices expressions that resolve
+            into no concrete indices are ignored. This includes the `_all` string or
+            when no indices are specified.
         :param analysis_limits: Limits can be applied for the resources required to hold
             the mathematical models in memory. These limits are approximate and can be
             set per job. They do not control the memory used by other processes, for
@@ -2533,7 +2564,20 @@ async def put_job(
             using those same roles. If you provide secondary authorization headers, those
             credentials are used instead.
         :param description: A description of the job.
+        :param expand_wildcards: Type of index that wildcard patterns can match. If the
+            request can target data streams, this argument determines whether wildcard
+            expressions match hidden data streams. Supports comma-separated values. Valid
+            values are: * `all`: Match any data stream or index, including hidden ones.
+            * `closed`: Match closed, non-hidden indices. Also matches any non-hidden
+            data stream. Data streams cannot be closed. * `hidden`: Match hidden data
+            streams and hidden indices. Must be combined with `open`, `closed`, or both.
+            * `none`: Wildcard patterns are not accepted. * `open`: Match open, non-hidden
+            indices. Also matches any non-hidden data stream.
         :param groups: A list of job groups. A job can belong to no groups or many.
+        :param ignore_throttled: If `true`, concrete, expanded or aliased indices are
+            ignored when frozen.
+        :param ignore_unavailable: If `true`, unavailable indices (missing or closed)
+            are ignored.
         :param model_plot_config: This advanced configuration option stores model information
             along with the results. It provides a more detailed view into anomaly detection.
             If you enable model plot it can add considerable overhead to the performance
@@ -2573,12 +2617,20 @@ async def put_job(
         __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}'
         __query: t.Dict[str, t.Any] = {}
         __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if allow_no_indices is not None:
+            __query["allow_no_indices"] = allow_no_indices
         if error_trace is not None:
             __query["error_trace"] = error_trace
+        if expand_wildcards is not None:
+            __query["expand_wildcards"] = expand_wildcards
         if filter_path is not None:
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if ignore_throttled is not None:
+            __query["ignore_throttled"] = ignore_throttled
+        if ignore_unavailable is not None:
+            __query["ignore_unavailable"] = ignore_unavailable
         if pretty is not None:
             __query["pretty"] = pretty
         if not __body:
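
The new query-string options apply when an inline `datafeed_config` is
supplied, controlling how its index wildcards are resolved. A sketch with
placeholder job and index names:

    from elasticsearch_serverless import AsyncElasticsearch

    async def create_job(client: AsyncElasticsearch) -> None:
        await client.ml.put_job(
            job_id="demo-job",
            analysis_config={
                "bucket_span": "15m",
                "detectors": [{"function": "count"}],
            },
            data_description={"time_field": "@timestamp"},
            datafeed_config={"indices": ["demo-metrics-*"]},
            # New query-string parameters for resolving the datafeed indices.
            allow_no_indices=True,
            expand_wildcards="open",
            ignore_unavailable=True,
        )
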
diff --git a/elasticsearch_serverless/_sync/client/__init__.py b/elasticsearch_serverless/_sync/client/__init__.py
index 968bd94..569a774 100644
--- a/elasticsearch_serverless/_sync/client/__init__.py
+++ b/elasticsearch_serverless/_sync/client/__init__.py
@@ -454,12 +454,14 @@ def bulk(
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        list_executed_pipelines: t.Optional[bool] = None,
         pipeline: t.Optional[str] = None,
         pretty: t.Optional[bool] = None,
         refresh: t.Optional[
             t.Union[bool, str, t.Literal["false", "true", "wait_for"]]
         ] = None,
         require_alias: t.Optional[bool] = None,
+        require_data_stream: t.Optional[bool] = None,
         routing: t.Optional[str] = None,
         source: t.Optional[t.Union[bool, t.Union[str, t.Sequence[str]]]] = None,
         source_excludes: t.Optional[t.Union[str, t.Sequence[str]]] = None,
@@ -479,6 +481,8 @@ def bulk(
         :param operations:
         :param index: Name of the data stream, index, or index alias to perform bulk
             actions on.
+        :param list_executed_pipelines: If `true`, the response will include the ingest
+            pipelines that were executed for each index or create.
         :param pipeline: ID of the pipeline to use to preprocess incoming documents.
             If the index has a default ingest pipeline specified, then setting the value
             to `_none` disables the default ingest pipeline for this request. If a final
@@ -489,6 +493,8 @@ def bulk(
             make this operation visible to search, if `false` do nothing with refreshes.
             Valid values: `true`, `false`, `wait_for`.
         :param require_alias: If `true`, the request’s actions must target an index alias.
+        :param require_data_stream: If `true`, the request's actions must target a data
+            stream (existing or to-be-created).
         :param routing: Custom value used to route operations to a specific shard.
         :param source: `true` or `false` to return the `_source` field or not, or a list
             of fields to return.
@@ -522,6 +528,8 @@ def bulk(
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if list_executed_pipelines is not None:
+            __query["list_executed_pipelines"] = list_executed_pipelines
         if pipeline is not None:
             __query["pipeline"] = pipeline
         if pretty is not None:
@@ -530,6 +538,8 @@ def bulk(
             __query["refresh"] = refresh
         if require_alias is not None:
             __query["require_alias"] = require_alias
+        if require_data_stream is not None:
+            __query["require_data_stream"] = require_data_stream
         if routing is not None:
             __query["routing"] = routing
         if source is not None:
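
The same two flags on the synchronous client; endpoint, credentials, and data
stream name are again placeholders.

    from elasticsearch_serverless import Elasticsearch

    client = Elasticsearch(
        "https://my-project.es.example.io:443",
        api_key="...",
    )
    resp = client.bulk(
        operations=[
            {"create": {"_index": "logs-app-default"}},
            {"@timestamp": "2024-12-16T06:03:06Z", "message": "hello"},
        ],
        require_data_stream=True,
        list_executed_pipelines=True,
    )
    print(resp["errors"])
    client.close()
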
diff --git a/elasticsearch_serverless/_sync/client/indices.py b/elasticsearch_serverless/_sync/client/indices.py
index b5e225a..dbd2c78 100644
--- a/elasticsearch_serverless/_sync/client/indices.py
+++ b/elasticsearch_serverless/_sync/client/indices.py
@@ -824,8 +824,8 @@ def explain_data_lifecycle(
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
-        Get the status for a data stream lifecycle. Retrieves information about an index
-        or data stream’s current data stream lifecycle status, such as time since index
+        Get the status for a data stream lifecycle. Get information about an index or
+        data stream's current data stream lifecycle status, such as time since index
         creation, time since rollover, the lifecycle configuration managing the index,
         or any errors encountered during lifecycle execution.
 
diff --git a/elasticsearch_serverless/_sync/client/license.py b/elasticsearch_serverless/_sync/client/license.py
index b63fecd..24e09fb 100644
--- a/elasticsearch_serverless/_sync/client/license.py
+++ b/elasticsearch_serverless/_sync/client/license.py
@@ -37,9 +37,11 @@ def get(
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
-        Get license information. Returns information about your Elastic license, including
-        its type, its status, when it was issued, and when it expires. For more information
-        about the different types of licenses, refer to [Elastic Stack subscriptions](https://www.elastic.co/subscriptions).
+        Get license information. Get information about your Elastic license including
+        its type, its status, when it was issued, and when it expires. NOTE: If the master
+        node is generating a new cluster state, the get license API may return a `404
+        Not Found` response. If you receive an unexpected 404 response after cluster
+        startup, wait a short period and retry the request.
 
         `<https://www.elastic.co/guide/en/elasticsearch/reference/master/get-license.html>`_
 
diff --git a/elasticsearch_serverless/_sync/client/ml.py b/elasticsearch_serverless/_sync/client/ml.py
index ff876ca..64709c9 100644
--- a/elasticsearch_serverless/_sync/client/ml.py
+++ b/elasticsearch_serverless/_sync/client/ml.py
@@ -1464,6 +1464,7 @@ def get_trained_models(
                 ],
             ]
         ] = None,
+        include_model_definition: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
         size: t.Optional[int] = None,
         tags: t.Optional[t.Union[str, t.Sequence[str]]] = None,
@@ -1490,6 +1491,8 @@ def get_trained_models(
         :param from_: Skips the specified number of models.
         :param include: A comma delimited string of optional fields to include in the
             response body.
+        :param include_model_definition: This parameter is deprecated. Use
+            `include=definition` instead.
         :param size: Specifies the maximum number of models to obtain.
         :param tags: A comma delimited string of tags. A trained model can have many
             tags, or none. When supplied, only trained models that contain all the supplied
@@ -1519,6 +1522,8 @@ def get_trained_models(
             __query["human"] = human
         if include is not None:
             __query["include"] = include
+        if include_model_definition is not None:
+            __query["include_model_definition"] = include_model_definition
         if pretty is not None:
             __query["pretty"] = pretty
         if size is not None:
@@ -2038,9 +2043,11 @@ def put_calendar_job(
             "description",
             "headers",
             "max_num_threads",
+            "meta",
             "model_memory_limit",
             "version",
         ),
+        parameter_aliases={"_meta": "meta"},
         ignore_deprecated_options={"headers"},
     )
     def put_data_frame_analytics(
@@ -2058,6 +2065,7 @@ def put_data_frame_analytics(
         headers: t.Optional[t.Mapping[str, t.Union[str, t.Sequence[str]]]] = None,
         human: t.Optional[bool] = None,
         max_num_threads: t.Optional[int] = None,
+        meta: t.Optional[t.Mapping[str, t.Any]] = None,
         model_memory_limit: t.Optional[str] = None,
         pretty: t.Optional[bool] = None,
         version: t.Optional[str] = None,
@@ -2118,6 +2126,7 @@ def put_data_frame_analytics(
             Using more threads may decrease the time necessary to complete the analysis
             at the cost of using more CPU. Note that the process may use additional threads
             for operational functionality other than the analysis itself.
+        :param meta:
         :param model_memory_limit: The approximate maximum amount of memory resources
             that are permitted for analytical processing. If your `elasticsearch.yml`
             file contains an `xpack.ml.max_model_memory_limit` setting, an error occurs
@@ -2162,6 +2171,8 @@ def put_data_frame_analytics(
                 __body["headers"] = headers
             if max_num_threads is not None:
                 __body["max_num_threads"] = max_num_threads
+            if meta is not None:
+                __body["_meta"] = meta
             if model_memory_limit is not None:
                 __body["model_memory_limit"] = model_memory_limit
             if version is not None:
@@ -2180,6 +2191,7 @@ def put_data_frame_analytics(
     @_rewrite_parameters(
         body_fields=(
             "aggregations",
+            "aggs",
             "chunking_config",
             "delayed_data_check_config",
             "frequency",
@@ -2202,6 +2214,7 @@ def put_datafeed(
         *,
         datafeed_id: str,
         aggregations: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None,
+        aggs: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None,
         allow_no_indices: t.Optional[bool] = None,
         chunking_config: t.Optional[t.Mapping[str, t.Any]] = None,
         delayed_data_check_config: t.Optional[t.Mapping[str, t.Any]] = None,
@@ -2255,6 +2268,8 @@ def put_datafeed(
         :param aggregations: If set, the datafeed performs aggregation searches. Support
             for aggregations is limited and should be used only with low cardinality
             data.
+        :param aggs: If set, the datafeed performs aggregation searches. Support for
+            aggregations is limited and should be used only with low cardinality data.
         :param allow_no_indices: If true, wildcard indices expressions that resolve into
             no concrete indices are ignored. This includes the `_all` string or when
             no indices are specified.
@@ -2342,6 +2357,8 @@ def put_datafeed(
         if not __body:
             if aggregations is not None:
                 __body["aggregations"] = aggregations
+            if aggs is not None:
+                __body["aggs"] = aggs
             if chunking_config is not None:
                 __body["chunking_config"] = chunking_config
             if delayed_data_check_config is not None:
@@ -2464,6 +2481,7 @@ def put_job(
         analysis_config: t.Optional[t.Mapping[str, t.Any]] = None,
         data_description: t.Optional[t.Mapping[str, t.Any]] = None,
         allow_lazy_open: t.Optional[bool] = None,
+        allow_no_indices: t.Optional[bool] = None,
         analysis_limits: t.Optional[t.Mapping[str, t.Any]] = None,
         background_persist_interval: t.Optional[
             t.Union[str, t.Literal[-1], t.Literal[0]]
@@ -2473,9 +2491,19 @@ def put_job(
         datafeed_config: t.Optional[t.Mapping[str, t.Any]] = None,
         description: t.Optional[str] = None,
         error_trace: t.Optional[bool] = None,
+        expand_wildcards: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]]
+                ],
+                t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]],
+            ]
+        ] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         groups: t.Optional[t.Sequence[str]] = None,
         human: t.Optional[bool] = None,
+        ignore_throttled: t.Optional[bool] = None,
+        ignore_unavailable: t.Optional[bool] = None,
         model_plot_config: t.Optional[t.Mapping[str, t.Any]] = None,
         model_snapshot_retention_days: t.Optional[int] = None,
         pretty: t.Optional[bool] = None,
@@ -2510,6 +2538,9 @@ def put_job(
             to true, the open anomaly detection jobs API does not return an error and
             the job waits in the opening state until sufficient machine learning node
             capacity is available.
+        :param allow_no_indices: If `true`, wildcard indices expressions that resolve
+            into no concrete indices are ignored. This includes the `_all` string or
+            when no indices are specified.
         :param analysis_limits: Limits can be applied for the resources required to hold
             the mathematical models in memory. These limits are approximate and can be
             set per job. They do not control the memory used by other processes, for
@@ -2533,7 +2564,20 @@ def put_job(
             using those same roles. If you provide secondary authorization headers, those
             credentials are used instead.
         :param description: A description of the job.
+        :param expand_wildcards: Type of index that wildcard patterns can match. If the
+            request can target data streams, this argument determines whether wildcard
+            expressions match hidden data streams. Supports comma-separated values. Valid
+            values are: * `all`: Match any data stream or index, including hidden ones.
+            * `closed`: Match closed, non-hidden indices. Also matches any non-hidden
+            data stream. Data streams cannot be closed. * `hidden`: Match hidden data
+            streams and hidden indices. Must be combined with `open`, `closed`, or both.
+            * `none`: Wildcard patterns are not accepted. * `open`: Match open, non-hidden
+            indices. Also matches any non-hidden data stream.
         :param groups: A list of job groups. A job can belong to no groups or many.
+        :param ignore_throttled: If `true`, concrete, expanded or aliased indices are
+            ignored when frozen.
+        :param ignore_unavailable: If `true`, unavailable indices (missing or closed)
+            are ignored.
         :param model_plot_config: This advanced configuration option stores model information
             along with the results. It provides a more detailed view into anomaly detection.
             If you enable model plot it can add considerable overhead to the performance
@@ -2573,12 +2617,20 @@ def put_job(
         __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}'
         __query: t.Dict[str, t.Any] = {}
         __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if allow_no_indices is not None:
+            __query["allow_no_indices"] = allow_no_indices
         if error_trace is not None:
             __query["error_trace"] = error_trace
+        if expand_wildcards is not None:
+            __query["expand_wildcards"] = expand_wildcards
         if filter_path is not None:
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if ignore_throttled is not None:
+            __query["ignore_throttled"] = ignore_throttled
+        if ignore_unavailable is not None:
+            __query["ignore_unavailable"] = ignore_unavailable
         if pretty is not None:
             __query["pretty"] = pretty
         if not __body:
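
Finally, the synchronous counterpart of the trained-models example above,
again preferring `include="definition"` over the deprecated flag (the model id
is a placeholder):

    from elasticsearch_serverless import Elasticsearch

    def fetch_model_with_definition(
        client: Elasticsearch, model_id: str = "my-trained-model"
    ):
        return client.ml.get_trained_models(model_id=model_id, include="definition")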