diff --git a/elasticsearch/_async/client/cat.py b/elasticsearch/_async/client/cat.py
index ba9795103..80120a0c0 100644
--- a/elasticsearch/_async/client/cat.py
+++ b/elasticsearch/_async/client/cat.py
@@ -1774,7 +1774,200 @@ async def nodes(
filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
format: t.Optional[str] = None,
full_id: t.Optional[t.Union[bool, str]] = None,
- h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+ h: t.Optional[
+ t.Union[
+ t.Sequence[
+ t.Union[
+ str,
+ t.Literal[
+ "build",
+ "completion.size",
+ "cpu",
+ "disk.avail",
+ "disk.total",
+ "disk.used",
+ "disk.used_percent",
+ "fielddata.evictions",
+ "fielddata.memory_size",
+ "file_desc.current",
+ "file_desc.max",
+ "file_desc.percent",
+ "flush.total",
+ "flush.total_time",
+ "get.current",
+ "get.exists_time",
+ "get.exists_total",
+ "get.missing_time",
+ "get.missing_total",
+ "get.time",
+ "get.total",
+ "heap.current",
+ "heap.max",
+ "heap.percent",
+ "http_address",
+ "id",
+ "indexing.delete_current",
+ "indexing.delete_time",
+ "indexing.delete_total",
+ "indexing.index_current",
+ "indexing.index_failed",
+ "indexing.index_failed_due_to_version_conflict",
+ "indexing.index_time",
+ "indexing.index_total",
+ "ip",
+ "jdk",
+ "load_15m",
+ "load_1m",
+ "load_5m",
+ "mappings.total_count",
+ "mappings.total_estimated_overhead_in_bytes",
+ "master",
+ "merges.current",
+ "merges.current_docs",
+ "merges.current_size",
+ "merges.total",
+ "merges.total_docs",
+ "merges.total_size",
+ "merges.total_time",
+ "name",
+ "node.role",
+ "pid",
+ "port",
+ "query_cache.evictions",
+ "query_cache.hit_count",
+ "query_cache.memory_size",
+ "query_cache.miss_count",
+ "ram.current",
+ "ram.max",
+ "ram.percent",
+ "refresh.time",
+ "refresh.total",
+ "request_cache.evictions",
+ "request_cache.hit_count",
+ "request_cache.memory_size",
+ "request_cache.miss_count",
+ "script.cache_evictions",
+ "script.compilations",
+ "search.fetch_current",
+ "search.fetch_time",
+ "search.fetch_total",
+ "search.open_contexts",
+ "search.query_current",
+ "search.query_time",
+ "search.query_total",
+ "search.scroll_current",
+ "search.scroll_time",
+ "search.scroll_total",
+ "segments.count",
+ "segments.fixed_bitset_memory",
+ "segments.index_writer_memory",
+ "segments.memory",
+ "segments.version_map_memory",
+ "shard_stats.total_count",
+ "suggest.current",
+ "suggest.time",
+ "suggest.total",
+ "uptime",
+ "version",
+ ],
+ ]
+ ],
+ t.Union[
+ str,
+ t.Literal[
+ "build",
+ "completion.size",
+ "cpu",
+ "disk.avail",
+ "disk.total",
+ "disk.used",
+ "disk.used_percent",
+ "fielddata.evictions",
+ "fielddata.memory_size",
+ "file_desc.current",
+ "file_desc.max",
+ "file_desc.percent",
+ "flush.total",
+ "flush.total_time",
+ "get.current",
+ "get.exists_time",
+ "get.exists_total",
+ "get.missing_time",
+ "get.missing_total",
+ "get.time",
+ "get.total",
+ "heap.current",
+ "heap.max",
+ "heap.percent",
+ "http_address",
+ "id",
+ "indexing.delete_current",
+ "indexing.delete_time",
+ "indexing.delete_total",
+ "indexing.index_current",
+ "indexing.index_failed",
+ "indexing.index_failed_due_to_version_conflict",
+ "indexing.index_time",
+ "indexing.index_total",
+ "ip",
+ "jdk",
+ "load_15m",
+ "load_1m",
+ "load_5m",
+ "mappings.total_count",
+ "mappings.total_estimated_overhead_in_bytes",
+ "master",
+ "merges.current",
+ "merges.current_docs",
+ "merges.current_size",
+ "merges.total",
+ "merges.total_docs",
+ "merges.total_size",
+ "merges.total_time",
+ "name",
+ "node.role",
+ "pid",
+ "port",
+ "query_cache.evictions",
+ "query_cache.hit_count",
+ "query_cache.memory_size",
+ "query_cache.miss_count",
+ "ram.current",
+ "ram.max",
+ "ram.percent",
+ "refresh.time",
+ "refresh.total",
+ "request_cache.evictions",
+ "request_cache.hit_count",
+ "request_cache.memory_size",
+ "request_cache.miss_count",
+ "script.cache_evictions",
+ "script.compilations",
+ "search.fetch_current",
+ "search.fetch_time",
+ "search.fetch_total",
+ "search.open_contexts",
+ "search.query_current",
+ "search.query_time",
+ "search.query_total",
+ "search.scroll_current",
+ "search.scroll_time",
+ "search.scroll_total",
+ "segments.count",
+ "segments.fixed_bitset_memory",
+ "segments.index_writer_memory",
+ "segments.memory",
+ "segments.version_map_memory",
+ "shard_stats.total_count",
+ "suggest.current",
+ "suggest.time",
+ "suggest.total",
+ "uptime",
+ "version",
+ ],
+ ],
+ ]
+ ] = None,
help: t.Optional[bool] = None,
human: t.Optional[bool] = None,
include_unloaded_segments: t.Optional[bool] = None,
@@ -1801,16 +1994,17 @@ async def nodes(
to `text`, `json`, `cbor`, `yaml`, or `smile`.
:param full_id: If `true`, return the full node ID. If `false`, return the shortened
node ID.
- :param h: List of columns to appear in the response. Supports simple wildcards.
+ :param h: A comma-separated list of column names to display. It supports simple
+ wildcards.
:param help: When set to `true` will output available columns. This option can't
be combined with any other query string option.
:param include_unloaded_segments: If true, the response includes information
from segments that are not loaded into memory.
- :param master_timeout: Period to wait for a connection to the master node.
- :param s: List of columns that determine how the table should be sorted. Sorting
- defaults to ascending and can be changed by setting `:asc` or `:desc` as
- a suffix to the column name.
- :param time: Unit used to display time values.
+ :param master_timeout: The period to wait for a connection to the master node.
+ :param s: A comma-separated list of column names or aliases that determines the
+ sort order. Sorting defaults to ascending and can be changed by setting `:asc`
+ or `:desc` as a suffix to the column name.
+ :param time: The unit used to display time values.
:param v: When set to `true` will enable verbose output.
"""
__path_parts: t.Dict[str, str] = {}
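A minimal usage sketch of the literal-typed `h` parameter above (the client instance and column selection are illustrative, not part of this diff):

    # Assumes an existing AsyncElasticsearch instance named `client`.
    resp = await client.cat.nodes(
        h=["name", "heap.percent", "cpu", "master"],  # typed column names
        s=["cpu:desc"],  # sort by CPU, descending
        format="json",
        full_id=True,
    )
    print(resp)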
diff --git a/elasticsearch/_async/client/cluster.py b/elasticsearch/_async/client/cluster.py
index 78ab8d492..09b1b115a 100644
--- a/elasticsearch/_async/client/cluster.py
+++ b/elasticsearch/_async/client/cluster.py
@@ -870,9 +870,9 @@ async def put_settings(
:param flat_settings: Return settings in flat format (default: false)
:param master_timeout: Explicit operation timeout for connection to master node
- :param persistent:
+ :param persistent: The settings that persist after the cluster restarts.
:param timeout: Explicit operation timeout
- :param transient:
+ :param transient: The settings that do not persist after the cluster restarts.
"""
__path_parts: t.Dict[str, str] = {}
__path = "/_cluster/settings"
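A brief sketch of the persistent/transient distinction documented above (the setting names are common examples, not taken from this diff):

    # Persistent settings survive a full cluster restart.
    await client.cluster.put_settings(
        persistent={"indices.recovery.max_bytes_per_sec": "50mb"}
    )
    # Transient settings are cleared when the cluster restarts.
    await client.cluster.put_settings(
        transient={"cluster.routing.allocation.enable": "primaries"}
    )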
@@ -928,7 +928,7 @@ async def remote_info(
This API returns information that reflects current state on the local cluster.
The <code>connected</code> field does not necessarily reflect whether a remote cluster is down or unavailable, only whether there is currently an open connection to it.
Elasticsearch does not spontaneously try to reconnect to a disconnected remote cluster.
- To trigger a reconnection, attempt a cross-cluster search, ES|QL cross-cluster search, or try the resolve cluster endpoint.
+ To trigger a reconnection, attempt a cross-cluster search, ES|QL cross-cluster search, or try the <code>/_resolve/cluster</code> endpoint.
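For illustration, a minimal check of the `connected` field described above (remember it only reports an open connection, not remote health):

    info = await client.cluster.remote_info()
    for alias, remote in info.items():
        print(alias, remote["connected"])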
diff --git a/elasticsearch/_async/client/esql.py b/elasticsearch/_async/client/esql.py
index 6e3e00524..ca083db70 100644
--- a/elasticsearch/_async/client/esql.py
+++ b/elasticsearch/_async/client/esql.py
@@ -31,6 +31,8 @@ class EsqlClient(NamespacedClient):
"columnar",
"filter",
"include_ccs_metadata",
+ "keep_alive",
+ "keep_on_completion",
"locale",
"params",
"profile",
@@ -147,10 +149,6 @@ async def async_query(
__query["format"] = format
if human is not None:
__query["human"] = human
- if keep_alive is not None:
- __query["keep_alive"] = keep_alive
- if keep_on_completion is not None:
- __query["keep_on_completion"] = keep_on_completion
if pretty is not None:
__query["pretty"] = pretty
if not __body:
@@ -162,6 +160,10 @@ async def async_query(
__body["filter"] = filter
if include_ccs_metadata is not None:
__body["include_ccs_metadata"] = include_ccs_metadata
+ if keep_alive is not None:
+ __body["keep_alive"] = keep_alive
+ if keep_on_completion is not None:
+ __body["keep_on_completion"] = keep_on_completion
if locale is not None:
__body["locale"] = locale
if params is not None:
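The keep_alive/keep_on_completion move from query string to request body is transparent to callers; a sketch of an unchanged call site (query text and values are illustrative):

    resp = await client.esql.async_query(
        query="FROM my-index | LIMIT 10",  # hypothetical index
        keep_alive="5m",            # now serialized into the request body
        keep_on_completion=True,    # likewise moved out of the query string
    )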
diff --git a/elasticsearch/_async/client/fleet.py b/elasticsearch/_async/client/fleet.py
index 38b3771d5..aca07e0ac 100644
--- a/elasticsearch/_async/client/fleet.py
+++ b/elasticsearch/_async/client/fleet.py
@@ -138,9 +138,9 @@ async def msearch(
"""
.. raw:: html
- <p>Executes several fleet searches with a single API request.
- The API follows the same structure as the multi search API. However, similar to the fleet search API, it
- supports the wait_for_checkpoints parameter.</p>
+ <p>Executes several fleet searches with a single API request.
+ The API follows the same structure as the multi search (<code>_msearch</code>) API.
+ However, similar to the fleet search API, it supports the <code>wait_for_checkpoints</code> parameter.</p>
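A sketch of the wait_for_checkpoints behavior described above (the index name and checkpoint value are hypothetical):

    resp = await client.fleet.msearch(
        index="my-fleet-index",
        searches=[
            {},                             # header for the first sub-search
            {"query": {"match_all": {}}},   # its body
        ],
        wait_for_checkpoints=[2],  # block until this checkpoint is searchable
    )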
<code>_analyze</code> endpoint without a specified index will always use <code>10000</code> as its limit.
- <p>Delete a legacy index template.</p>
+ <p>Delete a legacy index template.
+ IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.</p>
- <p>Get index templates.
+ <p>Get legacy index templates.
Get information about one or more index templates.
IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.</p>
@@ -3850,8 +3851,34 @@ async def put_settings(
<p>Changes dynamic index settings in real time.
For data streams, index setting changes are applied to all backing indices by default.</p>
<p>To revert a setting to the default value, use a null value.
- The list of per-index settings that can be updated dynamically on live indices can be found in index module documentation.
+ The list of per-index settings that can be updated dynamically on live indices can be found in index settings documentation.
To preserve existing settings from being updated, set the <code>preserve_existing</code> parameter to <code>true</code>.</p>
+ <p>There are multiple valid ways to represent index settings in the request body. You can specify only the setting, for example:</p>
+ <pre><code>{
+   "number_of_replicas": 1
+ }
+ </code></pre>
+ <p>Or you can use an <code>index</code> setting object:</p>
+ <pre><code>{
+   "index": {
+     "number_of_replicas": 1
+   }
+ }
+ </code></pre>
+ <p>Or you can use dot notation:</p>
+ <pre><code>{
+   "index.number_of_replicas": 1
+ }
+ </code></pre>
+ <p>Or you can embed any of the aforementioned options in a <code>settings</code> object. For example:</p>
+ <pre><code>{
+   "settings": {
+     "index": {
+       "number_of_replicas": 1
+     }
+   }
+ }
+ </code></pre>
<p>NOTE: You can only define new analyzers on closed indices.
To add an analyzer, you must close the index, define the analyzer, and reopen the index.
You cannot close the write index of a data stream.</p>
@@ -3971,7 +3998,7 @@ async def put_template(
"""
.. raw:: html

- <p>Create or update an index template.
+ <p>Create or update a legacy index template.
Index templates define settings, mappings, and aliases that can be applied automatically to new indices.
Elasticsearch applies templates to new indices based on an index pattern that matches the index name.
IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.</p>
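A minimal sketch of the equivalent request-body shapes listed above, as seen from this client (the index name is illustrative):

    # All three are equivalent ways to set the same dynamic setting.
    await client.indices.put_settings(index="my-index", settings={"number_of_replicas": 1})
    await client.indices.put_settings(index="my-index", settings={"index": {"number_of_replicas": 1}})
    await client.indices.put_settings(index="my-index", settings={"index.number_of_replicas": 1})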
diff --git a/elasticsearch/_async/client/inference.py b/elasticsearch/_async/client/inference.py
index 7d90246e5..8ea5252f7 100644
--- a/elasticsearch/_async/client/inference.py
+++ b/elasticsearch/_async/client/inference.py
@@ -370,22 +370,37 @@ async def put(
"""
.. raw:: html

- <p>Create an inference endpoint.
- When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
- After creating the endpoint, wait for the model deployment to complete before using it.
- To verify the deployment status, use the get trained model statistics API.
- Look for <code>"state": "fully_allocated"</code> in the response and ensure that the <code>"allocation_count"</code> matches the <code>"target_allocation_count"</code>.
- Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
+ <p>Create an inference endpoint.</p>
<p>IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.</p>
+ <p>The following integrations are available through the inference API. You can find the available task types next to the integration name:</p>
+ <ul>
+ <li>AlibabaCloud AI Search (<code>completion</code>, <code>rerank</code>, <code>sparse_embedding</code>, <code>text_embedding</code>)</li>
+ <li>Amazon Bedrock (<code>completion</code>, <code>text_embedding</code>)</li>
+ <li>Anthropic (<code>completion</code>)</li>
+ <li>Azure AI Studio (<code>completion</code>, <code>text_embedding</code>)</li>
+ <li>Azure OpenAI (<code>completion</code>, <code>text_embedding</code>)</li>
+ <li>Cohere (<code>completion</code>, <code>rerank</code>, <code>text_embedding</code>)</li>
+ <li>Elasticsearch (<code>rerank</code>, <code>sparse_embedding</code>, <code>text_embedding</code> - this service is for built-in models and models uploaded through Eland)</li>
+ <li>ELSER (<code>sparse_embedding</code>)</li>
+ <li>Google AI Studio (<code>completion</code>, <code>text_embedding</code>)</li>
+ <li>Google Vertex AI (<code>rerank</code>, <code>text_embedding</code>)</li>
+ <li>Hugging Face (<code>text_embedding</code>)</li>
+ <li>Mistral (<code>text_embedding</code>)</li>
+ <li>OpenAI (<code>chat_completion</code>, <code>completion</code>, <code>text_embedding</code>)</li>
+ <li>VoyageAI (<code>text_embedding</code>, <code>rerank</code>)</li>
+ <li>Watsonx inference integration (<code>text_embedding</code>)</li>
+ <li>JinaAI (<code>text_embedding</code>, <code>rerank</code>)</li>
+ </ul>
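A hedged example of creating one of the endpoints listed above through the generic put API (the endpoint id and settings are illustrative):

    resp = await client.inference.put(
        task_type="text_embedding",
        inference_id="my-e5-endpoint",
        inference_config={
            "service": "elasticsearch",  # built-in models / Eland uploads
            "service_settings": {
                "model_id": ".multilingual-e5-small",
                "num_allocations": 1,
                "num_threads": 1,
            },
        },
    )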
<p>Create an AlibabaCloud AI Search inference endpoint.</p>
<p>Create an inference endpoint to perform an inference task with the <code>alibabacloud-ai-search</code> service.</p>
+ <p>When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+ After creating the endpoint, wait for the model deployment to complete before using it.
+ To verify the deployment status, use the get trained model statistics API.
+ Look for <code>"state": "fully_allocated"</code> in the response and ensure that the <code>"allocation_count"</code> matches the <code>"target_allocation_count"</code>.
+ Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
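A sketch of the deployment check the paragraph above prescribes; the model id and the response field paths are assumptions, not taken from this diff:

    stats = await client.ml.get_trained_models_stats(model_id=".multilingual-e5-small")
    for model in stats["trained_model_stats"]:
        status = model.get("deployment_stats", {}).get("allocation_status", {})
        ready = (
            status.get("state") == "fully_allocated"
            and status.get("allocation_count") == status.get("target_allocation_count")
        )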
<p>Create an Amazon Bedrock inference endpoint.</p>
- <p>Creates an inference endpoint to perform an inference task with the <code>amazonbedrock</code> service.</p>
+ <p>Create an inference endpoint to perform an inference task with the <code>amazonbedrock</code> service.</p>
<blockquote>
<p>info
You need to provide the access and secret keys only once, during the inference model creation.
The get inference API does not retrieve your access or secret keys.
After creating the inference model, you cannot change the associated key pairs.
If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys.</p>
</blockquote>
+ <p>When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+ After creating the endpoint, wait for the model deployment to complete before using it.
+ To verify the deployment status, use the get trained model statistics API.
+ Look for <code>"state": "fully_allocated"</code> in the response and ensure that the <code>"allocation_count"</code> matches the <code>"target_allocation_count"</code>.
+ Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
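Since the keys are write-only, they must be supplied at creation time; a hypothetical sketch (service settings follow the documented amazonbedrock fields, values are placeholders):

    await client.inference.put(
        task_type="completion",
        inference_id="my-bedrock-endpoint",
        inference_config={
            "service": "amazonbedrock",
            "service_settings": {
                "access_key": "<aws-access-key>",  # never returned by the get API
                "secret_key": "<aws-secret-key>",  # rotate by delete-and-recreate
                "region": "us-east-1",
                "provider": "amazontitan",
                "model": "amazon.titan-text-express-v1",
            },
        },
    )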
<p>Create an Anthropic inference endpoint.</p>
<p>Create an inference endpoint to perform an inference task with the <code>anthropic</code> service.</p>
+ <p>When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+ After creating the endpoint, wait for the model deployment to complete before using it.
+ To verify the deployment status, use the get trained model statistics API.
+ Look for <code>"state": "fully_allocated"</code> in the response and ensure that the <code>"allocation_count"</code> matches the <code>"target_allocation_count"</code>.
+ Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
<p>Create an Azure AI Studio inference endpoint.</p>
<p>Create an inference endpoint to perform an inference task with the <code>azureaistudio</code> service.</p>
+ <p>When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+ After creating the endpoint, wait for the model deployment to complete before using it.
+ To verify the deployment status, use the get trained model statistics API.
+ Look for <code>"state": "fully_allocated"</code> in the response and ensure that the <code>"allocation_count"</code> matches the <code>"target_allocation_count"</code>.
+ Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
The list of embeddings models that you can choose from in your deployment can be found in the Azure models documentation.
+ <p>When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+ After creating the endpoint, wait for the model deployment to complete before using it.
+ To verify the deployment status, use the get trained model statistics API.
+ Look for <code>"state": "fully_allocated"</code> in the response and ensure that the <code>"allocation_count"</code> matches the <code>"target_allocation_count"</code>.
+ Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
<p>Create a Cohere inference endpoint.</p>
<p>Create an inference endpoint to perform an inference task with the <code>cohere</code> service.</p>
+ <p>When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+ After creating the endpoint, wait for the model deployment to complete before using it.
+ To verify the deployment status, use the get trained model statistics API.
+ Look for <code>"state": "fully_allocated"</code> in the response and ensure that the <code>"allocation_count"</code> matches the <code>"target_allocation_count"</code>.
+ Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
<p>Create a Google AI Studio inference endpoint.</p>
<p>Create an inference endpoint to perform an inference task with the <code>googleaistudio</code> service.</p>
+ <p>When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+ After creating the endpoint, wait for the model deployment to complete before using it.
+ To verify the deployment status, use the get trained model statistics API.
+ Look for <code>"state": "fully_allocated"</code> in the response and ensure that the <code>"allocation_count"</code> matches the <code>"target_allocation_count"</code>.
+ Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
<p>Create a Google Vertex AI inference endpoint.</p>
<p>Create an inference endpoint to perform an inference task with the <code>googlevertexai</code> service.</p>
+ <p>When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+ After creating the endpoint, wait for the model deployment to complete before using it.
+ To verify the deployment status, use the get trained model statistics API.
+ Look for <code>"state": "fully_allocated"</code> in the response and ensure that the <code>"allocation_count"</code> matches the <code>"target_allocation_count"</code>.
+ Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
<li><code>multilingual-e5-base</code></li>
<li><code>multilingual-e5-small</code></li>
</ul>
+ <p>When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+ After creating the endpoint, wait for the model deployment to complete before using it.
+ To verify the deployment status, use the get trained model statistics API.
+ Look for <code>"state": "fully_allocated"</code> in the response and ensure that the <code>"allocation_count"</code> matches the <code>"target_allocation_count"</code>.
+ Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
<p>Create an inference endpoint to perform an inference task with the <code>jinaai</code> service.</p>
<p>To review the available <code>rerank</code> models, refer to https://jina.ai/reranker.
To review the available <code>text_embedding</code> models, refer to the https://jina.ai/embeddings/.</p>
+ <p>When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+ After creating the endpoint, wait for the model deployment to complete before using it.
+ To verify the deployment status, use the get trained model statistics API.
+ Look for <code>"state": "fully_allocated"</code> in the response and ensure that the <code>"allocation_count"</code> matches the <code>"target_allocation_count"</code>.
+ Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
<p>Create a Mistral inference endpoint.</p>
<p>Creates an inference endpoint to perform an inference task with the <code>mistral</code> service.</p>
+ <p>When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+ After creating the endpoint, wait for the model deployment to complete before using it.
+ To verify the deployment status, use the get trained model statistics API.
+ Look for <code>"state": "fully_allocated"</code> in the response and ensure that the <code>"allocation_count"</code> matches the <code>"target_allocation_count"</code>.
+ Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
<p>Create an OpenAI inference endpoint.</p>
<p>Create an inference endpoint to perform an inference task with the <code>openai</code> service or <code>openai</code> compatible APIs.</p>
+ <p>When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+ After creating the endpoint, wait for the model deployment to complete before using it.
+ To verify the deployment status, use the get trained model statistics API.
+ Look for <code>"state": "fully_allocated"</code> in the response and ensure that the <code>"allocation_count"</code> matches the <code>"target_allocation_count"</code>.
+ Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
<p>Create an inference endpoint to perform an inference task with the <code>watsonxai</code> service.
You need an IBM Cloud Databases for Elasticsearch deployment to use the <code>watsonxai</code> inference service.
You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform.</p>
+ <p>When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+ After creating the endpoint, wait for the model deployment to complete before using it.
+ To verify the deployment status, use the get trained model statistics API.
+ Look for <code>"state": "fully_allocated"</code> in the response and ensure that the <code>"allocation_count"</code> matches the <code>"target_allocation_count"</code>.
+ Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
The <code>connected</code> field does not necessarily reflect whether a remote cluster is down or unavailable, only whether there is currently an open connection to it.
Elasticsearch does not spontaneously try to reconnect to a disconnected remote cluster.
- To trigger a reconnection, attempt a cross-cluster search, ES|QL cross-cluster search, or try the resolve cluster endpoint.
+ To trigger a reconnection, attempt a cross-cluster search, ES|QL cross-cluster search, or try the <code>/_resolve/cluster</code> endpoint.
diff --git a/elasticsearch/_sync/client/esql.py b/elasticsearch/_sync/client/esql.py
index b4ddd2052..9e354f747 100644
--- a/elasticsearch/_sync/client/esql.py
+++ b/elasticsearch/_sync/client/esql.py
@@ -31,6 +31,8 @@ class EsqlClient(NamespacedClient):
"columnar",
"filter",
"include_ccs_metadata",
+ "keep_alive",
+ "keep_on_completion",
"locale",
"params",
"profile",
@@ -147,10 +149,6 @@ def async_query(
__query["format"] = format
if human is not None:
__query["human"] = human
- if keep_alive is not None:
- __query["keep_alive"] = keep_alive
- if keep_on_completion is not None:
- __query["keep_on_completion"] = keep_on_completion
if pretty is not None:
__query["pretty"] = pretty
if not __body:
@@ -162,6 +160,10 @@ def async_query(
__body["filter"] = filter
if include_ccs_metadata is not None:
__body["include_ccs_metadata"] = include_ccs_metadata
+ if keep_alive is not None:
+ __body["keep_alive"] = keep_alive
+ if keep_on_completion is not None:
+ __body["keep_on_completion"] = keep_on_completion
if locale is not None:
__body["locale"] = locale
if params is not None:
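The synchronous call site is unchanged as well; a brief sketch (query text and values are illustrative):

    resp = client.esql.async_query(
        query="FROM my-index | LIMIT 10",
        keep_alive="5m",
        keep_on_completion=True,
    )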
diff --git a/elasticsearch/_sync/client/fleet.py b/elasticsearch/_sync/client/fleet.py
index b27c06f09..1d8284625 100644
--- a/elasticsearch/_sync/client/fleet.py
+++ b/elasticsearch/_sync/client/fleet.py
@@ -138,9 +138,9 @@ def msearch(
"""
.. raw:: html
- <p>Executes several fleet searches with a single API request.
- The API follows the same structure as the multi search API. However, similar to the fleet search API, it
- supports the wait_for_checkpoints parameter.</p>
+ <p>Executes several fleet searches with a single API request.
+ The API follows the same structure as the multi search (<code>_msearch</code>) API.
+ However, similar to the fleet search API, it supports the <code>wait_for_checkpoints</code> parameter.</p>
<code>_analyze</code> endpoint without a specified index will always use <code>10000</code> as its limit.
- <p>Delete a legacy index template.</p>
+ <p>Delete a legacy index template.
+ IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.</p>
- <p>Get index templates.
+ <p>Get legacy index templates.
Get information about one or more index templates.
IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.</p>
@@ -3850,8 +3851,34 @@ def put_settings(
<p>Changes dynamic index settings in real time.
For data streams, index setting changes are applied to all backing indices by default.</p>
<p>To revert a setting to the default value, use a null value.
- The list of per-index settings that can be updated dynamically on live indices can be found in index module documentation.
+ The list of per-index settings that can be updated dynamically on live indices can be found in index settings documentation.
To preserve existing settings from being updated, set the <code>preserve_existing</code> parameter to <code>true</code>.</p>
+ <p>There are multiple valid ways to represent index settings in the request body. You can specify only the setting, for example:</p>
+ <pre><code>{
+   "number_of_replicas": 1
+ }
+ </code></pre>
+ <p>Or you can use an <code>index</code> setting object:</p>
+ <pre><code>{
+   "index": {
+     "number_of_replicas": 1
+   }
+ }
+ </code></pre>
+ <p>Or you can use dot notation:</p>
+ <pre><code>{
+   "index.number_of_replicas": 1
+ }
+ </code></pre>
+ <p>Or you can embed any of the aforementioned options in a <code>settings</code> object. For example:</p>
+ <pre><code>{
+   "settings": {
+     "index": {
+       "number_of_replicas": 1
+     }
+   }
+ }
+ </code></pre>
<p>NOTE: You can only define new analyzers on closed indices.
To add an analyzer, you must close the index, define the analyzer, and reopen the index.
You cannot close the write index of a data stream.</p>
@@ -3971,7 +3998,7 @@ def put_template(
"""
.. raw:: html

- <p>Create or update an index template.
+ <p>Create or update a legacy index template.
Index templates define settings, mappings, and aliases that can be applied automatically to new indices.
Elasticsearch applies templates to new indices based on an index pattern that matches the index name.
IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.</p>
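The same equivalent body shapes apply to the synchronous client; a brief illustrative sketch:

    client.indices.put_settings(index="my-index", settings={"index.number_of_replicas": 1})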
diff --git a/elasticsearch/_sync/client/inference.py b/elasticsearch/_sync/client/inference.py
index e77ad84f0..4b1390c29 100644
--- a/elasticsearch/_sync/client/inference.py
+++ b/elasticsearch/_sync/client/inference.py
@@ -370,22 +370,37 @@ def put(
"""
.. raw:: html

- <p>Create an inference endpoint.
- When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
- After creating the endpoint, wait for the model deployment to complete before using it.
- To verify the deployment status, use the get trained model statistics API.
- Look for <code>"state": "fully_allocated"</code> in the response and ensure that the <code>"allocation_count"</code> matches the <code>"target_allocation_count"</code>.
- Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
+ <p>Create an inference endpoint.</p>
<p>IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.</p>
+ <p>The following integrations are available through the inference API. You can find the available task types next to the integration name:</p>
+ <ul>
+ <li>AlibabaCloud AI Search (<code>completion</code>, <code>rerank</code>, <code>sparse_embedding</code>, <code>text_embedding</code>)</li>
+ <li>Amazon Bedrock (<code>completion</code>, <code>text_embedding</code>)</li>
+ <li>Anthropic (<code>completion</code>)</li>
+ <li>Azure AI Studio (<code>completion</code>, <code>text_embedding</code>)</li>
+ <li>Azure OpenAI (<code>completion</code>, <code>text_embedding</code>)</li>
+ <li>Cohere (<code>completion</code>, <code>rerank</code>, <code>text_embedding</code>)</li>
+ <li>Elasticsearch (<code>rerank</code>, <code>sparse_embedding</code>, <code>text_embedding</code> - this service is for built-in models and models uploaded through Eland)</li>
+ <li>ELSER (<code>sparse_embedding</code>)</li>
+ <li>Google AI Studio (<code>completion</code>, <code>text_embedding</code>)</li>
+ <li>Google Vertex AI (<code>rerank</code>, <code>text_embedding</code>)</li>
+ <li>Hugging Face (<code>text_embedding</code>)</li>
+ <li>Mistral (<code>text_embedding</code>)</li>
+ <li>OpenAI (<code>chat_completion</code>, <code>completion</code>, <code>text_embedding</code>)</li>
+ <li>VoyageAI (<code>text_embedding</code>, <code>rerank</code>)</li>
+ <li>Watsonx inference integration (<code>text_embedding</code>)</li>
+ <li>JinaAI (<code>text_embedding</code>, <code>rerank</code>)</li>
+ </ul>
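The synchronous counterpart of the endpoint-creation sketch above (the endpoint id and settings remain illustrative):

    resp = client.inference.put(
        task_type="sparse_embedding",
        inference_id="my-elser-endpoint",
        inference_config={
            "service": "elser",
            "service_settings": {"num_allocations": 1, "num_threads": 1},
        },
    )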
<p>Create an AlibabaCloud AI Search inference endpoint.</p>
<p>Create an inference endpoint to perform an inference task with the <code>alibabacloud-ai-search</code> service.</p>
+ <p>When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+ After creating the endpoint, wait for the model deployment to complete before using it.
+ To verify the deployment status, use the get trained model statistics API.
+ Look for <code>"state": "fully_allocated"</code> in the response and ensure that the <code>"allocation_count"</code> matches the <code>"target_allocation_count"</code>.
+ Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
<p>Create an Amazon Bedrock inference endpoint.</p>
- <p>Creates an inference endpoint to perform an inference task with the <code>amazonbedrock</code> service.</p>
+ <p>Create an inference endpoint to perform an inference task with the <code>amazonbedrock</code> service.</p>
<blockquote>
<p>info
You need to provide the access and secret keys only once, during the inference model creation.
The get inference API does not retrieve your access or secret keys.
After creating the inference model, you cannot change the associated key pairs.
If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys.</p>
</blockquote>
+ <p>When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+ After creating the endpoint, wait for the model deployment to complete before using it.
+ To verify the deployment status, use the get trained model statistics API.
+ Look for <code>"state": "fully_allocated"</code> in the response and ensure that the <code>"allocation_count"</code> matches the <code>"target_allocation_count"</code>.
+ Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
<p>Create an Anthropic inference endpoint.</p>
<p>Create an inference endpoint to perform an inference task with the <code>anthropic</code> service.</p>
+ <p>When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+ After creating the endpoint, wait for the model deployment to complete before using it.
+ To verify the deployment status, use the get trained model statistics API.
+ Look for <code>"state": "fully_allocated"</code> in the response and ensure that the <code>"allocation_count"</code> matches the <code>"target_allocation_count"</code>.
+ Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
<p>Create an Azure AI Studio inference endpoint.</p>
<p>Create an inference endpoint to perform an inference task with the <code>azureaistudio</code> service.</p>
+ <p>When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+ After creating the endpoint, wait for the model deployment to complete before using it.
+ To verify the deployment status, use the get trained model statistics API.
+ Look for <code>"state": "fully_allocated"</code> in the response and ensure that the <code>"allocation_count"</code> matches the <code>"target_allocation_count"</code>.
+ Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
The list of embeddings models that you can choose from in your deployment can be found in the Azure models documentation.
+ <p>When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+ After creating the endpoint, wait for the model deployment to complete before using it.
+ To verify the deployment status, use the get trained model statistics API.
+ Look for <code>"state": "fully_allocated"</code> in the response and ensure that the <code>"allocation_count"</code> matches the <code>"target_allocation_count"</code>.
+ Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
<p>Create a Cohere inference endpoint.</p>
<p>Create an inference endpoint to perform an inference task with the <code>cohere</code> service.</p>
+ <p>When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+ After creating the endpoint, wait for the model deployment to complete before using it.
+ To verify the deployment status, use the get trained model statistics API.
+ Look for <code>"state": "fully_allocated"</code> in the response and ensure that the <code>"allocation_count"</code> matches the <code>"target_allocation_count"</code>.
+ Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
<p>Create a Google AI Studio inference endpoint.</p>
<p>Create an inference endpoint to perform an inference task with the <code>googleaistudio</code> service.</p>
+ <p>When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+ After creating the endpoint, wait for the model deployment to complete before using it.
+ To verify the deployment status, use the get trained model statistics API.
+ Look for <code>"state": "fully_allocated"</code> in the response and ensure that the <code>"allocation_count"</code> matches the <code>"target_allocation_count"</code>.
+ Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
<p>Create a Google Vertex AI inference endpoint.</p>
<p>Create an inference endpoint to perform an inference task with the <code>googlevertexai</code> service.</p>
+ <p>When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+ After creating the endpoint, wait for the model deployment to complete before using it.
+ To verify the deployment status, use the get trained model statistics API.
+ Look for <code>"state": "fully_allocated"</code> in the response and ensure that the <code>"allocation_count"</code> matches the <code>"target_allocation_count"</code>.
+ Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
<li><code>multilingual-e5-base</code></li>
<li><code>multilingual-e5-small</code></li>
</ul>
+ <p>When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+ After creating the endpoint, wait for the model deployment to complete before using it.
+ To verify the deployment status, use the get trained model statistics API.
+ Look for <code>"state": "fully_allocated"</code> in the response and ensure that the <code>"allocation_count"</code> matches the <code>"target_allocation_count"</code>.
+ Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
<p>Create an inference endpoint to perform an inference task with the <code>jinaai</code> service.</p>
<p>To review the available <code>rerank</code> models, refer to https://jina.ai/reranker.
To review the available <code>text_embedding</code> models, refer to the https://jina.ai/embeddings/.</p>
+ <p>When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+ After creating the endpoint, wait for the model deployment to complete before using it.
+ To verify the deployment status, use the get trained model statistics API.
+ Look for <code>"state": "fully_allocated"</code> in the response and ensure that the <code>"allocation_count"</code> matches the <code>"target_allocation_count"</code>.
+ Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
<p>Create a Mistral inference endpoint.</p>
<p>Creates an inference endpoint to perform an inference task with the <code>mistral</code> service.</p>
+ <p>When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+ After creating the endpoint, wait for the model deployment to complete before using it.
+ To verify the deployment status, use the get trained model statistics API.
+ Look for <code>"state": "fully_allocated"</code> in the response and ensure that the <code>"allocation_count"</code> matches the <code>"target_allocation_count"</code>.
+ Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
<p>Create an OpenAI inference endpoint.</p>
<p>Create an inference endpoint to perform an inference task with the <code>openai</code> service or <code>openai</code> compatible APIs.</p>
+ <p>When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+ After creating the endpoint, wait for the model deployment to complete before using it.
+ To verify the deployment status, use the get trained model statistics API.
+ Look for <code>"state": "fully_allocated"</code> in the response and ensure that the <code>"allocation_count"</code> matches the <code>"target_allocation_count"</code>.
+ Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
<p>Create an inference endpoint to perform an inference task with the <code>watsonxai</code> service.
You need an IBM Cloud Databases for Elasticsearch deployment to use the <code>watsonxai</code> inference service.
You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform.</p>
+ <p>When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+ After creating the endpoint, wait for the model deployment to complete before using it.
+ To verify the deployment status, use the get trained model statistics API.
+ Look for <code>"state": "fully_allocated"</code> in the response and ensure that the <code>"allocation_count"</code> matches the <code>"target_allocation_count"</code>.
+ Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>