diff --git a/api-extractor.json b/api-extractor.json new file mode 100644 index 000000000..bf29c6946 --- /dev/null +++ b/api-extractor.json @@ -0,0 +1,39 @@ +{ + "$schema": "https://developer.microsoft.com/json-schemas/api-extractor/v7/api-extractor.schema.json", + "mainEntryPointFilePath": "<projectFolder>/lib/client.d.ts", + "bundledPackages": [ + "@elastic/*" + ], + "apiReport": { + "enabled": false + }, + "docModel": { + "enabled": true, + "apiJsonFilePath": "<projectFolder>/api-extractor/<unscopedPackageName>.api.json", + "includeForgottenExports": true + }, + "dtsRollup": { + "enabled": false + }, + "tsdocMetadata": { + "enabled": true, + "tsdocMetadataFilePath": "<projectFolder>/api-extractor/tsdoc-metadata.json" + }, + "messages": { + "compilerMessageReporting": { + "default": { + "logLevel": "warning" + } + }, + "extractorMessageReporting": { + "default": { + "logLevel": "warning" + } + }, + "tsdocMessageReporting": { + "default": { + "logLevel": "warning" + } + } + } +} diff --git a/docs/basic-config.asciidoc b/docs/basic-config.asciidoc index 799866f93..fa0982618 100644 --- a/docs/basic-config.asciidoc +++ b/docs/basic-config.asciidoc @@ -85,7 +85,7 @@ _Default:_ `3` _Default:_ `30000` |`pingTimeout` -|`number` - Max ping request timeout in milliseconds for each request. + +|`number` - Max number of milliseconds a `ClusterConnectionPool` will wait when pinging nodes before marking them dead. + _Default:_ `3000` |`sniffInterval` @@ -105,17 +105,13 @@ _Default:_ `'_nodes/_all/http'` _Default:_ `false` |`resurrectStrategy` -|`string` - Configure the node resurrection strategy. + +|`string` - Strategy for resurrecting dead nodes when using `ClusterConnectionPool`. 'ping' will issue a test request to a node and resurrect it if it responds. 'optimistic' marks a node as alive without testing it. 'none' will never attempt to revive a dead connection. + _Options:_ `'ping'`, `'optimistic'`, `'none'` + _Default:_ `'ping'` -|`suggestCompression` -|`boolean` - Adds `accept-encoding` header to every request. + -_Default:_ `false` - |`compression` |`string, boolean` - Enables gzip request body compression. + -_Options:_ `'gzip'`, `false` + +_Options:_ `true`, `false` + _Default:_ `false` |`tls` diff --git a/docs/changelog.asciidoc b/docs/changelog.asciidoc index 02745171c..9f9de0e29 100644 --- a/docs/changelog.asciidoc +++ b/docs/changelog.asciidoc @@ -118,8 +118,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/8.15/release-notes-8.15. ===== OpenTelemetry zero-code instrumentation support For those that use an observability service that supports OpenTelemetry spans, the client will now automatically generate traces for each Elasticsearch request it makes. -See {jsclient}/observability.html#_opentelemetry[the docs] -for more information. +See <<o11y-otel,the docs>> for more information. [discrete] === 8.14.1 @@ -329,7 +328,7 @@ https://www.elastic.co/guide/en/elasticsearch/reference/8.9/release-notes-8.9.0. [discrete] ===== Allow document to be overwritten in `onDocument` iteratee of bulk helper https://github.com/elastic/elasticsearch-js/pull/1732[#1732] -In the {jsclient}/client-helpers.html#bulk-helper[bulk helper], documents could not be modified before being sent to Elasticsearch. It is now possible to {jsclient}/client-helpers.html#_modifying_a_document_before_operation[modify a document] before sending it. +In the <<bulk-helper,bulk helper>>, documents could not be modified before being sent to Elasticsearch. 
It is now possible to <<bulk-modify-doc,modify a document>> before sending it. [discrete] ==== Fixes diff --git a/docs/examples/bulk.asciidoc b/docs/examples/bulk.asciidoc index 74725c9e9..90f5de27a 100644 --- a/docs/examples/bulk.asciidoc +++ b/docs/examples/bulk.asciidoc @@ -1,10 +1,10 @@ [[bulk_examples]] === Bulk -With the {jsclient}/api-reference.html#_bulk[`bulk` API], you can perform multiple index/delete operations in a +With the <<client.bulk,`bulk` API>>, you can perform multiple index/delete operations in a single API call. The `bulk` API significantly increases indexing speed. -NOTE: You can also use the {jsclient}/client-helpers.html[bulk helper]. +NOTE: You can also use the <<bulk-helper,bulk helper>>. [source,js] ---- diff --git a/docs/examples/scroll.asciidoc b/docs/examples/scroll.asciidoc index 0f23a1bc1..87f302876 100644 --- a/docs/examples/scroll.asciidoc +++ b/docs/examples/scroll.asciidoc @@ -1,25 +1,25 @@ [[scroll_examples]] === Scroll -While a search request returns a single “page” of results, the scroll API can be -used to retrieve large numbers of results (or even all results) from a single -search request, in much the same way as you would use a cursor on a traditional +While a search request returns a single “page” of results, the scroll API can be +used to retrieve large numbers of results (or even all results) from a single +search request, in much the same way as you would use a cursor on a traditional database. -Scrolling is not intended for real time user requests, but rather for processing -large amounts of data, for example in order to reindex the contents of one index +Scrolling is not intended for real time user requests, but rather for processing +large amounts of data, for example in order to reindex the contents of one index into a new index with a different configuration. -NOTE: The results that are returned from a scroll request reflect the state of -the index at the time that the initial search request was made, like a snapshot -in time. Subsequent changes to documents (index, update or delete) will only +NOTE: The results that are returned from a scroll request reflect the state of +the index at the time that the initial search request was made, like a snapshot +in time. Subsequent changes to documents (index, update or delete) will only affect later search requests. -In order to use scrolling, the initial search request should specify the scroll -parameter in the query string, which tells {es} how long it should keep the +In order to use scrolling, the initial search request should specify the scroll +parameter in the query string, which tells {es} how long it should keep the “search context” alive. -NOTE: Did you know that we provide an helper for sending scroll requests? You can find it {jsclient}/client-helpers.html[here]. +NOTE: Did you know that we provide an helper for sending scroll requests? You can find it <<scroll-search-helper,here>>. [source,js] ---- @@ -113,7 +113,7 @@ async function run () { run().catch(console.log) ---- -Another cool usage of the `scroll` API can be done with Node.js ≥ 10, by using +Another cool usage of the `scroll` API can be done with Node.js ≥ 10, by using async iteration! 
[source,js] diff --git a/docs/helpers.asciidoc b/docs/helpers.asciidoc index cb60dbc51..021000417 100644 --- a/docs/helpers.asciidoc +++ b/docs/helpers.asciidoc @@ -338,6 +338,7 @@ console.log(result) ---- [discrete] +[[bulk-modify-doc]] ==== Modifying a document before operation ~Added~ ~in~ ~`v8.8.2`~ diff --git a/docs/index.asciidoc b/docs/index.asciidoc index 51206f0b0..6079ce079 100644 --- a/docs/index.asciidoc +++ b/docs/index.asciidoc @@ -1,8 +1,5 @@ = Elasticsearch JavaScript Client -include::{asciidoc-dir}/../../shared/versions/stack/{source_branch}.asciidoc[] -include::{asciidoc-dir}/../../shared/attributes.asciidoc[] - include::introduction.asciidoc[] include::getting-started.asciidoc[] include::changelog.asciidoc[] @@ -17,8 +14,7 @@ include::integrations.asciidoc[] include::observability.asciidoc[] include::transport.asciidoc[] include::typescript.asciidoc[] -include::reference.asciidoc[] +include::reference/main.asciidoc[] include::examples/index.asciidoc[] include::helpers.asciidoc[] -include::redirects.asciidoc[] include::timeout-best-practices.asciidoc[] diff --git a/docs/observability.asciidoc b/docs/observability.asciidoc index 9436d457f..f75b8d87c 100644 --- a/docs/observability.asciidoc +++ b/docs/observability.asciidoc @@ -16,6 +16,7 @@ features. All of these observability features are documented below. [discrete] +[[o11y-otel]] ==== OpenTelemetry The client supports OpenTelemetry's https://opentelemetry.io/docs/zero-code/js/[zero-code diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc deleted file mode 100644 index ddbff0a0b..000000000 --- a/docs/reference.asciidoc +++ /dev/null @@ -1,12083 +0,0 @@ -[[api-reference]] -//////// -=========================================================================================================================== -|| || -|| || -|| || -|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || -|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || -|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || -|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || -|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || -|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || -|| || -|| || -|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || -|| You should update the script that does the generation, which can be found in: || -|| https://github.com/elastic/elastic-client-generator-js || -|| || -|| You can run the script with the following command: || -|| npm run elasticsearch -- --version <version> || -|| || -|| || -|| || -=========================================================================================================================== -//////// -== API Reference - -[discrete] -=== bulk -Bulk index or delete documents. -Performs multiple indexing or delete operations in a single API call. -This reduces overhead and can greatly increase indexing speed. - -{ref}/docs-bulk.html[Endpoint documentation] -[source,ts] ----- -client.bulk({ ... }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string)*: Name of the data stream, index, or index alias to perform bulk actions on. -** *`operations` (Optional, { index, create, update, delete } | { detect_noop, doc, doc_as_upsert, script, scripted_upsert, _source, upsert } | object[])* -** *`pipeline` (Optional, string)*: ID of the pipeline to use to preprocess incoming documents. 
-If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. -If a final pipeline is configured it will always run, regardless of the value of this parameter. -** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` do nothing with refreshes. -Valid values: `true`, `false`, `wait_for`. -** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. -** *`_source` (Optional, boolean | string | string[])*: `true` or `false` to return the `_source` field or not, or a list of fields to return. -** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude from the response. -** *`_source_includes` (Optional, string | string[])*: A list of source fields to include in the response. -** *`timeout` (Optional, string | -1 | 0)*: Period each action waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. -** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. -Set to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). -** *`require_alias` (Optional, boolean)*: If `true`, the request’s actions must target an index alias. - -[discrete] -=== clear_scroll -Clear a scrolling search. - -Clear the search context and results for a scrolling search. - -{ref}/clear-scroll-api.html[Endpoint documentation] -[source,ts] ----- -client.clearScroll({ ... }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`scroll_id` (Optional, string | string[])*: List of scroll IDs to clear. -To clear all scroll IDs, use `_all`. - -[discrete] -=== close_point_in_time -Close a point in time. - -A point in time must be opened explicitly before being used in search requests. -The `keep_alive` parameter tells Elasticsearch how long it should persist. -A point in time is automatically closed when the `keep_alive` period has elapsed. -However, keeping points in time has a cost; close them as soon as they are no longer required for search requests. - -{ref}/point-in-time-api.html[Endpoint documentation] -[source,ts] ----- -client.closePointInTime({ id }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: The ID of the point-in-time. - -[discrete] -=== count -Count search results. -Get the number of documents matching a query. - -{ref}/search-count.html[Endpoint documentation] -[source,ts] ----- -client.count({ ... }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases to search. -Supports wildcards (`*`). -To search all data streams and indices, omit this parameter or use `*` or `_all`. 
-** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. -** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. -This behavior applies even if the request targets other open indices. -** *`analyzer` (Optional, string)*: Analyzer to use for the query string. -This parameter can only be used when the `q` query string parameter is specified. -** *`analyze_wildcard` (Optional, boolean)*: If `true`, wildcard and prefix queries are analyzed. -This parameter can only be used when the `q` query string parameter is specified. -** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query: `AND` or `OR`. -This parameter can only be used when the `q` query string parameter is specified. -** *`df` (Optional, string)*: Field to use as default where no field prefix is given in the query string. -This parameter can only be used when the `q` query string parameter is specified. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. -If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -Supports a list of values, such as `open,hidden`. -** *`ignore_throttled` (Optional, boolean)*: If `true`, concrete, expanded or aliased indices are ignored when frozen. -** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. -** *`lenient` (Optional, boolean)*: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. -** *`min_score` (Optional, number)*: Sets the minimum `_score` value that documents must have to be included in the result. -** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. -Random by default. -** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. -** *`terminate_after` (Optional, number)*: Maximum number of documents to collect for each shard. -If a query reaches this limit, Elasticsearch terminates the query early. -Elasticsearch collects documents before sorting. -** *`q` (Optional, string)*: Query in the Lucene query string syntax. - -[discrete] -=== create -Index a document. -Adds a JSON document to the specified data stream or index and makes it searchable. -If the target is an index and the document already exists, the request updates the document and increments its version. 
- -{ref}/docs-index_.html[Endpoint documentation] -[source,ts] ----- -client.create({ id, index }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: Unique identifier for the document. -** *`index` (string)*: Name of the data stream or index to target. -If the target doesn’t exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. -If the target doesn’t exist and doesn’t match a data stream template, this request creates the index. -** *`document` (Optional, object)*: A document. -** *`pipeline` (Optional, string)*: ID of the pipeline to use to preprocess incoming documents. -If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. -If a final pipeline is configured it will always run, regardless of the value of this parameter. -** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` do nothing with refreshes. -Valid values: `true`, `false`, `wait_for`. -** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. -** *`timeout` (Optional, string | -1 | 0)*: Period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. -** *`version` (Optional, number)*: Explicit version number for concurrency control. -The specified version must match the current version of the document for the request to succeed. -** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: Specific version type: `external`, `external_gte`. -** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. -Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). - -[discrete] -=== delete -Delete a document. -Removes a JSON document from the specified index. - -{ref}/docs-delete.html[Endpoint documentation] -[source,ts] ----- -client.delete({ id, index }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: Unique identifier for the document. -** *`index` (string)*: Name of the target index. -** *`if_primary_term` (Optional, number)*: Only perform the operation if the document has this primary term. -** *`if_seq_no` (Optional, number)*: Only perform the operation if the document has this sequence number. -** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` do nothing with refreshes. -Valid values: `true`, `false`, `wait_for`. -** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for active shards. -** *`version` (Optional, number)*: Explicit version number for concurrency control. -The specified version must match the current version of the document for the request to succeed. -** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: Specific version type: `external`, `external_gte`. 
-** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. -Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). - -[discrete] -=== delete_by_query -Delete documents. -Deletes documents that match the specified query. - -{ref}/docs-delete-by-query.html[Endpoint documentation] -[source,ts] ----- -client.deleteByQuery({ index }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string | string[])*: List of data streams, indices, and aliases to search. -Supports wildcards (`*`). -To search all data streams or indices, omit this parameter or use `*` or `_all`. -** *`max_docs` (Optional, number)*: The maximum number of documents to delete. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specifies the documents to delete using the Query DSL. -** *`slice` (Optional, { field, id, max })*: Slice the request manually using the provided slice ID and total number of slices. -** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. -This behavior applies even if the request targets other open indices. -For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. -** *`analyzer` (Optional, string)*: Analyzer to use for the query string. -** *`analyze_wildcard` (Optional, boolean)*: If `true`, wildcard and prefix queries are analyzed. -** *`conflicts` (Optional, Enum("abort" | "proceed"))*: What to do if delete by query hits version conflicts: `abort` or `proceed`. -** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query: `AND` or `OR`. -** *`df` (Optional, string)*: Field to use as default where no field prefix is given in the query string. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. -If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. -** *`from` (Optional, number)*: Starting offset (default: 0) -** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. -** *`lenient` (Optional, boolean)*: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. -** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. -Random by default. 
-** *`refresh` (Optional, boolean)*: If `true`, Elasticsearch refreshes all shards involved in the delete by query after the request completes. -** *`request_cache` (Optional, boolean)*: If `true`, the request cache is used for this request. -Defaults to the index-level setting. -** *`requests_per_second` (Optional, float)*: The throttle for this request in sub-requests per second. -** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. -** *`q` (Optional, string)*: Query in the Lucene query string syntax. -** *`scroll` (Optional, string | -1 | 0)*: Period to retain the search context for scrolling. -** *`scroll_size` (Optional, number)*: Size of the scroll request that powers the operation. -** *`search_timeout` (Optional, string | -1 | 0)*: Explicit timeout for each search request. -Defaults to no timeout. -** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: The type of the search operation. -Available options: `query_then_fetch`, `dfs_query_then_fetch`. -** *`slices` (Optional, number | Enum("auto"))*: The number of slices this task should be divided into. -** *`sort` (Optional, string[])*: A list of <field>:<direction> pairs. -** *`stats` (Optional, string[])*: Specific `tag` of the request for logging and statistical purposes. -** *`terminate_after` (Optional, number)*: Maximum number of documents to collect for each shard. -If a query reaches this limit, Elasticsearch terminates the query early. -Elasticsearch collects documents before sorting. -Use with caution. -Elasticsearch applies this parameter to each shard handling the request. -When possible, let Elasticsearch perform early termination automatically. -Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. -** *`timeout` (Optional, string | -1 | 0)*: Period each deletion request waits for active shards. -** *`version` (Optional, boolean)*: If `true`, returns the document version as part of a hit. -** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. -Set to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). -** *`wait_for_completion` (Optional, boolean)*: If `true`, the request blocks until the operation is complete. - -[discrete] -=== delete_by_query_rethrottle -Throttle a delete by query operation. - -Change the number of requests per second for a particular delete by query operation. -Rethrottling that speeds up the query takes effect immediately but rethrotting that slows down the query takes effect after completing the current batch to prevent scroll timeouts. - -{ref}/docs-delete-by-query.html[Endpoint documentation] -[source,ts] ----- -client.deleteByQueryRethrottle({ task_id }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`task_id` (string | number)*: The ID for the task. -** *`requests_per_second` (Optional, float)*: The throttle for this request in sub-requests per second. - -[discrete] -=== delete_script -Delete a script or search template. -Deletes a stored script or search template. - -{ref}/modules-scripting.html[Endpoint documentation] -[source,ts] ----- -client.deleteScript({ id }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: Identifier for the stored script or search template. 
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. -If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -=== exists -Check a document. -Checks if a specified document exists. - -{ref}/docs-get.html[Endpoint documentation] -[source,ts] ----- -client.exists({ id, index }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: Identifier of the document. -** *`index` (string)*: List of data streams, indices, and aliases. -Supports wildcards (`*`). -** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. -Random by default. -** *`realtime` (Optional, boolean)*: If `true`, the request is real-time as opposed to near-real-time. -** *`refresh` (Optional, boolean)*: If `true`, Elasticsearch refreshes all shards involved in the delete by query after the request completes. -** *`routing` (Optional, string)*: Target the specified primary shard. -** *`_source` (Optional, boolean | string | string[])*: `true` or `false` to return the `_source` field or not, or a list of fields to return. -** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude in the response. -** *`_source_includes` (Optional, string | string[])*: A list of source fields to include in the response. -** *`stored_fields` (Optional, string | string[])*: List of stored fields to return as part of a hit. -If no fields are specified, no stored fields are included in the response. -If this field is specified, the `_source` parameter defaults to false. -** *`version` (Optional, number)*: Explicit version number for concurrency control. -The specified version must match the current version of the document for the request to succeed. -** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: Specific version type: `external`, `external_gte`. - -[discrete] -=== exists_source -Check for a document source. -Checks if a document's `_source` is stored. - -{ref}/docs-get.html[Endpoint documentation] -[source,ts] ----- -client.existsSource({ id, index }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: Identifier of the document. -** *`index` (string)*: List of data streams, indices, and aliases. -Supports wildcards (`*`). -** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. -Random by default. -** *`realtime` (Optional, boolean)*: If true, the request is real-time as opposed to near-real-time. -** *`refresh` (Optional, boolean)*: If `true`, Elasticsearch refreshes all shards involved in the delete by query after the request completes. -** *`routing` (Optional, string)*: Target the specified primary shard. -** *`_source` (Optional, boolean | string | string[])*: `true` or `false` to return the `_source` field or not, or a list of fields to return. -** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude in the response. -** *`_source_includes` (Optional, string | string[])*: A list of source fields to include in the response. -** *`version` (Optional, number)*: Explicit version number for concurrency control. -The specified version must match the current version of the document for the request to succeed. 
-** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: Specific version type: `external`, `external_gte`. - -[discrete] -=== explain -Explain a document match result. -Returns information about why a specific document matches, or doesn’t match, a query. - -{ref}/search-explain.html[Endpoint documentation] -[source,ts] ----- -client.explain({ id, index }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: Defines the document ID. -** *`index` (string)*: Index names used to limit the request. -Only a single index name can be provided to this parameter. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. -** *`analyzer` (Optional, string)*: Analyzer to use for the query string. -This parameter can only be used when the `q` query string parameter is specified. -** *`analyze_wildcard` (Optional, boolean)*: If `true`, wildcard and prefix queries are analyzed. -** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query: `AND` or `OR`. -** *`df` (Optional, string)*: Field to use as default where no field prefix is given in the query string. -** *`lenient` (Optional, boolean)*: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. -** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. -Random by default. -** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. -** *`_source` (Optional, boolean | string | string[])*: True or false to return the `_source` field or not, or a list of fields to return. -** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude from the response. -** *`_source_includes` (Optional, string | string[])*: A list of source fields to include in the response. -** *`stored_fields` (Optional, string | string[])*: A list of stored fields to return in the response. -** *`q` (Optional, string)*: Query in the Lucene query string syntax. - -[discrete] -=== field_caps -Get the field capabilities. - -Get information about the capabilities of fields among multiple indices. - -For data streams, the API returns field capabilities among the stream’s backing indices. -It returns runtime fields like any other field. -For example, a runtime field with a type of keyword is returned the same as any other field that belongs to the `keyword` family. - -{ref}/search-field-caps.html[Endpoint documentation] -[source,ts] ----- -client.fieldCaps({ ... }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request. Supports wildcards (*). 
To target all data streams and indices, omit this parameter or use * or _all. -** *`fields` (Optional, string | string[])*: List of fields to retrieve capabilities for. Wildcard (`*`) expressions are supported. -** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Allows to filter indices if the provided query rewrites to match_none on every shard. -** *`runtime_mappings` (Optional, Record<string, { fields, fetch_fields, format, input_field, target_field, target_index, script, type }>)*: Defines ad-hoc runtime fields in the request similar to the way it is done in search requests. -These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings. -** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, -or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request -targeting `foo*,bar*` returns an error if an index starts with foo but no index starts with bar. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. -** *`ignore_unavailable` (Optional, boolean)*: If `true`, missing or closed indices are not included in the response. -** *`include_unmapped` (Optional, boolean)*: If true, unmapped fields are included in the response. -** *`filters` (Optional, string)*: An optional set of filters: can include +metadata,-metadata,-nested,-multifield,-parent -** *`types` (Optional, string[])*: Only return results for fields that have one of the types in the list -** *`include_empty_fields` (Optional, boolean)*: If false, empty fields are not included in the response. - -[discrete] -=== get -Get a document by its ID. -Retrieves the document with the specified ID from an index. - -{ref}/docs-get.html[Endpoint documentation] -[source,ts] ----- -client.get({ id, index }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: Unique identifier of the document. -** *`index` (string)*: Name of the index that contains the document. -** *`force_synthetic_source` (Optional, boolean)*: Should this request force synthetic _source? -Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance. -Fetches with this enabled will be slower the enabling synthetic source natively in the index. -** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. Random by default. 
-** *`realtime` (Optional, boolean)*: If `true`, the request is real-time as opposed to near-real-time. -** *`refresh` (Optional, boolean)*: If true, Elasticsearch refreshes the affected shards to make this operation visible to search. If false, do nothing with refreshes. -** *`routing` (Optional, string)*: Target the specified primary shard. -** *`_source` (Optional, boolean | string | string[])*: True or false to return the _source field or not, or a list of fields to return. -** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude in the response. -** *`_source_includes` (Optional, string | string[])*: A list of source fields to include in the response. -** *`stored_fields` (Optional, string | string[])*: List of stored fields to return as part of a hit. -If no fields are specified, no stored fields are included in the response. -If this field is specified, the `_source` parameter defaults to false. -** *`version` (Optional, number)*: Explicit version number for concurrency control. The specified version must match the current version of the document for the request to succeed. -** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: Specific version type: internal, external, external_gte. - -[discrete] -=== get_script -Get a script or search template. -Retrieves a stored script or search template. - -{ref}/modules-scripting.html[Endpoint documentation] -[source,ts] ----- -client.getScript({ id }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: Identifier for the stored script or search template. -** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master - -[discrete] -=== get_script_context -Get script contexts. - -Get a list of supported script contexts and their methods. - -{painless}/painless-contexts.html[Endpoint documentation] -[source,ts] ----- -client.getScriptContext() ----- - -[discrete] -=== get_script_languages -Get script languages. - -Get a list of available script types, languages, and contexts. - -{ref}/modules-scripting.html[Endpoint documentation] -[source,ts] ----- -client.getScriptLanguages() ----- - -[discrete] -=== get_source -Get a document's source. -Returns the source of a document. - -{ref}/docs-get.html[Endpoint documentation] -[source,ts] ----- -client.getSource({ id, index }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: Unique identifier of the document. -** *`index` (string)*: Name of the index that contains the document. -** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. Random by default. -** *`realtime` (Optional, boolean)*: Boolean) If true, the request is real-time as opposed to near-real-time. -** *`refresh` (Optional, boolean)*: If true, Elasticsearch refreshes the affected shards to make this operation visible to search. If false, do nothing with refreshes. -** *`routing` (Optional, string)*: Target the specified primary shard. -** *`_source` (Optional, boolean | string | string[])*: True or false to return the _source field or not, or a list of fields to return. -** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude in the response. -** *`_source_includes` (Optional, string | string[])*: A list of source fields to include in the response. -** *`stored_fields` (Optional, string | string[])* -** *`version` (Optional, number)*: Explicit version number for concurrency control. 
The specified version must match the current version of the document for the request to succeed. -** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: Specific version type: internal, external, external_gte. - -[discrete] -=== health_report -Returns the health of the cluster. - -{ref}/health-api.html[Endpoint documentation] -[source,ts] ----- -client.healthReport({ ... }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`feature` (Optional, string | string[])*: A feature of the cluster, as returned by the top-level health report API. -** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout. -** *`verbose` (Optional, boolean)*: Opt-in for more information about the health of the system. -** *`size` (Optional, number)*: Limit the number of affected resources the health report API returns. - -[discrete] -=== index -Index a document. -Adds a JSON document to the specified data stream or index and makes it searchable. -If the target is an index and the document already exists, the request updates the document and increments its version. - -{ref}/docs-index_.html[Endpoint documentation] -[source,ts] ----- -client.index({ index }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string)*: Name of the data stream or index to target. -** *`id` (Optional, string)*: Unique identifier for the document. -** *`document` (Optional, object)*: A document. -** *`if_primary_term` (Optional, number)*: Only perform the operation if the document has this primary term. -** *`if_seq_no` (Optional, number)*: Only perform the operation if the document has this sequence number. -** *`op_type` (Optional, Enum("index" | "create"))*: Set to create to only index the document if it does not already exist (put if absent). -If a document with the specified `_id` already exists, the indexing operation will fail. -Same as using the `<index>/_create` endpoint. -Valid values: `index`, `create`. -If document id is specified, it defaults to `index`. -Otherwise, it defaults to `create`. -** *`pipeline` (Optional, string)*: ID of the pipeline to use to preprocess incoming documents. -If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. -If a final pipeline is configured it will always run, regardless of the value of this parameter. -** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` do nothing with refreshes. -Valid values: `true`, `false`, `wait_for`. -** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. -** *`timeout` (Optional, string | -1 | 0)*: Period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. -** *`version` (Optional, number)*: Explicit version number for concurrency control. -The specified version must match the current version of the document for the request to succeed. -** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: Specific version type: `external`, `external_gte`. -** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. 
-Set to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). -** *`require_alias` (Optional, boolean)*: If `true`, the destination must be an index alias. - -[discrete] -=== info -Get cluster info. -Returns basic information about the cluster. - -{ref}/index.html[Endpoint documentation] -[source,ts] ----- -client.info() ----- - -[discrete] -=== knn_search -Run a knn search. - -NOTE: The kNN search API has been replaced by the `knn` option in the search API. - -Perform a k-nearest neighbor (kNN) search on a dense_vector field and return the matching documents. -Given a query vector, the API finds the k closest vectors and returns those documents as search hits. - -Elasticsearch uses the HNSW algorithm to support efficient kNN search. -Like most kNN algorithms, HNSW is an approximate method that sacrifices result accuracy for improved search speed. -This means the results returned are not always the true k closest neighbors. - -The kNN search API supports restricting the search using a filter. -The search will return the top k documents that also match the filter query. - -{ref}/search-search.html[Endpoint documentation] -[source,ts] ----- -client.knnSearch({ index, knn }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string | string[])*: A list of index names to search; -use `_all` or to perform the operation on all indices -** *`knn` ({ field, query_vector, k, num_candidates })*: kNN query to execute -** *`_source` (Optional, boolean | { excludes, includes })*: Indicates which source fields are returned for matching documents. These -fields are returned in the hits._source property of the search response. -** *`docvalue_fields` (Optional, { field, format, include_unmapped }[])*: The request returns doc values for field names matching these patterns -in the hits.fields property of the response. Accepts wildcard (*) patterns. -** *`stored_fields` (Optional, string | string[])*: List of stored fields to return as part of a hit. If no fields are specified, -no stored fields are included in the response. If this field is specified, the _source -parameter defaults to false. You can pass _source: true to return both source fields -and stored fields in the search response. -** *`fields` (Optional, string | string[])*: The request returns values for field names matching these patterns -in the hits.fields property of the response. Accepts wildcard (*) patterns. 
-** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type }[])*: Query to filter the documents that can match. The kNN search will return the top -`k` documents that also match this filter. The value can be a single query or a -list of queries. If `filter` isn't provided, all documents are allowed to match. -** *`routing` (Optional, string)*: A list of specific routing values - -[discrete] -=== mget -Get multiple documents. - -Get multiple JSON documents by ID from one or more indices. -If you specify an index in the request URI, you only need to specify the document IDs in the request body. -To ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail. - -{ref}/docs-multi-get.html[Endpoint documentation] -[source,ts] ----- -client.mget({ ... }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string)*: Name of the index to retrieve documents from when `ids` are specified, or when a document in the `docs` array does not specify an index. -** *`docs` (Optional, { _id, _index, routing, _source, stored_fields, version, version_type }[])*: The documents you want to retrieve. Required if no index is specified in the request URI. -** *`ids` (Optional, string | string[])*: The IDs of the documents you want to retrieve. Allowed when the index is specified in the request URI. -** *`force_synthetic_source` (Optional, boolean)*: Should this request force synthetic _source? -Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance. -Fetches with this enabled will be slower the enabling synthetic source natively in the index. -** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. Random by default. -** *`realtime` (Optional, boolean)*: If `true`, the request is real-time as opposed to near-real-time. -** *`refresh` (Optional, boolean)*: If `true`, the request refreshes relevant shards before retrieving documents. -** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. 
-** *`_source` (Optional, boolean | string | string[])*: True or false to return the `_source` field or not, or a list of fields to return. -** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude from the response. -You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. -** *`_source_includes` (Optional, string | string[])*: A list of source fields to include in the response. -If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. -If the `_source` parameter is `false`, this parameter is ignored. -** *`stored_fields` (Optional, string | string[])*: If `true`, retrieves the document fields stored in the index rather than the document `_source`. - -[discrete] -=== msearch -Run multiple searches. - -The format of the request is similar to the bulk API format and makes use of the newline delimited JSON (NDJSON) format. -The structure is as follows: - -``` -header\n -body\n -header\n -body\n -``` - -This structure is specifically optimized to reduce parsing if a specific search ends up redirected to another node. - -IMPORTANT: The final line of data must end with a newline character `\n`. -Each newline character may be preceded by a carriage return `\r`. -When sending requests to this endpoint the `Content-Type` header should be set to `application/x-ndjson`. - -{ref}/search-multi-search.html[Endpoint documentation] -[source,ts] ----- -client.msearch({ ... }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string | string[])*: List of data streams, indices, and index aliases to search. -** *`searches` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } | { aggregations, collapse, query, explain, ext, stored_fields, docvalue_fields, knn, from, highlight, indices_boost, min_score, post_filter, profile, rescore, script_fields, search_after, size, sort, _source, fields, terminate_after, stats, timeout, track_scores, track_total_hits, version, runtime_mappings, seq_no_primary_term, pit, suggest }[])* -** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. -** *`ccs_minimize_roundtrips` (Optional, boolean)*: If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -** *`ignore_throttled` (Optional, boolean)*: If true, concrete, expanded or aliased indices are ignored when frozen. -** *`ignore_unavailable` (Optional, boolean)*: If true, missing or closed indices are not included in the response. 
-** *`include_named_queries_score` (Optional, boolean)*: Indicates whether hit.matched_queries should be rendered as a map that includes -the name of the matched query associated with its score (true) -or as an array containing the name of the matched queries (false) -This functionality reruns each named query on every hit in a search response. -Typically, this adds a small overhead to a request. -However, using computationally expensive named queries on a large number of hits may add significant overhead. -** *`max_concurrent_searches` (Optional, number)*: Maximum number of concurrent searches the multi search API can execute. -** *`max_concurrent_shard_requests` (Optional, number)*: Maximum number of concurrent shard requests that each sub-search request executes per node. -** *`pre_filter_shard_size` (Optional, number)*: Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint. -** *`rest_total_hits_as_int` (Optional, boolean)*: If true, hits.total are returned as an integer in the response. Defaults to false, which returns an object. -** *`routing` (Optional, string)*: Custom routing value used to route search operations to a specific shard. -** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: Indicates whether global term and document frequencies should be used when scoring returned documents. -** *`typed_keys` (Optional, boolean)*: Specifies whether aggregation and suggester names should be prefixed by their respective types in the response. - -[discrete] -=== msearch_template -Run multiple templated searches. - -{ref}/search-multi-search.html[Endpoint documentation] -[source,ts] ----- -client.msearchTemplate({ ... }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases to search. -Supports wildcards (`*`). -To search all data streams and indices, omit this parameter or use `*`. -** *`search_templates` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } | { aggregations, collapse, query, explain, ext, stored_fields, docvalue_fields, knn, from, highlight, indices_boost, min_score, post_filter, profile, rescore, script_fields, search_after, size, sort, _source, fields, terminate_after, stats, timeout, track_scores, track_total_hits, version, runtime_mappings, seq_no_primary_term, pit, suggest }[])* -** *`ccs_minimize_roundtrips` (Optional, boolean)*: If `true`, network round-trips are minimized for cross-cluster search requests. -** *`max_concurrent_searches` (Optional, number)*: Maximum number of concurrent searches the API can run. -** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: The type of the search operation. -Available options: `query_then_fetch`, `dfs_query_then_fetch`. -** *`rest_total_hits_as_int` (Optional, boolean)*: If `true`, the response returns `hits.total` as an integer. -If `false`, it returns `hits.total` as an object. 
-** *`typed_keys` (Optional, boolean)*: If `true`, the response prefixes aggregation and suggester names with their respective types. - -[discrete] -=== mtermvectors -Get multiple term vectors. - -You can specify existing documents by index and ID or provide artificial documents in the body of the request. -You can specify the index in the request body or request URI. -The response contains a `docs` array with all the fetched termvectors. -Each element has the structure provided by the termvectors API. - -{ref}/docs-multi-termvectors.html[Endpoint documentation] -[source,ts] ----- -client.mtermvectors({ ... }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string)*: Name of the index that contains the documents. -** *`docs` (Optional, { _id, _index, routing, _source, stored_fields, version, version_type }[])*: Array of existing or artificial documents. -** *`ids` (Optional, string[])*: Simplified syntax to specify documents by their ID if they're in the same index. -** *`fields` (Optional, string | string[])*: List or wildcard expressions of fields to include in the statistics. -Used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. -** *`field_statistics` (Optional, boolean)*: If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies. -** *`offsets` (Optional, boolean)*: If `true`, the response includes term offsets. -** *`payloads` (Optional, boolean)*: If `true`, the response includes term payloads. -** *`positions` (Optional, boolean)*: If `true`, the response includes term positions. -** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. -Random by default. -** *`realtime` (Optional, boolean)*: If true, the request is real-time as opposed to near-real-time. -** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. -** *`term_statistics` (Optional, boolean)*: If true, the response includes term frequency and document frequency. -** *`version` (Optional, number)*: If `true`, returns the document version as part of a hit. -** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: Specific version type. - -[discrete] -=== open_point_in_time -Open a point in time. - -A search request by default runs against the most recent visible data of the target indices, -which is called point in time. Elasticsearch pit (point in time) is a lightweight view into the -state of the data as it existed when initiated. In some cases, it’s preferred to perform multiple -search requests using the same point in time. For example, if refreshes happen between -`search_after` requests, then the results of those requests might not be consistent as changes happening -between searches are only visible to the more recent point in time. - -A point in time must be opened explicitly before being used in search requests. -The `keep_alive` parameter tells Elasticsearch how long it should persist. - -{ref}/point-in-time-api.html[Endpoint documentation] -[source,ts] ----- -client.openPointInTime({ index, keep_alive }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string | string[])*: A list of index names to open point in time; use `_all` or empty string to perform the operation on all indices -** *`keep_alive` (string | -1 | 0)*: Extends the time to live of the corresponding point in time. 
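
The remaining `open_point_in_time` parameters are listed below. A sketch of the typical open/search/close lifecycle, shown with top-level `await` for brevity and assuming a configured `client` plus an illustrative `my-index` with a `created_at` field:

[source,ts]
----
// open a PIT that stays alive for one minute
const pit = await client.openPointInTime({ index: 'my-index', keep_alive: '1m' })

// search against the PIT; a request with a `pit` must not also
// specify an index in the request path
const page = await client.search({
  size: 100,
  query: { match_all: {} },
  pit: { id: pit.id, keep_alive: '1m' },
  sort: [{ created_at: 'asc' }]
})
console.log(page.hits.hits.length)

// release the PIT as soon as you are done paginating
await client.closePointInTime({ id: pit.id })
----
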
-** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Allows to filter indices if the provided query rewrites to `match_none` on every shard. -** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. -** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. -Random by default. -** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. -If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. -** *`allow_partial_search_results` (Optional, boolean)*: If `false`, creating a point in time request when a shard is missing or unavailable will throw an exception. -If `true`, the point in time will contain all the shards that are available at the time of the request. - -[discrete] -=== ping -Ping the cluster. -Returns whether the cluster is running. - -{ref}/index.html[Endpoint documentation] -[source,ts] ----- -client.ping() ----- - -[discrete] -=== put_script -Create or update a script or search template. -Creates or updates a stored script or search template. - -{ref}/modules-scripting.html[Endpoint documentation] -[source,ts] ----- -client.putScript({ id, script }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: Identifier for the stored script or search template. -Must be unique within the cluster. -** *`script` ({ lang, options, source })*: Contains the script or search template, its parameters, and its language. -** *`context` (Optional, string)*: Context in which the script or search template should run. -To prevent errors, the API immediately compiles the script or template in this context. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. -If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -=== rank_eval -Evaluate ranked search results. - -Evaluate the quality of ranked search results over a set of typical search queries. 
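
For example, a single rated query can be evaluated with a sketch like the one below; the index, query, and ratings are illustrative, `client` is assumed to be configured already, and the full list of parameters follows.

[source,ts]
----
const evaluation = await client.rankEval({
  index: 'my-index',
  requests: [
    {
      id: 'title_query',
      request: { query: { match: { title: 'elasticsearch' } } },
      // human relevance judgements for this query (rating > 0 means relevant)
      ratings: [
        { _index: 'my-index', _id: '1', rating: 1 },
        { _index: 'my-index', _id: '7', rating: 0 }
      ]
    }
  ],
  metric: { precision: { k: 10, relevant_rating_threshold: 1 } }
})
console.log(evaluation.metric_score)
----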
- -{ref}/search-rank-eval.html[Endpoint documentation] -[source,ts] ----- -client.rankEval({ requests }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`requests` ({ id, request, ratings, template_id, params }[])*: A set of typical search requests, together with their provided ratings. -** *`index` (Optional, string | string[])*: List of data streams, indices, and index aliases used to limit the request. Wildcard (`*`) expressions are supported. -To target all data streams and indices in a cluster, omit this parameter or use `_all` or `*`. -** *`metric` (Optional, { precision, recall, mean_reciprocal_rank, dcg, expected_reciprocal_rank })*: Definition of the evaluation metric to calculate. -** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. -** *`ignore_unavailable` (Optional, boolean)*: If `true`, missing or closed indices are not included in the response. -** *`search_type` (Optional, string)*: Search operation type - -[discrete] -=== reindex -Reindex documents. -Copies documents from a source to a destination. The source can be any existing index, alias, or data stream. The destination must differ from the source. For example, you cannot reindex a data stream into itself. - -{ref}/docs-reindex.html[Endpoint documentation] -[source,ts] ----- -client.reindex({ dest, source }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`dest` ({ index, op_type, pipeline, routing, version_type })*: The destination you are copying to. -** *`source` ({ index, query, remote, size, slice, sort, _source, runtime_mappings })*: The source you are copying from. -** *`conflicts` (Optional, Enum("abort" | "proceed"))*: Set to proceed to continue reindexing even if there are conflicts. -** *`max_docs` (Optional, number)*: The maximum number of documents to reindex. -** *`script` (Optional, { source, id, params, lang, options })*: The script to run to update the document source or metadata when reindexing. -** *`size` (Optional, number)* -** *`refresh` (Optional, boolean)*: If `true`, the request refreshes affected shards to make this operation visible to search. -** *`requests_per_second` (Optional, float)*: The throttle for this request in sub-requests per second. -Defaults to no throttle. -** *`scroll` (Optional, string | -1 | 0)*: Specifies how long a consistent view of the index should be maintained for scrolled search. -** *`slices` (Optional, number | Enum("auto"))*: The number of slices this task should be divided into. -Defaults to 1 slice, meaning the task isn’t sliced into subtasks. -** *`timeout` (Optional, string | -1 | 0)*: Period each indexing waits for automatic index creation, dynamic mapping updates, and waiting for active shards. -** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. -Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). 
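
The remaining `reindex` parameters follow below. As a sketch, copying a filtered subset of one index into another as a background task might look like this (index names, query, and `client` configuration are illustrative):

[source,ts]
----
// `conflicts: 'proceed'` keeps going past version conflicts;
// `wait_for_completion: false` returns a task ID to poll with the tasks API
const response = await client.reindex({
  source: { index: 'old-index', query: { term: { status: 'published' } } },
  dest: { index: 'new-index' },
  conflicts: 'proceed',
  wait_for_completion: false
})
console.log(response.task)
----
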
-** *`wait_for_completion` (Optional, boolean)*: If `true`, the request blocks until the operation is complete. -** *`require_alias` (Optional, boolean)*: If `true`, the destination must be an index alias. - -[discrete] -=== reindex_rethrottle -Throttle a reindex operation. - -Change the number of requests per second for a particular reindex operation. - -{ref}/docs-reindex.html[Endpoint documentation] -[source,ts] ----- -client.reindexRethrottle({ task_id }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`task_id` (string)*: Identifier for the task. -** *`requests_per_second` (Optional, float)*: The throttle for this request in sub-requests per second. - -[discrete] -=== render_search_template -Render a search template. - -Render a search template as a search request body. - -{ref}/render-search-template-api.html[Endpoint documentation] -[source,ts] ----- -client.renderSearchTemplate({ ... }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (Optional, string)*: ID of the search template to render. -If no `source` is specified, this or the `id` request body parameter is required. -** *`file` (Optional, string)* -** *`params` (Optional, Record<string, User-defined value>)*: Key-value pairs used to replace Mustache variables in the template. -The key is the variable name. -The value is the variable value. -** *`source` (Optional, string)*: An inline search template. -Supports the same parameters as the search API's request body. -These parameters also support Mustache variables. -If no `id` or `<templated-id>` is specified, this parameter is required. - -[discrete] -=== scripts_painless_execute -Run a script. -Runs a script and returns a result. - -{painless}/painless-execute-api.html[Endpoint documentation] -[source,ts] ----- -client.scriptsPainlessExecute({ ... }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`context` (Optional, string)*: The context that the script should run in. -** *`context_setup` (Optional, { document, index, query })*: Additional parameters for the `context`. -** *`script` (Optional, { source, id, params, lang, options })*: The Painless script to execute. - -[discrete] -=== scroll -Run a scrolling search. - -IMPORTANT: The scroll API is no longer recommend for deep pagination. If you need to preserve the index state while paging through more than 10,000 hits, use the `search_after` parameter with a point in time (PIT). - -The scroll API gets large sets of results from a single scrolling search request. -To get the necessary scroll ID, submit a search API request that includes an argument for the `scroll` query parameter. -The `scroll` parameter indicates how long Elasticsearch should retain the search context for the request. -The search response returns a scroll ID in the `_scroll_id` response body parameter. -You can then use the scroll ID with the scroll API to retrieve the next batch of results for the request. -If the Elasticsearch security features are enabled, the access to the results of a specific scroll ID is restricted to the user or API key that submitted the search. - -You can also use the scroll API to specify a new scroll parameter that extends or shortens the retention period for the search context. - -IMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests. 
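
Putting these pieces together, a basic scroll loop might look like the following sketch; the index name and page size are illustrative, `client` is assumed to be configured already, and the endpoint reference and parameters follow.

[source,ts]
----
// 1. start a scrolling search and keep the search context alive for 30 seconds
const firstPage = await client.search({
  index: 'my-index',
  scroll: '30s',
  size: 1000,
  query: { match_all: {} }
})

let scrollId = firstPage._scroll_id
let hits = firstPage.hits.hits

// 2. keep fetching pages until one comes back empty
while (hits.length > 0) {
  for (const hit of hits) {
    console.log(hit._id)
  }
  const nextPage = await client.scroll({ scroll_id: scrollId!, scroll: '30s' })
  scrollId = nextPage._scroll_id
  hits = nextPage.hits.hits
}

// 3. free the search context instead of waiting for it to expire
await client.clearScroll({ scroll_id: scrollId })
----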
-
-{ref}/search-request-body.html[Endpoint documentation]
-[source,ts]
-----
-client.scroll({ scroll_id })
-----
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`scroll_id` (string)*: Scroll ID of the search.
-** *`scroll` (Optional, string | -1 | 0)*: Period to retain the search context for scrolling.
-** *`rest_total_hits_as_int` (Optional, boolean)*: If true, the API response’s hits.total property is returned as an integer. If false, the API response’s hits.total property is returned as an object.
-
-[discrete]
-=== search
-Run a search.
-
-Get search hits that match the query defined in the request.
-You can provide search queries using the `q` query string parameter or the request body.
-If both are specified, only the query parameter is used.
-
-{ref}/search-search.html[Endpoint documentation]
-[source,ts]
-----
-client.search({ ... })
-----
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases to search.
-Supports wildcards (`*`).
-To search all data streams and indices, omit this parameter or use `*` or `_all`.
-** *`aggregations` (Optional, Record<string, { aggregations, meta, adjacency_matrix, auto_date_histogram, avg, avg_bucket, boxplot, bucket_script, bucket_selector, bucket_sort, bucket_count_ks_test, bucket_correlation, cardinality, categorize_text, children, composite, cumulative_cardinality, cumulative_sum, date_histogram, date_range, derivative, diversified_sampler, extended_stats, extended_stats_bucket, frequent_item_sets, filter, filters, geo_bounds, geo_centroid, geo_distance, geohash_grid, geo_line, geotile_grid, geohex_grid, global, histogram, ip_range, ip_prefix, inference, line, matrix_stats, max, max_bucket, median_absolute_deviation, min, min_bucket, missing, moving_avg, moving_percentiles, moving_fn, multi_terms, nested, normalize, parent, percentile_ranks, percentiles, percentiles_bucket, range, rare_terms, rate, reverse_nested, random_sampler, sampler, scripted_metric, serial_diff, significant_terms, significant_text, stats, stats_bucket, string_stats, sum, sum_bucket, terms, time_series, top_hits, t_test, top_metrics, value_count, weighted_avg, variable_width_histogram }>)*: Defines the aggregations that are run as part of the search request.
-** *`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })*: Collapses search results by the values of the specified field.
-** *`explain` (Optional, boolean)*: If true, returns detailed information about score computation as part of a hit.
-** *`ext` (Optional, Record<string, User-defined value>)*: Configuration of search extensions defined by Elasticsearch plugins.
-** *`from` (Optional, number)*: Starting document offset.
-Needs to be non-negative.
-By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.
-To page through more hits, use the `search_after` parameter.
-** *`highlight` (Optional, { encoder, fields })*: Specifies the highlighter to use for retrieving highlighted snippets from one or more fields in your search results.
-** *`track_total_hits` (Optional, boolean | number)*: Number of hits matching the query to count accurately.
-If `true`, the exact number of hits is returned at the cost of some performance.
-If `false`, the response does not include the total number of hits matching the query.
-** *`indices_boost` (Optional, Record<string, number>[])*: Boosts the _score of documents from specified indices.
-** *`docvalue_fields` (Optional, { field, format, include_unmapped }[])*: Array of wildcard (`*`) patterns. -The request returns doc values for field names matching these patterns in the `hits.fields` property of the response. -** *`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits } | { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits }[])*: Defines the approximate kNN search to run. -** *`rank` (Optional, { rrf })*: Defines the Reciprocal Rank Fusion (RRF) to use. -** *`min_score` (Optional, number)*: Minimum `_score` for matching documents. -Documents with a lower `_score` are not included in the search results. -** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Use the `post_filter` parameter to filter search results. -The search hits are filtered after the aggregations are calculated. -A post filter has no impact on the aggregation results. -** *`profile` (Optional, boolean)*: Set to `true` to return detailed timing information about the execution of individual components in a search request. -NOTE: This is a debugging tool and adds significant overhead to search execution. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. -** *`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])*: Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases. -** *`retriever` (Optional, { standard, knn, rrf, text_similarity_reranker, rule })*: A retriever is a specification to describe top documents returned from a search. A retriever replaces other elements of the search API that also return top documents such as query and knn. -** *`script_fields` (Optional, Record<string, { script, ignore_failure }>)*: Retrieve a script evaluation (based on different fields) for each hit. -** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*: Used to retrieve the next page of hits using a set of sort values from the previous page. 
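
The remaining search parameters continue below. For orientation, a typical request that combines a query, sorting, and source filtering might look like this sketch (the index, fields, and document type are illustrative, and `client` is assumed to be configured already):

[source,ts]
----
// the generic parameter types the documents returned in the hits
interface BlogPost {
  title: string
  created_at: string
}

const result = await client.search<BlogPost>({
  index: 'my-index',
  query: { match: { title: 'node.js' } },
  sort: [{ created_at: 'desc' }],
  _source: ['title', 'created_at'],
  size: 10
})

for (const hit of result.hits.hits) {
  console.log(hit._source?.title)
}
----
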
-** *`size` (Optional, number)*: The number of hits to return. -By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. -To page through more hits, use the `search_after` parameter. -** *`slice` (Optional, { field, id, max })*: Can be used to split a scrolled search into multiple slices that can be consumed independently. -** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: A list of <field>:<direction> pairs. -** *`_source` (Optional, boolean | { excludes, includes })*: Indicates which source fields are returned for matching documents. -These fields are returned in the hits._source property of the search response. -** *`fields` (Optional, { field, format, include_unmapped }[])*: Array of wildcard (`*`) patterns. -The request returns values for field names matching these patterns in the `hits.fields` property of the response. -** *`suggest` (Optional, { text })*: Defines a suggester that provides similar looking terms based on a provided text. -** *`terminate_after` (Optional, number)*: Maximum number of documents to collect for each shard. -If a query reaches this limit, Elasticsearch terminates the query early. -Elasticsearch collects documents before sorting. -Use with caution. -Elasticsearch applies this parameter to each shard handling the request. -When possible, let Elasticsearch perform early termination automatically. -Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. -If set to `0` (default), the query does not terminate early. -** *`timeout` (Optional, string)*: Specifies the period of time to wait for a response from each shard. -If no response is received before the timeout expires, the request fails and returns an error. -Defaults to no timeout. -** *`track_scores` (Optional, boolean)*: If true, calculate and return document scores, even if the scores are not used for sorting. -** *`version` (Optional, boolean)*: If true, returns document version as part of a hit. -** *`seq_no_primary_term` (Optional, boolean)*: If `true`, returns sequence number and primary term of the last modification of each hit. -** *`stored_fields` (Optional, string | string[])*: List of stored fields to return as part of a hit. -If no fields are specified, no stored fields are included in the response. -If this field is specified, the `_source` parameter defaults to `false`. -You can pass `_source: true` to return both source fields and stored fields in the search response. -** *`pit` (Optional, { id, keep_alive })*: Limits the search to a point in time (PIT). -If you provide a PIT, you cannot specify an `<index>` in the request path. -** *`runtime_mappings` (Optional, Record<string, { fields, fetch_fields, format, input_field, target_field, target_index, script, type }>)*: Defines one or more runtime fields in the search request. -These fields take precedence over mapped fields with the same name. -** *`stats` (Optional, string[])*: Stats groups to associate with the search. -Each group maintains a statistics aggregation for its associated searches. -You can retrieve these stats using the indices stats API. -** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. -This behavior applies even if the request targets other open indices. 
-For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. -** *`allow_partial_search_results` (Optional, boolean)*: If true, returns partial results if there are shard request timeouts or shard failures. If false, returns an error with no partial results. -** *`analyzer` (Optional, string)*: Analyzer to use for the query string. -This parameter can only be used when the q query string parameter is specified. -** *`analyze_wildcard` (Optional, boolean)*: If true, wildcard and prefix queries are analyzed. -This parameter can only be used when the q query string parameter is specified. -** *`batched_reduce_size` (Optional, number)*: The number of shard results that should be reduced at once on the coordinating node. -This value should be used as a protection mechanism to reduce the memory overhead per search request if the potential number of shards in the request can be large. -** *`ccs_minimize_roundtrips` (Optional, boolean)*: If true, network round-trips between the coordinating node and the remote clusters are minimized when executing cross-cluster search (CCS) requests. -** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query: AND or OR. -This parameter can only be used when the `q` query string parameter is specified. -** *`df` (Optional, string)*: Field to use as default where no field prefix is given in the query string. -This parameter can only be used when the q query string parameter is specified. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. -If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -Supports a list of values, such as `open,hidden`. -** *`ignore_throttled` (Optional, boolean)*: If `true`, concrete, expanded or aliased indices will be ignored when frozen. -** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. -** *`include_named_queries_score` (Optional, boolean)*: Indicates whether hit.matched_queries should be rendered as a map that includes -the name of the matched query associated with its score (true) -or as an array containing the name of the matched queries (false) -This functionality reruns each named query on every hit in a search response. -Typically, this adds a small overhead to a request. -However, using computationally expensive named queries on a large number of hits may add significant overhead. -** *`lenient` (Optional, boolean)*: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. -This parameter can only be used when the `q` query string parameter is specified. -** *`max_concurrent_shard_requests` (Optional, number)*: Defines the number of concurrent shard requests per node this search executes concurrently. -This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests. -** *`preference` (Optional, string)*: Nodes and shards used for the search. -By default, Elasticsearch selects from eligible nodes and shards using adaptive replica selection, accounting for allocation awareness. 
Valid values are:
-`_only_local` to run the search only on shards on the local node;
-`_local` to, if possible, run the search on shards on the local node, or if not, select shards using the default method;
-`_only_nodes:<node-id>,<node-id>` to run the search on only the specified node IDs, where, if suitable shards exist on more than one selected node, use shards on those nodes using the default method, or if none of the specified nodes are available, select shards from any available node using the default method;
-`_prefer_nodes:<node-id>,<node-id>` to, if possible, run the search on the specified node IDs, or if not, select shards using the default method;
-`_shards:<shard>,<shard>` to run the search only on the specified shards;
-`<custom-string>` (any string that does not start with `_`) to route searches with the same `<custom-string>` to the same shards in the same order.
-** *`pre_filter_shard_size` (Optional, number)*: Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold.
-This filter roundtrip can limit the number of shards significantly if, for instance, a shard cannot match any documents based on its rewrite method (if date filters are mandatory to match but the shard bounds and the query are disjoint).
-When unspecified, the pre-filter phase is executed if any of these conditions is met:
-the request targets more than 128 shards;
-the request targets one or more read-only indices;
-the primary sort of the query targets an indexed field.
-** *`request_cache` (Optional, boolean)*: If `true`, the caching of search results is enabled for requests where `size` is `0`.
-Defaults to index level settings.
-** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard.
-** *`scroll` (Optional, string | -1 | 0)*: Period to retain the search context for scrolling. See Scroll search results.
-By default, this value cannot exceed `1d` (24 hours).
-You can change this limit using the `search.max_keep_alive` cluster-level setting.
-** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: How distributed term frequencies are calculated for relevance scoring.
-** *`suggest_field` (Optional, string)*: Specifies which field to use for suggestions.
-** *`suggest_mode` (Optional, Enum("missing" | "popular" | "always"))*: Specifies the suggest mode.
-This parameter can only be used when the `suggest_field` and `suggest_text` query string parameters are specified.
-** *`suggest_size` (Optional, number)*: Number of suggestions to return.
-This parameter can only be used when the `suggest_field` and `suggest_text` query string parameters are specified.
-** *`suggest_text` (Optional, string)*: The source text for which the suggestions should be returned.
-This parameter can only be used when the `suggest_field` and `suggest_text` query string parameters are specified.
-** *`typed_keys` (Optional, boolean)*: If `true`, aggregation and suggester names are prefixed by their respective types in the response.
-** *`rest_total_hits_as_int` (Optional, boolean)*: Indicates whether `hits.total` should be rendered as an integer or an object in the rest search response.
-** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude from the response.
-You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter.
-If the `_source` parameter is `false`, this parameter is ignored.
-** *`_source_includes` (Optional, string | string[])*: A list of source fields to include in the response.
-If this parameter is specified, only these source fields are returned.
-You can exclude fields from this subset using the `_source_excludes` query parameter.
-If the `_source` parameter is `false`, this parameter is ignored.
-** *`q` (Optional, string)*: Query in the Lucene query string syntax using query parameter search.
-Query parameter searches do not support the full Elasticsearch Query DSL but are handy for testing.
-** *`force_synthetic_source` (Optional, boolean)*: Should this request force synthetic _source?
-Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance.
-Fetches with this enabled will be slower than enabling synthetic source natively in the index.
-
-[discrete]
-=== search_mvt
-Search a vector tile.
-
-Search a vector tile for geospatial values.
-
-{ref}/search-vector-tile-api.html[Endpoint documentation]
-[source,ts]
-----
-client.searchMvt({ index, field, zoom, x, y })
-----
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (string | string[])*: List of data streams, indices, or aliases to search
-** *`field` (string)*: Field containing geospatial data to return
-** *`zoom` (number)*: Zoom level for the vector tile to search
-** *`x` (number)*: X coordinate for the vector tile to search
-** *`y` (number)*: Y coordinate for the vector tile to search
-** *`aggs` (Optional, Record<string, { aggregations, meta, adjacency_matrix, auto_date_histogram, avg, avg_bucket, boxplot, bucket_script, bucket_selector, bucket_sort, bucket_count_ks_test, bucket_correlation, cardinality, categorize_text, children, composite, cumulative_cardinality, cumulative_sum, date_histogram, date_range, derivative, diversified_sampler, extended_stats, extended_stats_bucket, frequent_item_sets, filter, filters, geo_bounds, geo_centroid, geo_distance, geohash_grid, geo_line, geotile_grid, geohex_grid, global, histogram, ip_range, ip_prefix, inference, line, matrix_stats, max, max_bucket, median_absolute_deviation, min, min_bucket, missing, moving_avg, moving_percentiles, moving_fn, multi_terms, nested, normalize, parent, percentile_ranks, percentiles, percentiles_bucket, range, rare_terms, rate, reverse_nested, random_sampler, sampler, scripted_metric, serial_diff, significant_terms, significant_text, stats, stats_bucket, string_stats, sum, sum_bucket, terms, time_series, top_hits, t_test, top_metrics, value_count, weighted_avg, variable_width_histogram }>)*: Sub-aggregations for the geotile_grid.
-
-Supports the following aggregation types:
-- avg
-- cardinality
-- max
-- min
-- sum
-** *`buffer` (Optional, number)*: Size, in pixels, of a clipping buffer outside the tile. This allows renderers
-to avoid outline artifacts from geometries that extend past the extent of the tile.
-** *`exact_bounds` (Optional, boolean)*: If false, the meta layer’s feature is the bounding box of the tile.
-If true, the meta layer’s feature is a bounding box resulting from a
-geo_bounds aggregation. The aggregation runs on <field> values that intersect
-the <zoom>/<x>/<y> tile with wrap_longitude set to false. The resulting
-bounding box may be larger than the vector tile.
-** *`extent` (Optional, number)*: Size, in pixels, of a side of the tile. Vector tiles are square with equal sides.
-** *`fields` (Optional, string | string[])*: Fields to return in the `hits` layer.
Supports wildcards (`*`). -This parameter does not support fields with array values. Fields with array -values may return inconsistent results. -** *`grid_agg` (Optional, Enum("geotile" | "geohex"))*: Aggregation used to create a grid for the `field`. -** *`grid_precision` (Optional, number)*: Additional zoom levels available through the aggs layer. For example, if <zoom> is 7 -and grid_precision is 8, you can zoom in up to level 15. Accepts 0-8. If 0, results -don’t include the aggs layer. -** *`grid_type` (Optional, Enum("grid" | "point" | "centroid"))*: Determines the geometry type for features in the aggs layer. In the aggs layer, -each feature represents a geotile_grid cell. If 'grid' each feature is a Polygon -of the cells bounding box. If 'point' each feature is a Point that is the centroid -of the cell. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Query DSL used to filter documents for the search. -** *`runtime_mappings` (Optional, Record<string, { fields, fetch_fields, format, input_field, target_field, target_index, script, type }>)*: Defines one or more runtime fields in the search request. These fields take -precedence over mapped fields with the same name. -** *`size` (Optional, number)*: Maximum number of features to return in the hits layer. Accepts 0-10000. -If 0, results don’t include the hits layer. -** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: Sorts features in the hits layer. By default, the API calculates a bounding -box for each feature. It sorts features based on this box’s diagonal length, -from longest to shortest. -** *`track_total_hits` (Optional, boolean | number)*: Number of hits matching the query to count accurately. If `true`, the exact number -of hits is returned at the cost of some performance. If `false`, the response does -not include the total number of hits matching the query. -** *`with_labels` (Optional, boolean)*: If `true`, the hits and aggs layers will contain additional point features representing -suggested label positions for the original features. - -[discrete] -=== search_shards -Get the search shards. - -Get the indices and shards that a search request would be run against. -This information can be useful for working out issues or planning optimizations with routing and shard preferences. -When filtered aliases are used, the filter is returned as part of the indices section. - -{ref}/search-shards.html[Endpoint documentation] -[source,ts] ----- -client.searchShards({ ... }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string | string[])*: Returns the indices and shards that a search request would be executed against. 
-** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. -This behavior applies even if the request targets other open indices. -For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. -If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. -** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. -** *`local` (Optional, boolean)*: If `true`, the request retrieves information from the local node only. -** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. -Random by default. -** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. - -[discrete] -=== search_template -Run a search with a search template. - -{ref}/search-template.html[Endpoint documentation] -[source,ts] ----- -client.searchTemplate({ ... }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string | string[])*: List of data streams, indices, -and aliases to search. Supports wildcards (*). -** *`explain` (Optional, boolean)*: If `true`, returns detailed information about score calculation as part of each hit. -** *`id` (Optional, string)*: ID of the search template to use. If no source is specified, -this parameter is required. -** *`params` (Optional, Record<string, User-defined value>)*: Key-value pairs used to replace Mustache variables in the template. -The key is the variable name. -The value is the variable value. -** *`profile` (Optional, boolean)*: If `true`, the query execution is profiled. -** *`source` (Optional, string)*: An inline search template. Supports the same parameters as the search API's -request body. Also supports Mustache variables. If no id is specified, this -parameter is required. -** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. -This behavior applies even if the request targets other open indices. -For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. -** *`ccs_minimize_roundtrips` (Optional, boolean)*: If `true`, network round-trips are minimized for cross-cluster search requests. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. -If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. -** *`ignore_throttled` (Optional, boolean)*: If `true`, specified concrete, expanded, or aliased indices are not included in the response when throttled. 
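
The remaining `search_template` parameters continue below. As a sketch, an inline Mustache template can be run like this (the index, template, and parameter values are illustrative, and `client` is assumed to be configured already):

[source,ts]
----
// `source` is a Mustache template string; `params` supplies the variables
const result = await client.searchTemplate({
  index: 'my-index',
  source: '{ "query": { "match": { "title": "{{query_string}}" } }, "size": {{result_size}} }',
  params: {
    query_string: 'observability',
    result_size: 5
  }
})
console.log(result.hits.hits)
----
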
-** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
-** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on.
-Random by default.
-** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard.
-** *`scroll` (Optional, string | -1 | 0)*: Specifies how long a consistent view of the index
-should be maintained for scrolled search.
-** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: The type of the search operation.
-** *`rest_total_hits_as_int` (Optional, boolean)*: If true, hits.total are rendered as an integer in the response.
-** *`typed_keys` (Optional, boolean)*: If `true`, the response prefixes aggregation and suggester names with their respective types.
-
-[discrete]
-=== terms_enum
-Get terms in an index.
-
-Discover terms that match a partial string in an index.
-This "terms enum" API is designed for low-latency look-ups used in auto-complete scenarios.
-
-If the `complete` property in the response is false, the returned terms set may be incomplete and should be treated as approximate.
-This can occur due to a few reasons, such as a request timeout or a node error.
-
-NOTE: The terms enum API may return terms from deleted documents. Deleted documents are initially only marked as deleted. It is not until their segments are merged that documents are actually deleted. Until that happens, the terms enum API will return terms from these documents.
-
-{ref}/search-terms-enum.html[Endpoint documentation]
-[source,ts]
-----
-client.termsEnum({ index, field })
-----
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (string)*: List of data streams, indices, and index aliases to search. Wildcard (*) expressions are supported.
-** *`field` (string)*: The field in the index from which to fetch terms.
-** *`size` (Optional, number)*: How many matching terms to return.
-** *`timeout` (Optional, string | -1 | 0)*: The maximum length of time to spend collecting results. Defaults to "1s" (one second). If the timeout is exceeded, the `complete` flag is set to `false` in the response and the results may be partial or empty.
-** *`case_insensitive` (Optional, boolean)*: When `true`, the provided search string is matched against index terms without case sensitivity.
-** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Allows filtering an index shard if the provided query rewrites to match_none.
-** *`string` (Optional, string)*: The string to match at the start of indexed terms. If not provided, all terms in the field are considered.
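
A quick autocomplete-style lookup might look like the following sketch (the index, field, and prefix are illustrative, and `client` is assumed to be configured already); the remaining parameters continue below.

[source,ts]
----
// suggest tags that start with "kib"; check `complete` before trusting the
// result, since a timeout or node error can make the terms list partial
const result = await client.termsEnum({
  index: 'my-index',
  field: 'tags',
  string: 'kib',
  size: 20
})
if (result.complete) {
  console.log(result.terms)
}
----
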
-** *`search_after` (Optional, string)* - -[discrete] -=== termvectors -Get term vector information. - -Get information and statistics about terms in the fields of a particular document. - -{ref}/docs-termvectors.html[Endpoint documentation] -[source,ts] ----- -client.termvectors({ index }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string)*: Name of the index that contains the document. -** *`id` (Optional, string)*: Unique identifier of the document. -** *`doc` (Optional, object)*: An artificial document (a document not present in the index) for which you want to retrieve term vectors. -** *`filter` (Optional, { max_doc_freq, max_num_terms, max_term_freq, max_word_length, min_doc_freq, min_term_freq, min_word_length })*: Filter terms based on their tf-idf scores. -** *`per_field_analyzer` (Optional, Record<string, string>)*: Overrides the default per-field analyzer. -** *`fields` (Optional, string | string[])*: List or wildcard expressions of fields to include in the statistics. -Used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. -** *`field_statistics` (Optional, boolean)*: If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies. -** *`offsets` (Optional, boolean)*: If `true`, the response includes term offsets. -** *`payloads` (Optional, boolean)*: If `true`, the response includes term payloads. -** *`positions` (Optional, boolean)*: If `true`, the response includes term positions. -** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. -Random by default. -** *`realtime` (Optional, boolean)*: If true, the request is real-time as opposed to near-real-time. -** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. -** *`term_statistics` (Optional, boolean)*: If `true`, the response includes term frequency and document frequency. -** *`version` (Optional, number)*: If `true`, returns the document version as part of a hit. -** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: Specific version type. - -[discrete] -=== update -Update a document. -Updates a document by running a script or passing a partial document. - -{ref}/docs-update.html[Endpoint documentation] -[source,ts] ----- -client.update({ id, index }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: Document ID -** *`index` (string)*: The name of the index -** *`detect_noop` (Optional, boolean)*: Set to false to disable setting 'result' in the response -to 'noop' if no change to the document occurred. -** *`doc` (Optional, object)*: A partial update to an existing document. -** *`doc_as_upsert` (Optional, boolean)*: Set to true to use the contents of 'doc' as the value of 'upsert' -** *`script` (Optional, { source, id, params, lang, options })*: Script to execute to update the document. -** *`scripted_upsert` (Optional, boolean)*: Set to true to execute the script whether or not the document exists. -** *`_source` (Optional, boolean | { excludes, includes })*: Set to false to disable source retrieval. You can also specify a comma-separated -list of the fields you want to retrieve. -** *`upsert` (Optional, object)*: If the document does not already exist, the contents of 'upsert' are inserted as a -new document. If the document exists, the 'script' is executed. 
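
The remaining `update` parameters continue below. Two common patterns, sketched with an illustrative index, ID, and fields (`client` is assumed to be configured already):

[source,ts]
----
// partial update: merge `doc` into the stored document, creating the
// document from `doc` if it does not exist yet
await client.update({
  index: 'my-index',
  id: '1',
  doc: { status: 'published' },
  doc_as_upsert: true
})

// scripted update: increment a counter, starting from `upsert` when
// the document does not exist yet
await client.update({
  index: 'my-index',
  id: '1',
  script: {
    lang: 'painless',
    source: 'ctx._source.views += params.amount',
    params: { amount: 1 }
  },
  upsert: { views: 0 }
})
----
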
-** *`if_primary_term` (Optional, number)*: Only perform the operation if the document has this primary term. -** *`if_seq_no` (Optional, number)*: Only perform the operation if the document has this sequence number. -** *`lang` (Optional, string)*: The script language. -** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If 'true', Elasticsearch refreshes the affected shards to make this operation -visible to search, if 'wait_for' then wait for a refresh to make this operation -visible to search, if 'false' do nothing with refreshes. -** *`require_alias` (Optional, boolean)*: If true, the destination must be an index alias. -** *`retry_on_conflict` (Optional, number)*: Specify how many times should the operation be retried when a conflict occurs. -** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for dynamic mapping updates and active shards. -This guarantees Elasticsearch waits for at least the timeout before failing. -The actual wait time could be longer, particularly when multiple waits occur. -** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operations. -Set to 'all' or any positive integer up to the total number of shards in the index -(number_of_replicas+1). Defaults to 1 meaning the primary shard. -** *`_source_excludes` (Optional, string | string[])*: Specify the source fields you want to exclude. -** *`_source_includes` (Optional, string | string[])*: Specify the source fields you want to retrieve. - -[discrete] -=== update_by_query -Update documents. -Updates documents that match the specified query. -If no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes. - -{ref}/docs-update-by-query.html[Endpoint documentation] -[source,ts] ----- -client.updateByQuery({ index }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string | string[])*: List of data streams, indices, and aliases to search. -Supports wildcards (`*`). -To search all data streams or indices, omit this parameter or use `*` or `_all`. -** *`max_docs` (Optional, number)*: The maximum number of documents to update. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specifies the documents to update using the Query DSL. -** *`script` (Optional, { source, id, params, lang, options })*: The script to run to update the document source or metadata when updating. -** *`slice` (Optional, { field, id, max })*: Slice the request manually using the provided slice ID and total number of slices. 
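
The remaining `update_by_query` parameters continue below. As a sketch, the following call rewrites a field on every matching document while tolerating version conflicts (the index, query, and script are illustrative, and `client` is assumed to be configured already):

[source,ts]
----
// `conflicts: 'proceed'` keeps the operation going when another write
// bumps a document's version while the update is running
const result = await client.updateByQuery({
  index: 'my-index',
  conflicts: 'proceed',
  query: { term: { status: 'stale' } },
  script: {
    lang: 'painless',
    source: "ctx._source.status = 'refreshed'"
  }
})
console.log(result.updated)
----
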
-** *`conflicts` (Optional, Enum("abort" | "proceed"))*: What to do if update by query hits version conflicts: `abort` or `proceed`. -** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. -This behavior applies even if the request targets other open indices. -For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. -** *`analyzer` (Optional, string)*: Analyzer to use for the query string. -** *`analyze_wildcard` (Optional, boolean)*: If `true`, wildcard and prefix queries are analyzed. -** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query: `AND` or `OR`. -** *`df` (Optional, string)*: Field to use as default where no field prefix is given in the query string. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. -If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. -** *`from` (Optional, number)*: Starting offset (default: 0) -** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. -** *`lenient` (Optional, boolean)*: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. -** *`pipeline` (Optional, string)*: ID of the pipeline to use to preprocess incoming documents. -If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. -If a final pipeline is configured it will always run, regardless of the value of this parameter. -** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. -Random by default. -** *`q` (Optional, string)*: Query in the Lucene query string syntax. -** *`refresh` (Optional, boolean)*: If `true`, Elasticsearch refreshes affected shards to make the operation visible to search. -** *`request_cache` (Optional, boolean)*: If `true`, the request cache is used for this request. -** *`requests_per_second` (Optional, float)*: The throttle for this request in sub-requests per second. -** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. -** *`scroll` (Optional, string | -1 | 0)*: Period to retain the search context for scrolling. -** *`scroll_size` (Optional, number)*: Size of the scroll request that powers the operation. -** *`search_timeout` (Optional, string | -1 | 0)*: Explicit timeout for each search request. -** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: The type of the search operation. Available options: `query_then_fetch`, `dfs_query_then_fetch`. -** *`slices` (Optional, number | Enum("auto"))*: The number of slices this task should be divided into. -** *`sort` (Optional, string[])*: A list of <field>:<direction> pairs. -** *`stats` (Optional, string[])*: Specific `tag` of the request for logging and statistical purposes. -** *`terminate_after` (Optional, number)*: Maximum number of documents to collect for each shard. 
-If a query reaches this limit, Elasticsearch terminates the query early. -Elasticsearch collects documents before sorting. -Use with caution. -Elasticsearch applies this parameter to each shard handling the request. -When possible, let Elasticsearch perform early termination automatically. -Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. -** *`timeout` (Optional, string | -1 | 0)*: Period each update request waits for the following operations: dynamic mapping updates, waiting for active shards. -** *`version` (Optional, boolean)*: If `true`, returns the document version as part of a hit. -** *`version_type` (Optional, boolean)*: Should the document increment the version number (internal) on hit or not (reindex) -** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. -Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). -** *`wait_for_completion` (Optional, boolean)*: If `true`, the request blocks until the operation is complete. - -[discrete] -=== update_by_query_rethrottle -Throttle an update by query operation. - -Change the number of requests per second for a particular update by query operation. -Rethrottling that speeds up the query takes effect immediately but rethrotting that slows down the query takes effect after completing the current batch to prevent scroll timeouts. - -{ref}/docs-update-by-query.html[Endpoint documentation] -[source,ts] ----- -client.updateByQueryRethrottle({ task_id }) ----- -[discrete] -==== Arguments - -* *Request (object):* -** *`task_id` (string)*: The ID for the task. -** *`requests_per_second` (Optional, float)*: The throttle for this request in sub-requests per second. - -[discrete] -=== async_search -[discrete] -==== delete -Delete an async search. - -If the asynchronous search is still running, it is cancelled. -Otherwise, the saved search results are deleted. -If the Elasticsearch security features are enabled, the deletion of a specific async search is restricted to: the authenticated user that submitted the original search request; users that have the `cancel_task` cluster privilege. - -{ref}/async-search.html[Endpoint documentation] -[source,ts] ----- -client.asyncSearch.delete({ id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: A unique identifier for the async search. - -[discrete] -==== get -Get async search results. - -Retrieve the results of a previously submitted asynchronous search request. -If the Elasticsearch security features are enabled, access to the results of a specific async search is restricted to the user or API key that submitted it. - -{ref}/async-search.html[Endpoint documentation] -[source,ts] ----- -client.asyncSearch.get({ id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: A unique identifier for the async search. -** *`keep_alive` (Optional, string | -1 | 0)*: Specifies how long the async search should be available in the cluster. -When not specified, the `keep_alive` set with the corresponding submit async request will be used. -Otherwise, it is possible to override the value and extend the validity of the request. -When this period expires, the search, if still running, is cancelled. -If the search is completed, its saved results are deleted. 
-** *`typed_keys` (Optional, boolean)*: Specify whether aggregation and suggester names should be prefixed by their respective types in the response -** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*: Specifies to wait for the search to be completed up until the provided timeout. -Final results will be returned if available before the timeout expires, otherwise the currently available results will be returned once the timeout expires. -By default no timeout is set meaning that the currently available results will be returned without any additional wait. - -[discrete] -==== status -Get the async search status. - -Get the status of a previously submitted async search request given its identifier, without retrieving search results. -If the Elasticsearch security features are enabled, use of this API is restricted to the `monitoring_user` role. - -{ref}/async-search.html[Endpoint documentation] -[source,ts] ----- -client.asyncSearch.status({ id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: A unique identifier for the async search. -** *`keep_alive` (Optional, string | -1 | 0)*: Specifies how long the async search needs to be available. -Ongoing async searches and any saved search results are deleted after this period. - -[discrete] -==== submit -Run an async search. - -When the primary sort of the results is an indexed field, shards get sorted based on minimum and maximum value that they hold for that field. Partial results become available following the sort criteria that was requested. - -Warning: Asynchronous search does not support scroll or search requests that include only the suggest section. - -By default, Elasticsearch does not allow you to store an async search response larger than 10Mb and an attempt to do this results in an error. -The maximum allowed size for a stored async search response can be set by changing the `search.max_async_search_response_size` cluster level setting. - -{ref}/async-search.html[Endpoint documentation] -[source,ts] ----- -client.asyncSearch.submit({ ... 
}) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string | string[])*: A list of index names to search; use `_all` or empty string to perform the operation on all indices -** *`aggregations` (Optional, Record<string, { aggregations, meta, adjacency_matrix, auto_date_histogram, avg, avg_bucket, boxplot, bucket_script, bucket_selector, bucket_sort, bucket_count_ks_test, bucket_correlation, cardinality, categorize_text, children, composite, cumulative_cardinality, cumulative_sum, date_histogram, date_range, derivative, diversified_sampler, extended_stats, extended_stats_bucket, frequent_item_sets, filter, filters, geo_bounds, geo_centroid, geo_distance, geohash_grid, geo_line, geotile_grid, geohex_grid, global, histogram, ip_range, ip_prefix, inference, line, matrix_stats, max, max_bucket, median_absolute_deviation, min, min_bucket, missing, moving_avg, moving_percentiles, moving_fn, multi_terms, nested, normalize, parent, percentile_ranks, percentiles, percentiles_bucket, range, rare_terms, rate, reverse_nested, random_sampler, sampler, scripted_metric, serial_diff, significant_terms, significant_text, stats, stats_bucket, string_stats, sum, sum_bucket, terms, time_series, top_hits, t_test, top_metrics, value_count, weighted_avg, variable_width_histogram }>)* -** *`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })* -** *`explain` (Optional, boolean)*: If true, returns detailed information about score computation as part of a hit. -** *`ext` (Optional, Record<string, User-defined value>)*: Configuration of search extensions defined by Elasticsearch plugins. -** *`from` (Optional, number)*: Starting document offset. By default, you cannot page through more than 10,000 -hits using the from and size parameters. To page through more hits, use the -search_after parameter. -** *`highlight` (Optional, { encoder, fields })* -** *`track_total_hits` (Optional, boolean | number)*: Number of hits matching the query to count accurately. If true, the exact -number of hits is returned at the cost of some performance. If false, the -response does not include the total number of hits matching the query. -Defaults to 10,000 hits. -** *`indices_boost` (Optional, Record<string, number>[])*: Boosts the _score of documents from specified indices. -** *`docvalue_fields` (Optional, { field, format, include_unmapped }[])*: Array of wildcard (*) patterns. The request returns doc values for field -names matching these patterns in the hits.fields property of the response. -** *`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits } | { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits }[])*: Defines the approximate kNN search to run. -** *`min_score` (Optional, number)*: Minimum _score for matching documents. Documents with a lower _score are -not included in the search results. 
-** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })* -** *`profile` (Optional, boolean)* -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. -** *`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])* -** *`script_fields` (Optional, Record<string, { script, ignore_failure }>)*: Retrieve a script evaluation (based on different fields) for each hit. -** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])* -** *`size` (Optional, number)*: The number of hits to return. By default, you cannot page through more -than 10,000 hits using the from and size parameters. To page through more -hits, use the search_after parameter. -** *`slice` (Optional, { field, id, max })* -** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])* -** *`_source` (Optional, boolean | { excludes, includes })*: Indicates which source fields are returned for matching documents. These -fields are returned in the hits._source property of the search response. -** *`fields` (Optional, { field, format, include_unmapped }[])*: Array of wildcard (*) patterns. The request returns values for field names -matching these patterns in the hits.fields property of the response. -** *`suggest` (Optional, { text })* -** *`terminate_after` (Optional, number)*: Maximum number of documents to collect for each shard. If a query reaches this -limit, Elasticsearch terminates the query early. Elasticsearch collects documents -before sorting. Defaults to 0, which does not terminate query execution early. -** *`timeout` (Optional, string)*: Specifies the period of time to wait for a response from each shard. If no response -is received before the timeout expires, the request fails and returns an error. -Defaults to no timeout. -** *`track_scores` (Optional, boolean)*: If true, calculate and return document scores, even if the scores are not used for sorting. -** *`version` (Optional, boolean)*: If true, returns document version as part of a hit. 
-** *`seq_no_primary_term` (Optional, boolean)*: If true, returns sequence number and primary term of the last modification -of each hit. See Optimistic concurrency control. -** *`stored_fields` (Optional, string | string[])*: List of stored fields to return as part of a hit. If no fields are specified, -no stored fields are included in the response. If this field is specified, the _source -parameter defaults to false. You can pass _source: true to return both source fields -and stored fields in the search response. -** *`pit` (Optional, { id, keep_alive })*: Limits the search to a point in time (PIT). If you provide a PIT, you -cannot specify an <index> in the request path. -** *`runtime_mappings` (Optional, Record<string, { fields, fetch_fields, format, input_field, target_field, target_index, script, type }>)*: Defines one or more runtime fields in the search request. These fields take -precedence over mapped fields with the same name. -** *`stats` (Optional, string[])*: Stats groups to associate with the search. Each group maintains a statistics -aggregation for its associated searches. You can retrieve these stats using -the indices stats API. -** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*: Blocks and waits until the search is completed up to a certain timeout. -When the async search completes within the timeout, the response won’t include the ID as the results are not stored in the cluster. -** *`keep_on_completion` (Optional, boolean)*: If `true`, results are stored for later retrieval when the search completes within the `wait_for_completion_timeout`. -** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) -** *`allow_partial_search_results` (Optional, boolean)*: Indicate if an error should be returned if there is a partial search failure or timeout -** *`analyzer` (Optional, string)*: The analyzer to use for the query string -** *`analyze_wildcard` (Optional, boolean)*: Specify whether wildcard and prefix queries should be analyzed (default: false) -** *`batched_reduce_size` (Optional, number)*: Affects how often partial results become available, which happens whenever shard results are reduced. -A partial reduction is performed every time the coordinating node has received a certain number of new shard responses (5 by default). -** *`ccs_minimize_roundtrips` (Optional, boolean)*: The default value is the only supported value. -** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query (AND or OR) -** *`df` (Optional, string)*: The field to use as default where no field prefix is given in the query string -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. 
-** *`ignore_throttled` (Optional, boolean)*: Whether specified concrete, expanded or aliased indices should be ignored when throttled -** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) -** *`lenient` (Optional, boolean)*: Specify whether format-based query failures (such as providing text to a numeric field) should be ignored -** *`max_concurrent_shard_requests` (Optional, number)*: The number of concurrent shard requests per node this search executes concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests -** *`preference` (Optional, string)*: Specify the node or shard the operation should be performed on (default: random) -** *`request_cache` (Optional, boolean)*: Specify if request cache should be used for this request or not, defaults to true -** *`routing` (Optional, string)*: A list of specific routing values -** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: Search operation type -** *`suggest_field` (Optional, string)*: Specifies which field to use for suggestions. -** *`suggest_mode` (Optional, Enum("missing" | "popular" | "always"))*: Specify suggest mode -** *`suggest_size` (Optional, number)*: How many suggestions to return in response -** *`suggest_text` (Optional, string)*: The source text for which the suggestions should be returned. -** *`typed_keys` (Optional, boolean)*: Specify whether aggregation and suggester names should be prefixed by their respective types in the response -** *`rest_total_hits_as_int` (Optional, boolean)*: Indicates whether hits.total should be rendered as an integer or an object in the rest search response -** *`_source_excludes` (Optional, string | string[])*: A list of fields to exclude from the returned _source field -** *`_source_includes` (Optional, string | string[])*: A list of fields to extract and return from the _source field -** *`q` (Optional, string)*: Query in the Lucene query string syntax - -[discrete] -=== autoscaling -[discrete] -==== delete_autoscaling_policy -Delete an autoscaling policy. - -NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. - -{ref}/autoscaling-delete-autoscaling-policy.html[Endpoint documentation] -[source,ts] ----- -client.autoscaling.deleteAutoscalingPolicy({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string)*: the name of the autoscaling policy -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== get_autoscaling_capacity -Get the autoscaling capacity. - -NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. - -This API gets the current autoscaling capacity based on the configured autoscaling policy. -It will return information to size the cluster appropriately to the current workload. 
- -The `required_capacity` is calculated as the maximum of the `required_capacity` result of all individual deciders that are enabled for the policy. - -The operator should verify that the `current_nodes` match the operator’s knowledge of the cluster to avoid making autoscaling decisions based on stale or incomplete information. - -The response contains decider-specific information you can use to diagnose how and why autoscaling determined a certain capacity was required. -This information is provided for diagnosis only. -Do not use this information to make autoscaling decisions. - -{ref}/autoscaling-get-autoscaling-capacity.html[Endpoint documentation] -[source,ts] ----- -client.autoscaling.getAutoscalingCapacity({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== get_autoscaling_policy -Get an autoscaling policy. - -NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. - -{ref}/autoscaling-get-autoscaling-capacity.html[Endpoint documentation] -[source,ts] ----- -client.autoscaling.getAutoscalingPolicy({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string)*: the name of the autoscaling policy -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== put_autoscaling_policy -Create or update an autoscaling policy. - -NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. - -{ref}/autoscaling-put-autoscaling-policy.html[Endpoint documentation] -[source,ts] ----- -client.autoscaling.putAutoscalingPolicy({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string)*: the name of the autoscaling policy -** *`policy` (Optional, { roles, deciders })* -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -=== cat -[discrete] -==== aliases -Get aliases. -Retrieves the cluster’s index aliases, including filter and routing information. -The API does not return data stream aliases. - -CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API. - -{ref}/cat-alias.html[Endpoint documentation] -[source,ts] ----- -client.cat.aliases({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (Optional, string | string[])*: A list of aliases to retrieve. Supports wildcards (`*`). To retrieve all aliases, omit this parameter or use `*` or `_all`. 
-** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. - -[discrete] -==== allocation -Get shard allocation information. -Get a snapshot of the number of shards allocated to each data node and their disk space. -IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. - -{ref}/cat-allocation.html[Endpoint documentation] -[source,ts] ----- -client.cat.allocation({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`node_id` (Optional, string | string[])*: List of node identifiers or names used to limit the returned information. -** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values. -** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the -local cluster state. If `false` the list of selected nodes are computed -from the cluster state of the master node. In both cases the coordinating -node will send requests for further information to each selected node. - -[discrete] -==== component_templates -Get component templates. -Returns information about component templates in a cluster. -Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. - -CAT APIs are only intended for human consumption using the command line or Kibana console. -They are not intended for use by applications. For application consumption, use the get component template API. - -{ref}/cat-component-templates.html[Endpoint documentation] -[source,ts] ----- -client.cat.componentTemplates({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (Optional, string)*: The name of the component template. Accepts wildcard expressions. If omitted, all component templates are returned. -** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the -local cluster state. If `false` the list of selected nodes are computed -from the cluster state of the master node. In both cases the coordinating -node will send requests for further information to each selected node. - -[discrete] -==== count -Get a document count. -Provides quick access to a document count for a data stream, an index, or an entire cluster. -The document count only includes live documents, not deleted documents which have not yet been removed by the merge process. - -CAT APIs are only intended for human consumption using the command line or Kibana console. -They are not intended for use by applications. For application consumption, use the count API. - -{ref}/cat-count.html[Endpoint documentation] -[source,ts] ----- -client.cat.count({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request. -Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. - -[discrete] -==== fielddata -Get field data cache information. -Get the amount of heap memory currently used by the field data cache on every data node in the cluster. -IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. -They are not intended for use by applications. 
For application consumption, use the nodes stats API. - -{ref}/cat-fielddata.html[Endpoint documentation] -[source,ts] ----- -client.cat.fielddata({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`fields` (Optional, string | string[])*: List of fields used to limit returned information. -To retrieve all fields, omit this parameter. -** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values. - -[discrete] -==== health -Get the cluster health status. -IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. -They are not intended for use by applications. For application consumption, use the cluster health API. -This API is often used to check malfunctioning clusters. -To help you track cluster health alongside log files and alerting systems, the API returns timestamps in two formats: -`HH:MM:SS`, which is human-readable but includes no date information; -`Unix epoch time`, which is machine-sortable and includes date information. -The latter format is useful for cluster recoveries that take multiple days. -You can use the cat health API to verify cluster health across multiple nodes. -You also can use the API to track the recovery of a large cluster over a longer period of time. - -{ref}/cat-health.html[Endpoint documentation] -[source,ts] ----- -client.cat.health({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: The unit used to display time values. -** *`ts` (Optional, boolean)*: If true, returns `HH:MM:SS` and Unix epoch timestamps. - -[discrete] -==== help -Get CAT help. -Returns help for the CAT APIs. - -{ref}/cat.html[Endpoint documentation] -[source,ts] ----- -client.cat.help() ----- - - -[discrete] -==== indices -Get index information. -Returns high-level information about indices in a cluster, including backing indices for data streams. - -Use this request to get the following information for each index in a cluster: -- shard count -- document count -- deleted document count -- primary store size -- total store size of all shards, including shard replicas - -These metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents. -To get an accurate count of Elasticsearch documents, use the cat count or count APIs. - -CAT APIs are only intended for human consumption using the command line or Kibana console. -They are not intended for use by applications. For application consumption, use an index endpoint. - -{ref}/cat-indices.html[Endpoint documentation] -[source,ts] ----- -client.cat.indices({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request. -Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. -** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: The type of index that wildcard patterns can match. -** *`health` (Optional, Enum("green" | "yellow" | "red"))*: The health status used to limit returned indices. By default, the response includes indices of any health status. 
-** *`include_unloaded_segments` (Optional, boolean)*: If true, the response includes information from segments that are not loaded into memory. -** *`pri` (Optional, boolean)*: If true, the response only includes information from primary shards. -** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: The unit used to display time values. - -[discrete] -==== master -Get master node information. -Get information about the master node, including the ID, bound IP address, and name. -IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. - -{ref}/cat-master.html[Endpoint documentation] -[source,ts] ----- -client.cat.master({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the -local cluster state. If `false` the list of selected nodes are computed -from the cluster state of the master node. In both cases the coordinating -node will send requests for further information to each selected node. - -[discrete] -==== ml_data_frame_analytics -Get data frame analytics jobs. -Returns configuration and usage information about data frame analytics jobs. - -CAT APIs are only intended for human consumption using the Kibana -console or command line. They are not intended for use by applications. For -application consumption, use the get data frame analytics jobs statistics API. - -{ref}/cat-dfanalytics.html[Endpoint documentation] -[source,ts] ----- -client.cat.mlDataFrameAnalytics({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (Optional, string)*: The ID of the data frame analytics to fetch -** *`allow_no_match` (Optional, boolean)*: Whether to ignore if a wildcard expression matches no configs. (This includes `_all` string or when no configs have been specified) -** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit in which to display byte values -** *`h` (Optional, Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version") | Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version")[])*: List of column names to display. -** *`s` (Optional, Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version") | Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version")[])*: List of column names or column aliases used to sort the -response. -** *`time` (Optional, string | -1 | 0)*: Unit used to display time values. - -[discrete] -==== ml_datafeeds -Get datafeeds. -Returns configuration and usage information about datafeeds. -This API returns a maximum of 10,000 datafeeds. 
-If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` -cluster privileges to use this API. - -CAT APIs are only intended for human consumption using the Kibana -console or command line. They are not intended for use by applications. For -application consumption, use the get datafeed statistics API. - -{ref}/cat-datafeeds.html[Endpoint documentation] -[source,ts] ----- -client.cat.mlDatafeeds({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`datafeed_id` (Optional, string)*: A numerical character string that uniquely identifies the datafeed. -** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: - -* Contains wildcard expressions and there are no datafeeds that match. -* Contains the `_all` string or no identifiers and there are no matches. -* Contains wildcard expressions and there are only partial matches. - -If `true`, the API returns an empty datafeeds array when there are no matches and the subset of results when -there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only -partial matches. -** *`h` (Optional, Enum("ae" | "bc" | "id" | "na" | "ne" | "ni" | "nn" | "sba" | "sc" | "seah" | "st" | "s") | Enum("ae" | "bc" | "id" | "na" | "ne" | "ni" | "nn" | "sba" | "sc" | "seah" | "st" | "s")[])*: List of column names to display. -** *`s` (Optional, Enum("ae" | "bc" | "id" | "na" | "ne" | "ni" | "nn" | "sba" | "sc" | "seah" | "st" | "s") | Enum("ae" | "bc" | "id" | "na" | "ne" | "ni" | "nn" | "sba" | "sc" | "seah" | "st" | "s")[])*: List of column names or column aliases used to sort the response. -** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: The unit used to display time values. - -[discrete] -==== ml_jobs -Get anomaly detection jobs. -Returns configuration and usage information for anomaly detection jobs. -This API returns a maximum of 10,000 jobs. -If the Elasticsearch security features are enabled, you must have `monitor_ml`, -`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. - -CAT APIs are only intended for human consumption using the Kibana -console or command line. They are not intended for use by applications. For -application consumption, use the get anomaly detection job statistics API. - -{ref}/cat-anomaly-detectors.html[Endpoint documentation] -[source,ts] ----- -client.cat.mlJobs({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`job_id` (Optional, string)*: Identifier for the anomaly detection job. -** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: - -* Contains wildcard expressions and there are no jobs that match. -* Contains the `_all` string or no identifiers and there are no matches. -* Contains wildcard expressions and there are only partial matches. - -If `true`, the API returns an empty jobs array when there are no matches and the subset of results when there -are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial -matches. -** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values. 
-** *`h` (Optional, Enum("assignment_explanation" | "buckets.count" | "buckets.time.exp_avg" | "buckets.time.exp_avg_hour" | "buckets.time.max" | "buckets.time.min" | "buckets.time.total" | "data.buckets" | "data.earliest_record" | "data.empty_buckets" | "data.input_bytes" | "data.input_fields" | "data.input_records" | "data.invalid_dates" | "data.last" | "data.last_empty_bucket" | "data.last_sparse_bucket" | "data.latest_record" | "data.missing_fields" | "data.out_of_order_timestamps" | "data.processed_fields" | "data.processed_records" | "data.sparse_buckets" | "forecasts.memory.avg" | "forecasts.memory.max" | "forecasts.memory.min" | "forecasts.memory.total" | "forecasts.records.avg" | "forecasts.records.max" | "forecasts.records.min" | "forecasts.records.total" | "forecasts.time.avg" | "forecasts.time.max" | "forecasts.time.min" | "forecasts.time.total" | "forecasts.total" | "id" | "model.bucket_allocation_failures" | "model.by_fields" | "model.bytes" | "model.bytes_exceeded" | "model.categorization_status" | "model.categorized_doc_count" | "model.dead_category_count" | "model.failed_category_count" | "model.frequent_category_count" | "model.log_time" | "model.memory_limit" | "model.memory_status" | "model.over_fields" | "model.partition_fields" | "model.rare_category_count" | "model.timestamp" | "model.total_category_count" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "opened_time" | "state") | Enum("assignment_explanation" | "buckets.count" | "buckets.time.exp_avg" | "buckets.time.exp_avg_hour" | "buckets.time.max" | "buckets.time.min" | "buckets.time.total" | "data.buckets" | "data.earliest_record" | "data.empty_buckets" | "data.input_bytes" | "data.input_fields" | "data.input_records" | "data.invalid_dates" | "data.last" | "data.last_empty_bucket" | "data.last_sparse_bucket" | "data.latest_record" | "data.missing_fields" | "data.out_of_order_timestamps" | "data.processed_fields" | "data.processed_records" | "data.sparse_buckets" | "forecasts.memory.avg" | "forecasts.memory.max" | "forecasts.memory.min" | "forecasts.memory.total" | "forecasts.records.avg" | "forecasts.records.max" | "forecasts.records.min" | "forecasts.records.total" | "forecasts.time.avg" | "forecasts.time.max" | "forecasts.time.min" | "forecasts.time.total" | "forecasts.total" | "id" | "model.bucket_allocation_failures" | "model.by_fields" | "model.bytes" | "model.bytes_exceeded" | "model.categorization_status" | "model.categorized_doc_count" | "model.dead_category_count" | "model.failed_category_count" | "model.frequent_category_count" | "model.log_time" | "model.memory_limit" | "model.memory_status" | "model.over_fields" | "model.partition_fields" | "model.rare_category_count" | "model.timestamp" | "model.total_category_count" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "opened_time" | "state")[])*: List of column names to display. 
-** *`s` (Optional, Enum("assignment_explanation" | "buckets.count" | "buckets.time.exp_avg" | "buckets.time.exp_avg_hour" | "buckets.time.max" | "buckets.time.min" | "buckets.time.total" | "data.buckets" | "data.earliest_record" | "data.empty_buckets" | "data.input_bytes" | "data.input_fields" | "data.input_records" | "data.invalid_dates" | "data.last" | "data.last_empty_bucket" | "data.last_sparse_bucket" | "data.latest_record" | "data.missing_fields" | "data.out_of_order_timestamps" | "data.processed_fields" | "data.processed_records" | "data.sparse_buckets" | "forecasts.memory.avg" | "forecasts.memory.max" | "forecasts.memory.min" | "forecasts.memory.total" | "forecasts.records.avg" | "forecasts.records.max" | "forecasts.records.min" | "forecasts.records.total" | "forecasts.time.avg" | "forecasts.time.max" | "forecasts.time.min" | "forecasts.time.total" | "forecasts.total" | "id" | "model.bucket_allocation_failures" | "model.by_fields" | "model.bytes" | "model.bytes_exceeded" | "model.categorization_status" | "model.categorized_doc_count" | "model.dead_category_count" | "model.failed_category_count" | "model.frequent_category_count" | "model.log_time" | "model.memory_limit" | "model.memory_status" | "model.over_fields" | "model.partition_fields" | "model.rare_category_count" | "model.timestamp" | "model.total_category_count" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "opened_time" | "state") | Enum("assignment_explanation" | "buckets.count" | "buckets.time.exp_avg" | "buckets.time.exp_avg_hour" | "buckets.time.max" | "buckets.time.min" | "buckets.time.total" | "data.buckets" | "data.earliest_record" | "data.empty_buckets" | "data.input_bytes" | "data.input_fields" | "data.input_records" | "data.invalid_dates" | "data.last" | "data.last_empty_bucket" | "data.last_sparse_bucket" | "data.latest_record" | "data.missing_fields" | "data.out_of_order_timestamps" | "data.processed_fields" | "data.processed_records" | "data.sparse_buckets" | "forecasts.memory.avg" | "forecasts.memory.max" | "forecasts.memory.min" | "forecasts.memory.total" | "forecasts.records.avg" | "forecasts.records.max" | "forecasts.records.min" | "forecasts.records.total" | "forecasts.time.avg" | "forecasts.time.max" | "forecasts.time.min" | "forecasts.time.total" | "forecasts.total" | "id" | "model.bucket_allocation_failures" | "model.by_fields" | "model.bytes" | "model.bytes_exceeded" | "model.categorization_status" | "model.categorized_doc_count" | "model.dead_category_count" | "model.failed_category_count" | "model.frequent_category_count" | "model.log_time" | "model.memory_limit" | "model.memory_status" | "model.over_fields" | "model.partition_fields" | "model.rare_category_count" | "model.timestamp" | "model.total_category_count" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "opened_time" | "state")[])*: List of column names or column aliases used to sort the response. -** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: The unit used to display time values. - -[discrete] -==== ml_trained_models -Get trained models. -Returns configuration and usage information about inference trained models. - -CAT APIs are only intended for human consumption using the Kibana -console or command line. They are not intended for use by applications. For -application consumption, use the get trained models statistics API. - -{ref}/cat-trained-model.html[Endpoint documentation] -[source,ts] ----- -client.cat.mlTrainedModels({ ... 
}) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`model_id` (Optional, string)*: A unique identifier for the trained model. -** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: contains wildcard expressions and there are no models that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. -If `true`, the API returns an empty array when there are no matches and the subset of results when there are partial matches. -If `false`, the API returns a 404 status code when there are no matches or only partial matches. -** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values. -** *`h` (Optional, Enum("create_time" | "created_by" | "data_frame_analytics_id" | "description" | "heap_size" | "id" | "ingest.count" | "ingest.current" | "ingest.failed" | "ingest.pipelines" | "ingest.time" | "license" | "operations" | "version") | Enum("create_time" | "created_by" | "data_frame_analytics_id" | "description" | "heap_size" | "id" | "ingest.count" | "ingest.current" | "ingest.failed" | "ingest.pipelines" | "ingest.time" | "license" | "operations" | "version")[])*: A list of column names to display. -** *`s` (Optional, Enum("create_time" | "created_by" | "data_frame_analytics_id" | "description" | "heap_size" | "id" | "ingest.count" | "ingest.current" | "ingest.failed" | "ingest.pipelines" | "ingest.time" | "license" | "operations" | "version") | Enum("create_time" | "created_by" | "data_frame_analytics_id" | "description" | "heap_size" | "id" | "ingest.count" | "ingest.current" | "ingest.failed" | "ingest.pipelines" | "ingest.time" | "license" | "operations" | "version")[])*: A list of column names or aliases used to sort the response. -** *`from` (Optional, number)*: Skips the specified number of transforms. -** *`size` (Optional, number)*: The maximum number of transforms to display. - -[discrete] -==== nodeattrs -Get node attribute information. -Get information about custom node attributes. -IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. - -{ref}/cat-nodeattrs.html[Endpoint documentation] -[source,ts] ----- -client.cat.nodeattrs({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the -local cluster state. If `false` the list of selected nodes are computed -from the cluster state of the master node. In both cases the coordinating -node will send requests for further information to each selected node. - -[discrete] -==== nodes -Get node information. -Get information about the nodes in a cluster. -IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. - -{ref}/cat-nodes.html[Endpoint documentation] -[source,ts] ----- -client.cat.nodes({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values. -** *`full_id` (Optional, boolean | string)*: If `true`, return the full node ID. If `false`, return the shortened node ID. 
-** *`include_unloaded_segments` (Optional, boolean)*: If true, the response includes information from segments that are not loaded into memory. - -[discrete] -==== pending_tasks -Get pending task information. -Get information about cluster-level changes that have not yet taken effect. -IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the pending cluster tasks API. - -{ref}/cat-pending-tasks.html[Endpoint documentation] -[source,ts] ----- -client.cat.pendingTasks({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the -local cluster state. If `false` the list of selected nodes are computed -from the cluster state of the master node. In both cases the coordinating -node will send requests for further information to each selected node. - -[discrete] -==== plugins -Get plugin information. -Get a list of plugins running on each node of a cluster. -IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. - -{ref}/cat-plugins.html[Endpoint documentation] -[source,ts] ----- -client.cat.plugins({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the -local cluster state. If `false` the list of selected nodes are computed -from the cluster state of the master node. In both cases the coordinating -node will send requests for further information to each selected node. - -[discrete] -==== recovery -Get shard recovery information. -Get information about ongoing and completed shard recoveries. -Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or syncing a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing. -For data streams, the API returns information about the stream’s backing indices. -IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index recovery API. - -{ref}/cat-recovery.html[Endpoint documentation] -[source,ts] ----- -client.cat.recovery({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases used to limit the request. -Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. -** *`active_only` (Optional, boolean)*: If `true`, the response only includes ongoing shard recoveries. -** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values. -** *`detailed` (Optional, boolean)*: If `true`, the response includes detailed information about shard recoveries. - -[discrete] -==== repositories -Get snapshot repository information. -Get a list of snapshot repositories for a cluster. -IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot repository API. 
- -{ref}/cat-repositories.html[Endpoint documentation] -[source,ts] ----- -client.cat.repositories() ----- - - -[discrete] -==== segments -Get segment information. -Get low-level information about the Lucene segments in index shards. -For data streams, the API returns information about the backing indices. -IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index segments API. - -{ref}/cat-segments.html[Endpoint documentation] -[source,ts] ----- -client.cat.segments({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases used to limit the request. -Supports wildcards (`*`). -To target all data streams and indices, omit this parameter or use `*` or `_all`. -** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values. -** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the -local cluster state. If `false` the list of selected nodes are computed -from the cluster state of the master node. In both cases the coordinating -node will send requests for further information to each selected node. - -[discrete] -==== shards -Get shard information. -Get information about the shards in a cluster. -For data streams, the API returns information about the backing indices. -IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. - -{ref}/cat-shards.html[Endpoint documentation] -[source,ts] ----- -client.cat.shards({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases used to limit the request. -Supports wildcards (`*`). -To target all data streams and indices, omit this parameter or use `*` or `_all`. -** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values. - -[discrete] -==== snapshots -Get snapshot information -Get information about the snapshots stored in one or more repositories. -A snapshot is a backup of an index or running Elasticsearch cluster. -IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API. - -{ref}/cat-snapshots.html[Endpoint documentation] -[source,ts] ----- -client.cat.snapshots({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`repository` (Optional, string | string[])*: A list of snapshot repositories used to limit the request. -Accepts wildcard expressions. -`_all` returns all repositories. -If any repository fails during the request, Elasticsearch returns an error. -** *`ignore_unavailable` (Optional, boolean)*: If `true`, the response does not include information from unavailable snapshots. - -[discrete] -==== tasks -Get task information. -Get information about tasks currently running in the cluster. -IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the task management API. - -{ref}/tasks.html[Endpoint documentation] -[source,ts] ----- -client.cat.tasks({ ... 
})
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`actions` (Optional, string[])*: The task action names, which are used to limit the response.
-** *`detailed` (Optional, boolean)*: If `true`, the response includes detailed information about the running tasks.
-** *`node_id` (Optional, string[])*: Unique node identifiers, which are used to limit the response.
-** *`parent_task_id` (Optional, string)*: The parent task identifier, which is used to limit the response.
-
-[discrete]
-==== templates
-Get index template information.
-Get information about the index templates in a cluster.
-You can use index templates to apply index settings and field mappings to new indices at creation.
-IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index template API.
-
-{ref}/cat-templates.html[Endpoint documentation]
-[source,ts]
----
-client.cat.templates({ ... })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`name` (Optional, string)*: The name of the template to return.
-Accepts wildcard expressions. If omitted, all templates are returned.
-** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the
-local cluster state. If `false` the list of selected nodes are computed
-from the cluster state of the master node. In both cases the coordinating
-node will send requests for further information to each selected node.
-
-[discrete]
-==== thread_pool
-Get thread pool statistics.
-Get thread pool statistics for each node in a cluster.
-Returned information includes all built-in thread pools and custom thread pools.
-IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.
-
-{ref}/cat-thread-pool.html[Endpoint documentation]
-[source,ts]
----
-client.cat.threadPool({ ... })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`thread_pool_patterns` (Optional, string | string[])*: A list of thread pool names used to limit the request.
-Accepts wildcard expressions.
-** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: The unit used to display time values.
-** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the
-local cluster state. If `false` the list of selected nodes are computed
-from the cluster state of the master node. In both cases the coordinating
-node will send requests for further information to each selected node.
-
-[discrete]
-==== transforms
-Get transform information.
-Get configuration and usage information about transforms.
-
-CAT APIs are only intended for human consumption using the Kibana
-console or command line. They are not intended for use by applications. For
-application consumption, use the get transform statistics API.
-
-{ref}/cat-transforms.html[Endpoint documentation]
-[source,ts]
----
-client.cat.transforms({ ... })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`transform_id` (Optional, string)*: A transform identifier or a wildcard expression.
-If you do not specify one of these options, the API returns information for all transforms.
-** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. -If `true`, it returns an empty transforms array when there are no matches and the subset of results when there are partial matches. -If `false`, the request returns a 404 status code when there are no matches or only partial matches. -** *`from` (Optional, number)*: Skips the specified number of transforms. -** *`h` (Optional, Enum("changes_last_detection_time" | "checkpoint" | "checkpoint_duration_time_exp_avg" | "checkpoint_progress" | "create_time" | "delete_time" | "description" | "dest_index" | "documents_deleted" | "documents_indexed" | "docs_per_second" | "documents_processed" | "frequency" | "id" | "index_failure" | "index_time" | "index_total" | "indexed_documents_exp_avg" | "last_search_time" | "max_page_search_size" | "pages_processed" | "pipeline" | "processed_documents_exp_avg" | "processing_time" | "reason" | "search_failure" | "search_time" | "search_total" | "source_index" | "state" | "transform_type" | "trigger_count" | "version") | Enum("changes_last_detection_time" | "checkpoint" | "checkpoint_duration_time_exp_avg" | "checkpoint_progress" | "create_time" | "delete_time" | "description" | "dest_index" | "documents_deleted" | "documents_indexed" | "docs_per_second" | "documents_processed" | "frequency" | "id" | "index_failure" | "index_time" | "index_total" | "indexed_documents_exp_avg" | "last_search_time" | "max_page_search_size" | "pages_processed" | "pipeline" | "processed_documents_exp_avg" | "processing_time" | "reason" | "search_failure" | "search_time" | "search_total" | "source_index" | "state" | "transform_type" | "trigger_count" | "version")[])*: List of column names to display. -** *`s` (Optional, Enum("changes_last_detection_time" | "checkpoint" | "checkpoint_duration_time_exp_avg" | "checkpoint_progress" | "create_time" | "delete_time" | "description" | "dest_index" | "documents_deleted" | "documents_indexed" | "docs_per_second" | "documents_processed" | "frequency" | "id" | "index_failure" | "index_time" | "index_total" | "indexed_documents_exp_avg" | "last_search_time" | "max_page_search_size" | "pages_processed" | "pipeline" | "processed_documents_exp_avg" | "processing_time" | "reason" | "search_failure" | "search_time" | "search_total" | "source_index" | "state" | "transform_type" | "trigger_count" | "version") | Enum("changes_last_detection_time" | "checkpoint" | "checkpoint_duration_time_exp_avg" | "checkpoint_progress" | "create_time" | "delete_time" | "description" | "dest_index" | "documents_deleted" | "documents_indexed" | "docs_per_second" | "documents_processed" | "frequency" | "id" | "index_failure" | "index_time" | "index_total" | "indexed_documents_exp_avg" | "last_search_time" | "max_page_search_size" | "pages_processed" | "pipeline" | "processed_documents_exp_avg" | "processing_time" | "reason" | "search_failure" | "search_time" | "search_total" | "source_index" | "state" | "transform_type" | "trigger_count" | "version")[])*: List of column names or column aliases used to sort the response. -** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: The unit used to display time values. -** *`size` (Optional, number)*: The maximum number of transforms to obtain. 
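-
-As a quick usage sketch (the transform identifier `my-transform` below is hypothetical, and an already-instantiated `client` is assumed), the response defaults to a plain-text table:
-
-[source,js]
----
-const table = await client.cat.transforms({ transform_id: 'my-transform' })
-console.log(table)
----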
-
-[discrete]
-=== ccr
-[discrete]
-==== delete_auto_follow_pattern
-Deletes auto-follow patterns.
-
-{ref}/ccr-delete-auto-follow-pattern.html[Endpoint documentation]
-[source,ts]
----
-client.ccr.deleteAutoFollowPattern({ name })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`name` (string)*: The name of the auto follow pattern.
-
-[discrete]
-==== follow
-Creates a new follower index configured to follow the referenced leader index.
-
-{ref}/ccr-put-follow.html[Endpoint documentation]
-[source,ts]
----
-client.ccr.follow({ index, leader_index, remote_cluster })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (string)*: The name of the follower index.
-** *`leader_index` (string)*: The name of the index in the leader cluster to follow.
-** *`remote_cluster` (string)*: The remote cluster containing the leader index.
-** *`data_stream_name` (Optional, string)*: If the leader index is part of a data stream, the name to which the local data stream for the followed index should be renamed.
-** *`max_outstanding_read_requests` (Optional, number)*: The maximum number of outstanding read requests from the remote cluster.
-** *`max_outstanding_write_requests` (Optional, number)*: The maximum number of outstanding write requests on the follower.
-** *`max_read_request_operation_count` (Optional, number)*: The maximum number of operations to pull per read from the remote cluster.
-** *`max_read_request_size` (Optional, number | string)*: The maximum size in bytes per read of a batch of operations pulled from the remote cluster.
-** *`max_retry_delay` (Optional, string | -1 | 0)*: The maximum time to wait before retrying an operation that failed exceptionally. An exponential backoff strategy is employed when
-retrying.
-** *`max_write_buffer_count` (Optional, number)*: The maximum number of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be
-deferred until the number of queued operations goes below the limit.
-** *`max_write_buffer_size` (Optional, number | string)*: The maximum total bytes of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will
-be deferred until the total bytes of queued operations goes below the limit.
-** *`max_write_request_operation_count` (Optional, number)*: The maximum number of operations per bulk write request executed on the follower.
-** *`max_write_request_size` (Optional, number | string)*: The maximum total bytes of operations per bulk write request executed on the follower.
-** *`read_poll_timeout` (Optional, string | -1 | 0)*: The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index.
-When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics.
-Then the follower will immediately attempt to read from the leader again.
-** *`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })*: Settings to override from the leader index.
-** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Specifies the number of shards to wait on being active before responding. This defaults to waiting on none of the shards to be
-active.
-A shard must be restored from the leader index before being active. Restoring a follower shard requires transferring all the
-remote Lucene segment files to the follower index.
-
-[discrete]
-==== follow_info
-Retrieves information about all follower indices, including parameters and status for each follower index.
-
-{ref}/ccr-get-follow-info.html[Endpoint documentation]
-[source,ts]
-----
-client.ccr.followInfo({ index })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (string | string[])*: A list of index patterns; use `_all` to perform the operation on all indices
-
-[discrete]
-==== follow_stats
-Retrieves follower stats. Returns shard-level stats about the following tasks associated with each shard for the specified indices.
-
-{ref}/ccr-get-follow-stats.html[Endpoint documentation]
-[source,ts]
-----
-client.ccr.followStats({ index })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (string | string[])*: A list of index patterns; use `_all` to perform the operation on all indices
-
-[discrete]
-==== forget_follower
-Removes the follower retention leases from the leader.
-
-{ref}/ccr-post-forget-follower.html[Endpoint documentation]
-[source,ts]
-----
-client.ccr.forgetFollower({ index })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (string)*: The name of the leader index for which the specified follower retention leases should be removed
-** *`follower_cluster` (Optional, string)*
-** *`follower_index` (Optional, string)*
-** *`follower_index_uuid` (Optional, string)*
-** *`leader_remote_cluster` (Optional, string)*
-
-[discrete]
-==== get_auto_follow_pattern
-Gets configured auto-follow patterns. Returns the specified auto-follow pattern collection.
-
-{ref}/ccr-get-auto-follow-pattern.html[Endpoint documentation]
-[source,ts]
-----
-client.ccr.getAutoFollowPattern({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`name` (Optional, string)*: Specifies the auto-follow pattern collection that you want to retrieve. If you do not specify a name, the API returns information for all collections.
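-
-As a concrete starting point, the following is a minimal sketch of the `follow` API documented above; the index names and cluster alias are placeholders for your own setup.
-
-[source,ts]
-----
-// Create a follower index that tracks a leader index on a remote cluster
-const response = await client.ccr.follow({
-  index: 'follower-index',
-  leader_index: 'leader-index',
-  remote_cluster: 'remote-cluster',
-  wait_for_active_shards: 1
-})
-console.log(response)
-----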
-
-[discrete]
-==== pause_auto_follow_pattern
-Pauses an auto-follow pattern
-
-{ref}/ccr-pause-auto-follow-pattern.html[Endpoint documentation]
-[source,ts]
-----
-client.ccr.pauseAutoFollowPattern({ name })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`name` (string)*: The name of the auto follow pattern that should pause discovering new indices to follow.
-
-[discrete]
-==== pause_follow
-Pauses a follower index. The follower index will not fetch any additional operations from the leader index.
-
-{ref}/ccr-post-pause-follow.html[Endpoint documentation]
-[source,ts]
-----
-client.ccr.pauseFollow({ index })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (string)*: The name of the follower index that should pause following its leader index.
-
-[discrete]
-==== put_auto_follow_pattern
-Creates a new named collection of auto-follow patterns against a specified remote cluster. Newly created indices on the remote cluster matching any of the specified patterns will be automatically configured as follower indices.
-
-{ref}/ccr-put-auto-follow-pattern.html[Endpoint documentation]
-[source,ts]
-----
-client.ccr.putAutoFollowPattern({ name, remote_cluster })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`name` (string)*: The name of the collection of auto-follow patterns.
-** *`remote_cluster` (string)*: The remote cluster containing the leader indices to match against.
-** *`follow_index_pattern` (Optional, string)*: The name of the follower index. The template {{leader_index}} can be used to derive the name of the follower index from the name of the leader index. When following a data stream, use {{leader_index}}; CCR does not support changes to the names of a follower data stream’s backing indices.
-** *`leader_index_patterns` (Optional, string[])*: An array of simple index patterns to match against indices in the remote cluster specified by the remote_cluster field.
-** *`leader_index_exclusion_patterns` (Optional, string[])*: An array of simple index patterns that can be used to exclude indices from being auto-followed. Indices in the remote cluster whose names match one or more leader_index_patterns and one or more leader_index_exclusion_patterns won’t be followed.
-** *`max_outstanding_read_requests` (Optional, number)*: The maximum number of outstanding read requests from the remote cluster.
-** *`settings` (Optional, Record<string, User-defined value>)*: Settings to override from the leader index. Note that certain settings cannot be overridden (e.g., index.number_of_shards).
-** *`max_outstanding_write_requests` (Optional, number)*: The maximum number of outstanding write requests on the follower.
-** *`read_poll_timeout` (Optional, string | -1 | 0)*: The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index. When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics. Then the follower will immediately attempt to read from the leader again.
-** *`max_read_request_operation_count` (Optional, number)*: The maximum number of operations to pull per read from the remote cluster.
-** *`max_read_request_size` (Optional, number | string)*: The maximum size in bytes per read of a batch of operations pulled from the remote cluster.
-** *`max_retry_delay` (Optional, string | -1 | 0)*: The maximum time to wait before retrying an operation that failed exceptionally.
An exponential backoff strategy is employed when retrying. -** *`max_write_buffer_count` (Optional, number)*: The maximum number of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the number of queued operations goes below the limit. -** *`max_write_buffer_size` (Optional, number | string)*: The maximum total bytes of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the total bytes of queued operations goes below the limit. -** *`max_write_request_operation_count` (Optional, number)*: The maximum number of operations per bulk write request executed on the follower. -** *`max_write_request_size` (Optional, number | string)*: The maximum total bytes of operations per bulk write request executed on the follower. - -[discrete] -==== resume_auto_follow_pattern -Resumes an auto-follow pattern that has been paused - -{ref}/ccr-resume-auto-follow-pattern.html[Endpoint documentation] -[source,ts] ----- -client.ccr.resumeAutoFollowPattern({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string)*: The name of the auto follow pattern to resume discovering new indices to follow. - -[discrete] -==== resume_follow -Resumes a follower index that has been paused - -{ref}/ccr-post-resume-follow.html[Endpoint documentation] -[source,ts] ----- -client.ccr.resumeFollow({ index }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string)*: The name of the follow index to resume following. -** *`max_outstanding_read_requests` (Optional, number)* -** *`max_outstanding_write_requests` (Optional, number)* -** *`max_read_request_operation_count` (Optional, number)* -** *`max_read_request_size` (Optional, string)* -** *`max_retry_delay` (Optional, string | -1 | 0)* -** *`max_write_buffer_count` (Optional, number)* -** *`max_write_buffer_size` (Optional, string)* -** *`max_write_request_operation_count` (Optional, number)* -** *`max_write_request_size` (Optional, string)* -** *`read_poll_timeout` (Optional, string | -1 | 0)* - -[discrete] -==== stats -Gets all stats related to cross-cluster replication. - -{ref}/ccr-get-stats.html[Endpoint documentation] -[source,ts] ----- -client.ccr.stats() ----- - - -[discrete] -==== unfollow -Stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication. - -{ref}/ccr-post-unfollow.html[Endpoint documentation] -[source,ts] ----- -client.ccr.unfollow({ index }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string)*: The name of the follower index that should be turned into a regular index. - -[discrete] -=== cluster -[discrete] -==== allocation_explain -Provides explanations for shard allocations in the cluster. - -{ref}/cluster-allocation-explain.html[Endpoint documentation] -[source,ts] ----- -client.cluster.allocationExplain({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`current_node` (Optional, string)*: Specifies the node ID or the name of the node to only explain a shard that is currently located on the specified node. -** *`index` (Optional, string)*: Specifies the name of the index that you would like an explanation for. -** *`primary` (Optional, boolean)*: If true, returns explanation for the primary shard for the given shard ID. -** *`shard` (Optional, number)*: Specifies the ID of the shard that you would like an explanation for. 
-** *`include_disk_info` (Optional, boolean)*: If true, returns information about disk usage and shard sizes. -** *`include_yes_decisions` (Optional, boolean)*: If true, returns YES decisions in explanation. - -[discrete] -==== delete_component_template -Delete component templates. -Deletes component templates. -Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. - -{ref}/indices-component-template.html[Endpoint documentation] -[source,ts] ----- -client.cluster.deleteComponentTemplate({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string | string[])*: List or wildcard expression of component template names used to limit the request. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. -If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== delete_voting_config_exclusions -Clears cluster voting config exclusions. - -{ref}/voting-config-exclusions.html[Endpoint documentation] -[source,ts] ----- -client.cluster.deleteVotingConfigExclusions({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`wait_for_removal` (Optional, boolean)*: Specifies whether to wait for all excluded nodes to be removed from the -cluster before clearing the voting configuration exclusions list. -Defaults to true, meaning that all excluded nodes must be removed from -the cluster before this API takes any action. If set to false then the -voting configuration exclusions list is cleared even if some excluded -nodes are still in the cluster. - -[discrete] -==== exists_component_template -Check component templates. -Returns information about whether a particular component template exists. - -{ref}/indices-component-template.html[Endpoint documentation] -[source,ts] ----- -client.cluster.existsComponentTemplate({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string | string[])*: List of component template names used to limit the request. -Wildcard (*) expressions are supported. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is -received before the timeout expires, the request fails and returns an -error. -** *`local` (Optional, boolean)*: If true, the request retrieves information from the local node only. -Defaults to false, which means information is retrieved from the master node. - -[discrete] -==== get_component_template -Get component templates. -Retrieves information about component templates. - -{ref}/indices-component-template.html[Endpoint documentation] -[source,ts] ----- -client.cluster.getComponentTemplate({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (Optional, string)*: List of component template names used to limit the request. -Wildcard (`*`) expressions are supported. -** *`flat_settings` (Optional, boolean)*: If `true`, returns settings in flat format. -** *`include_defaults` (Optional, boolean)*: Return all default configurations for the component template (default: false) -** *`local` (Optional, boolean)*: If `true`, the request retrieves information from the local node only. -If `false`, information is retrieved from the master node. 
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
-If no response is received before the timeout expires, the request fails and returns an error.
-
-[discrete]
-==== get_settings
-Returns cluster-wide settings.
-By default, it returns only settings that have been explicitly defined.
-
-{ref}/cluster-get-settings.html[Endpoint documentation]
-[source,ts]
-----
-client.cluster.getSettings({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`flat_settings` (Optional, boolean)*: If `true`, returns settings in flat format.
-** *`include_defaults` (Optional, boolean)*: If `true`, returns default cluster settings from the local node.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
-If no response is received before the timeout expires, the request fails and returns an error.
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response.
-If no response is received before the timeout expires, the request fails and returns an error.
-
-[discrete]
-==== health
-The cluster health API returns a simple status on the health of the cluster. You can also use the API to get the health status of only specified data streams and indices. For data streams, the API retrieves the health status of the stream’s backing indices.
-The cluster health status is: green, yellow or red. On the shard level, a red status indicates that the specific shard is not allocated in the cluster, yellow means that the primary shard is allocated but replicas are not, and green means that all shards are allocated. The index level status is controlled by the worst shard status. The cluster status is controlled by the worst index status.
-
-{ref}/cluster-health.html[Endpoint documentation]
-[source,ts]
-----
-client.cluster.health({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (Optional, string | string[])*: List of data streams, indices, and index aliases used to limit the request. Wildcard expressions (`*`) are supported. To target all data streams and indices in a cluster, omit this parameter or use _all or `*`.
-** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both.
-** *`level` (Optional, Enum("cluster" | "indices" | "shards"))*: Can be one of cluster, indices or shards. Controls the details level of the health information returned.
-** *`local` (Optional, boolean)*: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
-** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: A number controlling how many active shards to wait for, all to wait for all shards in the cluster to be active, or 0 to not wait.
-** *`wait_for_events` (Optional, Enum("immediate" | "urgent" | "high" | "normal" | "low" | "languid"))*: Can be one of immediate, urgent, high, normal, low, languid.
Wait until all currently queued events with the given priority are processed. -** *`wait_for_nodes` (Optional, string | number)*: The request waits until the specified number N of nodes is available. It also accepts >=N, <=N, >N and <N. Alternatively, it is possible to use ge(N), le(N), gt(N) and lt(N) notation. -** *`wait_for_no_initializing_shards` (Optional, boolean)*: A boolean value which controls whether to wait (until the timeout provided) for the cluster to have no shard initializations. Defaults to false, which means it will not wait for initializing shards. -** *`wait_for_no_relocating_shards` (Optional, boolean)*: A boolean value which controls whether to wait (until the timeout provided) for the cluster to have no shard relocations. Defaults to false, which means it will not wait for relocating shards. -** *`wait_for_status` (Optional, Enum("green" | "yellow" | "red"))*: One of green, yellow or red. Will wait (until the timeout provided) until the status of the cluster changes to the one provided or better, i.e. green > yellow > red. By default, will not wait for any status. - -[discrete] -==== info -Get cluster info. -Returns basic information about the cluster. - -{ref}/cluster-info.html[Endpoint documentation] -[source,ts] ----- -client.cluster.info({ target }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`target` (Enum("_all" | "http" | "ingest" | "thread_pool" | "script") | Enum("_all" | "http" | "ingest" | "thread_pool" | "script")[])*: Limits the information returned to the specific target. Supports a list, such as http,ingest. - -[discrete] -==== pending_tasks -Returns cluster-level changes (such as create index, update mapping, allocate or fail shard) that have not yet been executed. -NOTE: This API returns a list of any pending updates to the cluster state. -These are distinct from the tasks reported by the Task Management API which include periodic tasks and tasks initiated by the user, such as node stats, search queries, or create index requests. -However, if a user-initiated task such as a create index command causes a cluster state update, the activity of this task might be reported by both task api and pending cluster tasks API. - -{ref}/cluster-pending.html[Endpoint documentation] -[source,ts] ----- -client.cluster.pendingTasks({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`local` (Optional, boolean)*: If `true`, the request retrieves information from the local node only. -If `false`, information is retrieved from the master node. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== post_voting_config_exclusions -Updates the cluster voting config exclusions by node ids or node names. - -{ref}/voting-config-exclusions.html[Endpoint documentation] -[source,ts] ----- -client.cluster.postVotingConfigExclusions({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`node_names` (Optional, string | string[])*: A list of the names of the nodes to exclude from the -voting configuration. If specified, you may not also specify node_ids. -** *`node_ids` (Optional, string | string[])*: A list of the persistent ids of the nodes to exclude -from the voting configuration. If specified, you may not also specify node_names. 
-** *`timeout` (Optional, string | -1 | 0)*: When adding a voting configuration exclusion, the API waits for the -specified nodes to be excluded from the voting configuration before -returning. If the timeout expires before the appropriate condition -is satisfied, the request fails and returns an error. - -[discrete] -==== put_component_template -Create or update a component template. -Creates or updates a component template. -Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. - -An index template can be composed of multiple component templates. -To use a component template, specify it in an index template’s `composed_of` list. -Component templates are only applied to new data streams and indices as part of a matching index template. - -Settings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template. - -Component templates are only used during index creation. -For data streams, this includes data stream creation and the creation of a stream’s backing indices. -Changes to component templates do not affect existing indices, including a stream’s backing indices. - -You can use C-style `/* *\/` block comments in component templates. -You can include comments anywhere in the request body except before the opening curly bracket. - -{ref}/indices-component-template.html[Endpoint documentation] -[source,ts] ----- -client.cluster.putComponentTemplate({ name, template }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string)*: Name of the component template to create. -Elasticsearch includes the following built-in component templates: `logs-mappings`; `logs-settings`; `metrics-mappings`; `metrics-settings`;`synthetics-mapping`; `synthetics-settings`. -Elastic Agent uses these templates to configure backing indices for its data streams. -If you use Elastic Agent and want to overwrite one of these templates, set the `version` for your replacement template higher than the current version. -If you don’t use Elastic Agent and want to disable all built-in component and index templates, set `stack.templates.enabled` to `false` using the cluster update settings API. -** *`template` ({ aliases, mappings, settings, defaults, data_stream, lifecycle })*: The template to be applied which includes mappings, settings, or aliases configuration. -** *`version` (Optional, number)*: Version number used to manage component templates externally. -This number isn't automatically generated or incremented by Elasticsearch. -To unset a version, replace the template without specifying a version. -** *`_meta` (Optional, Record<string, User-defined value>)*: Optional user metadata about the component template. -May have any contents. This map is not automatically generated by Elasticsearch. -This information is stored in the cluster state, so keeping it short is preferable. -To unset `_meta`, replace the template without specifying this information. -** *`deprecated` (Optional, boolean)*: Marks this index template as deprecated. When creating or updating a non-deprecated index template -that uses deprecated components, Elasticsearch will emit a deprecation warning. -** *`create` (Optional, boolean)*: If `true`, this request cannot replace or update existing component templates. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. 
-
-If no response is received before the timeout expires, the request fails and returns an error.
-
-[discrete]
-==== put_settings
-Updates the cluster settings.
-
-{ref}/cluster-update-settings.html[Endpoint documentation]
-[source,ts]
-----
-client.cluster.putSettings({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`persistent` (Optional, Record<string, User-defined value>)*
-** *`transient` (Optional, Record<string, User-defined value>)*
-** *`flat_settings` (Optional, boolean)*: Return settings in flat format (default: false)
-** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node
-** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout
-
-[discrete]
-==== remote_info
-The cluster remote info API allows you to retrieve all of the configured
-remote cluster information. It returns connection and endpoint information
-keyed by the configured remote cluster alias.
-
-{ref}/cluster-remote-info.html[Endpoint documentation]
-[source,ts]
-----
-client.cluster.remoteInfo()
-----
-
-
-[discrete]
-==== reroute
-Allows you to manually change the allocation of individual shards in the cluster.
-
-{ref}/cluster-reroute.html[Endpoint documentation]
-[source,ts]
-----
-client.cluster.reroute({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`commands` (Optional, { cancel, move, allocate_replica, allocate_stale_primary, allocate_empty_primary }[])*: Defines the commands to perform.
-** *`dry_run` (Optional, boolean)*: If true, then the request simulates the operation only and returns the resulting state.
-** *`explain` (Optional, boolean)*: If true, then the response contains an explanation of why the commands can or cannot be executed.
-** *`metric` (Optional, string | string[])*: Limits the information returned to the specified metrics.
-** *`retry_failed` (Optional, boolean)*: If true, then retries allocation of shards that are blocked due to too many subsequent allocation failures.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
-
-[discrete]
-==== state
-Returns comprehensive information about the state of the cluster.
-
-{ref}/cluster-state.html[Endpoint documentation]
-[source,ts]
-----
-client.cluster.state({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`metric` (Optional, string | string[])*: Limit the information returned to the specified metrics
-** *`index` (Optional, string | string[])*: A list of index names; use `_all` or empty string to perform the operation on all indices
-** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)
-** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both.
-** *`flat_settings` (Optional, boolean)*: Return settings in flat format (default: false) -** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) -** *`local` (Optional, boolean)*: Return local information, do not retrieve the state from master node (default: false) -** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master -** *`wait_for_metadata_version` (Optional, number)*: Wait for the metadata version to be equal or greater than the specified metadata version -** *`wait_for_timeout` (Optional, string | -1 | 0)*: The maximum time to wait for wait_for_metadata_version before timing out - -[discrete] -==== stats -Returns cluster statistics. -It returns basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins). - -{ref}/cluster-stats.html[Endpoint documentation] -[source,ts] ----- -client.cluster.stats({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`node_id` (Optional, string | string[])*: List of node filters used to limit returned information. Defaults to all nodes in the cluster. -** *`include_remotes` (Optional, boolean)*: Include remote cluster data into the response -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for each node to respond. -If a node does not respond before its timeout expires, the response does not include its stats. -However, timed out nodes are included in the response’s `_nodes.failed` property. Defaults to no timeout. - -[discrete] -=== connector -[discrete] -==== check_in -Check in a connector. - -Update the `last_seen` field in the connector and set it to the current timestamp. - -{ref}/check-in-connector-api.html[Endpoint documentation] -[source,ts] ----- -client.connector.checkIn({ connector_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`connector_id` (string)*: The unique identifier of the connector to be checked in - -[discrete] -==== delete -Delete a connector. - -Removes a connector and associated sync jobs. -This is a destructive action that is not recoverable. -NOTE: This action doesn’t delete any API keys, ingest pipelines, or data indices associated with the connector. -These need to be removed manually. - -{ref}/delete-connector-api.html[Endpoint documentation] -[source,ts] ----- -client.connector.delete({ connector_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`connector_id` (string)*: The unique identifier of the connector to be deleted -** *`delete_sync_jobs` (Optional, boolean)*: A flag indicating if associated sync jobs should be also removed. Defaults to false. - -[discrete] -==== get -Get a connector. - -Get the details about a connector. - -{ref}/get-connector-api.html[Endpoint documentation] -[source,ts] ----- -client.connector.get({ connector_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`connector_id` (string)*: The unique identifier of the connector - -[discrete] -==== list -Get all connectors. - -Get information about all connectors. - -{ref}/list-connector-api.html[Endpoint documentation] -[source,ts] ----- -client.connector.list({ ... 
}) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`from` (Optional, number)*: Starting offset (default: 0) -** *`size` (Optional, number)*: Specifies a max number of results to get -** *`index_name` (Optional, string | string[])*: A list of connector index names to fetch connector documents for -** *`connector_name` (Optional, string | string[])*: A list of connector names to fetch connector documents for -** *`service_type` (Optional, string | string[])*: A list of connector service types to fetch connector documents for -** *`query` (Optional, string)*: A wildcard query string that filters connectors with matching name, description or index name - -[discrete] -==== post -Create a connector. - -Connectors are Elasticsearch integrations that bring content from third-party data sources, which can be deployed on Elastic Cloud or hosted on your own infrastructure. -Elastic managed connectors (Native connectors) are a managed service on Elastic Cloud. -Self-managed connectors (Connector clients) are self-managed on your infrastructure. - -{ref}/create-connector-api.html[Endpoint documentation] -[source,ts] ----- -client.connector.post({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`description` (Optional, string)* -** *`index_name` (Optional, string)* -** *`is_native` (Optional, boolean)* -** *`language` (Optional, string)* -** *`name` (Optional, string)* -** *`service_type` (Optional, string)* - -[discrete] -==== put -Create or update a connector. - -{ref}/create-connector-api.html[Endpoint documentation] -[source,ts] ----- -client.connector.put({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`connector_id` (Optional, string)*: The unique identifier of the connector to be created or updated. ID is auto-generated if not provided. -** *`description` (Optional, string)* -** *`index_name` (Optional, string)* -** *`is_native` (Optional, boolean)* -** *`language` (Optional, string)* -** *`name` (Optional, string)* -** *`service_type` (Optional, string)* - -[discrete] -==== sync_job_cancel -Cancel a connector sync job. - -Cancel a connector sync job, which sets the status to cancelling and updates `cancellation_requested_at` to the current time. -The connector service is then responsible for setting the status of connector sync jobs to cancelled. - -{ref}/cancel-connector-sync-job-api.html[Endpoint documentation] -[source,ts] ----- -client.connector.syncJobCancel({ connector_sync_job_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`connector_sync_job_id` (string)*: The unique identifier of the connector sync job - -[discrete] -==== sync_job_check_in -Checks in a connector sync job (refreshes 'last_seen'). - -{ref}/check-in-connector-sync-job-api.html[Endpoint documentation] -[source,ts] ----- -client.connector.syncJobCheckIn() ----- - - -[discrete] -==== sync_job_claim -Claims a connector sync job. -[source,ts] ----- -client.connector.syncJobClaim() ----- - - -[discrete] -==== sync_job_delete -Delete a connector sync job. - -Remove a connector sync job and its associated data. -This is a destructive action that is not recoverable. 
- -{ref}/delete-connector-sync-job-api.html[Endpoint documentation] -[source,ts] ----- -client.connector.syncJobDelete({ connector_sync_job_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`connector_sync_job_id` (string)*: The unique identifier of the connector sync job to be deleted - -[discrete] -==== sync_job_error -Sets an error for a connector sync job. - -{ref}/set-connector-sync-job-error-api.html[Endpoint documentation] -[source,ts] ----- -client.connector.syncJobError() ----- - - -[discrete] -==== sync_job_get -Get a connector sync job. - -{ref}/get-connector-sync-job-api.html[Endpoint documentation] -[source,ts] ----- -client.connector.syncJobGet({ connector_sync_job_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`connector_sync_job_id` (string)*: The unique identifier of the connector sync job - -[discrete] -==== sync_job_list -Get all connector sync jobs. - -Get information about all stored connector sync jobs listed by their creation date in ascending order. - -{ref}/list-connector-sync-jobs-api.html[Endpoint documentation] -[source,ts] ----- -client.connector.syncJobList({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`from` (Optional, number)*: Starting offset (default: 0) -** *`size` (Optional, number)*: Specifies a max number of results to get -** *`status` (Optional, Enum("canceling" | "canceled" | "completed" | "error" | "in_progress" | "pending" | "suspended"))*: A sync job status to fetch connector sync jobs for -** *`connector_id` (Optional, string)*: A connector id to fetch connector sync jobs for -** *`job_type` (Optional, Enum("full" | "incremental" | "access_control") | Enum("full" | "incremental" | "access_control")[])*: A list of job types to fetch the sync jobs for - -[discrete] -==== sync_job_post -Create a connector sync job. - -Create a connector sync job document in the internal index and initialize its counters and timestamps with default values. - -{ref}/create-connector-sync-job-api.html[Endpoint documentation] -[source,ts] ----- -client.connector.syncJobPost({ id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: The id of the associated connector -** *`job_type` (Optional, Enum("full" | "incremental" | "access_control"))* -** *`trigger_method` (Optional, Enum("on_demand" | "scheduled"))* - -[discrete] -==== sync_job_update_stats -Updates the stats fields in the connector sync job document. - -{ref}/set-connector-sync-job-stats-api.html[Endpoint documentation] -[source,ts] ----- -client.connector.syncJobUpdateStats() ----- - - -[discrete] -==== update_active_filtering -Activate the connector draft filter. - -Activates the valid draft filtering for a connector. - -{ref}/update-connector-filtering-api.html[Endpoint documentation] -[source,ts] ----- -client.connector.updateActiveFiltering({ connector_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`connector_id` (string)*: The unique identifier of the connector to be updated - -[discrete] -==== update_api_key_id -Update the connector API key ID. - -Update the `api_key_id` and `api_key_secret_id` fields of a connector. -You can specify the ID of the API key used for authorization and the ID of the connector secret where the API key is stored. -The connector secret ID is required only for Elastic managed (native) connectors. -Self-managed connectors (connector clients) do not use this field. 
- -{ref}/update-connector-api-key-id-api.html[Endpoint documentation] -[source,ts] ----- -client.connector.updateApiKeyId({ connector_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`connector_id` (string)*: The unique identifier of the connector to be updated -** *`api_key_id` (Optional, string)* -** *`api_key_secret_id` (Optional, string)* - -[discrete] -==== update_configuration -Update the connector configuration. - -Update the configuration field in the connector document. - -{ref}/update-connector-configuration-api.html[Endpoint documentation] -[source,ts] ----- -client.connector.updateConfiguration({ connector_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`connector_id` (string)*: The unique identifier of the connector to be updated -** *`configuration` (Optional, Record<string, { category, default_value, depends_on, display, label, options, order, placeholder, required, sensitive, tooltip, type, ui_restrictions, validations, value }>)* -** *`values` (Optional, Record<string, User-defined value>)* - -[discrete] -==== update_error -Update the connector error field. - -Set the error field for the connector. -If the error provided in the request body is non-null, the connector’s status is updated to error. -Otherwise, if the error is reset to null, the connector status is updated to connected. - -{ref}/update-connector-error-api.html[Endpoint documentation] -[source,ts] ----- -client.connector.updateError({ connector_id, error }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`connector_id` (string)*: The unique identifier of the connector to be updated -** *`error` (T | null)* - -[discrete] -==== update_features -Updates the connector features in the connector document. - -{ref}/update-connector-features-api.html[Endpoint documentation] -[source,ts] ----- -client.connector.updateFeatures() ----- - - -[discrete] -==== update_filtering -Update the connector filtering. - -Update the draft filtering configuration of a connector and marks the draft validation state as edited. -The filtering draft is activated once validated by the running Elastic connector service. -The filtering property is used to configure sync rules (both basic and advanced) for a connector. - -{ref}/update-connector-filtering-api.html[Endpoint documentation] -[source,ts] ----- -client.connector.updateFiltering({ connector_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`connector_id` (string)*: The unique identifier of the connector to be updated -** *`filtering` (Optional, { active, domain, draft }[])* -** *`rules` (Optional, { created_at, field, id, order, policy, rule, updated_at, value }[])* -** *`advanced_snippet` (Optional, { created_at, updated_at, value })* - -[discrete] -==== update_filtering_validation -Update the connector draft filtering validation. - -Update the draft filtering validation info for a connector. -[source,ts] ----- -client.connector.updateFilteringValidation({ connector_id, validation }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`connector_id` (string)*: The unique identifier of the connector to be updated -** *`validation` ({ errors, state })* - -[discrete] -==== update_index_name -Update the connector index name. - -Update the `index_name` field of a connector, specifying the index where the data ingested by the connector is stored. 
- -{ref}/update-connector-index-name-api.html[Endpoint documentation] -[source,ts] ----- -client.connector.updateIndexName({ connector_id, index_name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`connector_id` (string)*: The unique identifier of the connector to be updated -** *`index_name` (T | null)* - -[discrete] -==== update_name -Update the connector name and description. - -{ref}/update-connector-name-description-api.html[Endpoint documentation] -[source,ts] ----- -client.connector.updateName({ connector_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`connector_id` (string)*: The unique identifier of the connector to be updated -** *`name` (Optional, string)* -** *`description` (Optional, string)* - -[discrete] -==== update_native -Update the connector is_native flag. -[source,ts] ----- -client.connector.updateNative({ connector_id, is_native }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`connector_id` (string)*: The unique identifier of the connector to be updated -** *`is_native` (boolean)* - -[discrete] -==== update_pipeline -Update the connector pipeline. - -When you create a new connector, the configuration of an ingest pipeline is populated with default settings. - -{ref}/update-connector-pipeline-api.html[Endpoint documentation] -[source,ts] ----- -client.connector.updatePipeline({ connector_id, pipeline }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`connector_id` (string)*: The unique identifier of the connector to be updated -** *`pipeline` ({ extract_binary_content, name, reduce_whitespace, run_ml_inference })* - -[discrete] -==== update_scheduling -Update the connector scheduling. - -{ref}/update-connector-scheduling-api.html[Endpoint documentation] -[source,ts] ----- -client.connector.updateScheduling({ connector_id, scheduling }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`connector_id` (string)*: The unique identifier of the connector to be updated -** *`scheduling` ({ access_control, full, incremental })* - -[discrete] -==== update_service_type -Update the connector service type. - -{ref}/update-connector-service-type-api.html[Endpoint documentation] -[source,ts] ----- -client.connector.updateServiceType({ connector_id, service_type }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`connector_id` (string)*: The unique identifier of the connector to be updated -** *`service_type` (string)* - -[discrete] -==== update_status -Update the connector status. - -{ref}/update-connector-status-api.html[Endpoint documentation] -[source,ts] ----- -client.connector.updateStatus({ connector_id, status }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`connector_id` (string)*: The unique identifier of the connector to be updated -** *`status` (Enum("created" | "needs_configuration" | "configured" | "connected" | "error"))* - -[discrete] -=== dangling_indices -[discrete] -==== delete_dangling_index -Delete a dangling index. - -If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. -For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. 
-
-{ref}/modules-gateway-dangling-indices.html[Endpoint documentation]
-[source,ts]
-----
-client.danglingIndices.deleteDanglingIndex({ index_uuid, accept_data_loss })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index_uuid` (string)*: The UUID of the index to delete. Use the get dangling indices API to find the UUID.
-** *`accept_data_loss` (boolean)*: This parameter must be set to true to acknowledge that it will no longer be possible to recover data from the dangling index.
-** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master
-** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout
-
-[discrete]
-==== import_dangling_index
-Import a dangling index.
-
-If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling.
-For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline.
-
-{ref}/modules-gateway-dangling-indices.html[Endpoint documentation]
-[source,ts]
-----
-client.danglingIndices.importDanglingIndex({ index_uuid, accept_data_loss })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index_uuid` (string)*: The UUID of the index to import. Use the get dangling indices API to locate the UUID.
-** *`accept_data_loss` (boolean)*: This parameter must be set to true to import a dangling index.
-Because Elasticsearch cannot know where the dangling index data came from or determine which shard copies are fresh and which are stale, it cannot guarantee that the imported data represents the latest state of the index when it was last in the cluster.
-** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master
-** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout
-
-[discrete]
-==== list_dangling_indices
-Get the dangling indices.
-
-If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling.
-For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline.
-
-Use this API to list dangling indices, which you can then import or delete.
-
-{ref}/modules-gateway-dangling-indices.html[Endpoint documentation]
-[source,ts]
-----
-client.danglingIndices.listDanglingIndices()
-----
-
-
-[discrete]
-=== enrich
-[discrete]
-==== delete_policy
-Delete an enrich policy.
-Deletes an existing enrich policy and its enrich index.
-
-{ref}/delete-enrich-policy-api.html[Endpoint documentation]
-[source,ts]
-----
-client.enrich.deletePolicy({ name })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`name` (string)*: Enrich policy to delete.
-
-[discrete]
-==== execute_policy
-Run an enrich policy.
-Create the enrich index for an existing enrich policy.
-
-{ref}/execute-enrich-policy-api.html[Endpoint documentation]
-[source,ts]
-----
-client.enrich.executePolicy({ name })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`name` (string)*: Enrich policy to execute.
-** *`wait_for_completion` (Optional, boolean)*: If `true`, the request blocks other enrich policy execution requests until complete.
-
-[discrete]
-==== get_policy
-Get an enrich policy.
-Returns information about an enrich policy.
-
-{ref}/get-enrich-policy-api.html[Endpoint documentation]
-[source,ts]
-----
-client.enrich.getPolicy({ ...
}) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (Optional, string | string[])*: List of enrich policy names used to limit the request. -To return information for all enrich policies, omit this parameter. - -[discrete] -==== put_policy -Create an enrich policy. -Creates an enrich policy. - -{ref}/put-enrich-policy-api.html[Endpoint documentation] -[source,ts] ----- -client.enrich.putPolicy({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string)*: Name of the enrich policy to create or update. -** *`geo_match` (Optional, { enrich_fields, indices, match_field, query, name, elasticsearch_version })*: Matches enrich data to incoming documents based on a `geo_shape` query. -** *`match` (Optional, { enrich_fields, indices, match_field, query, name, elasticsearch_version })*: Matches enrich data to incoming documents based on a `term` query. -** *`range` (Optional, { enrich_fields, indices, match_field, query, name, elasticsearch_version })*: Matches a number, date, or IP address in incoming documents to a range in the enrich index based on a `term` query. - -[discrete] -==== stats -Get enrich stats. -Returns enrich coordinator statistics and information about enrich policies that are currently executing. - -{ref}/enrich-stats-api.html[Endpoint documentation] -[source,ts] ----- -client.enrich.stats() ----- - - -[discrete] -=== eql -[discrete] -==== delete -Delete an async EQL search. -Delete an async EQL search or a stored synchronous EQL search. -The API also deletes results for the search. - -{ref}/eql-search-api.html[Endpoint documentation] -[source,ts] ----- -client.eql.delete({ id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: Identifier for the search to delete. -A search ID is provided in the EQL search API's response for an async search. -A search ID is also provided if the request’s `keep_on_completion` parameter is `true`. - -[discrete] -==== get -Get async EQL search results. -Get the current status and available results for an async EQL search or a stored synchronous EQL search. - -{ref}/get-async-eql-search-api.html[Endpoint documentation] -[source,ts] ----- -client.eql.get({ id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: Identifier for the search. -** *`keep_alive` (Optional, string | -1 | 0)*: Period for which the search and its results are stored on the cluster. -Defaults to the keep_alive value set by the search’s EQL search API request. -** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*: Timeout duration to wait for the request to finish. -Defaults to no timeout, meaning the request waits for complete search results. - -[discrete] -==== get_status -Get the async EQL status. -Get the current status for an async EQL search or a stored synchronous EQL search without returning results. - -{ref}/get-async-eql-status-api.html[Endpoint documentation] -[source,ts] ----- -client.eql.getStatus({ id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: Identifier for the search. - -[discrete] -==== search -Get EQL search results. -Returns search results for an Event Query Language (EQL) query. -EQL assumes each document in a data stream or index corresponds to an event. 
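-
-To make the query syntax concrete, here is a minimal sketch of an EQL search; the index pattern and query string below are placeholders.
-
-[source,ts]
-----
-// Find process events matching a specific process name
-const response = await client.eql.search({
-  index: 'my-logs-*',
-  query: 'process where process.name == "regsvr32.exe"',
-  size: 10
-})
-console.log(response.hits)
-----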
- -{ref}/eql-search-api.html[Endpoint documentation] -[source,ts] ----- -client.eql.search({ index, query }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string | string[])*: The name of the index to scope the operation -** *`query` (string)*: EQL query you wish to run. -** *`case_sensitive` (Optional, boolean)* -** *`event_category_field` (Optional, string)*: Field containing the event classification, such as process, file, or network. -** *`tiebreaker_field` (Optional, string)*: Field used to sort hits with the same timestamp in ascending order -** *`timestamp_field` (Optional, string)*: Field containing event timestamp. Default "@timestamp" -** *`fetch_size` (Optional, number)*: Maximum number of events to search at a time for sequence queries. -** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type }[])*: Query, written in Query DSL, used to filter the events on which the EQL query runs. -** *`keep_alive` (Optional, string | -1 | 0)* -** *`keep_on_completion` (Optional, boolean)* -** *`wait_for_completion_timeout` (Optional, string | -1 | 0)* -** *`size` (Optional, number)*: For basic queries, the maximum number of matching events to return. Defaults to 10 -** *`fields` (Optional, { field, format, include_unmapped } | { field, format, include_unmapped }[])*: Array of wildcard (*) patterns. The response returns values for field names matching these patterns in the fields property of each hit. -** *`result_position` (Optional, Enum("tail" | "head"))* -** *`runtime_mappings` (Optional, Record<string, { fields, fetch_fields, format, input_field, target_field, target_index, script, type }>)* -** *`max_samples_per_key` (Optional, number)*: By default, the response of a sample query contains up to `10` samples, with one sample per unique set of join keys. Use the `size` -parameter to get a smaller or larger set of samples. To retrieve more than one sample per set of join keys, use the -`max_samples_per_key` parameter. Pipes are not supported for sample queries. 
-** *`allow_no_indices` (Optional, boolean)* -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])* -** *`ignore_unavailable` (Optional, boolean)*: If true, missing or closed indices are not included in the response. - -[discrete] -=== esql -[discrete] -==== async_query -Executes an ESQL request asynchronously - -{ref}/esql-async-query-api.html[Endpoint documentation] -[source,ts] ----- -client.esql.asyncQuery() ----- - - -[discrete] -==== async_query_get -Retrieves the results of a previously submitted async query request given its ID. - -{ref}/esql-async-query-get-api.html[Endpoint documentation] -[source,ts] ----- -client.esql.asyncQueryGet() ----- - - -[discrete] -==== query -Run an ES|QL query. -Get search results for an ES|QL (Elasticsearch query language) query. - -{ref}/esql-rest.html[Endpoint documentation] -[source,ts] ----- -client.esql.query({ query }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`query` (string)*: The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results. -** *`columnar` (Optional, boolean)*: By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results. -** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on. -** *`locale` (Optional, string)* -** *`params` (Optional, number | number | string | boolean | null | User-defined value[])*: To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters. -** *`profile` (Optional, boolean)*: If provided and `true` the response will include an extra `profile` object -with information on how the query was executed. This information is for human debugging -and its format can change at any time but it can give some insight into the performance -of each part of the query. -** *`tables` (Optional, Record<string, Record<string, { integer, keyword, long, double }>>)*: Tables to use with the LOOKUP operation. The top level key is the table -name and the next level key is the column name. -** *`format` (Optional, Enum("csv" | "json" | "tsv" | "txt" | "yaml" | "cbor" | "smile" | "arrow"))*: A short version of the Accept header, e.g. json, yaml. -** *`delimiter` (Optional, string)*: The character to use between values within a CSV row. Only valid for the CSV format. 
-** *`drop_null_columns` (Optional, boolean)*: Should columns that are entirely `null` be removed from the `columns` and `values` portion of the results? -Defaults to `false`. If `true` then the response will include an extra section under the name `all_columns` which has the name of all columns. - -[discrete] -=== features -[discrete] -==== get_features -Gets a list of features which can be included in snapshots using the feature_states field when creating a snapshot - -{ref}/get-features-api.html[Endpoint documentation] -[source,ts] ----- -client.features.getFeatures() ----- - - -[discrete] -==== reset_features -Resets the internal state of features, usually by deleting system indices - -{ref}/modules-snapshots.html[Endpoint documentation] -[source,ts] ----- -client.features.resetFeatures() ----- - - -[discrete] -=== fleet -[discrete] -==== global_checkpoints -Get global checkpoints. -Get the current global checkpoints for an index. -This API is designed for internal use by the Fleet server project. - -{ref}/get-global-checkpoints.html[Endpoint documentation] -[source,ts] ----- -client.fleet.globalCheckpoints({ index }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string | string)*: A single index or index alias that resolves to a single index. -** *`wait_for_advance` (Optional, boolean)*: A boolean value which controls whether to wait (until the timeout) for the global checkpoints -to advance past the provided `checkpoints`. -** *`wait_for_index` (Optional, boolean)*: A boolean value which controls whether to wait (until the timeout) for the target index to exist -and all primary shards be active. Can only be true when `wait_for_advance` is true. -** *`checkpoints` (Optional, number[])*: A comma separated list of previous global checkpoints. When used in combination with `wait_for_advance`, -the API will only return once the global checkpoints advances past the checkpoints. Providing an empty list -will cause Elasticsearch to immediately return the current global checkpoints. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a global checkpoints to advance past `checkpoints`. - -[discrete] -==== msearch -Run multiple Fleet searches. -Run several Fleet searches with a single API request. -The API follows the same structure as the multi search API. -However, similar to the Fleet search API, it supports the `wait_for_checkpoints` parameter. -[source,ts] ----- -client.fleet.msearch({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string | string)*: A single target to search. If the target is an index alias, it must resolve to a single index. -** *`searches` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } | { aggregations, collapse, query, explain, ext, stored_fields, docvalue_fields, knn, from, highlight, indices_boost, min_score, post_filter, profile, rescore, script_fields, search_after, size, sort, _source, fields, terminate_after, stats, timeout, track_scores, track_total_hits, version, runtime_mappings, seq_no_primary_term, pit, suggest }[])* -** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. 
For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. -** *`ccs_minimize_roundtrips` (Optional, boolean)*: If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -** *`ignore_throttled` (Optional, boolean)*: If true, concrete, expanded or aliased indices are ignored when frozen. -** *`ignore_unavailable` (Optional, boolean)*: If true, missing or closed indices are not included in the response. -** *`max_concurrent_searches` (Optional, number)*: Maximum number of concurrent searches the multi search API can execute. -** *`max_concurrent_shard_requests` (Optional, number)*: Maximum number of concurrent shard requests that each sub-search request executes per node. -** *`pre_filter_shard_size` (Optional, number)*: Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint. -** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: Indicates whether global term and document frequencies should be used when scoring returned documents. -** *`rest_total_hits_as_int` (Optional, boolean)*: If true, hits.total are returned as an integer in the response. Defaults to false, which returns an object. -** *`typed_keys` (Optional, boolean)*: Specifies whether aggregation and suggester names should be prefixed by their respective types in the response. -** *`wait_for_checkpoints` (Optional, number[])*: A comma separated list of checkpoints. When configured, the search API will only be executed on a shard -after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause -Elasticsearch to immediately execute the search. -** *`allow_partial_search_results` (Optional, boolean)*: If true, returns partial results if there are shard request timeouts or [shard failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures). If false, returns -an error with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results` -which is true by default. - -[discrete] -==== search -Run a Fleet search. -The purpose of the Fleet search API is to provide an API where the search will be run only -after the provided checkpoint has been processed and is visible for searches inside of Elasticsearch. -[source,ts] ----- -client.fleet.search({ index }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string | string)*: A single target to search. If the target is an index alias, it must resolve to a single index. 
-** *`aggregations` (Optional, Record<string, { aggregations, meta, adjacency_matrix, auto_date_histogram, avg, avg_bucket, boxplot, bucket_script, bucket_selector, bucket_sort, bucket_count_ks_test, bucket_correlation, cardinality, categorize_text, children, composite, cumulative_cardinality, cumulative_sum, date_histogram, date_range, derivative, diversified_sampler, extended_stats, extended_stats_bucket, frequent_item_sets, filter, filters, geo_bounds, geo_centroid, geo_distance, geohash_grid, geo_line, geotile_grid, geohex_grid, global, histogram, ip_range, ip_prefix, inference, line, matrix_stats, max, max_bucket, median_absolute_deviation, min, min_bucket, missing, moving_avg, moving_percentiles, moving_fn, multi_terms, nested, normalize, parent, percentile_ranks, percentiles, percentiles_bucket, range, rare_terms, rate, reverse_nested, random_sampler, sampler, scripted_metric, serial_diff, significant_terms, significant_text, stats, stats_bucket, string_stats, sum, sum_bucket, terms, time_series, top_hits, t_test, top_metrics, value_count, weighted_avg, variable_width_histogram }>)* -** *`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })* -** *`explain` (Optional, boolean)*: If true, returns detailed information about score computation as part of a hit. -** *`ext` (Optional, Record<string, User-defined value>)*: Configuration of search extensions defined by Elasticsearch plugins. -** *`from` (Optional, number)*: Starting document offset. By default, you cannot page through more than 10,000 -hits using the from and size parameters. To page through more hits, use the -search_after parameter. -** *`highlight` (Optional, { encoder, fields })* -** *`track_total_hits` (Optional, boolean | number)*: Number of hits matching the query to count accurately. If true, the exact -number of hits is returned at the cost of some performance. If false, the -response does not include the total number of hits matching the query. -Defaults to 10,000 hits. -** *`indices_boost` (Optional, Record<string, number>[])*: Boosts the _score of documents from specified indices. -** *`docvalue_fields` (Optional, { field, format, include_unmapped }[])*: Array of wildcard (*) patterns. The request returns doc values for field -names matching these patterns in the hits.fields property of the response. -** *`min_score` (Optional, number)*: Minimum _score for matching documents. Documents with a lower _score are -not included in the search results. 
-** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })* -** *`profile` (Optional, boolean)* -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. -** *`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])* -** *`script_fields` (Optional, Record<string, { script, ignore_failure }>)*: Retrieve a script evaluation (based on different fields) for each hit. -** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])* -** *`size` (Optional, number)*: The number of hits to return. By default, you cannot page through more -than 10,000 hits using the from and size parameters. To page through more -hits, use the search_after parameter. -** *`slice` (Optional, { field, id, max })* -** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])* -** *`_source` (Optional, boolean | { excludes, includes })*: Indicates which source fields are returned for matching documents. These -fields are returned in the hits._source property of the search response. -** *`fields` (Optional, { field, format, include_unmapped }[])*: Array of wildcard (*) patterns. The request returns values for field names -matching these patterns in the hits.fields property of the response. -** *`suggest` (Optional, { text })* -** *`terminate_after` (Optional, number)*: Maximum number of documents to collect for each shard. If a query reaches this -limit, Elasticsearch terminates the query early. Elasticsearch collects documents -before sorting. Defaults to 0, which does not terminate query execution early. -** *`timeout` (Optional, string)*: Specifies the period of time to wait for a response from each shard. If no response -is received before the timeout expires, the request fails and returns an error. -Defaults to no timeout. -** *`track_scores` (Optional, boolean)*: If true, calculate and return document scores, even if the scores are not used for sorting. -** *`version` (Optional, boolean)*: If true, returns document version as part of a hit. 
-** *`seq_no_primary_term` (Optional, boolean)*: If true, returns sequence number and primary term of the last modification -of each hit. See Optimistic concurrency control. -** *`stored_fields` (Optional, string | string[])*: List of stored fields to return as part of a hit. If no fields are specified, -no stored fields are included in the response. If this field is specified, the _source -parameter defaults to false. You can pass _source: true to return both source fields -and stored fields in the search response. -** *`pit` (Optional, { id, keep_alive })*: Limits the search to a point in time (PIT). If you provide a PIT, you -cannot specify an <index> in the request path. -** *`runtime_mappings` (Optional, Record<string, { fields, fetch_fields, format, input_field, target_field, target_index, script, type }>)*: Defines one or more runtime fields in the search request. These fields take -precedence over mapped fields with the same name. -** *`stats` (Optional, string[])*: Stats groups to associate with the search. Each group maintains a statistics -aggregation for its associated searches. You can retrieve these stats using -the indices stats API. -** *`allow_no_indices` (Optional, boolean)* -** *`analyzer` (Optional, string)* -** *`analyze_wildcard` (Optional, boolean)* -** *`batched_reduce_size` (Optional, number)* -** *`ccs_minimize_roundtrips` (Optional, boolean)* -** *`default_operator` (Optional, Enum("and" | "or"))* -** *`df` (Optional, string)* -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])* -** *`ignore_throttled` (Optional, boolean)* -** *`ignore_unavailable` (Optional, boolean)* -** *`lenient` (Optional, boolean)* -** *`max_concurrent_shard_requests` (Optional, number)* -** *`preference` (Optional, string)* -** *`pre_filter_shard_size` (Optional, number)* -** *`request_cache` (Optional, boolean)* -** *`routing` (Optional, string)* -** *`scroll` (Optional, string | -1 | 0)* -** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))* -** *`suggest_field` (Optional, string)*: Specifies which field to use for suggestions. -** *`suggest_mode` (Optional, Enum("missing" | "popular" | "always"))* -** *`suggest_size` (Optional, number)* -** *`suggest_text` (Optional, string)*: The source text for which the suggestions should be returned. -** *`typed_keys` (Optional, boolean)* -** *`rest_total_hits_as_int` (Optional, boolean)* -** *`_source_excludes` (Optional, string | string[])* -** *`_source_includes` (Optional, string | string[])* -** *`q` (Optional, string)* -** *`wait_for_checkpoints` (Optional, number[])*: A comma separated list of checkpoints. When configured, the search API will only be executed on a shard -after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause -Elasticsearch to immediately execute the search. -** *`allow_partial_search_results` (Optional, boolean)*: If true, returns partial results if there are shard request timeouts or [shard failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures). If false, returns -an error with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results` -which is true by default. - -[discrete] -=== graph -[discrete] -==== explore -Explore graph analytics. -Extract and summarize information about the documents and terms in an Elasticsearch data stream or index. 
-The easiest way to understand the behavior of this API is to use the Graph UI to explore connections.
-An initial request to the `_explore` API contains a seed query that identifies the documents of interest and specifies the fields that define the vertices and connections you want to include in the graph.
-Subsequent requests enable you to spider out from one or more vertices of interest.
-You can exclude vertices that have already been returned.
-
-{ref}/graph-explore-api.html[Endpoint documentation]
-[source,ts]
-----
-client.graph.explore({ index })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (string | string[])*: Name of the index.
-** *`connections` (Optional, { connections, query, vertices })*: Specifies one or more fields from which you want to extract terms that are associated with the specified vertices.
-** *`controls` (Optional, { sample_diversity, sample_size, timeout, use_significance })*: Direct the Graph API how to build the graph.
-** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: A seed query that identifies the documents of interest. Can be any valid Elasticsearch query.
-** *`vertices` (Optional, { exclude, field, include, min_doc_count, shard_min_doc_count, size }[])*: Specifies one or more fields that contain the terms you want to include in the graph as vertices.
-** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard.
-** *`timeout` (Optional, string | -1 | 0)*: Specifies the period of time to wait for a response from each shard.
-If no response is received before the timeout expires, the request fails and returns an error.
-Defaults to no timeout.
-
-[discrete]
-=== ilm
-[discrete]
-==== delete_lifecycle
-Deletes the specified lifecycle policy definition. You cannot delete policies that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error.
-
-{ref}/ilm-delete-lifecycle.html[Endpoint documentation]
-[source,ts]
-----
-client.ilm.deleteLifecycle({ policy })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`policy` (string)*: Identifier for the policy.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
-
-[discrete]
-==== explain_lifecycle
-Retrieves information about the index’s current lifecycle state, such as the currently executing phase, action, and step. Shows when the index entered each one, the definition of the running phase, and information about any failures.
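-
-For illustration, a request might look like the sketch below (the index name is hypothetical); the endpoint reference and full argument list follow.
-
-[source,ts]
-----
-// Sketch only: inspect the lifecycle state of a hypothetical index
-const response = await client.ilm.explainLifecycle({
-  index: 'my-index-000001'
-})
-console.log(response)
-----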
-
-{ref}/ilm-explain-lifecycle.html[Endpoint documentation]
-[source,ts]
-----
-client.ilm.explainLifecycle({ index })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (string)*: List of data streams, indices, and aliases to target. Supports wildcards (`*`).
-To target all data streams and indices, use `*` or `_all`.
-** *`only_errors` (Optional, boolean)*: Filters the returned indices to only indices that are managed by ILM and are in an error state, either due to encountering an error while executing the policy, or attempting to use a policy that does not exist.
-** *`only_managed` (Optional, boolean)*: Filters the returned indices to only indices that are managed by ILM.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
-
-[discrete]
-==== get_lifecycle
-Retrieves a lifecycle policy.
-
-{ref}/ilm-get-lifecycle.html[Endpoint documentation]
-[source,ts]
-----
-client.ilm.getLifecycle({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`policy` (Optional, string)*: Identifier for the policy.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
-
-[discrete]
-==== get_status
-Retrieves the current index lifecycle management (ILM) status.
-
-{ref}/ilm-get-status.html[Endpoint documentation]
-[source,ts]
-----
-client.ilm.getStatus()
-----
-
-
-[discrete]
-==== migrate_to_data_tiers
-Switches the indices, ILM policies, and legacy, composable and component templates from using custom node attributes and
-attribute-based allocation filters to using data tiers, and optionally deletes one legacy index template.+
-Using node roles enables ILM to automatically move the indices between data tiers.
-
-{ref}/ilm-migrate-to-data-tiers.html[Endpoint documentation]
-[source,ts]
-----
-client.ilm.migrateToDataTiers({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`legacy_template_to_delete` (Optional, string)*
-** *`node_attribute` (Optional, string)*
-** *`dry_run` (Optional, boolean)*: If true, simulates the migration from node attributes based allocation filters to data tiers, but does not perform the migration.
-This provides a way to retrieve the indices and ILM policies that need to be migrated.
-
-[discrete]
-==== move_to_step
-Manually moves an index into the specified step and executes that step.
-
-{ref}/ilm-move-to-step.html[Endpoint documentation]
-[source,ts]
-----
-client.ilm.moveToStep({ index, current_step, next_step })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (string)*: The name of the index whose lifecycle step is to change
-** *`current_step` ({ action, name, phase })*
-** *`next_step` ({ action, name, phase })*
-
-[discrete]
-==== put_lifecycle
-Creates a lifecycle policy. If the specified policy exists, the policy is replaced and the policy version is incremented.
-
-{ref}/ilm-put-lifecycle.html[Endpoint documentation]
-[source,ts]
-----
-client.ilm.putLifecycle({ policy })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`policy` (string)*: Identifier for the policy.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
-
-[discrete]
-==== remove_policy
-Removes the assigned lifecycle policy and stops managing the specified index
-
-{ref}/ilm-remove-policy.html[Endpoint documentation]
-[source,ts]
-----
-client.ilm.removePolicy({ index })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (string)*: The name of the index from which to remove the policy
-
-[discrete]
-==== retry
-Retries executing the policy for an index that is in the ERROR step.
-
-{ref}/ilm-retry-policy.html[Endpoint documentation]
-[source,ts]
-----
-client.ilm.retry({ index })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (string)*: The name of the indices (comma-separated) whose failed lifecycle step is to be retried
-
-[discrete]
-==== start
-Start the index lifecycle management (ILM) plugin.
-
-{ref}/ilm-start.html[Endpoint documentation]
-[source,ts]
-----
-client.ilm.start({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`master_timeout` (Optional, string | -1 | 0)*
-** *`timeout` (Optional, string | -1 | 0)*
-
-[discrete]
-==== stop
-Halts all lifecycle management operations and stops the index lifecycle management (ILM) plugin
-
-{ref}/ilm-stop.html[Endpoint documentation]
-[source,ts]
-----
-client.ilm.stop({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`master_timeout` (Optional, string | -1 | 0)*
-** *`timeout` (Optional, string | -1 | 0)*
-
-[discrete]
-=== indices
-[discrete]
-==== add_block
-Add an index block.
-Limits the operations allowed on an index by blocking specific operation types.
-
-{ref}/index-modules-blocks.html[Endpoint documentation]
-[source,ts]
-----
-client.indices.addBlock({ index, block })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (string)*: A comma separated list of indices to add a block to
-** *`block` (Enum("metadata" | "read" | "read_only" | "write"))*: The block to add (one of read, write, read_only or metadata)
-** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)
-** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both.
-** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed)
-** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master
-** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout
-
-[discrete]
-==== analyze
-Get tokens from text analysis.
-The analyze API performs [analysis](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis.html) on a text string and returns the resulting tokens.
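-
-For illustration, a request might look like the sketch below (the analyzer and sample text are arbitrary); the endpoint reference and full argument list follow.
-
-[source,ts]
-----
-// Sketch only: tokenize a sample string with a built-in analyzer
-const response = await client.indices.analyze({
-  analyzer: 'standard',
-  text: 'The quick brown fox'
-})
-console.log(response.tokens)
-----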
- -{ref}/indices-analyze.html[Endpoint documentation] -[source,ts] ----- -client.indices.analyze({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string)*: Index used to derive the analyzer. -If specified, the `analyzer` or field parameter overrides this value. -If no index is specified or the index does not have a default analyzer, the analyze API uses the standard analyzer. -** *`analyzer` (Optional, string)*: The name of the analyzer that should be applied to the provided `text`. -This could be a built-in analyzer, or an analyzer that’s been configured in the index. -** *`attributes` (Optional, string[])*: Array of token attributes used to filter the output of the `explain` parameter. -** *`char_filter` (Optional, string | { type, escaped_tags } | { type, mappings, mappings_path } | { type, flags, pattern, replacement } | { type, mode, name } | { type, normalize_kana, normalize_kanji }[])*: Array of character filters used to preprocess characters before the tokenizer. -** *`explain` (Optional, boolean)*: If `true`, the response includes token attributes and additional details. -** *`field` (Optional, string)*: Field used to derive the analyzer. -To use this parameter, you must specify an index. -If specified, the `analyzer` parameter overrides this value. -** *`filter` (Optional, string | { type, preserve_original } | { type, common_words, common_words_path, ignore_case, query_mode } | { type, filter, script } | { type, delimiter, encoding } | { type, max_gram, min_gram, side, preserve_original } | { type, articles, articles_path, articles_case } | { type, max_output_size, separator } | { type, dedup, dictionary, locale, longest_only } | { type } | { type, mode, types } | { type, keep_words, keep_words_case, keep_words_path } | { type, ignore_case, keywords, keywords_path, keywords_pattern } | { type } | { type, max, min } | { type, consume_all_tokens, max_token_count } | { type, language } | { type, filters, preserve_original } | { type, max_gram, min_gram, preserve_original } | { type, stoptags } | { type, patterns, preserve_original } | { type, all, flags, pattern, replacement } | { type } | { type, script } | { type } | { type } | { type, filler_token, max_shingle_size, min_shingle_size, output_unigrams, output_unigrams_if_no_shingles, token_separator } | { type, language } | { type, rules, rules_path } | { type, language } | { type, ignore_case, remove_trailing, stopwords, stopwords_path } | { type, expand, format, lenient, synonyms, synonyms_path, synonyms_set, tokenizer, updateable } | { type, expand, format, lenient, synonyms, synonyms_path, synonyms_set, tokenizer, updateable } | { type } | { type, length } | { type, only_on_same_position } | { type } | { type, adjust_offsets, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, ignore_keywords, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, minimum_length } | { type, use_romaji } | { type, stoptags } | { type, alternate, case_first, case_level, country, decomposition, hiragana_quaternary_mode, language, numeric, rules, strength, variable_top, variant } | { type, 
unicode_set_filter } | { type, name } | { type, dir, id } | { type, encoder, languageset, max_code_len, name_type, replace, rule_type } | { type }[])*: Array of token filters used to apply after the tokenizer. -** *`normalizer` (Optional, string)*: Normalizer to use to convert text into a single token. -** *`text` (Optional, string | string[])*: Text to analyze. -If an array of strings is provided, it is analyzed as a multi-value field. -** *`tokenizer` (Optional, string | { type, tokenize_on_chars, max_token_length } | { type, max_token_length } | { type, custom_token_chars, max_gram, min_gram, token_chars } | { type, buffer_size } | { type } | { type } | { type, custom_token_chars, max_gram, min_gram, token_chars } | { type, buffer_size, delimiter, replacement, reverse, skip } | { type, flags, group, pattern } | { type, pattern } | { type, pattern } | { type, max_token_length } | { type } | { type, max_token_length } | { type, max_token_length } | { type, rule_files } | { type, discard_punctuation, mode, nbest_cost, nbest_examples, user_dictionary, user_dictionary_rules, discard_compound_token } | { type, decompound_mode, discard_punctuation, user_dictionary, user_dictionary_rules })*: Tokenizer to use to convert text into tokens. - -[discrete] -==== clear_cache -Clears the caches of one or more indices. -For data streams, the API clears the caches of the stream’s backing indices. - -{ref}/indices-clearcache.html[Endpoint documentation] -[source,ts] ----- -client.indices.clearCache({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request. -Supports wildcards (`*`). -To target all data streams and indices, omit this parameter or use `*` or `_all`. -** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. -This behavior applies even if the request targets other open indices. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. -If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. -** *`fielddata` (Optional, boolean)*: If `true`, clears the fields cache. -Use the `fields` parameter to clear the cache of specific fields only. -** *`fields` (Optional, string | string[])*: List of field names used to limit the `fielddata` parameter. -** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. -** *`query` (Optional, boolean)*: If `true`, clears the query cache. -** *`request` (Optional, boolean)*: If `true`, clears the request cache. - -[discrete] -==== clone -Clones an existing index. - -{ref}/indices-clone-index.html[Endpoint documentation] -[source,ts] ----- -client.indices.clone({ index, target }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string)*: Name of the source index to clone. -** *`target` (string)*: Name of the target index to create. -** *`aliases` (Optional, Record<string, { filter, index_routing, is_hidden, is_write_index, routing, search_routing }>)*: Aliases for the resulting index. 
-** *`settings` (Optional, Record<string, User-defined value>)*: Configuration options for the target index. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. -If no response is received before the timeout expires, the request fails and returns an error. -** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. -Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). - -[discrete] -==== close -Closes an index. - -{ref}/indices-close.html[Endpoint documentation] -[source,ts] ----- -client.indices.close({ index }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string | string[])*: List or wildcard expression of index names used to limit the request. -** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. -This behavior applies even if the request targets other open indices. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. -If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. -** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. -If no response is received before the timeout expires, the request fails and returns an error. -** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. -Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). - -[discrete] -==== create -Create an index. -Creates a new index. - -{ref}/indices-create-index.html[Endpoint documentation] -[source,ts] ----- -client.indices.create({ index }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string)*: Name of the index you wish to create. -** *`aliases` (Optional, Record<string, { filter, index_routing, is_hidden, is_write_index, routing, search_routing }>)*: Aliases for the index. -** *`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })*: Mapping for fields in the index. 
If specified, this mapping can include: -- Field names -- Field data types -- Mapping parameters -** *`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })*: Configuration options for the index. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. -If no response is received before the timeout expires, the request fails and returns an error. -** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. -Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). - -[discrete] -==== create_data_stream -Create a data stream. -Creates a data stream. -You must have a matching index template with data stream enabled. - -{ref}/data-streams.html[Endpoint documentation] -[source,ts] ----- -client.indices.createDataStream({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string)*: Name of the data stream, which must meet the following criteria: -Lowercase only; -Cannot include `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, `#`, `:`, or a space character; -Cannot start with `-`, `_`, `+`, or `.ds-`; -Cannot be `.` or `..`; -Cannot be longer than 255 bytes. Multi-byte characters count towards this limit faster. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== data_streams_stats -Get data stream stats. -Retrieves statistics for one or more data streams. - -{ref}/data-streams.html[Endpoint documentation] -[source,ts] ----- -client.indices.dataStreamsStats({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (Optional, string)*: List of data streams used to limit the request. -Wildcard expressions (`*`) are supported. -To target all data streams in a cluster, omit this parameter or use `*`. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of data stream that wildcard patterns can match. -Supports a list of values, such as `open,hidden`. - -[discrete] -==== delete -Delete indices. -Deletes one or more indices. 
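-
-For illustration, a request might look like the sketch below (the index name is hypothetical); the endpoint reference and full argument list follow.
-
-[source,ts]
-----
-// Sketch only: delete a single, hypothetical index
-const response = await client.indices.delete({
-  index: 'my-index-000001'
-})
-console.log(response.acknowledged)
-----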
- -{ref}/indices-delete-index.html[Endpoint documentation] -[source,ts] ----- -client.indices.delete({ index }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string | string[])*: List of indices to delete. -You cannot specify index aliases. -By default, this parameter does not support wildcards (`*`) or `_all`. -To use wildcards or `_all`, set the `action.destructive_requires_name` cluster setting to `false`. -** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. -This behavior applies even if the request targets other open indices. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. -If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. -** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. -If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== delete_alias -Delete an alias. -Removes a data stream or index from an alias. - -{ref}/indices-aliases.html[Endpoint documentation] -[source,ts] ----- -client.indices.deleteAlias({ index, name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string | string[])*: List of data streams or indices used to limit the request. -Supports wildcards (`*`). -** *`name` (string | string[])*: List of aliases to remove. -Supports wildcards (`*`). To remove all aliases, use `*` or `_all`. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. -If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== delete_data_lifecycle -Delete data stream lifecycles. -Removes the data stream lifecycle from a data stream, rendering it not managed by the data stream lifecycle. - -{ref}/data-streams-delete-lifecycle.html[Endpoint documentation] -[source,ts] ----- -client.indices.deleteDataLifecycle({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string | string[])*: A list of data streams of which the data stream lifecycle will be deleted; use `*` to get all data streams -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether wildcard expressions should get expanded to open or closed indices (default: open) -** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master -** *`timeout` (Optional, string | -1 | 0)*: Explicit timestamp for the document - -[discrete] -==== delete_data_stream -Delete data streams. 
-Deletes one or more data streams and their backing indices. - -{ref}/data-streams.html[Endpoint documentation] -[source,ts] ----- -client.indices.deleteDataStream({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string | string[])*: List of data streams to delete. Wildcard (`*`) expressions are supported. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of data stream that wildcard patterns can match. Supports a list of values,such as `open,hidden`. - -[discrete] -==== delete_index_template -Delete an index template. -The provided <index-template> may contain multiple template names separated by a comma. If multiple template -names are specified then there is no wildcard support and the provided names should match completely with -existing templates. - -{ref}/indices-delete-template.html[Endpoint documentation] -[source,ts] ----- -client.indices.deleteIndexTemplate({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string | string[])*: List of index template names used to limit the request. Wildcard (*) expressions are supported. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== delete_template -Deletes a legacy index template. - -{ref}/indices-delete-template-v1.html[Endpoint documentation] -[source,ts] ----- -client.indices.deleteTemplate({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string)*: The name of the legacy index template to delete. -Wildcard (`*`) expressions are supported. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. -If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== disk_usage -Analyzes the disk usage of each field of an index or data stream. - -{ref}/indices-disk-usage.html[Endpoint documentation] -[source,ts] ----- -client.indices.diskUsage({ index }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string | string[])*: List of data streams, indices, and aliases used to limit the request. -It’s recommended to execute this API with a single index (or the latest backing index of a data stream) as the API consumes resources significantly. -** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. -This behavior applies even if the request targets other open indices. -For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. 
-** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. -If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -Supports a list of values, such as `open,hidden`. -** *`flush` (Optional, boolean)*: If `true`, the API performs a flush before analysis. -If `false`, the response may not include uncommitted data. -** *`ignore_unavailable` (Optional, boolean)*: If `true`, missing or closed indices are not included in the response. -** *`run_expensive_tasks` (Optional, boolean)*: Analyzing field disk usage is resource-intensive. -To use the API, this parameter must be set to `true`. - -[discrete] -==== downsample -Aggregates a time series (TSDS) index and stores pre-computed statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped by a configured time interval. - -{ref}/indices-downsample-data-stream.html[Endpoint documentation] -[source,ts] ----- -client.indices.downsample({ index, target_index }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string)*: Name of the time series index to downsample. -** *`target_index` (string)*: Name of the index to create. -** *`config` (Optional, { fixed_interval })* - -[discrete] -==== exists -Check indices. -Checks if one or more indices, index aliases, or data streams exist. - -{ref}/indices-exists.html[Endpoint documentation] -[source,ts] ----- -client.indices.exists({ index }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string | string[])*: List of data streams, indices, and aliases. Supports wildcards (`*`). -** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. -This behavior applies even if the request targets other open indices. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. -If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. -** *`flat_settings` (Optional, boolean)*: If `true`, returns settings in flat format. -** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. -** *`include_defaults` (Optional, boolean)*: If `true`, return all default settings in the response. -** *`local` (Optional, boolean)*: If `true`, the request retrieves information from the local node only. - -[discrete] -==== exists_alias -Check aliases. -Checks if one or more data stream or index aliases exist. - -{ref}/indices-aliases.html[Endpoint documentation] -[source,ts] ----- -client.indices.existsAlias({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string | string[])*: List of aliases to check. Supports wildcards (`*`). -** *`index` (Optional, string | string[])*: List of data streams or indices used to limit the request. Supports wildcards (`*`). -To target all data streams and indices, omit this parameter or use `*` or `_all`. 
-** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. -This behavior applies even if the request targets other open indices. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. -If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. -** *`ignore_unavailable` (Optional, boolean)*: If `false`, requests that include a missing data stream or index in the target indices or data streams return an error. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== exists_index_template -Check index templates. -Check whether index templates exist. - -{ref}/index-templates.html[Endpoint documentation] -[source,ts] ----- -client.indices.existsIndexTemplate({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string)*: List of index template names used to limit the request. Wildcard (*) expressions are supported. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== exists_template -Check existence of index templates. -Returns information about whether a particular index template exists. - -{ref}/indices-template-exists-v1.html[Endpoint documentation] -[source,ts] ----- -client.indices.existsTemplate({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string | string[])*: The comma separated names of the index templates -** *`flat_settings` (Optional, boolean)*: Return settings in flat format (default: false) -** *`local` (Optional, boolean)*: Return local information, do not retrieve the state from master node (default: false) -** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node - -[discrete] -==== explain_data_lifecycle -Get the status for a data stream lifecycle. -Retrieves information about an index or data stream’s current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution. - -{ref}/data-streams-explain-lifecycle.html[Endpoint documentation] -[source,ts] ----- -client.indices.explainDataLifecycle({ index }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string | string[])*: The name of the index to explain -** *`include_defaults` (Optional, boolean)*: indicates if the API should return the default values the system uses for the index's lifecycle -** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master - -[discrete] -==== field_usage_stats -Returns field usage information for each shard and field of an index. 
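-
-For illustration, a request might look like the sketch below (the index and field names are hypothetical); the endpoint reference and full argument list follow.
-
-[source,ts]
-----
-// Sketch only: collect usage statistics for selected fields of a hypothetical index
-const response = await client.indices.fieldUsageStats({
-  index: 'my-index-000001',
-  fields: ['@timestamp', 'message']
-})
-console.log(response)
-----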
- -{ref}/field-usage-stats.html[Endpoint documentation] -[source,ts] ----- -client.indices.fieldUsageStats({ index }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string | string[])*: List or wildcard expression of index names used to limit the request. -** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. -This behavior applies even if the request targets other open indices. -For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. -If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -Supports a list of values, such as `open,hidden`. -** *`ignore_unavailable` (Optional, boolean)*: If `true`, missing or closed indices are not included in the response. -** *`fields` (Optional, string | string[])*: List or wildcard expressions of fields to include in the statistics. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. -If no response is received before the timeout expires, the request fails and returns an error. -** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. -Set to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). - -[discrete] -==== flush -Flushes one or more data streams or indices. - -{ref}/indices-flush.html[Endpoint documentation] -[source,ts] ----- -client.indices.flush({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases to flush. -Supports wildcards (`*`). -To flush all data streams and indices, omit this parameter or use `*` or `_all`. -** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. -This behavior applies even if the request targets other open indices. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. -If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. -** *`force` (Optional, boolean)*: If `true`, the request forces a flush even if there are no changes to commit to the index. -** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. -** *`wait_if_ongoing` (Optional, boolean)*: If `true`, the flush operation blocks until execution when another flush operation is running. -If `false`, Elasticsearch returns an error if you request a flush when another flush operation is running. 
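-
-For illustration, flushing a single target might look like the sketch below (the data stream name is hypothetical).
-
-[source,ts]
-----
-// Sketch only: flush a hypothetical data stream, waiting if another flush is already running
-const response = await client.indices.flush({
-  index: 'my-data-stream',
-  wait_if_ongoing: true
-})
-console.log(response)
-----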
- -[discrete] -==== forcemerge -Performs the force merge operation on one or more indices. - -{ref}/indices-forcemerge.html[Endpoint documentation] -[source,ts] ----- -client.indices.forcemerge({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string | string[])*: A list of index names; use `_all` or empty string to perform the operation on all indices -** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. -** *`flush` (Optional, boolean)*: Specify whether the index should be flushed after performing the operation (default: true) -** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) -** *`max_num_segments` (Optional, number)*: The number of segments the index should be merged into (default: dynamic) -** *`only_expunge_deletes` (Optional, boolean)*: Specify whether the operation should only expunge deleted documents -** *`wait_for_completion` (Optional, boolean)*: Should the request wait until the force merge is completed. - -[discrete] -==== get -Get index information. -Returns information about one or more indices. For data streams, the API returns information about the -stream’s backing indices. - -{ref}/indices-get-index.html[Endpoint documentation] -[source,ts] ----- -client.indices.get({ index }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string | string[])*: List of data streams, indices, and index aliases used to limit the request. -Wildcard expressions (*) are supported. -** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only -missing or closed indices. This behavior applies even if the request targets other open indices. For example, -a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard expressions can match. If the request can target data streams, this argument -determines whether wildcard expressions match hidden data streams. Supports a list of values, -such as open,hidden. -** *`flat_settings` (Optional, boolean)*: If true, returns settings in flat format. -** *`ignore_unavailable` (Optional, boolean)*: If false, requests that target a missing index return an error. -** *`include_defaults` (Optional, boolean)*: If true, return all default settings in the response. -** *`local` (Optional, boolean)*: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -** *`features` (Optional, { name, description } | { name, description }[])*: Return only information on specified index features - -[discrete] -==== get_alias -Get aliases. 
-Retrieves information for one or more data stream or index aliases. - -{ref}/indices-aliases.html[Endpoint documentation] -[source,ts] ----- -client.indices.getAlias({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (Optional, string | string[])*: List of aliases to retrieve. -Supports wildcards (`*`). -To retrieve all aliases, omit this parameter or use `*` or `_all`. -** *`index` (Optional, string | string[])*: List of data streams or indices used to limit the request. -Supports wildcards (`*`). -To target all data streams and indices, omit this parameter or use `*` or `_all`. -** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. -This behavior applies even if the request targets other open indices. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. -If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. -** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== get_data_lifecycle -Get data stream lifecycles. -Retrieves the data stream lifecycle configuration of one or more data streams. - -{ref}/data-streams-get-lifecycle.html[Endpoint documentation] -[source,ts] ----- -client.indices.getDataLifecycle({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string | string[])*: List of data streams to limit the request. -Supports wildcards (`*`). -To target all data streams, omit this parameter or use `*` or `_all`. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of data stream that wildcard patterns can match. -Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. -** *`include_defaults` (Optional, boolean)*: If `true`, return all default settings in the response. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== get_data_stream -Get data streams. -Retrieves information about one or more data streams. - -{ref}/data-streams.html[Endpoint documentation] -[source,ts] ----- -client.indices.getDataStream({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (Optional, string | string[])*: List of data stream names used to limit the request. -Wildcard (`*`) expressions are supported. If omitted, all data streams are returned. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of data stream that wildcard patterns can match. -Supports a list of values, such as `open,hidden`. 
-** *`include_defaults` (Optional, boolean)*: If true, returns all relevant default configurations for the index template. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -** *`verbose` (Optional, boolean)*: Whether the maximum timestamp for each data stream should be calculated and returned. - -[discrete] -==== get_field_mapping -Get mapping definitions. -Retrieves mapping definitions for one or more fields. -For data streams, the API retrieves field mappings for the stream’s backing indices. - -{ref}/indices-get-field-mapping.html[Endpoint documentation] -[source,ts] ----- -client.indices.getFieldMapping({ fields }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`fields` (string | string[])*: List or wildcard expression of fields used to limit returned information. -** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request. -Supports wildcards (`*`). -To target all data streams and indices, omit this parameter or use `*` or `_all`. -** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. -This behavior applies even if the request targets other open indices. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. -If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. -** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. -** *`include_defaults` (Optional, boolean)*: If `true`, return all default settings in the response. -** *`local` (Optional, boolean)*: If `true`, the request retrieves information from the local node only. - -[discrete] -==== get_index_template -Get index templates. -Returns information about one or more index templates. - -{ref}/indices-get-template.html[Endpoint documentation] -[source,ts] ----- -client.indices.getIndexTemplate({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (Optional, string)*: List of index template names used to limit the request. Wildcard (*) expressions are supported. -** *`local` (Optional, boolean)*: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. -** *`flat_settings` (Optional, boolean)*: If true, returns settings in flat format. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -** *`include_defaults` (Optional, boolean)*: If true, returns all relevant default configurations for the index template. - -[discrete] -==== get_mapping -Get mapping definitions. -Retrieves mapping definitions for one or more indices. -For data streams, the API retrieves mappings for the stream’s backing indices. - -{ref}/indices-get-mapping.html[Endpoint documentation] -[source,ts] ----- -client.indices.getMapping({ ... 
}) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request. -Supports wildcards (`*`). -To target all data streams and indices, omit this parameter or use `*` or `_all`. -** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. -This behavior applies even if the request targets other open indices. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. -If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. -** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. -** *`local` (Optional, boolean)*: If `true`, the request retrieves information from the local node only. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== get_settings -Get index settings. -Returns setting information for one or more indices. For data streams, -returns setting information for the stream’s backing indices. - -{ref}/indices-get-settings.html[Endpoint documentation] -[source,ts] ----- -client.indices.getSettings({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit -the request. Supports wildcards (`*`). To target all data streams and -indices, omit this parameter or use `*` or `_all`. -** *`name` (Optional, string | string[])*: List or wildcard expression of settings to retrieve. -** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index -alias, or `_all` value targets only missing or closed indices. This -behavior applies even if the request targets other open indices. For -example, a request targeting `foo*,bar*` returns an error if an index -starts with foo but no index starts with `bar`. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. -If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -Supports a list of values, such as `open,hidden`. -** *`flat_settings` (Optional, boolean)*: If `true`, returns settings in flat format. -** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. -** *`include_defaults` (Optional, boolean)*: If `true`, return all default settings in the response. -** *`local` (Optional, boolean)*: If `true`, the request retrieves information from the local node only. If -`false`, information is retrieved from the master node. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is -received before the timeout expires, the request fails and returns an -error. 
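-
-As a usage sketch (the index and setting names are placeholders), a single setting can be read in flat format like this:
-
-[source,ts]
----
-// Read the refresh interval of one index, returned as a flat key
-const response = await client.indices.getSettings({
-  index: 'my-index-000001',
-  name: 'index.refresh_interval',
-  flat_settings: true
-})
-console.log(response)
----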
-
-[discrete]
-==== get_template
-Get index templates.
-Retrieves information about one or more index templates.
-
-{ref}/indices-get-template-v1.html[Endpoint documentation]
-[source,ts]
----
-client.indices.getTemplate({ ... })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`name` (Optional, string | string[])*: List of index template names used to limit the request.
-Wildcard (`*`) expressions are supported.
-To return all index templates, omit this parameter or use a value of `_all` or `*`.
-** *`flat_settings` (Optional, boolean)*: If `true`, returns settings in flat format.
-** *`local` (Optional, boolean)*: If `true`, the request retrieves information from the local node only.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
-If no response is received before the timeout expires, the request fails and returns an error.
-
-[discrete]
-==== migrate_to_data_stream
-Convert an index alias to a data stream.
-Converts an index alias to a data stream.
-You must have a matching index template that is data stream enabled.
-The alias must meet the following criteria:
-The alias must have a write index;
-All indices for the alias must have a `@timestamp` field mapping of a `date` or `date_nanos` field type;
-The alias must not have any filters;
-The alias must not use custom routing.
-If successful, the request removes the alias and creates a data stream with the same name.
-The indices for the alias become hidden backing indices for the stream.
-The write index for the alias becomes the write index for the stream.
-
-{ref}/data-streams.html[Endpoint documentation]
-[source,ts]
----
-client.indices.migrateToDataStream({ name })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`name` (string)*: Name of the index alias to convert to a data stream.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
-
-[discrete]
-==== modify_data_stream
-Update data streams.
-Performs one or more data stream modification actions in a single atomic operation.
-
-{ref}/data-streams.html[Endpoint documentation]
-[source,ts]
----
-client.indices.modifyDataStream({ actions })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`actions` ({ add_backing_index, remove_backing_index }[])*: Actions to perform.
-
-[discrete]
-==== open
-Opens a closed index.
-For data streams, the API opens any closed backing indices.
-
-{ref}/indices-open-close.html[Endpoint documentation]
-[source,ts]
----
-client.indices.open({ index })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (string | string[])*: List of data streams, indices, and aliases used to limit the request.
-Supports wildcards (`*`).
-By default, you must explicitly name the indices you are using to limit the request.
-To limit a request using `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to false.
-You can update this setting in the `elasticsearch.yml` file or using the cluster update settings API.
-** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. -This behavior applies even if the request targets other open indices. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. -If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. -** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. -If no response is received before the timeout expires, the request fails and returns an error. -** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. -Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). - -[discrete] -==== promote_data_stream -Promotes a data stream from a replicated data stream managed by CCR to a regular data stream - -{ref}/data-streams.html[Endpoint documentation] -[source,ts] ----- -client.indices.promoteDataStream({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string)*: The name of the data stream -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== put_alias -Create or update an alias. -Adds a data stream or index to an alias. - -{ref}/indices-aliases.html[Endpoint documentation] -[source,ts] ----- -client.indices.putAlias({ index, name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string | string[])*: List of data streams or indices to add. -Supports wildcards (`*`). -Wildcard patterns that match both data streams and indices return an error. -** *`name` (string)*: Alias to update. -If the alias doesn’t exist, the request creates it. -Index alias names support date math. -** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Query used to limit documents the alias can access. -** *`index_routing` (Optional, string)*: Value used to route indexing operations to a specific shard. 
-If specified, this overwrites the `routing` value for indexing operations.
-Data stream aliases don’t support this parameter.
-** *`is_write_index` (Optional, boolean)*: If `true`, sets the write index or data stream for the alias.
-If an alias points to multiple indices or data streams and `is_write_index` isn’t set, the alias rejects write requests.
-If an index alias points to one index and `is_write_index` isn’t set, the index automatically acts as the write index.
-Data stream aliases don’t automatically set a write data stream, even if the alias points to one data stream.
-** *`routing` (Optional, string)*: Value used to route indexing and search operations to a specific shard.
-Data stream aliases don’t support this parameter.
-** *`search_routing` (Optional, string)*: Value used to route search operations to a specific shard.
-If specified, this overwrites the `routing` value for search operations.
-Data stream aliases don’t support this parameter.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
-If no response is received before the timeout expires, the request fails and returns an error.
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response.
-If no response is received before the timeout expires, the request fails and returns an error.
-
-[discrete]
-==== put_data_lifecycle
-Update data stream lifecycles.
-Update the data stream lifecycle of the specified data streams.
-
-{ref}/data-streams-put-lifecycle.html[Endpoint documentation]
-[source,ts]
----
-client.indices.putDataLifecycle({ name })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`name` (string | string[])*: List of data streams used to limit the request.
-Supports wildcards (`*`).
-To target all data streams use `*` or `_all`.
-** *`lifecycle` (Optional, { data_retention, downsampling, enabled })*
-** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of data stream that wildcard patterns can match.
-Supports a list of values, such as `open,hidden`.
-Valid values are: `all`, `hidden`, `open`, `closed`, `none`.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is
-received before the timeout expires, the request fails and returns an
-error.
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response.
-If no response is received before the timeout expires, the request fails and returns an error.
-
-[discrete]
-==== put_index_template
-Create or update an index template.
-Index templates define settings, mappings, and aliases that can be applied automatically to new indices.
-
-{ref}/indices-put-template.html[Endpoint documentation]
-[source,ts]
----
-client.indices.putIndexTemplate({ name })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`name` (string)*: Index or template name
-** *`index_patterns` (Optional, string | string[])*: Array of wildcard (`*`) expressions used to match the names of data streams and indices during creation.
-** *`composed_of` (Optional, string[])*: An ordered list of component template names.
-Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence.
-** *`template` (Optional, { aliases, mappings, settings, lifecycle })*: Template to be applied.
-It may optionally include an `aliases`, `mappings`, or `settings` configuration.
-** *`data_stream` (Optional, { hidden, allow_custom_routing })*: If this object is included, the template is used to create data streams and their backing indices. -Supports an empty object. -Data streams require a matching index template with a `data_stream` object. -** *`priority` (Optional, number)*: Priority to determine index template precedence when a new data stream or index is created. -The index template with the highest priority is chosen. -If no priority is specified the template is treated as though it is of priority 0 (lowest priority). -This number is not automatically generated by Elasticsearch. -** *`version` (Optional, number)*: Version number used to manage index templates externally. -This number is not automatically generated by Elasticsearch. -** *`_meta` (Optional, Record<string, User-defined value>)*: Optional user metadata about the index template. -May have any contents. -This map is not automatically generated by Elasticsearch. -** *`allow_auto_create` (Optional, boolean)*: This setting overrides the value of the `action.auto_create_index` cluster setting. -If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `actions.auto_create_index`. -If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created. -** *`ignore_missing_component_templates` (Optional, string[])*: The configuration option ignore_missing_component_templates can be used when an index template -references a component template that might not exist -** *`deprecated` (Optional, boolean)*: Marks this index template as deprecated. When creating or updating a non-deprecated index template -that uses deprecated components, Elasticsearch will emit a deprecation warning. -** *`create` (Optional, boolean)*: If `true`, this request cannot replace or update existing index templates. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -** *`cause` (Optional, string)*: User defined reason for creating/updating the index template - -[discrete] -==== put_mapping -Update field mappings. -Adds new fields to an existing data stream or index. -You can also use this API to change the search settings of existing fields. -For data streams, these changes are applied to all backing indices by default. - -{ref}/indices-put-mapping.html[Endpoint documentation] -[source,ts] ----- -client.indices.putMapping({ index }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string | string[])*: A list of index names the mapping should be added to (supports wildcards); use `_all` or omit to add the mapping on all indices. -** *`date_detection` (Optional, boolean)*: Controls whether dynamic date detection is enabled. -** *`dynamic` (Optional, Enum("strict" | "runtime" | true | false))*: Controls whether new fields are added dynamically. -** *`dynamic_date_formats` (Optional, string[])*: If date detection is enabled then new string fields are checked -against 'dynamic_date_formats' and if the value matches then -a new date field is added instead of string. 
-** *`dynamic_templates` (Optional, Record<string, { mapping, runtime, match, path_match, unmatch, path_unmatch, match_mapping_type, unmatch_mapping_type, match_pattern }> | Record<string, { mapping, runtime, match, path_match, unmatch, path_unmatch, match_mapping_type, unmatch_mapping_type, match_pattern }>[])*: Specify dynamic templates for the mapping. -** *`_field_names` (Optional, { enabled })*: Control whether field names are enabled for the index. -** *`_meta` (Optional, Record<string, User-defined value>)*: A mapping type can have custom meta data associated with it. These are -not used at all by Elasticsearch, but can be used to store -application-specific metadata. -** *`numeric_detection` (Optional, boolean)*: Automatically map strings into numeric data types for all fields. -** *`properties` (Optional, Record<string, { type } | { boost, fielddata, index, null_value, type } | { type, enabled, null_value, boost, coerce, script, on_script_error, ignore_malformed, time_series_metric, analyzer, eager_global_ordinals, index, index_options, index_phrases, index_prefixes, norms, position_increment_gap, search_analyzer, search_quote_analyzer, term_vector, format, precision_step, locale } | { relations, eager_global_ordinals, type } | { boost, eager_global_ordinals, index, index_options, script, on_script_error, normalizer, norms, null_value, similarity, split_queries_on_whitespace, time_series_dimension, type } | { type, fields, meta, copy_to } | { type } | { positive_score_impact, type } | { positive_score_impact, type } | { analyzer, index, index_options, max_shingle_size, norms, search_analyzer, search_quote_analyzer, similarity, term_vector, type } | { analyzer, boost, eager_global_ordinals, fielddata, fielddata_frequency_filter, index, index_options, index_phrases, index_prefixes, norms, position_increment_gap, search_analyzer, search_quote_analyzer, similarity, term_vector, type } | { type } | { type, null_value } | { boost, format, ignore_malformed, index, null_value, precision_step, type } | { boost, fielddata, format, ignore_malformed, index, null_value, precision_step, locale, type } | { type, default_metric, metrics, time_series_metric } | { type, dims, element_type, index, index_options, similarity } | { boost, depth_limit, doc_values, eager_global_ordinals, index, index_options, null_value, similarity, split_queries_on_whitespace, type } | { enabled, include_in_parent, include_in_root, type } | { enabled, subobjects, type } | { type, meta, inference_id } | { type } | { analyzer, contexts, max_input_length, preserve_position_increments, preserve_separators, search_analyzer, type } | { value, type } | { path, type } | { ignore_malformed, type } | { boost, index, ignore_malformed, null_value, on_script_error, script, time_series_dimension, type } | { type } | { analyzer, boost, index, null_value, enable_position_increments, type } | { ignore_malformed, ignore_z_value, null_value, index, on_script_error, script, type } | { coerce, ignore_malformed, ignore_z_value, orientation, strategy, type } | { ignore_malformed, ignore_z_value, null_value, type } | { coerce, ignore_malformed, ignore_z_value, orientation, type } | { type, null_value } | { type, null_value } | { type, null_value } | { type, null_value } | { type, null_value } | { type, null_value } | { type, null_value, scaling_factor } | { type, null_value } | { type, null_value } | { format, type } | { type } | { type } | { type } | { type } | { type } | { type, norms, index_options, index, null_value, rules, language, 
country, variant, strength, decomposition, alternate, case_level, case_first, numeric, variable_top, hiragana_quaternary_mode }>)*: Mapping for a field. For new fields, this mapping can include: - -- Field name -- Field data type -- Mapping parameters -** *`_routing` (Optional, { required })*: Enable making a routing value required on indexed documents. -** *`_source` (Optional, { compress, compress_threshold, enabled, excludes, includes, mode })*: Control whether the _source field is enabled on the index. -** *`runtime` (Optional, Record<string, { fields, fetch_fields, format, input_field, target_field, target_index, script, type }>)*: Mapping of runtime fields for the index. -** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. -This behavior applies even if the request targets other open indices. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. -If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. -** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. -If no response is received before the timeout expires, the request fails and returns an error. -** *`write_index_only` (Optional, boolean)*: If `true`, the mappings are applied only to the current write index for the target. - -[discrete] -==== put_settings -Update index settings. -Changes dynamic index settings in real time. For data streams, index setting -changes are applied to all backing indices by default. - -{ref}/indices-update-settings.html[Endpoint documentation] -[source,ts] ----- -client.indices.putSettings({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit -the request. Supports wildcards (`*`). To target all data streams and -indices, omit this parameter or use `*` or `_all`. 
-** *`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })*
-** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index
-alias, or `_all` value targets only missing or closed indices. This
-behavior applies even if the request targets other open indices. For
-example, a request targeting `foo*,bar*` returns an error if an index
-starts with `foo` but no index starts with `bar`.
-** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target
-data streams, this argument determines whether wildcard expressions match
-hidden data streams. Supports a list of values, such as
-`open,hidden`.
-** *`flat_settings` (Optional, boolean)*: If `true`, returns settings in flat format.
-** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is
-received before the timeout expires, the request fails and returns an
-error.
-** *`preserve_existing` (Optional, boolean)*: If `true`, existing index settings remain unchanged.
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the
- timeout expires, the request fails and returns an error.
-
-[discrete]
-==== put_template
-Create or update an index template.
-Index templates define settings, mappings, and aliases that can be applied automatically to new indices.
-
-{ref}/indices-templates-v1.html[Endpoint documentation]
-[source,ts]
----
-client.indices.putTemplate({ name })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`name` (string)*: The name of the template
-** *`aliases` (Optional, Record<string, { filter, index_routing, is_hidden, is_write_index, routing, search_routing }>)*: Aliases for the index.
-** *`index_patterns` (Optional, string | string[])*: Array of wildcard expressions used to match the names
-of indices during creation.
-** *`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })*: Mapping for fields in the index.
-** *`order` (Optional, number)*: Order in which Elasticsearch applies this template if index
-matches multiple templates.
-
-Templates with lower 'order' values are merged first. Templates with higher
-'order' values are merged later, overriding templates with lower values.
-** *`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })*: Configuration options for the index. -** *`version` (Optional, number)*: Version number used to manage index templates externally. This number -is not automatically generated by Elasticsearch. -** *`create` (Optional, boolean)*: If true, this request cannot replace or update existing index templates. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is -received before the timeout expires, the request fails and returns an error. -** *`cause` (Optional, string)* - -[discrete] -==== recovery -Returns information about ongoing and completed shard recoveries for one or more indices. -For data streams, the API returns information for the stream’s backing indices. - -{ref}/indices-recovery.html[Endpoint documentation] -[source,ts] ----- -client.indices.recovery({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request. -Supports wildcards (`*`). -To target all data streams and indices, omit this parameter or use `*` or `_all`. -** *`active_only` (Optional, boolean)*: If `true`, the response only includes ongoing shard recoveries. -** *`detailed` (Optional, boolean)*: If `true`, the response includes detailed information about shard recoveries. - -[discrete] -==== refresh -Refresh an index. -A refresh makes recent operations performed on one or more indices available for search. -For data streams, the API runs the refresh operation on the stream’s backing indices. - -{ref}/indices-refresh.html[Endpoint documentation] -[source,ts] ----- -client.indices.refresh({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request. -Supports wildcards (`*`). -To target all data streams and indices, omit this parameter or use `*` or `_all`. -** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. -This behavior applies even if the request targets other open indices. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. -If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. 
-** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. - -[discrete] -==== reload_search_analyzers -Reloads an index's search analyzers and their resources. - -{ref}/indices-reload-analyzers.html[Endpoint documentation] -[source,ts] ----- -client.indices.reloadSearchAnalyzers({ index }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string | string[])*: A list of index names to reload analyzers for -** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. -** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) - -[discrete] -==== resolve_cluster -Resolves the specified index expressions to return information about each cluster, including -the local cluster, if included. -Multiple patterns and remote clusters are supported. - -{ref}/indices-resolve-cluster-api.html[Endpoint documentation] -[source,ts] ----- -client.indices.resolveCluster({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string | string[])*: Comma-separated name(s) or index pattern(s) of the indices, aliases, and data streams to resolve. -Resources on remote clusters can be specified using the `<cluster>`:`<name>` syntax. -** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing -or closed indices. This behavior applies even if the request targets other open indices. For example, a request -targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. -If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. -** *`ignore_throttled` (Optional, boolean)*: If true, concrete, expanded or aliased indices are ignored when frozen. Defaults to false. -** *`ignore_unavailable` (Optional, boolean)*: If false, the request returns an error if it targets a missing or closed index. Defaults to false. - -[discrete] -==== resolve_index -Resolve indices. -Resolve the names and/or index patterns for indices, aliases, and data streams. -Multiple patterns and remote clusters are supported. - -{ref}/indices-resolve-index-api.html[Endpoint documentation] -[source,ts] ----- -client.indices.resolveIndex({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string | string[])*: Comma-separated name(s) or index pattern(s) of the indices, aliases, and data streams to resolve. -Resources on remote clusters can be specified using the `<cluster>`:`<name>` syntax. 
-** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match.
-If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
-Supports a list of values, such as `open,hidden`.
-Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
-** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
-** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
-This behavior applies even if the request targets other open indices.
-For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.
-
-[discrete]
-==== rollover
-Roll over to a new index.
-Creates a new index for a data stream or index alias.
-
-{ref}/indices-rollover-index.html[Endpoint documentation]
-[source,ts]
----
-client.indices.rollover({ alias })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`alias` (string)*: Name of the data stream or index alias to roll over.
-** *`new_index` (Optional, string)*: Name of the index to create.
-Supports date math.
-Data streams do not support this parameter.
-** *`aliases` (Optional, Record<string, { filter, index_routing, is_hidden, is_write_index, routing, search_routing }>)*: Aliases for the target index.
-Data streams do not support this parameter.
-** *`conditions` (Optional, { min_age, max_age, max_age_millis, min_docs, max_docs, max_size, max_size_bytes, min_size, min_size_bytes, max_primary_shard_size, max_primary_shard_size_bytes, min_primary_shard_size, min_primary_shard_size_bytes, max_primary_shard_docs, min_primary_shard_docs })*: Conditions for the rollover.
-If specified, Elasticsearch only performs the rollover if the current index satisfies these conditions.
-If this parameter is not specified, Elasticsearch performs the rollover unconditionally.
-If conditions are specified, at least one of them must be a `max_*` condition.
-The index will roll over if any `max_*` condition is satisfied and all `min_*` conditions are satisfied.
-** *`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })*: Mapping for fields in the index.
-If specified, this mapping can include field names, field data types, and mapping parameters.
-** *`settings` (Optional, Record<string, User-defined value>)*: Configuration options for the index.
-Data streams do not support this parameter.
-** *`dry_run` (Optional, boolean)*: If `true`, checks whether the current index satisfies the specified conditions but does not perform a rollover.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
-If no response is received before the timeout expires, the request fails and returns an error.
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response.
-If no response is received before the timeout expires, the request fails and returns an error.
-** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. -Set to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). - -[discrete] -==== segments -Returns low-level information about the Lucene segments in index shards. -For data streams, the API returns information about the stream’s backing indices. - -{ref}/indices-segments.html[Endpoint documentation] -[source,ts] ----- -client.indices.segments({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request. -Supports wildcards (`*`). -To target all data streams and indices, omit this parameter or use `*` or `_all`. -** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. -This behavior applies even if the request targets other open indices. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. -If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. -** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. - -[discrete] -==== shard_stores -Retrieves store information about replica shards in one or more indices. -For data streams, the API retrieves store information for the stream’s backing indices. - -{ref}/indices-shards-stores.html[Endpoint documentation] -[source,ts] ----- -client.indices.shardStores({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request. -** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, or _all -value targets only missing or closed indices. This behavior applies even if the request -targets other open indices. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, -this argument determines whether wildcard expressions match hidden data streams. -** *`ignore_unavailable` (Optional, boolean)*: If true, missing or closed indices are not included in the response. -** *`status` (Optional, Enum("green" | "yellow" | "red" | "all") | Enum("green" | "yellow" | "red" | "all")[])*: List of shard health statuses used to limit the request. - -[discrete] -==== shrink -Shrinks an existing index into a new index with fewer primary shards. - -{ref}/indices-shrink-index.html[Endpoint documentation] -[source,ts] ----- -client.indices.shrink({ index, target }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string)*: Name of the source index to shrink. -** *`target` (string)*: Name of the target index to create. 
-** *`aliases` (Optional, Record<string, { filter, index_routing, is_hidden, is_write_index, routing, search_routing }>)*: The key is the alias name. -Index alias names support date math. -** *`settings` (Optional, Record<string, User-defined value>)*: Configuration options for the target index. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. -If no response is received before the timeout expires, the request fails and returns an error. -** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. -Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). - -[discrete] -==== simulate_index_template -Simulate an index. -Returns the index configuration that would be applied to the specified index from an existing index template. - -{ref}/indices-simulate-index.html[Endpoint documentation] -[source,ts] ----- -client.indices.simulateIndexTemplate({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string)*: Name of the index to simulate -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -** *`include_defaults` (Optional, boolean)*: If true, returns all relevant default configurations for the index template. - -[discrete] -==== simulate_template -Simulate an index template. -Returns the index configuration that would be applied by a particular index template. - -{ref}/indices-simulate-template.html[Endpoint documentation] -[source,ts] ----- -client.indices.simulateTemplate({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (Optional, string)*: Name of the index template to simulate. To test a template configuration before you add it to the cluster, omit -this parameter and specify the template configuration in the request body. -** *`allow_auto_create` (Optional, boolean)*: This setting overrides the value of the `action.auto_create_index` cluster setting. -If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `actions.auto_create_index`. -If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created. -** *`index_patterns` (Optional, string | string[])*: Array of wildcard (`*`) expressions used to match the names of data streams and indices during creation. -** *`composed_of` (Optional, string[])*: An ordered list of component template names. -Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence. -** *`template` (Optional, { aliases, mappings, settings, lifecycle })*: Template to be applied. -It may optionally include an `aliases`, `mappings`, or `settings` configuration. -** *`data_stream` (Optional, { hidden, allow_custom_routing })*: If this object is included, the template is used to create data streams and their backing indices. -Supports an empty object. -Data streams require a matching index template with a `data_stream` object. 
-** *`priority` (Optional, number)*: Priority to determine index template precedence when a new data stream or index is created.
-The index template with the highest priority is chosen.
-If no priority is specified the template is treated as though it is of priority 0 (lowest priority).
-This number is not automatically generated by Elasticsearch.
-** *`version` (Optional, number)*: Version number used to manage index templates externally.
-This number is not automatically generated by Elasticsearch.
-** *`_meta` (Optional, Record<string, User-defined value>)*: Optional user metadata about the index template.
-May have any contents.
-This map is not automatically generated by Elasticsearch.
-** *`ignore_missing_component_templates` (Optional, string[])*: The configuration option ignore_missing_component_templates can be used when an index template
-references a component template that might not exist
-** *`deprecated` (Optional, boolean)*: Marks this index template as deprecated. When creating or updating a non-deprecated index template
-that uses deprecated components, Elasticsearch will emit a deprecation warning.
-** *`create` (Optional, boolean)*: If true, the template passed in the body is only used if no existing templates match the same index patterns. If false, the simulation uses the template with the highest priority. Note that the template is not permanently added or updated in either case; it is only used for the simulation.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
-** *`include_defaults` (Optional, boolean)*: If true, returns all relevant default configurations for the index template.
-
-[discrete]
-==== split
-Splits an existing index into a new index with more primary shards.
-
-{ref}/indices-split-index.html[Endpoint documentation]
-[source,ts]
----
-client.indices.split({ index, target })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (string)*: Name of the source index to split.
-** *`target` (string)*: Name of the target index to create.
-** *`aliases` (Optional, Record<string, { filter, index_routing, is_hidden, is_write_index, routing, search_routing }>)*: Aliases for the resulting index.
-** *`settings` (Optional, Record<string, User-defined value>)*: Configuration options for the target index.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
-If no response is received before the timeout expires, the request fails and returns an error.
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response.
-If no response is received before the timeout expires, the request fails and returns an error.
-** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation.
-Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).
-
-[discrete]
-==== stats
-Returns statistics for one or more indices.
-For data streams, the API retrieves statistics for the stream’s backing indices.
-
-{ref}/indices-stats.html[Endpoint documentation]
-[source,ts]
----
-client.indices.stats({ ... })
----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`metric` (Optional, string | string[])*: Limit the information returned to the specific metrics.
-** *`index` (Optional, string | string[])*: A list of index names; use `_all` or empty string to perform the operation on all indices -** *`completion_fields` (Optional, string | string[])*: List or wildcard expressions of fields to include in fielddata and suggest statistics. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, this argument -determines whether wildcard expressions match hidden data streams. Supports a list of values, -such as `open,hidden`. -** *`fielddata_fields` (Optional, string | string[])*: List or wildcard expressions of fields to include in fielddata statistics. -** *`fields` (Optional, string | string[])*: List or wildcard expressions of fields to include in the statistics. -** *`forbid_closed_indices` (Optional, boolean)*: If true, statistics are not collected from closed indices. -** *`groups` (Optional, string | string[])*: List of search groups to include in the search statistics. -** *`include_segment_file_sizes` (Optional, boolean)*: If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested). -** *`include_unloaded_segments` (Optional, boolean)*: If true, the response includes information from segments that are not loaded into memory. -** *`level` (Optional, Enum("cluster" | "indices" | "shards"))*: Indicates whether statistics are aggregated at the cluster, index, or shard level. - -[discrete] -==== unfreeze -Unfreezes an index. - -{ref}/unfreeze-index-api.html[Endpoint documentation] -[source,ts] ----- -client.indices.unfreeze({ index }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string)*: Identifier for the index. -** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. -This behavior applies even if the request targets other open indices. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. -If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. -** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. -If no response is received before the timeout expires, the request fails and returns an error. -** *`wait_for_active_shards` (Optional, string)*: The number of shard copies that must be active before proceeding with the operation. -Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). - -[discrete] -==== update_aliases -Create or update an alias. -Adds a data stream or index to an alias. - -{ref}/indices-aliases.html[Endpoint documentation] -[source,ts] ----- -client.indices.updateAliases({ ... 
}) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`actions` (Optional, { add_backing_index, remove_backing_index }[])*: Actions to perform. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. -If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== validate_query -Validate a query. -Validates a query without running it. - -{ref}/search-validate.html[Endpoint documentation] -[source,ts] ----- -client.indices.validateQuery({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases to search. -Supports wildcards (`*`). -To search all data streams or indices, omit this parameter or use `*` or `_all`. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Query in the Lucene query string syntax. -** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. -This behavior applies even if the request targets other open indices. -** *`all_shards` (Optional, boolean)*: If `true`, the validation is executed on all shards instead of one random shard per index. -** *`analyzer` (Optional, string)*: Analyzer to use for the query string. -This parameter can only be used when the `q` query string parameter is specified. -** *`analyze_wildcard` (Optional, boolean)*: If `true`, wildcard and prefix queries are analyzed. -** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query: `AND` or `OR`. -** *`df` (Optional, string)*: Field to use as default where no field prefix is given in the query string. -This parameter can only be used when the `q` query string parameter is specified. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. -If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. -Supports a list of values, such as `open,hidden`. -Valid values are: `all`, `open`, `closed`, `hidden`, `none`. -** *`explain` (Optional, boolean)*: If `true`, the response returns detailed information if an error has occurred. -** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. -** *`lenient` (Optional, boolean)*: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. 
-** *`rewrite` (Optional, boolean)*: If `true`, returns a more detailed explanation showing the actual Lucene query that will be executed. -** *`q` (Optional, string)*: Query in the Lucene query string syntax. - -[discrete] -=== inference -[discrete] -==== delete -Delete an inference endpoint - -{ref}/delete-inference-api.html[Endpoint documentation] -[source,ts] ----- -client.inference.delete({ inference_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`inference_id` (string)*: The inference Id -** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))*: The task type -** *`dry_run` (Optional, boolean)*: When true, the endpoint is not deleted, and a list of ingest processors which reference this endpoint is returned -** *`force` (Optional, boolean)*: When true, the inference endpoint is forcefully deleted even if it is still being used by ingest processors or semantic text fields - -[discrete] -==== get -Get an inference endpoint - -{ref}/get-inference-api.html[Endpoint documentation] -[source,ts] ----- -client.inference.get({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))*: The task type -** *`inference_id` (Optional, string)*: The inference Id - -[discrete] -==== inference -Perform inference on the service - -{ref}/post-inference-api.html[Endpoint documentation] -[source,ts] ----- -client.inference.inference({ inference_id, input }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`inference_id` (string)*: The inference Id -** *`input` (string | string[])*: Inference input. -Either a string or an array of strings. -** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))*: The task type -** *`query` (Optional, string)*: Query input, required for rerank task. -Not required for other tasks. -** *`task_settings` (Optional, User-defined value)*: Optional task settings -** *`timeout` (Optional, string | -1 | 0)*: Specifies the amount of time to wait for the inference request to complete. - -[discrete] -==== put -Create an inference endpoint - -{ref}/put-inference-api.html[Endpoint documentation] -[source,ts] ----- -client.inference.put({ inference_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`inference_id` (string)*: The inference Id -** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))*: The task type -** *`inference_config` (Optional, { service, service_settings, task_settings })* - -[discrete] -==== stream_inference -Perform streaming inference -[source,ts] ----- -client.inference.streamInference() ----- - - -[discrete] -=== ingest -[discrete] -==== delete_geoip_database -Delete GeoIP database configurations. -Delete one or more IP geolocation database configurations. - -{ref}/delete-geoip-database-api.html[Endpoint documentation] -[source,ts] ----- -client.ingest.deleteGeoipDatabase({ id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string | string[])*: A list of geoip database configurations to delete -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. 
If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== delete_ip_location_database -Deletes an ip location database configuration - -{ref}/delete-ip-location-database-api.html[Endpoint documentation] -[source,ts] ----- -client.ingest.deleteIpLocationDatabase() ----- - - -[discrete] -==== delete_pipeline -Delete pipelines. -Delete one or more ingest pipelines. - -{ref}/delete-pipeline-api.html[Endpoint documentation] -[source,ts] ----- -client.ingest.deletePipeline({ id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: Pipeline ID or wildcard expression of pipeline IDs used to limit the request. -To delete all ingest pipelines in a cluster, use a value of `*`. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. -If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== geo_ip_stats -Get GeoIP statistics. -Get download statistics for GeoIP2 databases that are used with the GeoIP processor. - -{ref}/geoip-processor.html[Endpoint documentation] -[source,ts] ----- -client.ingest.geoIpStats() ----- - - -[discrete] -==== get_geoip_database -Get GeoIP database configurations. -Get information about one or more IP geolocation database configurations. - -{ref}/get-geoip-database-api.html[Endpoint documentation] -[source,ts] ----- -client.ingest.getGeoipDatabase({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (Optional, string | string[])*: List of database configuration IDs to retrieve. -Wildcard (`*`) expressions are supported. -To get all database configurations, omit this parameter or use `*`. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== get_ip_location_database -Returns the specified ip location database configuration - -{ref}/get-ip-location-database-api.html[Endpoint documentation] -[source,ts] ----- -client.ingest.getIpLocationDatabase() ----- - - -[discrete] -==== get_pipeline -Get pipelines. -Get information about one or more ingest pipelines. -This API returns a local reference of the pipeline. - -{ref}/get-pipeline-api.html[Endpoint documentation] -[source,ts] ----- -client.ingest.getPipeline({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (Optional, string)*: List of pipeline IDs to retrieve. -Wildcard (`*`) expressions are supported. -To get all ingest pipelines, omit this parameter or use `*`. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -If no response is received before the timeout expires, the request fails and returns an error. -** *`summary` (Optional, boolean)*: Return pipelines without their definitions (default: false) - -[discrete] -==== processor_grok -Run a grok processor. -Extract structured fields out of a single text field within a document. -You must choose which field to extract matched fields from, as well as the grok pattern you expect will match. -A grok pattern is like a regular expression that supports aliased expressions that can be reused. 
-
-{ref}/grok-processor.html[Endpoint documentation]
-[source,ts]
-----
-client.ingest.processorGrok()
-----
-
-
-[discrete]
-==== put_geoip_database
-Create or update GeoIP database configurations.
-Create or update IP geolocation database configurations.
-
-{ref}/put-geoip-database-api.html[Endpoint documentation]
-[source,ts]
-----
-client.ingest.putGeoipDatabase({ id, name, maxmind })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`id` (string)*: ID of the database configuration to create or update.
-** *`name` (string)*: The provider-assigned name of the IP geolocation database to download.
-** *`maxmind` ({ account_id })*: The configuration necessary to identify which IP geolocation provider to use to download the database, as well as any provider-specific configuration necessary for such downloading.
-At present, the only supported provider is maxmind, and the maxmind provider requires that an account_id (string) is configured.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
-If no response is received before the timeout expires, the request fails and returns an error.
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
-
-[discrete]
-==== put_ip_location_database
-Puts the configuration for an IP location database to be downloaded.
-
-{ref}/put-ip-location-database-api.html[Endpoint documentation]
-[source,ts]
-----
-client.ingest.putIpLocationDatabase()
-----
-
-
-[discrete]
-==== put_pipeline
-Create or update a pipeline.
-Changes made using this API take effect immediately.
-
-{ref}/ingest.html[Endpoint documentation]
-[source,ts]
-----
-client.ingest.putPipeline({ id })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`id` (string)*: ID of the ingest pipeline to create or update.
-** *`_meta` (Optional, Record<string, User-defined value>)*: Optional metadata about the ingest pipeline. May have any contents. This map is not automatically generated by Elasticsearch.
-** *`description` (Optional, string)*: Description of the ingest pipeline.
-** *`on_failure` (Optional, { append, attachment, bytes, circle, community_id, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, fingerprint, foreach, ip_location, geo_grid, geoip, grok, gsub, html_strip, inference, join, json, kv, lowercase, network_direction, pipeline, redact, registered_domain, remove, rename, reroute, script, set, set_security_user, sort, split, terminate, trim, uppercase, urldecode, uri_parts, user_agent }[])*: Processors to run immediately after a processor failure. Each processor supports a processor-level `on_failure` value. If a processor without an `on_failure` value fails, Elasticsearch uses this pipeline-level parameter as a fallback. The processors in this parameter run sequentially in the order specified. Elasticsearch will not attempt to run the pipeline's remaining processors.
-** *`processors` (Optional, { append, attachment, bytes, circle, community_id, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, fingerprint, foreach, ip_location, geo_grid, geoip, grok, gsub, html_strip, inference, join, json, kv, lowercase, network_direction, pipeline, redact, registered_domain, remove, rename, reroute, script, set, set_security_user, sort, split, terminate, trim, uppercase, urldecode, uri_parts, user_agent }[])*: Processors used to perform transformations on documents before indexing. Processors run sequentially in the order specified. -** *`version` (Optional, number)*: Version number used by external systems to track ingest pipelines. This parameter is intended for external systems only. Elasticsearch does not use or validate pipeline version numbers. -** *`deprecated` (Optional, boolean)*: Marks this ingest pipeline as deprecated. -When a deprecated ingest pipeline is referenced as the default or final pipeline when creating or updating a non-deprecated index template, Elasticsearch will emit a deprecation warning. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. -** *`if_version` (Optional, number)*: Required version for optimistic concurrency control for pipeline updates - -[discrete] -==== simulate -Simulate a pipeline. -Run an ingest pipeline against a set of provided documents. -You can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request. - -{ref}/simulate-pipeline-api.html[Endpoint documentation] -[source,ts] ----- -client.ingest.simulate({ docs }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`docs` ({ _id, _index, _source }[])*: Sample documents to test in the pipeline. -** *`id` (Optional, string)*: Pipeline to test. -If you don’t specify a `pipeline` in the request body, this parameter is required. -** *`pipeline` (Optional, { description, on_failure, processors, version, deprecated, _meta })*: Pipeline to test. -If you don’t specify the `pipeline` request path parameter, this parameter is required. -If you specify both this and the request path parameter, the API only uses the request path parameter. -** *`verbose` (Optional, boolean)*: If `true`, the response includes output data for each processor in the executed pipeline. - -[discrete] -=== license -[discrete] -==== delete -Deletes licensing information for the cluster - -{ref}/delete-license.html[Endpoint documentation] -[source,ts] ----- -client.license.delete() ----- - - -[discrete] -==== get -Get license information. -Returns information about your Elastic license, including its type, its status, when it was issued, and when it expires. -For more information about the different types of licenses, refer to [Elastic Stack subscriptions](https://www.elastic.co/subscriptions). - -{ref}/get-license.html[Endpoint documentation] -[source,ts] ----- -client.license.get({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`accept_enterprise` (Optional, boolean)*: If `true`, this parameter returns enterprise for Enterprise license types. If `false`, this parameter returns platinum for both platinum and enterprise license types. 
This behavior is maintained for backwards compatibility. -This parameter is deprecated and will always be set to true in 8.x. -** *`local` (Optional, boolean)*: Specifies whether to retrieve local information. The default value is `false`, which means the information is retrieved from the master node. - -[discrete] -==== get_basic_status -Retrieves information about the status of the basic license. - -{ref}/get-basic-status.html[Endpoint documentation] -[source,ts] ----- -client.license.getBasicStatus() ----- - - -[discrete] -==== get_trial_status -Retrieves information about the status of the trial license. - -{ref}/get-trial-status.html[Endpoint documentation] -[source,ts] ----- -client.license.getTrialStatus() ----- - - -[discrete] -==== post -Updates the license for the cluster. - -{ref}/update-license.html[Endpoint documentation] -[source,ts] ----- -client.license.post({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`license` (Optional, { expiry_date_in_millis, issue_date_in_millis, start_date_in_millis, issued_to, issuer, max_nodes, max_resource_units, signature, type, uid })* -** *`licenses` (Optional, { expiry_date_in_millis, issue_date_in_millis, start_date_in_millis, issued_to, issuer, max_nodes, max_resource_units, signature, type, uid }[])*: A sequence of one or more JSON documents containing the license information. -** *`acknowledge` (Optional, boolean)*: Specifies whether you acknowledge the license changes. - -[discrete] -==== post_start_basic -The start basic API enables you to initiate an indefinite basic license, which gives access to all the basic features. If the basic license does not support all of the features that are available with your current license, however, you are notified in the response. You must then re-submit the API request with the acknowledge parameter set to true. -To check the status of your basic license, use the following API: [Get basic status](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-basic-status.html). - -{ref}/start-basic.html[Endpoint documentation] -[source,ts] ----- -client.license.postStartBasic({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`acknowledge` (Optional, boolean)*: whether the user has acknowledged acknowledge messages (default: false) - -[discrete] -==== post_start_trial -The start trial API enables you to start a 30-day trial, which gives access to all subscription features. - -{ref}/start-trial.html[Endpoint documentation] -[source,ts] ----- -client.license.postStartTrial({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`acknowledge` (Optional, boolean)*: whether the user has acknowledged acknowledge messages (default: false) -** *`type_query_string` (Optional, string)* - -[discrete] -=== logstash -[discrete] -==== delete_pipeline -Deletes a pipeline used for Logstash Central Management. - -{ref}/logstash-api-delete-pipeline.html[Endpoint documentation] -[source,ts] ----- -client.logstash.deletePipeline({ id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: Identifier for the pipeline. - -[discrete] -==== get_pipeline -Retrieves pipelines used for Logstash Central Management. - -{ref}/logstash-api-get-pipeline.html[Endpoint documentation] -[source,ts] ----- -client.logstash.getPipeline({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (Optional, string | string[])*: List of pipeline identifiers. 
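-
-For example, a minimal sketch of retrieving a single centrally managed pipeline; the `my-pipeline` identifier and the local node URL are illustrative assumptions, not values from this reference:
-
-[source,ts]
-----
-// Sketch only: assumes Elasticsearch is reachable at the given node URL
-// and that a pipeline with the hypothetical ID 'my-pipeline' exists.
-const { Client } = require('@elastic/elasticsearch')
-const client = new Client({ node: 'http://localhost:9200' })
-
-async function run () {
-  const pipelines = await client.logstash.getPipeline({ id: 'my-pipeline' })
-  console.log(pipelines)
-}
-
-run().catch(console.log)
-----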
-
-[discrete]
-==== put_pipeline
-Creates or updates a pipeline used for Logstash Central Management.
-
-{ref}/logstash-api-put-pipeline.html[Endpoint documentation]
-[source,ts]
-----
-client.logstash.putPipeline({ id })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`id` (string)*: Identifier for the pipeline.
-** *`pipeline` (Optional, { description, on_failure, processors, version, deprecated, _meta })*
-
-[discrete]
-=== migration
-[discrete]
-==== deprecations
-Retrieves information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version.
-
-{ref}/migration-api-deprecation.html[Endpoint documentation]
-[source,ts]
-----
-client.migration.deprecations({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (Optional, string)*: Comma-separated list of data streams or indices to check. Wildcard (*) expressions are supported.
-
-[discrete]
-==== get_feature_upgrade_status
-Find out whether system features need to be upgraded or not.
-
-{ref}/migration-api-feature-upgrade.html[Endpoint documentation]
-[source,ts]
-----
-client.migration.getFeatureUpgradeStatus()
-----
-
-
-[discrete]
-==== post_feature_upgrade
-Begin upgrades for system features.
-
-{ref}/migration-api-feature-upgrade.html[Endpoint documentation]
-[source,ts]
-----
-client.migration.postFeatureUpgrade()
-----
-
-
-[discrete]
-=== ml
-[discrete]
-==== clear_trained_model_deployment_cache
-Clear trained model deployment cache.
-Cache will be cleared on all nodes where the trained model is assigned.
-A trained model deployment may have an inference cache enabled.
-As requests are handled by each allocated node, their responses may be cached on that individual node.
-Calling this API clears the caches without restarting the deployment.
-
-{ref}/clear-trained-model-deployment-cache.html[Endpoint documentation]
-[source,ts]
-----
-client.ml.clearTrainedModelDeploymentCache({ model_id })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`model_id` (string)*: The unique identifier of the trained model.
-
-[discrete]
-==== close_job
-Close anomaly detection jobs.
-A job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results.
-When you close a job, it runs housekeeping tasks such as pruning the model history, flushing buffers, calculating final results and persisting the model snapshots. Depending upon the size of the job, it could take several minutes to close and the equivalent time to re-open. After it is closed, the job has a minimal overhead on the cluster except for maintaining its metadata. Therefore it is a best practice to close jobs that are no longer required to process data.
-If you close an anomaly detection job whose datafeed is running, the request first tries to stop the datafeed. This behavior is equivalent to calling stop datafeed API with the same timeout and force parameters as the close job request.
-When a datafeed that has a specified end date stops, it automatically closes its associated job.
-
-{ref}/ml-close-job.html[Endpoint documentation]
-[source,ts]
-----
-client.ml.closeJob({ job_id })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`job_id` (string)*: Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression.
You can close multiple anomaly detection jobs in a single API request by using a group name, a list of jobs, or a wildcard expression. You can close all jobs by using `_all` or by specifying `*` as the job identifier.
-** *`allow_no_match` (Optional, boolean)*: Refer to the description for the `allow_no_match` query parameter.
-** *`force` (Optional, boolean)*: Refer to the description for the `force` query parameter.
-** *`timeout` (Optional, string | -1 | 0)*: Refer to the description for the `timeout` query parameter.
-
-[discrete]
-==== delete_calendar
-Delete a calendar.
-Removes all scheduled events from a calendar, then deletes it.
-
-{ref}/ml-delete-calendar.html[Endpoint documentation]
-[source,ts]
-----
-client.ml.deleteCalendar({ calendar_id })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`calendar_id` (string)*: A string that uniquely identifies a calendar.
-
-[discrete]
-==== delete_calendar_event
-Delete events from a calendar.
-
-{ref}/ml-delete-calendar-event.html[Endpoint documentation]
-[source,ts]
-----
-client.ml.deleteCalendarEvent({ calendar_id, event_id })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`calendar_id` (string)*: A string that uniquely identifies a calendar.
-** *`event_id` (string)*: Identifier for the scheduled event.
-You can obtain this identifier by using the get calendar events API.
-
-[discrete]
-==== delete_calendar_job
-Delete anomaly jobs from a calendar.
-
-{ref}/ml-delete-calendar-job.html[Endpoint documentation]
-[source,ts]
-----
-client.ml.deleteCalendarJob({ calendar_id, job_id })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`calendar_id` (string)*: A string that uniquely identifies a calendar.
-** *`job_id` (string | string[])*: An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a
-list of jobs or groups.
-
-[discrete]
-==== delete_data_frame_analytics
-Delete a data frame analytics job.
-
-{ref}/delete-dfanalytics.html[Endpoint documentation]
-[source,ts]
-----
-client.ml.deleteDataFrameAnalytics({ id })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`id` (string)*: Identifier for the data frame analytics job.
-** *`force` (Optional, boolean)*: If `true`, it deletes a job that is not stopped; this method is quicker than stopping and deleting the job.
-** *`timeout` (Optional, string | -1 | 0)*: The time to wait for the job to be deleted.
-
-[discrete]
-==== delete_datafeed
-Delete a datafeed.
-
-{ref}/ml-delete-datafeed.html[Endpoint documentation]
-[source,ts]
-----
-client.ml.deleteDatafeed({ datafeed_id })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`datafeed_id` (string)*: A numerical character string that uniquely identifies the datafeed. This
-identifier can contain lowercase alphanumeric characters (a-z and 0-9),
-hyphens, and underscores. It must start and end with alphanumeric
-characters.
-** *`force` (Optional, boolean)*: Use to forcefully delete a started datafeed; this method is quicker than
-stopping and deleting the datafeed.
-
-[discrete]
-==== delete_expired_data
-Delete expired ML data.
-Deletes all job results, model snapshots and forecast data that have exceeded
-their retention days period. Machine learning state documents that are not
-associated with any job are also deleted.
-You can limit the request to a single or set of anomaly detection jobs by
-using a job identifier, a group name, a comma-separated list of jobs, or a
-wildcard expression.
You can delete expired data for all anomaly detection
-jobs by using _all, by specifying * as the <job_id>, or by omitting the
-<job_id>.
-
-{ref}/ml-delete-expired-data.html[Endpoint documentation]
-[source,ts]
-----
-client.ml.deleteExpiredData({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`job_id` (Optional, string)*: Identifier for an anomaly detection job. It can be a job identifier, a
-group name, or a wildcard expression.
-** *`requests_per_second` (Optional, float)*: The desired requests per second for the deletion processes. The default
-behavior is no throttling.
-** *`timeout` (Optional, string | -1 | 0)*: How long the underlying delete processes can run until they are canceled.
-
-[discrete]
-==== delete_filter
-Delete a filter.
-If an anomaly detection job references the filter, you cannot delete the
-filter. You must update or delete the job before you can delete the filter.
-
-{ref}/ml-delete-filter.html[Endpoint documentation]
-[source,ts]
-----
-client.ml.deleteFilter({ filter_id })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`filter_id` (string)*: A string that uniquely identifies a filter.
-
-[discrete]
-==== delete_forecast
-Delete forecasts from a job.
-By default, forecasts are retained for 14 days. You can specify a
-different retention period with the `expires_in` parameter in the forecast
-jobs API. The delete forecast API enables you to delete one or more
-forecasts before they expire.
-
-{ref}/ml-delete-forecast.html[Endpoint documentation]
-[source,ts]
-----
-client.ml.deleteForecast({ job_id })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`job_id` (string)*: Identifier for the anomaly detection job.
-** *`forecast_id` (Optional, string)*: A list of forecast identifiers. If you do not specify
-this optional parameter or if you specify `_all` or `*` the API deletes
-all forecasts from the job.
-** *`allow_no_forecasts` (Optional, boolean)*: Specifies whether an error occurs when there are no forecasts. In
-particular, if this parameter is set to `false` and there are no
-forecasts associated with the job, attempts to delete all forecasts
-return an error.
-** *`timeout` (Optional, string | -1 | 0)*: Specifies the period of time to wait for the completion of the delete
-operation. When this period of time elapses, the API fails and returns an
-error.
-
-[discrete]
-==== delete_job
-Delete an anomaly detection job.
-All job configuration, model state and results are deleted.
-It is not currently possible to delete multiple jobs using wildcards or a
-comma-separated list. If you delete a job that has a datafeed, the request
-first tries to delete the datafeed. This behavior is equivalent to calling
-the delete datafeed API with the same timeout and force parameters as the
-delete job request.
-
-{ref}/ml-delete-job.html[Endpoint documentation]
-[source,ts]
-----
-client.ml.deleteJob({ job_id })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`job_id` (string)*: Identifier for the anomaly detection job.
-** *`force` (Optional, boolean)*: Use to forcefully delete an opened job; this method is quicker than
-closing and deleting the job.
-** *`delete_user_annotations` (Optional, boolean)*: Specifies whether annotations that have been added by the
-user should be deleted along with any auto-generated annotations when the job is
-reset.
-** *`wait_for_completion` (Optional, boolean)*: Specifies whether the request should return immediately or wait until the
-job deletion completes.
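-
-For example, a minimal sketch of deleting a job and waiting for the deletion to finish; the `total-requests` job ID and the node URL are illustrative assumptions, not values from this reference:
-
-[source,ts]
-----
-// Sketch only: assumes a reachable cluster and a hypothetical job named 'total-requests'.
-const { Client } = require('@elastic/elasticsearch')
-const client = new Client({ node: 'http://localhost:9200' })
-
-async function run () {
-  // Delete the job and block until the deletion has completed.
-  const response = await client.ml.deleteJob({
-    job_id: 'total-requests',
-    wait_for_completion: true
-  })
-  console.log(response)
-}
-
-run().catch(console.log)
-----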
- -[discrete] -==== delete_model_snapshot -Delete a model snapshot. -You cannot delete the active model snapshot. To delete that snapshot, first -revert to a different one. To identify the active model snapshot, refer to -the `model_snapshot_id` in the results from the get jobs API. - -{ref}/ml-delete-snapshot.html[Endpoint documentation] -[source,ts] ----- -client.ml.deleteModelSnapshot({ job_id, snapshot_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`job_id` (string)*: Identifier for the anomaly detection job. -** *`snapshot_id` (string)*: Identifier for the model snapshot. - -[discrete] -==== delete_trained_model -Delete an unreferenced trained model. -The request deletes a trained inference model that is not referenced by an ingest pipeline. - -{ref}/delete-trained-models.html[Endpoint documentation] -[source,ts] ----- -client.ml.deleteTrainedModel({ model_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`model_id` (string)*: The unique identifier of the trained model. -** *`force` (Optional, boolean)*: Forcefully deletes a trained model that is referenced by ingest pipelines or has a started deployment. - -[discrete] -==== delete_trained_model_alias -Delete a trained model alias. -This API deletes an existing model alias that refers to a trained model. If -the model alias is missing or refers to a model other than the one identified -by the `model_id`, this API returns an error. - -{ref}/delete-trained-models-aliases.html[Endpoint documentation] -[source,ts] ----- -client.ml.deleteTrainedModelAlias({ model_alias, model_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`model_alias` (string)*: The model alias to delete. -** *`model_id` (string)*: The trained model ID to which the model alias refers. - -[discrete] -==== estimate_model_memory -Estimate job model memory usage. -Makes an estimation of the memory usage for an anomaly detection job model. -It is based on analysis configuration details for the job and cardinality -estimates for the fields it references. - -{ref}/ml-apis.html[Endpoint documentation] -[source,ts] ----- -client.ml.estimateModelMemory({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`analysis_config` (Optional, { bucket_span, categorization_analyzer, categorization_field_name, categorization_filters, detectors, influencers, latency, model_prune_window, multivariate_by_fields, per_partition_categorization, summary_count_field_name })*: For a list of the properties that you can specify in the -`analysis_config` component of the body of this API. -** *`max_bucket_cardinality` (Optional, Record<string, number>)*: Estimates of the highest cardinality in a single bucket that is observed -for influencer fields over the time period that the job analyzes data. -To produce a good answer, values must be provided for all influencer -fields. Providing values for fields that are not listed as `influencers` -has no effect on the estimation. -** *`overall_cardinality` (Optional, Record<string, number>)*: Estimates of the cardinality that is observed for fields over the whole -time period that the job analyzes data. To produce a good answer, values -must be provided for fields referenced in the `by_field_name`, -`over_field_name` and `partition_field_name` of any detectors. Providing -values for other fields has no effect on the estimation. It can be -omitted from the request if no detectors have a `by_field_name`, -`over_field_name` or `partition_field_name`. 
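-
-For example, a minimal sketch of requesting a model memory estimate; the detector, field names, and cardinality figure below are illustrative assumptions, not values taken from this reference:
-
-[source,ts]
-----
-// Sketch only: assumes a reachable cluster; the analysis configuration is hypothetical.
-const { Client } = require('@elastic/elasticsearch')
-const client = new Client({ node: 'http://localhost:9200' })
-
-async function run () {
-  // Estimate memory for a job that computes a mean per client IP; the
-  // overall cardinality figure is an estimate supplied by the caller.
-  const estimate = await client.ml.estimateModelMemory({
-    analysis_config: {
-      bucket_span: '15m',
-      detectors: [
-        { function: 'mean', field_name: 'responsetime', by_field_name: 'clientip' }
-      ],
-      influencers: ['clientip']
-    },
-    overall_cardinality: { clientip: 50000 }
-  })
-  console.log(estimate.model_memory_estimate)
-}
-
-run().catch(console.log)
-----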
- -[discrete] -==== evaluate_data_frame -Evaluate data frame analytics. -The API packages together commonly used evaluation metrics for various types -of machine learning features. This has been designed for use on indexes -created by data frame analytics. Evaluation requires both a ground truth -field and an analytics result field to be present. - -{ref}/evaluate-dfanalytics.html[Endpoint documentation] -[source,ts] ----- -client.ml.evaluateDataFrame({ evaluation, index }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`evaluation` ({ classification, outlier_detection, regression })*: Defines the type of evaluation you want to perform. -** *`index` (string)*: Defines the `index` in which the evaluation will be performed. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: A query clause that retrieves a subset of data from the source index. - -[discrete] -==== explain_data_frame_analytics -Explain data frame analytics config. -This API provides explanations for a data frame analytics config that either -exists already or one that has not been created yet. The following -explanations are provided: -* which fields are included or not in the analysis and why, -* how much memory is estimated to be required. The estimate can be used when deciding the appropriate value for model_memory_limit setting later on. -If you have object fields or fields that are excluded via source filtering, they are not included in the explanation. - -{ref}/explain-dfanalytics.html[Endpoint documentation] -[source,ts] ----- -client.ml.explainDataFrameAnalytics({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (Optional, string)*: Identifier for the data frame analytics job. This identifier can contain -lowercase alphanumeric characters (a-z and 0-9), hyphens, and -underscores. It must start and end with alphanumeric characters. -** *`source` (Optional, { index, query, runtime_mappings, _source })*: The configuration of how to source the analysis data. It requires an -index. Optionally, query and _source may be specified. -** *`dest` (Optional, { index, results_field })*: The destination configuration, consisting of index and optionally -results_field (ml by default). -** *`analysis` (Optional, { classification, outlier_detection, regression })*: The analysis configuration, which contains the information necessary to -perform one of the following types of analysis: classification, outlier -detection, or regression. -** *`description` (Optional, string)*: A description of the job. -** *`model_memory_limit` (Optional, string)*: The approximate maximum amount of memory resources that are permitted for -analytical processing. 
If your `elasticsearch.yml` file contains an
-`xpack.ml.max_model_memory_limit` setting, an error occurs when you try to
-create data frame analytics jobs that have `model_memory_limit` values
-greater than that setting.
-** *`max_num_threads` (Optional, number)*: The maximum number of threads to be used by the analysis. Using more
-threads may decrease the time necessary to complete the analysis at the
-cost of using more CPU. Note that the process may use additional threads
-for operational functionality other than the analysis itself.
-** *`analyzed_fields` (Optional, { includes, excludes })*: Specify includes and/or excludes patterns to select which fields will be
-included in the analysis. The patterns specified in excludes are applied
-last, therefore excludes takes precedence. In other words, if the same
-field is specified in both includes and excludes, then the field will not
-be included in the analysis.
-** *`allow_lazy_start` (Optional, boolean)*: Specifies whether this job can start when there is insufficient machine
-learning node capacity for it to be immediately assigned to a node.
-
-[discrete]
-==== flush_job
-Force buffered data to be processed.
-The flush jobs API is only applicable when sending data for analysis using
-the post data API. Depending on the content of the buffer, it might
-additionally calculate new results. Both flush and close operations are
-similar; however, the flush is more efficient if you are expecting to send
-more data for analysis. When flushing, the job remains open and is available
-to continue analyzing data. A close operation additionally prunes and
-persists the model state to disk and the job must be opened again before
-analyzing further data.
-
-{ref}/ml-flush-job.html[Endpoint documentation]
-[source,ts]
-----
-client.ml.flushJob({ job_id })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`job_id` (string)*: Identifier for the anomaly detection job.
-** *`advance_time` (Optional, string | Unit)*: Refer to the description for the `advance_time` query parameter.
-** *`calc_interim` (Optional, boolean)*: Refer to the description for the `calc_interim` query parameter.
-** *`end` (Optional, string | Unit)*: Refer to the description for the `end` query parameter.
-** *`skip_time` (Optional, string | Unit)*: Refer to the description for the `skip_time` query parameter.
-** *`start` (Optional, string | Unit)*: Refer to the description for the `start` query parameter.
-
-[discrete]
-==== forecast
-Predict future behavior of a time series.
-
-Forecasts are not supported for jobs that perform population analysis; an
-error occurs if you try to create a forecast for a job that has an
-`over_field_name` in its configuration. Forecasts predict future behavior
-based on historical data.
-
-{ref}/ml-forecast.html[Endpoint documentation]
-[source,ts]
-----
-client.ml.forecast({ job_id })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`job_id` (string)*: Identifier for the anomaly detection job. The job must be open when you
-create a forecast; otherwise, an error occurs.
-** *`duration` (Optional, string | -1 | 0)*: Refer to the description for the `duration` query parameter.
-** *`expires_in` (Optional, string | -1 | 0)*: Refer to the description for the `expires_in` query parameter.
-** *`max_model_memory` (Optional, string)*: Refer to the description for the `max_model_memory` query parameter.
-
-[discrete]
-==== get_buckets
-Get anomaly detection job results for buckets.
-The API presents a chronological view of the records, grouped by bucket.
-
-{ref}/ml-get-bucket.html[Endpoint documentation]
-[source,ts]
-----
-client.ml.getBuckets({ job_id })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`job_id` (string)*: Identifier for the anomaly detection job.
-** *`timestamp` (Optional, string | Unit)*: The timestamp of a single bucket result. If you do not specify this
-parameter, the API returns information about all buckets.
-** *`anomaly_score` (Optional, number)*: Refer to the description for the `anomaly_score` query parameter.
-** *`desc` (Optional, boolean)*: Refer to the description for the `desc` query parameter.
-** *`end` (Optional, string | Unit)*: Refer to the description for the `end` query parameter.
-** *`exclude_interim` (Optional, boolean)*: Refer to the description for the `exclude_interim` query parameter.
-** *`expand` (Optional, boolean)*: Refer to the description for the `expand` query parameter.
-** *`page` (Optional, { from, size })*
-** *`sort` (Optional, string)*: Refer to the description for the `sort` query parameter.
-** *`start` (Optional, string | Unit)*: Refer to the description for the `start` query parameter.
-** *`from` (Optional, number)*: Skips the specified number of buckets.
-** *`size` (Optional, number)*: Specifies the maximum number of buckets to obtain.
-
-[discrete]
-==== get_calendar_events
-Get info about events in calendars.
-
-{ref}/ml-get-calendar-event.html[Endpoint documentation]
-[source,ts]
-----
-client.ml.getCalendarEvents({ calendar_id })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`calendar_id` (string)*: A string that uniquely identifies a calendar. You can get information for multiple calendars by using a list of ids or a wildcard expression. You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier.
-** *`end` (Optional, string | Unit)*: Specifies to get events with timestamps earlier than this time.
-** *`from` (Optional, number)*: Skips the specified number of events.
-** *`job_id` (Optional, string)*: Specifies to get events for a specific anomaly detection job identifier or job group. It must be used with a calendar identifier of `_all` or `*`.
-** *`size` (Optional, number)*: Specifies the maximum number of events to obtain.
-** *`start` (Optional, string | Unit)*: Specifies to get events with timestamps after this time.
-
-[discrete]
-==== get_calendars
-Get calendar configuration info.
-
-{ref}/ml-get-calendar.html[Endpoint documentation]
-[source,ts]
-----
-client.ml.getCalendars({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`calendar_id` (Optional, string)*: A string that uniquely identifies a calendar. You can get information for multiple calendars by using a list of ids or a wildcard expression. You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier.
-** *`page` (Optional, { from, size })*: This object is supported only when you omit the calendar identifier.
-** *`from` (Optional, number)*: Skips the specified number of calendars. This parameter is supported only when you omit the calendar identifier.
-** *`size` (Optional, number)*: Specifies the maximum number of calendars to obtain. This parameter is supported only when you omit the calendar identifier.
-
-[discrete]
-==== get_categories
-Get anomaly detection job results for categories.
- -{ref}/ml-get-category.html[Endpoint documentation] -[source,ts] ----- -client.ml.getCategories({ job_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`job_id` (string)*: Identifier for the anomaly detection job. -** *`category_id` (Optional, string)*: Identifier for the category, which is unique in the job. If you specify -neither the category ID nor the partition_field_value, the API returns -information about all categories. If you specify only the -partition_field_value, it returns information about all categories for -the specified partition. -** *`page` (Optional, { from, size })*: Configures pagination. -This parameter has the `from` and `size` properties. -** *`from` (Optional, number)*: Skips the specified number of categories. -** *`partition_field_value` (Optional, string)*: Only return categories for the specified partition. -** *`size` (Optional, number)*: Specifies the maximum number of categories to obtain. - -[discrete] -==== get_data_frame_analytics -Get data frame analytics job configuration info. -You can get information for multiple data frame analytics jobs in a single -API request by using a comma-separated list of data frame analytics jobs or a -wildcard expression. - -{ref}/get-dfanalytics.html[Endpoint documentation] -[source,ts] ----- -client.ml.getDataFrameAnalytics({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (Optional, string)*: Identifier for the data frame analytics job. If you do not specify this -option, the API returns information for the first hundred data frame -analytics jobs. -** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: - -1. Contains wildcard expressions and there are no data frame analytics -jobs that match. -2. Contains the `_all` string or no identifiers and there are no matches. -3. Contains wildcard expressions and there are only partial matches. - -The default value returns an empty data_frame_analytics array when there -are no matches and the subset of results when there are partial matches. -If this parameter is `false`, the request returns a 404 status code when -there are no matches or only partial matches. -** *`from` (Optional, number)*: Skips the specified number of data frame analytics jobs. -** *`size` (Optional, number)*: Specifies the maximum number of data frame analytics jobs to obtain. -** *`exclude_generated` (Optional, boolean)*: Indicates if certain fields should be removed from the configuration on -retrieval. This allows the configuration to be in an acceptable format to -be retrieved and then added to another cluster. - -[discrete] -==== get_data_frame_analytics_stats -Get data frame analytics jobs usage info. - -{ref}/get-dfanalytics-stats.html[Endpoint documentation] -[source,ts] ----- -client.ml.getDataFrameAnalyticsStats({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (Optional, string)*: Identifier for the data frame analytics job. If you do not specify this -option, the API returns information for the first hundred data frame -analytics jobs. -** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: - -1. Contains wildcard expressions and there are no data frame analytics -jobs that match. -2. Contains the `_all` string or no identifiers and there are no matches. -3. Contains wildcard expressions and there are only partial matches. 
- -The default value returns an empty data_frame_analytics array when there -are no matches and the subset of results when there are partial matches. -If this parameter is `false`, the request returns a 404 status code when -there are no matches or only partial matches. -** *`from` (Optional, number)*: Skips the specified number of data frame analytics jobs. -** *`size` (Optional, number)*: Specifies the maximum number of data frame analytics jobs to obtain. -** *`verbose` (Optional, boolean)*: Defines whether the stats response should be verbose. - -[discrete] -==== get_datafeed_stats -Get datafeeds usage info. -You can get statistics for multiple datafeeds in a single API request by -using a comma-separated list of datafeeds or a wildcard expression. You can -get statistics for all datafeeds by using `_all`, by specifying `*` as the -`<feed_id>`, or by omitting the `<feed_id>`. If the datafeed is stopped, the -only information you receive is the `datafeed_id` and the `state`. -This API returns a maximum of 10,000 datafeeds. - -{ref}/ml-get-datafeed-stats.html[Endpoint documentation] -[source,ts] ----- -client.ml.getDatafeedStats({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`datafeed_id` (Optional, string | string[])*: Identifier for the datafeed. It can be a datafeed identifier or a -wildcard expression. If you do not specify one of these options, the API -returns information about all datafeeds. -** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: - -1. Contains wildcard expressions and there are no datafeeds that match. -2. Contains the `_all` string or no identifiers and there are no matches. -3. Contains wildcard expressions and there are only partial matches. - -The default value is `true`, which returns an empty `datafeeds` array -when there are no matches and the subset of results when there are -partial matches. If this parameter is `false`, the request returns a -`404` status code when there are no matches or only partial matches. - -[discrete] -==== get_datafeeds -Get datafeeds configuration info. -You can get information for multiple datafeeds in a single API request by -using a comma-separated list of datafeeds or a wildcard expression. You can -get information for all datafeeds by using `_all`, by specifying `*` as the -`<feed_id>`, or by omitting the `<feed_id>`. -This API returns a maximum of 10,000 datafeeds. - -{ref}/ml-get-datafeed.html[Endpoint documentation] -[source,ts] ----- -client.ml.getDatafeeds({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`datafeed_id` (Optional, string | string[])*: Identifier for the datafeed. It can be a datafeed identifier or a -wildcard expression. If you do not specify one of these options, the API -returns information about all datafeeds. -** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: - -1. Contains wildcard expressions and there are no datafeeds that match. -2. Contains the `_all` string or no identifiers and there are no matches. -3. Contains wildcard expressions and there are only partial matches. - -The default value is `true`, which returns an empty `datafeeds` array -when there are no matches and the subset of results when there are -partial matches. If this parameter is `false`, the request returns a -`404` status code when there are no matches or only partial matches. -** *`exclude_generated` (Optional, boolean)*: Indicates if certain fields should be removed from the configuration on -retrieval. 
This allows the configuration to be in an acceptable format to -be retrieved and then added to another cluster. - -[discrete] -==== get_filters -Get filters. -You can get a single filter or all filters. - -{ref}/ml-get-filter.html[Endpoint documentation] -[source,ts] ----- -client.ml.getFilters({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`filter_id` (Optional, string | string[])*: A string that uniquely identifies a filter. -** *`from` (Optional, number)*: Skips the specified number of filters. -** *`size` (Optional, number)*: Specifies the maximum number of filters to obtain. - -[discrete] -==== get_influencers -Get anomaly detection job results for influencers. -Influencers are the entities that have contributed to, or are to blame for, -the anomalies. Influencer results are available only if an -`influencer_field_name` is specified in the job configuration. - -{ref}/ml-get-influencer.html[Endpoint documentation] -[source,ts] ----- -client.ml.getInfluencers({ job_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`job_id` (string)*: Identifier for the anomaly detection job. -** *`page` (Optional, { from, size })*: Configures pagination. -This parameter has the `from` and `size` properties. -** *`desc` (Optional, boolean)*: If true, the results are sorted in descending order. -** *`end` (Optional, string | Unit)*: Returns influencers with timestamps earlier than this time. -The default value means it is unset and results are not limited to -specific timestamps. -** *`exclude_interim` (Optional, boolean)*: If true, the output excludes interim results. By default, interim results -are included. -** *`influencer_score` (Optional, number)*: Returns influencers with anomaly scores greater than or equal to this -value. -** *`from` (Optional, number)*: Skips the specified number of influencers. -** *`size` (Optional, number)*: Specifies the maximum number of influencers to obtain. -** *`sort` (Optional, string)*: Specifies the sort field for the requested influencers. By default, the -influencers are sorted by the `influencer_score` value. -** *`start` (Optional, string | Unit)*: Returns influencers with timestamps after this time. The default value -means it is unset and results are not limited to specific timestamps. - -[discrete] -==== get_job_stats -Get anomaly detection jobs usage info. - -{ref}/ml-get-job-stats.html[Endpoint documentation] -[source,ts] ----- -client.ml.getJobStats({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`job_id` (Optional, string)*: Identifier for the anomaly detection job. It can be a job identifier, a -group name, a list of jobs, or a wildcard expression. If -you do not specify one of these options, the API returns information for -all anomaly detection jobs. -** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: - -1. Contains wildcard expressions and there are no jobs that match. -2. Contains the _all string or no identifiers and there are no matches. -3. Contains wildcard expressions and there are only partial matches. - -If `true`, the API returns an empty `jobs` array when -there are no matches and the subset of results when there are partial -matches. If `false`, the API returns a `404` status -code when there are no matches or only partial matches. - -[discrete] -==== get_jobs -Get anomaly detection jobs configuration info. 
-You can get information for multiple anomaly detection jobs in a single API -request by using a group name, a comma-separated list of jobs, or a wildcard -expression. You can get information for all anomaly detection jobs by using -`_all`, by specifying `*` as the `<job_id>`, or by omitting the `<job_id>`. - -{ref}/ml-get-job.html[Endpoint documentation] -[source,ts] ----- -client.ml.getJobs({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`job_id` (Optional, string | string[])*: Identifier for the anomaly detection job. It can be a job identifier, a -group name, or a wildcard expression. If you do not specify one of these -options, the API returns information for all anomaly detection jobs. -** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: - -1. Contains wildcard expressions and there are no jobs that match. -2. Contains the _all string or no identifiers and there are no matches. -3. Contains wildcard expressions and there are only partial matches. - -The default value is `true`, which returns an empty `jobs` array when -there are no matches and the subset of results when there are partial -matches. If this parameter is `false`, the request returns a `404` status -code when there are no matches or only partial matches. -** *`exclude_generated` (Optional, boolean)*: Indicates if certain fields should be removed from the configuration on -retrieval. This allows the configuration to be in an acceptable format to -be retrieved and then added to another cluster. - -[discrete] -==== get_memory_stats -Get machine learning memory usage info. -Get information about how machine learning jobs and trained models are using memory, -on each node, both within the JVM heap, and natively, outside of the JVM. - -{ref}/get-ml-memory.html[Endpoint documentation] -[source,ts] ----- -client.ml.getMemoryStats({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`node_id` (Optional, string)*: The names of particular nodes in the cluster to target. For example, `nodeId1,nodeId2` or -`ml:true` -** *`human` (Optional, boolean)*: Specify this query parameter to include the fields with units in the response. Otherwise only -the `_in_bytes` sizes are returned in the response. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout -expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request -fails and returns an error. - -[discrete] -==== get_model_snapshot_upgrade_stats -Get anomaly detection job model snapshot upgrade usage info. - -{ref}/ml-get-job-model-snapshot-upgrade-stats.html[Endpoint documentation] -[source,ts] ----- -client.ml.getModelSnapshotUpgradeStats({ job_id, snapshot_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`job_id` (string)*: Identifier for the anomaly detection job. -** *`snapshot_id` (string)*: A numerical character string that uniquely identifies the model snapshot. You can get information for multiple -snapshots by using a list or a wildcard expression. You can get all snapshots by using `_all`, -by specifying `*` as the snapshot ID, or by omitting the snapshot ID. -** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: - - - Contains wildcard expressions and there are no jobs that match. 
- - Contains the _all string or no identifiers and there are no matches.
- - Contains wildcard expressions and there are only partial matches.
-
-The default value is true, which returns an empty jobs array when there are no matches and the subset of results
-when there are partial matches. If this parameter is false, the request returns a 404 status code when there are
-no matches or only partial matches.
-
-[discrete]
-==== get_model_snapshots
-Get model snapshots info.
-
-{ref}/ml-get-snapshot.html[Endpoint documentation]
-[source,ts]
-----
-client.ml.getModelSnapshots({ job_id })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`job_id` (string)*: Identifier for the anomaly detection job.
-** *`snapshot_id` (Optional, string)*: A numerical character string that uniquely identifies the model snapshot. You can get information for multiple
-snapshots by using a list or a wildcard expression. You can get all snapshots by using `_all`,
-by specifying `*` as the snapshot ID, or by omitting the snapshot ID.
-** *`desc` (Optional, boolean)*: Refer to the description for the `desc` query parameter.
-** *`end` (Optional, string | Unit)*: Refer to the description for the `end` query parameter.
-** *`page` (Optional, { from, size })*
-** *`sort` (Optional, string)*: Refer to the description for the `sort` query parameter.
-** *`start` (Optional, string | Unit)*: Refer to the description for the `start` query parameter.
-** *`from` (Optional, number)*: Skips the specified number of snapshots.
-** *`size` (Optional, number)*: Specifies the maximum number of snapshots to obtain.
-
-[discrete]
-==== get_overall_buckets
-Get overall bucket results.
-
-Retrieves overall bucket results that summarize the bucket results of
-multiple anomaly detection jobs.
-
-The `overall_score` is calculated by combining the scores of all the
-buckets within the overall bucket span. First, the maximum
-`anomaly_score` per anomaly detection job in the overall bucket is
-calculated. Then the `top_n` of those scores are averaged to result in
-the `overall_score`. This means that you can fine-tune the
-`overall_score` so that it is more or less sensitive to the number of
-jobs that detect an anomaly at the same time. For example, if you set
-`top_n` to `1`, the `overall_score` is the maximum bucket score in the
-overall bucket. Alternatively, if you set `top_n` to the number of jobs,
-the `overall_score` is high only when all jobs detect anomalies in that
-overall bucket. If you set the `bucket_span` parameter (to a value
-greater than its default), the `overall_score` is the maximum
-`overall_score` of the overall buckets that have a span equal to the
-jobs' largest bucket span.
-
-{ref}/ml-get-overall-buckets.html[Endpoint documentation]
-[source,ts]
-----
-client.ml.getOverallBuckets({ job_id })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`job_id` (string)*: Identifier for the anomaly detection job. It can be a job identifier, a
-group name, a list of jobs or groups, or a wildcard
-expression.
-
-You can summarize the bucket results for all anomaly detection jobs by
-using `_all` or by specifying `*` as the `<job_id>`.
-** *`allow_no_match` (Optional, boolean)*: Refer to the description for the `allow_no_match` query parameter.
-** *`bucket_span` (Optional, string | -1 | 0)*: Refer to the description for the `bucket_span` query parameter.
-** *`end` (Optional, string | Unit)*: Refer to the description for the `end` query parameter.
-** *`exclude_interim` (Optional, boolean)*: Refer to the description for the `exclude_interim` query parameter. -** *`overall_score` (Optional, number | string)*: Refer to the description for the `overall_score` query parameter. -** *`start` (Optional, string | Unit)*: Refer to the description for the `start` query parameter. -** *`top_n` (Optional, number)*: Refer to the description for the `top_n` query parameter. - -[discrete] -==== get_records -Get anomaly records for an anomaly detection job. -Records contain the detailed analytical results. They describe the anomalous -activity that has been identified in the input data based on the detector -configuration. -There can be many anomaly records depending on the characteristics and size -of the input data. In practice, there are often too many to be able to -manually process them. The machine learning features therefore perform a -sophisticated aggregation of the anomaly records into buckets. -The number of record results depends on the number of anomalies found in each -bucket, which relates to the number of time series being modeled and the -number of detectors. - -{ref}/ml-get-record.html[Endpoint documentation] -[source,ts] ----- -client.ml.getRecords({ job_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`job_id` (string)*: Identifier for the anomaly detection job. -** *`desc` (Optional, boolean)*: Refer to the description for the `desc` query parameter. -** *`end` (Optional, string | Unit)*: Refer to the description for the `end` query parameter. -** *`exclude_interim` (Optional, boolean)*: Refer to the description for the `exclude_interim` query parameter. -** *`page` (Optional, { from, size })* -** *`record_score` (Optional, number)*: Refer to the description for the `record_score` query parameter. -** *`sort` (Optional, string)*: Refer to the description for the `sort` query parameter. -** *`start` (Optional, string | Unit)*: Refer to the description for the `start` query parameter. -** *`from` (Optional, number)*: Skips the specified number of records. -** *`size` (Optional, number)*: Specifies the maximum number of records to obtain. - -[discrete] -==== get_trained_models -Get trained model configuration info. - -{ref}/get-trained-models.html[Endpoint documentation] -[source,ts] ----- -client.ml.getTrainedModels({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`model_id` (Optional, string | string[])*: The unique identifier of the trained model or a model alias. - -You can get information for multiple trained models in a single API -request by using a list of model IDs or a wildcard -expression. -** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: - -- Contains wildcard expressions and there are no models that match. -- Contains the _all string or no identifiers and there are no matches. -- Contains wildcard expressions and there are only partial matches. - -If true, it returns an empty array when there are no matches and the -subset of results when there are partial matches. -** *`decompress_definition` (Optional, boolean)*: Specifies whether the included model definition should be returned as a -JSON map (true) or in a custom compressed format (false). -** *`exclude_generated` (Optional, boolean)*: Indicates if certain fields should be removed from the configuration on -retrieval. This allows the configuration to be in an acceptable format to -be retrieved and then added to another cluster. 
-** *`from` (Optional, number)*: Skips the specified number of models.
-** *`include` (Optional, Enum("definition" | "feature_importance_baseline" | "hyperparameters" | "total_feature_importance" | "definition_status"))*: A comma-delimited string of optional fields to include in the response
-body.
-** *`size` (Optional, number)*: Specifies the maximum number of models to obtain.
-** *`tags` (Optional, string | string[])*: A comma-delimited string of tags. A trained model can have many tags, or
-none. When supplied, only trained models that contain all the supplied
-tags are returned.
-
-[discrete]
-==== get_trained_models_stats
-Get trained models usage info.
-You can get usage information for multiple trained
-models in a single API request by using a comma-separated list of model IDs or a wildcard expression.
-
-{ref}/get-trained-models-stats.html[Endpoint documentation]
-[source,ts]
-----
-client.ml.getTrainedModelsStats({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`model_id` (Optional, string | string[])*: The unique identifier of the trained model or a model alias. It can be a
-list or a wildcard expression.
-** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request:
-
-- Contains wildcard expressions and there are no models that match.
-- Contains the _all string or no identifiers and there are no matches.
-- Contains wildcard expressions and there are only partial matches.
-
-If true, it returns an empty array when there are no matches and the
-subset of results when there are partial matches.
-** *`from` (Optional, number)*: Skips the specified number of models.
-** *`size` (Optional, number)*: Specifies the maximum number of models to obtain.
-
-[discrete]
-==== infer_trained_model
-Evaluate a trained model.
-
-{ref}/infer-trained-model.html[Endpoint documentation]
-[source,ts]
-----
-client.ml.inferTrainedModel({ model_id, docs })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`model_id` (string)*: The unique identifier of the trained model.
-** *`docs` (Record<string, User-defined value>[])*: An array of objects to pass to the model for inference. The objects should contain a field matching your
-configured trained model input. Typically, for NLP models, the field name is `text_field`.
-Currently, for NLP models, only a single value is allowed.
-** *`inference_config` (Optional, { regression, classification, text_classification, zero_shot_classification, fill_mask, ner, pass_through, text_embedding, text_expansion, question_answering })*: The inference configuration updates to apply on the API call.
-** *`timeout` (Optional, string | -1 | 0)*: Controls the amount of time to wait for inference results.
-
-[discrete]
-==== info
-Return ML defaults and limits.
-Returns defaults and limits used by machine learning.
-This endpoint is designed to be used by a user interface that needs to fully
-understand machine learning configurations where some options are not
-specified, meaning that the defaults should be used. This endpoint may be
-used to find out what those defaults are. It also provides information about
-the maximum size of machine learning jobs that could run in the current
-cluster configuration.
-
-{ref}/get-ml-info.html[Endpoint documentation]
-[source,ts]
-----
-client.ml.info()
-----
-
-
-[discrete]
-==== open_job
-Open anomaly detection jobs.
-An anomaly detection job must be opened to be ready to receive and analyze
-data. It can be opened and closed multiple times throughout its lifecycle.
-When you open a new job, it starts with an empty model.
-When you open an existing job, the most recent model state is automatically
-loaded. The job is ready to resume its analysis from where it left off, once
-new data is received.
-
-{ref}/ml-open-job.html[Endpoint documentation]
-[source,ts]
-----
-client.ml.openJob({ job_id })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`job_id` (string)*: Identifier for the anomaly detection job.
-** *`timeout` (Optional, string | -1 | 0)*: Refer to the description for the `timeout` query parameter.
-
-[discrete]
-==== post_calendar_events
-Add scheduled events to the calendar.
-
-{ref}/ml-post-calendar-event.html[Endpoint documentation]
-[source,ts]
-----
-client.ml.postCalendarEvents({ calendar_id, events })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`calendar_id` (string)*: A string that uniquely identifies a calendar.
-** *`events` ({ calendar_id, event_id, description, end_time, start_time, skip_result, skip_model_update, force_time_shift }[])*: A list of one or more scheduled events. The event’s start and end times can be specified as integer milliseconds since the epoch or as a string in ISO 8601 format.
-
-[discrete]
-==== post_data
-Send data to an anomaly detection job for analysis.
-
-IMPORTANT: For each job, data can be accepted from only a single connection at a time.
-It is not currently possible to post data to multiple jobs using wildcards or a comma-separated list.
-
-{ref}/ml-post-data.html[Endpoint documentation]
-[source,ts]
-----
-client.ml.postData({ job_id })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`job_id` (string)*: Identifier for the anomaly detection job. The job must have a state of open to receive and process the data.
-** *`data` (Optional, TData[])*
-** *`reset_end` (Optional, string | Unit)*: Specifies the end of the bucket resetting range.
-** *`reset_start` (Optional, string | Unit)*: Specifies the start of the bucket resetting range.
-
-[discrete]
-==== preview_data_frame_analytics
-Preview features used by data frame analytics.
-Previews the extracted features used by a data frame analytics config.
-
-{ref}/preview-dfanalytics.html[Endpoint documentation]
-[source,ts]
-----
-client.ml.previewDataFrameAnalytics({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`id` (Optional, string)*: Identifier for the data frame analytics job.
-** *`config` (Optional, { source, analysis, model_memory_limit, max_num_threads, analyzed_fields })*: A data frame analytics config as described in create data frame analytics
-jobs. Note that `id` and `dest` don’t need to be provided in the context of
-this API.
-
-[discrete]
-==== preview_datafeed
-Preview a datafeed.
-This API returns the first "page" of search results from a datafeed.
-You can preview an existing datafeed or provide configuration details for a datafeed
-and anomaly detection job in the API. The preview shows the structure of the data
-that will be passed to the anomaly detection engine.
-IMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that
-called the API. However, when the datafeed starts it uses the roles of the last user that created or updated the
-datafeed. To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials.
-You can also use secondary authorization headers to supply the credentials.
- -{ref}/ml-preview-datafeed.html[Endpoint documentation] -[source,ts] ----- -client.ml.previewDatafeed({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`datafeed_id` (Optional, string)*: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase -alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric -characters. NOTE: If you use this path parameter, you cannot provide datafeed or anomaly detection job -configuration details in the request body. -** *`datafeed_config` (Optional, { aggregations, chunking_config, datafeed_id, delayed_data_check_config, frequency, indices, indices_options, job_id, max_empty_searches, query, query_delay, runtime_mappings, script_fields, scroll_size })*: The datafeed definition to preview. -** *`job_config` (Optional, { allow_lazy_open, analysis_config, analysis_limits, background_persist_interval, custom_settings, daily_model_snapshot_retention_after_days, data_description, datafeed_config, description, groups, job_id, job_type, model_plot_config, model_snapshot_retention_days, renormalization_window_days, results_index_name, results_retention_days })*: The configuration details for the anomaly detection job that is associated with the datafeed. If the -`datafeed_config` object does not include a `job_id` that references an existing anomaly detection job, you must -supply this `job_config` object. If you include both a `job_id` and a `job_config`, the latter information is -used. You cannot specify a `job_config` object unless you also supply a `datafeed_config` object. -** *`start` (Optional, string | Unit)*: The start time from where the datafeed preview should begin -** *`end` (Optional, string | Unit)*: The end time when the datafeed preview should stop - -[discrete] -==== put_calendar -Create a calendar. - -{ref}/ml-put-calendar.html[Endpoint documentation] -[source,ts] ----- -client.ml.putCalendar({ calendar_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`calendar_id` (string)*: A string that uniquely identifies a calendar. -** *`job_ids` (Optional, string[])*: An array of anomaly detection job identifiers. -** *`description` (Optional, string)*: A description of the calendar. - -[discrete] -==== put_calendar_job -Add anomaly detection job to calendar. - -{ref}/ml-put-calendar-job.html[Endpoint documentation] -[source,ts] ----- -client.ml.putCalendarJob({ calendar_id, job_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`calendar_id` (string)*: A string that uniquely identifies a calendar. -** *`job_id` (string | string[])*: An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a list of jobs or groups. - -[discrete] -==== put_data_frame_analytics -Create a data frame analytics job. -This API creates a data frame analytics job that performs an analysis on the -source indices and stores the outcome in a destination index. - -{ref}/put-dfanalytics.html[Endpoint documentation] -[source,ts] ----- -client.ml.putDataFrameAnalytics({ id, analysis, dest, source }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: Identifier for the data frame analytics job. This identifier can contain -lowercase alphanumeric characters (a-z and 0-9), hyphens, and -underscores. It must start and end with alphanumeric characters. 
-** *`analysis` ({ classification, outlier_detection, regression })*: The analysis configuration, which contains the information necessary to -perform one of the following types of analysis: classification, outlier -detection, or regression. -** *`dest` ({ index, results_field })*: The destination configuration. -** *`source` ({ index, query, runtime_mappings, _source })*: The configuration of how to source the analysis data. -** *`allow_lazy_start` (Optional, boolean)*: Specifies whether this job can start when there is insufficient machine -learning node capacity for it to be immediately assigned to a node. If -set to `false` and a machine learning node with capacity to run the job -cannot be immediately found, the API returns an error. If set to `true`, -the API does not return an error; the job waits in the `starting` state -until sufficient machine learning node capacity is available. This -behavior is also affected by the cluster-wide -`xpack.ml.max_lazy_ml_nodes` setting. -** *`analyzed_fields` (Optional, { includes, excludes })*: Specifies `includes` and/or `excludes` patterns to select which fields -will be included in the analysis. The patterns specified in `excludes` -are applied last, therefore `excludes` takes precedence. In other words, -if the same field is specified in both `includes` and `excludes`, then -the field will not be included in the analysis. If `analyzed_fields` is -not set, only the relevant fields will be included. For example, all the -numeric fields for outlier detection. -The supported fields vary for each type of analysis. Outlier detection -requires numeric or `boolean` data to analyze. The algorithms don’t -support missing values therefore fields that have data types other than -numeric or boolean are ignored. Documents where included fields contain -missing values, null values, or an array are also ignored. Therefore the -`dest` index may contain documents that don’t have an outlier score. -Regression supports fields that are numeric, `boolean`, `text`, -`keyword`, and `ip` data types. It is also tolerant of missing values. -Fields that are supported are included in the analysis, other fields are -ignored. Documents where included fields contain an array with two or -more values are also ignored. Documents in the `dest` index that don’t -contain a results field are not included in the regression analysis. -Classification supports fields that are numeric, `boolean`, `text`, -`keyword`, and `ip` data types. It is also tolerant of missing values. -Fields that are supported are included in the analysis, other fields are -ignored. Documents where included fields contain an array with two or -more values are also ignored. Documents in the `dest` index that don’t -contain a results field are not included in the classification analysis. -Classification analysis can be improved by mapping ordinal variable -values to a single number. For example, in case of age ranges, you can -model the values as `0-14 = 0`, `15-24 = 1`, `25-34 = 2`, and so on. -** *`description` (Optional, string)*: A description of the job. -** *`max_num_threads` (Optional, number)*: The maximum number of threads to be used by the analysis. Using more -threads may decrease the time necessary to complete the analysis at the -cost of using more CPU. Note that the process may use additional threads -for operational functionality other than the analysis itself. 
-** *`model_memory_limit` (Optional, string)*: The approximate maximum amount of memory resources that are permitted for -analytical processing. If your `elasticsearch.yml` file contains an -`xpack.ml.max_model_memory_limit` setting, an error occurs when you try -to create data frame analytics jobs that have `model_memory_limit` values -greater than that setting. -** *`headers` (Optional, Record<string, string | string[]>)* -** *`version` (Optional, string)* - -[discrete] -==== put_datafeed -Create a datafeed. -Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job. -You can associate only one datafeed with each anomaly detection job. -The datafeed contains a query that runs at a defined interval (`frequency`). -If you are concerned about delayed data, you can add a delay (`query_delay`) at each interval. -When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had -at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, -those credentials are used instead. -You must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed -directly to the `.ml-config` index. Do not give users `write` privileges on the `.ml-config` index. - -{ref}/ml-put-datafeed.html[Endpoint documentation] -[source,ts] ----- -client.ml.putDatafeed({ datafeed_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`datafeed_id` (string)*: A numerical character string that uniquely identifies the datafeed. -This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. -It must start and end with alphanumeric characters. -** *`aggregations` (Optional, Record<string, { aggregations, meta, adjacency_matrix, auto_date_histogram, avg, avg_bucket, boxplot, bucket_script, bucket_selector, bucket_sort, bucket_count_ks_test, bucket_correlation, cardinality, categorize_text, children, composite, cumulative_cardinality, cumulative_sum, date_histogram, date_range, derivative, diversified_sampler, extended_stats, extended_stats_bucket, frequent_item_sets, filter, filters, geo_bounds, geo_centroid, geo_distance, geohash_grid, geo_line, geotile_grid, geohex_grid, global, histogram, ip_range, ip_prefix, inference, line, matrix_stats, max, max_bucket, median_absolute_deviation, min, min_bucket, missing, moving_avg, moving_percentiles, moving_fn, multi_terms, nested, normalize, parent, percentile_ranks, percentiles, percentiles_bucket, range, rare_terms, rate, reverse_nested, random_sampler, sampler, scripted_metric, serial_diff, significant_terms, significant_text, stats, stats_bucket, string_stats, sum, sum_bucket, terms, time_series, top_hits, t_test, top_metrics, value_count, weighted_avg, variable_width_histogram }>)*: If set, the datafeed performs aggregation searches. -Support for aggregations is limited and should be used only with low cardinality data. -** *`chunking_config` (Optional, { mode, time_span })*: Datafeeds might be required to search over long time periods, for several months or years. -This search is split into time chunks in order to ensure the load on Elasticsearch is managed. -Chunking configuration controls how the size of these time chunks are calculated; -it is an advanced configuration option. -** *`delayed_data_check_config` (Optional, { check_window, enabled })*: Specifies whether the datafeed checks for missing data and the size of the window. 
-The datafeed can optionally search over indices that have already been read in an effort to determine whether -any data has subsequently been added to the index. If missing data is found, it is a good indication that the -`query_delay` is set too low and the data is being indexed after the datafeed has passed that moment in time. -This check runs only on real-time datafeeds. -** *`frequency` (Optional, string | -1 | 0)*: The interval at which scheduled queries are made while the datafeed runs in real time. -The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible -fraction of the bucket span. When `frequency` is shorter than the bucket span, interim results for the last -(partial) bucket are written then eventually overwritten by the full bucket results. If the datafeed uses -aggregations, this value must be divisible by the interval of the date histogram aggregation. -** *`indices` (Optional, string | string[])*: An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine -learning nodes must have the `remote_cluster_client` role. -** *`indices_options` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, ignore_throttled })*: Specifies index expansion options that are used during search -** *`job_id` (Optional, string)*: Identifier for the anomaly detection job. -** *`max_empty_searches` (Optional, number)*: If a real-time datafeed has never seen any data (including during any initial training period), it automatically -stops and closes the associated job after this many real-time searches return no documents. In other words, -it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no -end time that sees no data remains started until it is explicitly stopped. By default, it is not set. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an -Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this -object is passed verbatim to Elasticsearch. -** *`query_delay` (Optional, string | -1 | 0)*: The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might -not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default -value is randomly selected between `60s` and `120s`. This randomness improves the query performance -when there are multiple jobs running on the same node. -** *`runtime_mappings` (Optional, Record<string, { fields, fetch_fields, format, input_field, target_field, target_index, script, type }>)*: Specifies runtime fields for the datafeed search. 
-** *`script_fields` (Optional, Record<string, { script, ignore_failure }>)*: Specifies scripts that evaluate custom expressions and returns script fields to the datafeed. -The detector configuration objects in a job can contain functions that use these script fields. -** *`scroll_size` (Optional, number)*: The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. -The maximum value is the value of `index.max_result_window`, which is 10,000 by default. -** *`headers` (Optional, Record<string, string | string[]>)* -** *`allow_no_indices` (Optional, boolean)*: If true, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` -string or when no indices are specified. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines -whether wildcard expressions match hidden data streams. Supports a list of values. -** *`ignore_throttled` (Optional, boolean)*: If true, concrete, expanded, or aliased indices are ignored when frozen. -** *`ignore_unavailable` (Optional, boolean)*: If true, unavailable indices (missing or closed) are ignored. - -[discrete] -==== put_filter -Create a filter. -A filter contains a list of strings. It can be used by one or more anomaly detection jobs. -Specifically, filters are referenced in the `custom_rules` property of detector configuration objects. - -{ref}/ml-put-filter.html[Endpoint documentation] -[source,ts] ----- -client.ml.putFilter({ filter_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`filter_id` (string)*: A string that uniquely identifies a filter. -** *`description` (Optional, string)*: A description of the filter. -** *`items` (Optional, string[])*: The items of the filter. A wildcard `*` can be used at the beginning or the end of an item. -Up to 10000 items are allowed in each filter. - -[discrete] -==== put_job -Create an anomaly detection job. -If you include a `datafeed_config`, you must have read index privileges on the source index. - -{ref}/ml-put-job.html[Endpoint documentation] -[source,ts] ----- -client.ml.putJob({ job_id, analysis_config, data_description }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`job_id` (string)*: The identifier for the anomaly detection job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. -** *`analysis_config` ({ bucket_span, categorization_analyzer, categorization_field_name, categorization_filters, detectors, influencers, latency, model_prune_window, multivariate_by_fields, per_partition_categorization, summary_count_field_name })*: Specifies how to analyze the data. After you create a job, you cannot change the analysis configuration; all the properties are informational. -** *`data_description` ({ format, time_field, time_format, field_delimiter })*: Defines the format of the input data when you send data to the job by using the post data API. Note that when configure a datafeed, these properties are automatically set. When data is received via the post data API, it is not stored in Elasticsearch. Only the results for anomaly detection are retained. -** *`allow_lazy_open` (Optional, boolean)*: Advanced configuration option. 
Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node. By default, if a machine learning node with capacity to run the job cannot immediately be found, the open anomaly detection jobs API returns an error. However, this is also subject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this option is set to true, the open anomaly detection jobs API does not return an error and the job waits in the opening state until sufficient machine learning node capacity is available. -** *`analysis_limits` (Optional, { categorization_examples_limit, model_memory_limit })*: Limits can be applied for the resources required to hold the mathematical models in memory. These limits are approximate and can be set per job. They do not control the memory used by other processes, for example the Elasticsearch Java processes. -** *`background_persist_interval` (Optional, string | -1 | 0)*: Advanced configuration option. The time between each periodic persistence of the model. The default value is a randomized value between 3 to 4 hours, which avoids all jobs persisting at exactly the same time. The smallest allowed value is 1 hour. For very large models (several GB), persistence could take 10-20 minutes, so do not set the `background_persist_interval` value too low. -** *`custom_settings` (Optional, User-defined value)*: Advanced configuration option. Contains custom meta data about the job. -** *`daily_model_snapshot_retention_after_days` (Optional, number)*: Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies a period of time (in days) after which only the first snapshot per day is retained. This period is relative to the timestamp of the most recent snapshot for this job. Valid values range from 0 to `model_snapshot_retention_days`. -** *`datafeed_config` (Optional, { aggregations, chunking_config, datafeed_id, delayed_data_check_config, frequency, indices, indices_options, job_id, max_empty_searches, query, query_delay, runtime_mappings, script_fields, scroll_size })*: Defines a datafeed for the anomaly detection job. If Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. -** *`description` (Optional, string)*: A description of the job. -** *`groups` (Optional, string[])*: A list of job groups. A job can belong to no groups or many. -** *`model_plot_config` (Optional, { annotations_enabled, enabled, terms })*: This advanced configuration option stores model information along with the results. It provides a more detailed view into anomaly detection. If you enable model plot it can add considerable overhead to the performance of the system; it is not feasible for jobs with many entities. Model plot provides a simplified and indicative view of the model and its bounds. It does not display complex features such as multivariate correlations or multimodal data. As such, anomalies may occasionally be reported which cannot be seen in the model plot. Model plot config can be configured when the job is created or updated later. It must be disabled if performance issues are experienced. -** *`model_snapshot_retention_days` (Optional, number)*: Advanced configuration option, which affects the automatic removal of old model snapshots for this job. 
It specifies the maximum period of time (in days) that snapshots are retained. This period is relative to the timestamp of the most recent snapshot for this job. By default, snapshots ten days older than the newest snapshot are deleted.
-** *`renormalization_window_days` (Optional, number)*: Advanced configuration option. The period over which adjustments to the score are applied, as new data is seen. The default value is the longer of 30 days or 100 bucket spans.
-** *`results_index_name` (Optional, string)*: A text string that affects the name of the machine learning results index. By default, the job generates an index named `.ml-anomalies-shared`.
-** *`results_retention_days` (Optional, number)*: Advanced configuration option. The period of time (in days) that results are retained. Age is calculated relative to the timestamp of the latest bucket result. If this property has a non-null value, once per day at 00:30 (server time), results that are the specified number of days older than the latest bucket result are deleted from Elasticsearch. The default value is null, which means all results are retained. Annotations generated by the system also count as results for retention purposes; they are deleted after the same number of days as results. Annotations added by users are retained forever.
-
-[discrete]
-==== put_trained_model
-Create a trained model.
-Enables you to supply a trained model that is not created by data frame analytics.
-
-{ref}/put-trained-models.html[Endpoint documentation]
-[source,ts]
-----
-client.ml.putTrainedModel({ model_id })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`model_id` (string)*: The unique identifier of the trained model.
-** *`compressed_definition` (Optional, string)*: The compressed (GZipped and Base64 encoded) inference definition of the
-model. If compressed_definition is specified, then definition cannot be
-specified.
-** *`definition` (Optional, { preprocessors, trained_model })*: The inference definition for the model. If definition is specified, then
-compressed_definition cannot be specified.
-** *`description` (Optional, string)*: A human-readable description of the inference trained model.
-** *`inference_config` (Optional, { regression, classification, text_classification, zero_shot_classification, fill_mask, ner, pass_through, text_embedding, text_expansion, question_answering })*: The default configuration for inference. This can be either a regression
-or classification configuration. It must match the underlying
-definition.trained_model's target_type. For pre-packaged models such as
-ELSER the config is not required.
-** *`input` (Optional, { field_names })*: The input field names for the model definition.
-** *`metadata` (Optional, User-defined value)*: An object map that contains metadata about the model.
-** *`model_type` (Optional, Enum("tree_ensemble" | "lang_ident" | "pytorch"))*: The model type.
-** *`model_size_bytes` (Optional, number)*: The estimated memory usage in bytes to keep the trained model in memory.
-This property is supported only if defer_definition_decompression is true
-or the model definition is not supplied.
-** *`platform_architecture` (Optional, string)*: The platform architecture (if applicable) of the trained model. If the model
-only works on one platform, because it is heavily optimized for a particular
-processor architecture and OS combination, then this field specifies which.
-The format of the string must match the platform identifiers used by Elasticsearch, -so one of, `linux-x86_64`, `linux-aarch64`, `darwin-x86_64`, `darwin-aarch64`, -or `windows-x86_64`. For portable models (those that work independent of processor -architecture or OS features), leave this field unset. -** *`tags` (Optional, string[])*: An array of tags to organize the model. -** *`prefix_strings` (Optional, { ingest, search })*: Optional prefix strings applied at inference -** *`defer_definition_decompression` (Optional, boolean)*: If set to `true` and a `compressed_definition` is provided, -the request defers definition decompression and skips relevant -validations. -** *`wait_for_completion` (Optional, boolean)*: Whether to wait for all child operations (e.g. model download) -to complete. - -[discrete] -==== put_trained_model_alias -Create or update a trained model alias. -A trained model alias is a logical name used to reference a single trained -model. -You can use aliases instead of trained model identifiers to make it easier to -reference your models. For example, you can use aliases in inference -aggregations and processors. -An alias must be unique and refer to only a single trained model. However, -you can have multiple aliases for each trained model. -If you use this API to update an alias such that it references a different -trained model ID and the model uses a different type of data frame analytics, -an error occurs. For example, this situation occurs if you have a trained -model for regression analysis and a trained model for classification -analysis; you cannot reassign an alias from one type of trained model to -another. -If you use this API to update an alias and there are very few input fields in -common between the old and new trained models for the model alias, the API -returns a warning. - -{ref}/put-trained-models-aliases.html[Endpoint documentation] -[source,ts] ----- -client.ml.putTrainedModelAlias({ model_alias, model_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`model_alias` (string)*: The alias to create or update. This value cannot end in numbers. -** *`model_id` (string)*: The identifier for the trained model that the alias refers to. -** *`reassign` (Optional, boolean)*: Specifies whether the alias gets reassigned to the specified trained -model if it is already assigned to a different model. If the alias is -already assigned and this parameter is false, the API returns an error. - -[discrete] -==== put_trained_model_definition_part -Create part of a trained model definition. - -{ref}/put-trained-model-definition-part.html[Endpoint documentation] -[source,ts] ----- -client.ml.putTrainedModelDefinitionPart({ model_id, part, definition, total_definition_length, total_parts }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`model_id` (string)*: The unique identifier of the trained model. -** *`part` (number)*: The definition part number. When the definition is loaded for inference the definition parts are streamed in the -order of their part number. The first part must be `0` and the final part must be `total_parts - 1`. -** *`definition` (string)*: The definition part for the model. Must be a base64 encoded string. -** *`total_definition_length` (number)*: The total uncompressed definition length in bytes. Not base64 encoded. -** *`total_parts` (number)*: The total number of parts that will be uploaded. Must be greater than 0. - -[discrete] -==== put_trained_model_vocabulary -Create a trained model vocabulary. 
-This API is supported only for natural language processing (NLP) models. -The vocabulary is stored in the index as described in `inference_config.*.vocabulary` of the trained model definition. - -{ref}/put-trained-model-vocabulary.html[Endpoint documentation] -[source,ts] ----- -client.ml.putTrainedModelVocabulary({ model_id, vocabulary }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`model_id` (string)*: The unique identifier of the trained model. -** *`vocabulary` (string[])*: The model vocabulary, which must not be empty. -** *`merges` (Optional, string[])*: The optional model merges if required by the tokenizer. -** *`scores` (Optional, number[])*: The optional vocabulary value scores if required by the tokenizer. - -[discrete] -==== reset_job -Reset an anomaly detection job. -All model state and results are deleted. The job is ready to start over as if -it had just been created. -It is not currently possible to reset multiple jobs using wildcards or a -comma separated list. - -{ref}/ml-reset-job.html[Endpoint documentation] -[source,ts] ----- -client.ml.resetJob({ job_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`job_id` (string)*: The ID of the job to reset. -** *`wait_for_completion` (Optional, boolean)*: Should this request wait until the operation has completed before -returning. -** *`delete_user_annotations` (Optional, boolean)*: Specifies whether annotations that have been added by the -user should be deleted along with any auto-generated annotations when the job is -reset. - -[discrete] -==== revert_model_snapshot -Revert to a snapshot. -The machine learning features react quickly to anomalous input, learning new -behaviors in data. Highly anomalous input increases the variance in the -models whilst the system learns whether this is a new step-change in behavior -or a one-off event. In the case where this anomalous input is known to be a -one-off, then it might be appropriate to reset the model state to a time -before this event. For example, you might consider reverting to a saved -snapshot after Black Friday or a critical system failure. - -{ref}/ml-revert-snapshot.html[Endpoint documentation] -[source,ts] ----- -client.ml.revertModelSnapshot({ job_id, snapshot_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`job_id` (string)*: Identifier for the anomaly detection job. -** *`snapshot_id` (string)*: You can specify `empty` as the <snapshot_id>. Reverting to the empty -snapshot means the anomaly detection job starts learning a new model from -scratch when it is started. -** *`delete_intervening_results` (Optional, boolean)*: Refer to the description for the `delete_intervening_results` query parameter. - -[discrete] -==== set_upgrade_mode -Set upgrade_mode for ML indices. -Sets a cluster wide upgrade_mode setting that prepares machine learning -indices for an upgrade. -When upgrading your cluster, in some circumstances you must restart your -nodes and reindex your machine learning indices. In those circumstances, -there must be no machine learning jobs running. You can close the machine -learning jobs, do the upgrade, then open all the jobs again. Alternatively, -you can use this API to temporarily halt tasks associated with the jobs and -datafeeds and prevent new jobs from opening. You can also use this API -during upgrades that do not require you to reindex your machine learning -indices, though stopping jobs is not a requirement in that case. 
-You can see the current value for the upgrade_mode setting by using the get -machine learning info API. - -{ref}/ml-set-upgrade-mode.html[Endpoint documentation] -[source,ts] ----- -client.ml.setUpgradeMode({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`enabled` (Optional, boolean)*: When `true`, it enables `upgrade_mode` which temporarily halts all job -and datafeed tasks and prohibits new job and datafeed tasks from -starting. -** *`timeout` (Optional, string | -1 | 0)*: The time to wait for the request to be completed. - -[discrete] -==== start_data_frame_analytics -Start a data frame analytics job. -A data frame analytics job can be started and stopped multiple times -throughout its lifecycle. -If the destination index does not exist, it is created automatically the -first time you start the data frame analytics job. The -`index.number_of_shards` and `index.number_of_replicas` settings for the -destination index are copied from the source index. If there are multiple -source indices, the destination index copies the highest setting values. The -mappings for the destination index are also copied from the source indices. -If there are any mapping conflicts, the job fails to start. -If the destination index exists, it is used as is. You can therefore set up -the destination index in advance with custom settings and mappings. - -{ref}/start-dfanalytics.html[Endpoint documentation] -[source,ts] ----- -client.ml.startDataFrameAnalytics({ id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: Identifier for the data frame analytics job. This identifier can contain -lowercase alphanumeric characters (a-z and 0-9), hyphens, and -underscores. It must start and end with alphanumeric characters. -** *`timeout` (Optional, string | -1 | 0)*: Controls the amount of time to wait until the data frame analytics job -starts. - -[discrete] -==== start_datafeed -Start datafeeds. - -A datafeed must be started in order to retrieve data from Elasticsearch. A datafeed can be started and stopped -multiple times throughout its lifecycle. - -Before you can start a datafeed, the anomaly detection job must be open. Otherwise, an error occurs. - -If you restart a stopped datafeed, it continues processing input data from the next millisecond after it was stopped. -If new data was indexed for that exact millisecond between stopping and starting, it will be ignored. - -When Elasticsearch security features are enabled, your datafeed remembers which roles the last user to create or -update it had at the time of creation or update and runs the query using those same roles. If you provided secondary -authorization headers when you created or updated the datafeed, those credentials are used instead. - -{ref}/ml-start-datafeed.html[Endpoint documentation] -[source,ts] ----- -client.ml.startDatafeed({ datafeed_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`datafeed_id` (string)*: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase -alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric -characters. -** *`end` (Optional, string | Unit)*: Refer to the description for the `end` query parameter. -** *`start` (Optional, string | Unit)*: Refer to the description for the `start` query parameter. -** *`timeout` (Optional, string | -1 | 0)*: Refer to the description for the `timeout` query parameter. 
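-
-For illustration, here is a minimal sketch of starting a datafeed with this client. The node
-URL, the datafeed identifier, and the `start` timestamp are hypothetical placeholders; the
-sketch assumes the datafeed has already been created and that its anomaly detection job is open.
-
-[source,ts]
-----
-const { Client } = require('@elastic/elasticsearch')
-
-const client = new Client({ node: 'http://localhost:9200' })
-
-async function run () {
-  // Begin processing data from the given (hypothetical) timestamp onwards.
-  // The associated anomaly detection job must already be open, otherwise an error occurs.
-  const response = await client.ml.startDatafeed({
-    datafeed_id: 'datafeed-my-job',
-    start: '2025-01-01T00:00:00Z'
-  })
-  console.log(response)
-}
-
-run().catch(console.log)
-----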
- -[discrete] -==== start_trained_model_deployment -Start a trained model deployment. -It allocates the model to every machine learning node. - -{ref}/start-trained-model-deployment.html[Endpoint documentation] -[source,ts] ----- -client.ml.startTrainedModelDeployment({ model_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`model_id` (string)*: The unique identifier of the trained model. Currently, only PyTorch models are supported. -** *`cache_size` (Optional, number | string)*: The inference cache size (in memory outside the JVM heap) per node for the model. -The default value is the same size as the `model_size_bytes`. To disable the cache, -`0b` can be provided. -** *`deployment_id` (Optional, string)*: A unique identifier for the deployment of the model. -** *`number_of_allocations` (Optional, number)*: The number of model allocations on each node where the model is deployed. -All allocations on a node share the same copy of the model in memory but use -a separate set of threads to evaluate the model. -Increasing this value generally increases the throughput. -If this setting is greater than the number of hardware threads -it will automatically be changed to a value less than the number of hardware threads. -** *`priority` (Optional, Enum("normal" | "low"))*: The deployment priority. -** *`queue_capacity` (Optional, number)*: Specifies the number of inference requests that are allowed in the queue. After the number of requests exceeds -this value, new requests are rejected with a 429 error. -** *`threads_per_allocation` (Optional, number)*: Sets the number of threads used by each model allocation during inference. This generally increases -the inference speed. The inference process is a compute-bound process; any number -greater than the number of available hardware threads on the machine does not increase the -inference speed. If this setting is greater than the number of hardware threads -it will automatically be changed to a value less than the number of hardware threads. -** *`timeout` (Optional, string | -1 | 0)*: Specifies the amount of time to wait for the model to deploy. -** *`wait_for` (Optional, Enum("started" | "starting" | "fully_allocated"))*: Specifies the allocation status to wait for before returning. - -[discrete] -==== stop_data_frame_analytics -Stop data frame analytics jobs. -A data frame analytics job can be started and stopped multiple times -throughout its lifecycle. - -{ref}/stop-dfanalytics.html[Endpoint documentation] -[source,ts] ----- -client.ml.stopDataFrameAnalytics({ id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: Identifier for the data frame analytics job. This identifier can contain -lowercase alphanumeric characters (a-z and 0-9), hyphens, and -underscores. It must start and end with alphanumeric characters. -** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: - -1. Contains wildcard expressions and there are no data frame analytics -jobs that match. -2. Contains the _all string or no identifiers and there are no matches. -3. Contains wildcard expressions and there are only partial matches. - -The default value is true, which returns an empty data_frame_analytics -array when there are no matches and the subset of results when there are -partial matches. If this parameter is false, the request returns a 404 -status code when there are no matches or only partial matches. 
-** *`force` (Optional, boolean)*: If true, the data frame analytics job is stopped forcefully. -** *`timeout` (Optional, string | -1 | 0)*: Controls the amount of time to wait until the data frame analytics job -stops. Defaults to 20 seconds. - -[discrete] -==== stop_datafeed -Stop datafeeds. -A datafeed that is stopped ceases to retrieve data from Elasticsearch. A datafeed can be started and stopped -multiple times throughout its lifecycle. - -{ref}/ml-stop-datafeed.html[Endpoint documentation] -[source,ts] ----- -client.ml.stopDatafeed({ datafeed_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`datafeed_id` (string)*: Identifier for the datafeed. You can stop multiple datafeeds in a single API request by using a comma-separated -list of datafeeds or a wildcard expression. You can close all datafeeds by using `_all` or by specifying `*` as -the identifier. -** *`allow_no_match` (Optional, boolean)*: Refer to the description for the `allow_no_match` query parameter. -** *`force` (Optional, boolean)*: Refer to the description for the `force` query parameter. -** *`timeout` (Optional, string | -1 | 0)*: Refer to the description for the `timeout` query parameter. - -[discrete] -==== stop_trained_model_deployment -Stop a trained model deployment. - -{ref}/stop-trained-model-deployment.html[Endpoint documentation] -[source,ts] ----- -client.ml.stopTrainedModelDeployment({ model_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`model_id` (string)*: The unique identifier of the trained model. -** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: contains wildcard expressions and there are no deployments that match; -contains the `_all` string or no identifiers and there are no matches; or contains wildcard expressions and -there are only partial matches. By default, it returns an empty array when there are no matches and the subset of results when there are partial matches. -If `false`, the request returns a 404 status code when there are no matches or only partial matches. -** *`force` (Optional, boolean)*: Forcefully stops the deployment, even if it is used by ingest pipelines. You can't use these pipelines until you -restart the model deployment. - -[discrete] -==== update_data_frame_analytics -Update a data frame analytics job. - -{ref}/update-dfanalytics.html[Endpoint documentation] -[source,ts] ----- -client.ml.updateDataFrameAnalytics({ id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: Identifier for the data frame analytics job. This identifier can contain -lowercase alphanumeric characters (a-z and 0-9), hyphens, and -underscores. It must start and end with alphanumeric characters. -** *`description` (Optional, string)*: A description of the job. -** *`model_memory_limit` (Optional, string)*: The approximate maximum amount of memory resources that are permitted for -analytical processing. If your `elasticsearch.yml` file contains an -`xpack.ml.max_model_memory_limit` setting, an error occurs when you try -to create data frame analytics jobs that have `model_memory_limit` values -greater than that setting. -** *`max_num_threads` (Optional, number)*: The maximum number of threads to be used by the analysis. Using more -threads may decrease the time necessary to complete the analysis at the -cost of using more CPU. Note that the process may use additional threads -for operational functionality other than the analysis itself. 
-** *`allow_lazy_start` (Optional, boolean)*: Specifies whether this job can start when there is insufficient machine -learning node capacity for it to be immediately assigned to a node. - -[discrete] -==== update_datafeed -Update a datafeed. -You must stop and start the datafeed for the changes to be applied. -When Elasticsearch security features are enabled, your datafeed remembers which roles the user who updated it had at -the time of the update and runs the query using those same roles. If you provide secondary authorization headers, -those credentials are used instead. - -{ref}/ml-update-datafeed.html[Endpoint documentation] -[source,ts] ----- -client.ml.updateDatafeed({ datafeed_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`datafeed_id` (string)*: A numerical character string that uniquely identifies the datafeed. -This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. -It must start and end with alphanumeric characters. -** *`aggregations` (Optional, Record<string, { aggregations, meta, adjacency_matrix, auto_date_histogram, avg, avg_bucket, boxplot, bucket_script, bucket_selector, bucket_sort, bucket_count_ks_test, bucket_correlation, cardinality, categorize_text, children, composite, cumulative_cardinality, cumulative_sum, date_histogram, date_range, derivative, diversified_sampler, extended_stats, extended_stats_bucket, frequent_item_sets, filter, filters, geo_bounds, geo_centroid, geo_distance, geohash_grid, geo_line, geotile_grid, geohex_grid, global, histogram, ip_range, ip_prefix, inference, line, matrix_stats, max, max_bucket, median_absolute_deviation, min, min_bucket, missing, moving_avg, moving_percentiles, moving_fn, multi_terms, nested, normalize, parent, percentile_ranks, percentiles, percentiles_bucket, range, rare_terms, rate, reverse_nested, random_sampler, sampler, scripted_metric, serial_diff, significant_terms, significant_text, stats, stats_bucket, string_stats, sum, sum_bucket, terms, time_series, top_hits, t_test, top_metrics, value_count, weighted_avg, variable_width_histogram }>)*: If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only -with low cardinality data. -** *`chunking_config` (Optional, { mode, time_span })*: Datafeeds might search over long time periods, for several months or years. This search is split into time -chunks in order to ensure the load on Elasticsearch is managed. Chunking configuration controls how the size of -these time chunks are calculated; it is an advanced configuration option. -** *`delayed_data_check_config` (Optional, { check_window, enabled })*: Specifies whether the datafeed checks for missing data and the size of the window. The datafeed can optionally -search over indices that have already been read in an effort to determine whether any data has subsequently been -added to the index. If missing data is found, it is a good indication that the `query_delay` is set too low and -the data is being indexed after the datafeed has passed that moment in time. This check runs only on real-time -datafeeds. -** *`frequency` (Optional, string | -1 | 0)*: The interval at which scheduled queries are made while the datafeed runs in real time. The default value is -either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket -span. 
When `frequency` is shorter than the bucket span, interim results for the last (partial) bucket are -written then eventually overwritten by the full bucket results. If the datafeed uses aggregations, this value -must be divisible by the interval of the date histogram aggregation. -** *`indices` (Optional, string[])*: An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine -learning nodes must have the `remote_cluster_client` role. -** *`indices_options` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, ignore_throttled })*: Specifies index expansion options that are used during search. -** *`job_id` (Optional, string)* -** *`max_empty_searches` (Optional, number)*: If a real-time datafeed has never seen any data (including during any initial training period), it automatically -stops and closes the associated job after this many real-time searches return no documents. In other words, -it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no -end time that sees no data remains started until it is explicitly stopped. By default, it is not set. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an -Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this -object is passed verbatim to Elasticsearch. Note that if you change the query, the analyzed data is also -changed. Therefore, the time required to learn might be long and the understandability of the results is -unpredictable. If you want to make significant changes to the source data, it is recommended that you -clone the job and datafeed and make the amendments in the clone. Let both run in parallel and close one -when you are satisfied with the results of the job. -** *`query_delay` (Optional, string | -1 | 0)*: The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might -not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default -value is randomly selected between `60s` and `120s`. This randomness improves the query performance -when there are multiple jobs running on the same node. -** *`runtime_mappings` (Optional, Record<string, { fields, fetch_fields, format, input_field, target_field, target_index, script, type }>)*: Specifies runtime fields for the datafeed search. -** *`script_fields` (Optional, Record<string, { script, ignore_failure }>)*: Specifies scripts that evaluate custom expressions and returns script fields to the datafeed. -The detector configuration objects in a job can contain functions that use these script fields. 
-** *`scroll_size` (Optional, number)*: The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. -The maximum value is the value of `index.max_result_window`. -** *`allow_no_indices` (Optional, boolean)*: If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the -`_all` string or when no indices are specified. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines -whether wildcard expressions match hidden data streams. Supports a list of values. Valid values are: - -* `all`: Match any data stream or index, including hidden ones. -* `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed. -* `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or both. -* `none`: Wildcard patterns are not accepted. -* `open`: Match open, non-hidden indices. Also matches any non-hidden data stream. -** *`ignore_throttled` (Optional, boolean)*: If `true`, concrete, expanded or aliased indices are ignored when frozen. -** *`ignore_unavailable` (Optional, boolean)*: If `true`, unavailable indices (missing or closed) are ignored. - -[discrete] -==== update_filter -Update a filter. -Updates the description of a filter, adds items, or removes items from the list. - -{ref}/ml-update-filter.html[Endpoint documentation] -[source,ts] ----- -client.ml.updateFilter({ filter_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`filter_id` (string)*: A string that uniquely identifies a filter. -** *`add_items` (Optional, string[])*: The items to add to the filter. -** *`description` (Optional, string)*: A description for the filter. -** *`remove_items` (Optional, string[])*: The items to remove from the filter. - -[discrete] -==== update_job -Update an anomaly detection job. -Updates certain properties of an anomaly detection job. - -{ref}/ml-update-job.html[Endpoint documentation] -[source,ts] ----- -client.ml.updateJob({ job_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`job_id` (string)*: Identifier for the job. -** *`allow_lazy_open` (Optional, boolean)*: Advanced configuration option. Specifies whether this job can open when -there is insufficient machine learning node capacity for it to be -immediately assigned to a node. If `false` and a machine learning node -with capacity to run the job cannot immediately be found, the open -anomaly detection jobs API returns an error. However, this is also -subject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this -option is set to `true`, the open anomaly detection jobs API does not -return an error and the job waits in the opening state until sufficient -machine learning node capacity is available. -** *`analysis_limits` (Optional, { model_memory_limit })* -** *`background_persist_interval` (Optional, string | -1 | 0)*: Advanced configuration option. The time between each periodic persistence -of the model. -The default value is a randomized value between 3 to 4 hours, which -avoids all jobs persisting at exactly the same time. The smallest allowed -value is 1 hour. -For very large models (several GB), persistence could take 10-20 minutes, -so do not set the value too low. 
-If the job is open when you make the update, you must stop the datafeed, -close the job, then reopen the job and restart the datafeed for the -changes to take effect. -** *`custom_settings` (Optional, Record<string, User-defined value>)*: Advanced configuration option. Contains custom meta data about the job. -For example, it can contain custom URL information as shown in Adding -custom URLs to machine learning results. -** *`categorization_filters` (Optional, string[])* -** *`description` (Optional, string)*: A description of the job. -** *`model_plot_config` (Optional, { annotations_enabled, enabled, terms })* -** *`model_prune_window` (Optional, string | -1 | 0)* -** *`daily_model_snapshot_retention_after_days` (Optional, number)*: Advanced configuration option, which affects the automatic removal of old -model snapshots for this job. It specifies a period of time (in days) -after which only the first snapshot per day is retained. This period is -relative to the timestamp of the most recent snapshot for this job. Valid -values range from 0 to `model_snapshot_retention_days`. For jobs created -before version 7.8.0, the default value matches -`model_snapshot_retention_days`. -** *`model_snapshot_retention_days` (Optional, number)*: Advanced configuration option, which affects the automatic removal of old -model snapshots for this job. It specifies the maximum period of time (in -days) that snapshots are retained. This period is relative to the -timestamp of the most recent snapshot for this job. -** *`renormalization_window_days` (Optional, number)*: Advanced configuration option. The period over which adjustments to the -score are applied, as new data is seen. -** *`results_retention_days` (Optional, number)*: Advanced configuration option. The period of time (in days) that results -are retained. Age is calculated relative to the timestamp of the latest -bucket result. If this property has a non-null value, once per day at -00:30 (server time), results that are the specified number of days older -than the latest bucket result are deleted from Elasticsearch. The default -value is null, which means all results are retained. -** *`groups` (Optional, string[])*: A list of job groups. A job can belong to no groups or many. -** *`detectors` (Optional, { by_field_name, custom_rules, detector_description, detector_index, exclude_frequent, field_name, function, over_field_name, partition_field_name, use_null }[])*: An array of detector update objects. -** *`per_partition_categorization` (Optional, { enabled, stop_on_warn })*: Settings related to how categorization interacts with partition fields. - -[discrete] -==== update_model_snapshot -Update a snapshot. -Updates certain properties of a snapshot. - -{ref}/ml-update-snapshot.html[Endpoint documentation] -[source,ts] ----- -client.ml.updateModelSnapshot({ job_id, snapshot_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`job_id` (string)*: Identifier for the anomaly detection job. -** *`snapshot_id` (string)*: Identifier for the model snapshot. -** *`description` (Optional, string)*: A description of the model snapshot. -** *`retain` (Optional, boolean)*: If `true`, this snapshot will not be deleted during automatic cleanup of -snapshots older than `model_snapshot_retention_days`. However, this -snapshot will be deleted when the job is deleted. - -[discrete] -==== update_trained_model_deployment -Update a trained model deployment. 
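-
-For example, a minimal sketch of scaling a running deployment to two allocations, assuming a configured `client` instance; the `my-model` identifier is a hypothetical placeholder:
-
-[source,ts]
-----
-// Increase the number of allocations for an already started deployment.
-const response = await client.ml.updateTrainedModelDeployment({
-  model_id: 'my-model',
-  number_of_allocations: 2
-})
-console.log(response)
-----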
- -{ref}/update-trained-model-deployment.html[Endpoint documentation] -[source,ts] ----- -client.ml.updateTrainedModelDeployment({ model_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`model_id` (string)*: The unique identifier of the trained model. Currently, only PyTorch models are supported. -** *`number_of_allocations` (Optional, number)*: The number of model allocations on each node where the model is deployed. -All allocations on a node share the same copy of the model in memory but use -a separate set of threads to evaluate the model. -Increasing this value generally increases the throughput. -If this setting is greater than the number of hardware threads -it will automatically be changed to a value less than the number of hardware threads. - -[discrete] -==== upgrade_job_snapshot -Upgrade a snapshot. -Upgrades an anomaly detection model snapshot to the latest major version. -Over time, older snapshot formats are deprecated and removed. Anomaly -detection jobs support only snapshots that are from the current or previous -major version. -This API provides a means to upgrade a snapshot to the current major version. -This aids in preparing the cluster for an upgrade to the next major version. -Only one snapshot per anomaly detection job can be upgraded at a time and the -upgraded snapshot cannot be the current snapshot of the anomaly detection -job. - -{ref}/ml-upgrade-job-model-snapshot.html[Endpoint documentation] -[source,ts] ----- -client.ml.upgradeJobSnapshot({ job_id, snapshot_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`job_id` (string)*: Identifier for the anomaly detection job. -** *`snapshot_id` (string)*: A numerical character string that uniquely identifies the model snapshot. -** *`wait_for_completion` (Optional, boolean)*: When true, the API won’t respond until the upgrade is complete. -Otherwise, it responds as soon as the upgrade task is assigned to a node. -** *`timeout` (Optional, string | -1 | 0)*: Controls the time to wait for the request to complete. - -[discrete] -=== monitoring -[discrete] -==== bulk -Used by the monitoring features to send monitoring data. - -{ref}/monitor-elasticsearch-cluster.html[Endpoint documentation] -[source,ts] ----- -client.monitoring.bulk({ system_id, system_api_version, interval }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`system_id` (string)*: Identifier of the monitored system -** *`system_api_version` (string)* -** *`interval` (string | -1 | 0)*: Collection interval (e.g., '10s' or '10000ms') of the payload -** *`type` (Optional, string)*: Default document type for items which don't provide one -** *`operations` (Optional, { index, create, update, delete } | { detect_noop, doc, doc_as_upsert, script, scripted_upsert, _source, upsert } | object[])* - -[discrete] -=== nodes -[discrete] -==== clear_repositories_metering_archive -You can use this API to clear the archived repositories metering information in the cluster. - -{ref}/clear-repositories-metering-archive-api.html[Endpoint documentation] -[source,ts] ----- -client.nodes.clearRepositoriesMeteringArchive({ node_id, max_archive_version }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`node_id` (string | string[])*: List of node IDs or names used to limit returned information. -All the nodes selective options are explained [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html#cluster-nodes). 
-** *`max_archive_version` (number)*: Specifies the maximum [archive_version](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-repositories-metering-api.html#get-repositories-metering-api-response-body) to be cleared from the archive. - -[discrete] -==== get_repositories_metering_info -You can use the cluster repositories metering API to retrieve repositories metering information in a cluster. -This API exposes monotonically non-decreasing counters and it’s expected that clients would durably store the -information needed to compute aggregations over a period of time. Additionally, the information exposed by this -API is volatile, meaning that it won’t be present after node restarts. - -{ref}/get-repositories-metering-api.html[Endpoint documentation] -[source,ts] ----- -client.nodes.getRepositoriesMeteringInfo({ node_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`node_id` (string | string[])*: List of node IDs or names used to limit returned information. -All the nodes selective options are explained [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html#cluster-nodes). - -[discrete] -==== hot_threads -This API yields a breakdown of the hot threads on each selected node in the cluster. -The output is plain text with a breakdown of each node’s top hot threads. - -{ref}/cluster-nodes-hot-threads.html[Endpoint documentation] -[source,ts] ----- -client.nodes.hotThreads({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`node_id` (Optional, string | string[])*: List of node IDs or names used to limit returned information. -** *`ignore_idle_threads` (Optional, boolean)*: If true, known idle threads (e.g. waiting in a socket select, or to get -a task from an empty queue) are filtered out. -** *`interval` (Optional, string | -1 | 0)*: The interval to do the second sampling of threads. -** *`snapshots` (Optional, number)*: Number of samples of thread stacktrace. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response -is received before the timeout expires, the request fails and -returns an error. -** *`threads` (Optional, number)*: Specifies the number of hot threads to provide information for. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received -before the timeout expires, the request fails and returns an error. -** *`type` (Optional, Enum("cpu" | "wait" | "block" | "gpu" | "mem"))*: The type to sample. -** *`sort` (Optional, Enum("cpu" | "wait" | "block" | "gpu" | "mem"))*: The sort order for 'cpu' type (default: total) - -[discrete] -==== info -Returns cluster nodes information. - -{ref}/cluster-nodes-info.html[Endpoint documentation] -[source,ts] ----- -client.nodes.info({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`node_id` (Optional, string | string[])*: List of node IDs or names used to limit returned information. -** *`metric` (Optional, string | string[])*: Limits the information returned to the specific metrics. Supports a list, such as http,ingest. -** *`flat_settings` (Optional, boolean)*: If true, returns settings in flat format. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. 
If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== reload_secure_settings -Reloads the keystore on nodes in the cluster. - -{ref}/secure-settings.html[Endpoint documentation] -[source,ts] ----- -client.nodes.reloadSecureSettings({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`node_id` (Optional, string | string[])*: The names of particular nodes in the cluster to target. -** *`secure_settings_password` (Optional, string)*: The password for the Elasticsearch keystore. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. -If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== stats -Returns cluster nodes statistics. - -{ref}/cluster-nodes-stats.html[Endpoint documentation] -[source,ts] ----- -client.nodes.stats({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`node_id` (Optional, string | string[])*: List of node IDs or names used to limit returned information. -** *`metric` (Optional, string | string[])*: Limit the information returned to the specified metrics -** *`index_metric` (Optional, string | string[])*: Limit the information returned for indices metric to the specific index metrics. It can be used only if indices (or all) metric is specified. -** *`completion_fields` (Optional, string | string[])*: List or wildcard expressions of fields to include in fielddata and suggest statistics. -** *`fielddata_fields` (Optional, string | string[])*: List or wildcard expressions of fields to include in fielddata statistics. -** *`fields` (Optional, string | string[])*: List or wildcard expressions of fields to include in the statistics. -** *`groups` (Optional, boolean)*: List of search groups to include in the search statistics. -** *`include_segment_file_sizes` (Optional, boolean)*: If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested). -** *`level` (Optional, Enum("cluster" | "indices" | "shards"))*: Indicates whether statistics are aggregated at the cluster, index, or shard level. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. -** *`types` (Optional, string[])*: A list of document types for the indexing index metric. -** *`include_unloaded_segments` (Optional, boolean)*: If `true`, the response includes information from segments that are not loaded into memory. - -[discrete] -==== usage -Returns information on the usage of features. - -{ref}/cluster-nodes-usage.html[Endpoint documentation] -[source,ts] ----- -client.nodes.usage({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`node_id` (Optional, string | string[])*: A list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes -** *`metric` (Optional, string | string[])*: Limits the information returned to the specific metrics. -A list of the following options: `_all`, `rest_actions`. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. 
-If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -=== query_rules -[discrete] -==== delete_rule -Delete a query rule. -Delete a query rule within a query ruleset. - -{ref}/delete-query-rule.html[Endpoint documentation] -[source,ts] ----- -client.queryRules.deleteRule({ ruleset_id, rule_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`ruleset_id` (string)*: The unique identifier of the query ruleset containing the rule to delete -** *`rule_id` (string)*: The unique identifier of the query rule within the specified ruleset to delete - -[discrete] -==== delete_ruleset -Delete a query ruleset. - -{ref}/delete-query-ruleset.html[Endpoint documentation] -[source,ts] ----- -client.queryRules.deleteRuleset({ ruleset_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`ruleset_id` (string)*: The unique identifier of the query ruleset to delete - -[discrete] -==== get_rule -Get a query rule. -Get details about a query rule within a query ruleset. - -{ref}/get-query-rule.html[Endpoint documentation] -[source,ts] ----- -client.queryRules.getRule({ ruleset_id, rule_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`ruleset_id` (string)*: The unique identifier of the query ruleset containing the rule to retrieve -** *`rule_id` (string)*: The unique identifier of the query rule within the specified ruleset to retrieve - -[discrete] -==== get_ruleset -Get a query ruleset. -Get details about a query ruleset. - -{ref}/get-query-ruleset.html[Endpoint documentation] -[source,ts] ----- -client.queryRules.getRuleset({ ruleset_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`ruleset_id` (string)*: The unique identifier of the query ruleset - -[discrete] -==== list_rulesets -Get all query rulesets. -Get summarized information about the query rulesets. - -{ref}/list-query-rulesets.html[Endpoint documentation] -[source,ts] ----- -client.queryRules.listRulesets({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`from` (Optional, number)*: Starting offset (default: 0) -** *`size` (Optional, number)*: specifies a max number of results to get - -[discrete] -==== put_rule -Create or update a query rule. -Create or update a query rule within a query ruleset. - -{ref}/put-query-rule.html[Endpoint documentation] -[source,ts] ----- -client.queryRules.putRule({ ruleset_id, rule_id, type, criteria, actions }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`ruleset_id` (string)*: The unique identifier of the query ruleset containing the rule to be created or updated -** *`rule_id` (string)*: The unique identifier of the query rule within the specified ruleset to be created or updated -** *`type` (Enum("pinned" | "exclude"))* -** *`criteria` ({ type, metadata, values } | { type, metadata, values }[])* -** *`actions` ({ ids, docs })* -** *`priority` (Optional, number)* - -[discrete] -==== put_ruleset -Create or update a query ruleset. - -{ref}/put-query-ruleset.html[Endpoint documentation] -[source,ts] ----- -client.queryRules.putRuleset({ ruleset_id, rules }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`ruleset_id` (string)*: The unique identifier of the query ruleset to be created or updated -** *`rules` ({ rule_id, type, criteria, actions, priority } | { rule_id, type, criteria, actions, priority }[])* - -[discrete] -==== test -Test a query ruleset. 
-
-Evaluate match criteria against a query ruleset to identify the rules that would match that criteria.
-
-{ref}/test-query-ruleset.html[Endpoint documentation]
-[source,ts]
-----
-client.queryRules.test({ ruleset_id, match_criteria })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`ruleset_id` (string)*: The unique identifier of the query ruleset to be tested
-** *`match_criteria` (Record<string, User-defined value>)*
-
-[discrete]
-=== rollup
-[discrete]
-==== delete_job
-Deletes an existing rollup job.
-
-{ref}/rollup-delete-job.html[Endpoint documentation]
-[source,ts]
-----
-client.rollup.deleteJob({ id })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`id` (string)*: Identifier for the job.
-
-[discrete]
-==== get_jobs
-Retrieves the configuration, stats, and status of rollup jobs.
-
-{ref}/rollup-get-job.html[Endpoint documentation]
-[source,ts]
-----
-client.rollup.getJobs({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`id` (Optional, string)*: Identifier for the rollup job.
-If it is `_all` or omitted, the API returns all rollup jobs.
-
-[discrete]
-==== get_rollup_caps
-Returns the capabilities of any rollup jobs that have been configured for a specific index or index pattern.
-
-{ref}/rollup-get-rollup-caps.html[Endpoint documentation]
-[source,ts]
-----
-client.rollup.getRollupCaps({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`id` (Optional, string)*: Index, indices or index-pattern to return rollup capabilities for.
-`_all` may be used to fetch rollup capabilities from all jobs.
-
-[discrete]
-==== get_rollup_index_caps
-Returns the rollup capabilities of all jobs inside of a rollup index (for example, the index where rollup data is stored).
-
-{ref}/rollup-get-rollup-index-caps.html[Endpoint documentation]
-[source,ts]
-----
-client.rollup.getRollupIndexCaps({ index })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`index` (string | string[])*: Data stream or index to check for rollup capabilities.
-Wildcard (`*`) expressions are supported.
-
-[discrete]
-==== put_job
-Creates a rollup job.
-
-{ref}/rollup-put-job.html[Endpoint documentation]
-[source,ts]
-----
-client.rollup.putJob({ id, cron, groups, index_pattern, page_size, rollup_index })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`id` (string)*: Identifier for the rollup job. This can be any alphanumeric string and uniquely identifies the
-data that is associated with the rollup job. The ID is persistent; it is stored with the rolled
-up data. If you create a job, let it run for a while, then delete the job, the data that the job
-rolled up is still associated with this job ID. You cannot create a new job with the same ID
-since that could lead to problems with mismatched job configurations.
-** *`cron` (string)*: A cron string which defines the intervals when the rollup job should be executed. When the interval
-triggers, the indexer attempts to rollup the data in the index pattern. The cron pattern is unrelated
-to the time interval of the data being rolled up. For example, you may wish to create hourly rollups
-of your documents but to only run the indexer on a daily basis at midnight, as defined by the cron. The
-cron pattern is defined just like a Watcher cron schedule.
-** *`groups` ({ date_histogram, histogram, terms })*: Defines the grouping fields and aggregations that are defined for this rollup job. 
These fields will then be -available later for aggregating into buckets. These aggs and fields can be used in any combination. Think of -the groups configuration as defining a set of tools that can later be used in aggregations to partition the -data. Unlike raw data, we have to think ahead to which fields and aggregations might be used. Rollups provide -enough flexibility that you simply need to determine which fields are needed, not in what order they are needed. -** *`index_pattern` (string)*: The index or index pattern to roll up. Supports wildcard-style patterns (`logstash-*`). The job attempts to -rollup the entire index or index-pattern. -** *`page_size` (number)*: The number of bucket results that are processed on each iteration of the rollup indexer. A larger value tends -to execute faster, but requires more memory during processing. This value has no effect on how the data is -rolled up; it is merely used for tweaking the speed or memory cost of the indexer. -** *`rollup_index` (string)*: The index that contains the rollup results. The index can be shared with other rollup jobs. The data is stored so that it doesn’t interfere with unrelated jobs. -** *`metrics` (Optional, { field, metrics }[])*: Defines the metrics to collect for each grouping tuple. By default, only the doc_counts are collected for each -group. To make rollup useful, you will often add metrics like averages, mins, maxes, etc. Metrics are defined -on a per-field basis and for each field you configure which metric should be collected. -** *`timeout` (Optional, string | -1 | 0)*: Time to wait for the request to complete. -** *`headers` (Optional, Record<string, string | string[]>)* - -[discrete] -==== rollup_search -Enables searching rolled-up data using the standard Query DSL. - -{ref}/rollup-search.html[Endpoint documentation] -[source,ts] ----- -client.rollup.rollupSearch({ index }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (string | string[])*: Enables searching rolled-up data using the standard Query DSL. -** *`aggregations` (Optional, Record<string, { aggregations, meta, adjacency_matrix, auto_date_histogram, avg, avg_bucket, boxplot, bucket_script, bucket_selector, bucket_sort, bucket_count_ks_test, bucket_correlation, cardinality, categorize_text, children, composite, cumulative_cardinality, cumulative_sum, date_histogram, date_range, derivative, diversified_sampler, extended_stats, extended_stats_bucket, frequent_item_sets, filter, filters, geo_bounds, geo_centroid, geo_distance, geohash_grid, geo_line, geotile_grid, geohex_grid, global, histogram, ip_range, ip_prefix, inference, line, matrix_stats, max, max_bucket, median_absolute_deviation, min, min_bucket, missing, moving_avg, moving_percentiles, moving_fn, multi_terms, nested, normalize, parent, percentile_ranks, percentiles, percentiles_bucket, range, rare_terms, rate, reverse_nested, random_sampler, sampler, scripted_metric, serial_diff, significant_terms, significant_text, stats, stats_bucket, string_stats, sum, sum_bucket, terms, time_series, top_hits, t_test, top_metrics, value_count, weighted_avg, variable_width_histogram }>)*: Specifies aggregations. 
-** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specifies a DSL query. -** *`size` (Optional, number)*: Must be zero if set, as rollups work on pre-aggregated data. -** *`rest_total_hits_as_int` (Optional, boolean)*: Indicates whether hits.total should be rendered as an integer or an object in the rest search response -** *`typed_keys` (Optional, boolean)*: Specify whether aggregation and suggester names should be prefixed by their respective types in the response - -[discrete] -==== start_job -Starts an existing, stopped rollup job. - -{ref}/rollup-start-job.html[Endpoint documentation] -[source,ts] ----- -client.rollup.startJob({ id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: Identifier for the rollup job. - -[discrete] -==== stop_job -Stops an existing, started rollup job. - -{ref}/rollup-stop-job.html[Endpoint documentation] -[source,ts] ----- -client.rollup.stopJob({ id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: Identifier for the rollup job. -** *`timeout` (Optional, string | -1 | 0)*: If `wait_for_completion` is `true`, the API blocks for (at maximum) the specified duration while waiting for the job to stop. -If more than `timeout` time has passed, the API throws a timeout exception. -** *`wait_for_completion` (Optional, boolean)*: If set to `true`, causes the API to block until the indexer state completely stops. -If set to `false`, the API returns immediately and the indexer is stopped asynchronously in the background. - -[discrete] -=== search_application -[discrete] -==== delete -Delete a search application. -Remove a search application and its associated alias. Indices attached to the search application are not removed. - -{ref}/delete-search-application.html[Endpoint documentation] -[source,ts] ----- -client.searchApplication.delete({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string)*: The name of the search application to delete - -[discrete] -==== delete_behavioral_analytics -Delete a behavioral analytics collection. -The associated data stream is also deleted. - -{ref}/delete-analytics-collection.html[Endpoint documentation] -[source,ts] ----- -client.searchApplication.deleteBehavioralAnalytics({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string)*: The name of the analytics collection to be deleted - -[discrete] -==== get -Get search application details. - -{ref}/get-search-application.html[Endpoint documentation] -[source,ts] ----- -client.searchApplication.get({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string)*: The name of the search application - -[discrete] -==== get_behavioral_analytics -Get behavioral analytics collections. 
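-
-For example, a minimal sketch that fetches a single collection by name; `my-analytics-collection` is a hypothetical collection name, and omitting `name` lists every collection:
-
-[source,ts]
-----
-// Fetch one behavioral analytics collection by name.
-const collections = await client.searchApplication.getBehavioralAnalytics({
-  name: ['my-analytics-collection']
-})
-console.log(collections)
-----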
-
-{ref}/list-analytics-collection.html[Endpoint documentation]
-[source,ts]
-----
-client.searchApplication.getBehavioralAnalytics({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`name` (Optional, string[])*: A list of analytics collections to limit the returned information
-
-[discrete]
-==== list
-Returns the existing search applications.
-
-{ref}/list-search-applications.html[Endpoint documentation]
-[source,ts]
-----
-client.searchApplication.list({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`q` (Optional, string)*: Query in the Lucene query string syntax.
-** *`from` (Optional, number)*: Starting offset.
-** *`size` (Optional, number)*: Specifies a max number of results to get.
-
-[discrete]
-==== post_behavioral_analytics_event
-Creates a behavioral analytics event for an existing collection.
-
-http://todo.com/tbd[Endpoint documentation]
-[source,ts]
-----
-client.searchApplication.postBehavioralAnalyticsEvent()
-----
-
-
-[discrete]
-==== put
-Create or update a search application.
-
-{ref}/put-search-application.html[Endpoint documentation]
-[source,ts]
-----
-client.searchApplication.put({ name })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`name` (string)*: The name of the search application to be created or updated.
-** *`search_application` (Optional, { indices, analytics_collection_name, template })*
-** *`create` (Optional, boolean)*: If `true`, this request cannot replace or update existing Search Applications.
-
-[discrete]
-==== put_behavioral_analytics
-Create a behavioral analytics collection.
-
-{ref}/put-analytics-collection.html[Endpoint documentation]
-[source,ts]
-----
-client.searchApplication.putBehavioralAnalytics({ name })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`name` (string)*: The name of the analytics collection to be created or updated.
-
-[discrete]
-==== render_query
-Renders a query for the given search application search parameters.
-
-{ref}/search-application-render-query.html[Endpoint documentation]
-[source,ts]
-----
-client.searchApplication.renderQuery()
-----
-
-
-[discrete]
-==== search
-Run a search application search.
-Generate and run an Elasticsearch query that uses the specified query parameter and the search template associated with the search application or default template.
-Unspecified template parameters are assigned their default values if applicable.
-
-{ref}/search-application-search.html[Endpoint documentation]
-[source,ts]
-----
-client.searchApplication.search({ name })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`name` (string)*: The name of the search application to be searched.
-** *`params` (Optional, Record<string, User-defined value>)*: Query parameters specific to this request, which will override any defaults specified in the template.
-** *`typed_keys` (Optional, boolean)*: Determines whether aggregation names are prefixed by their respective types in the response.
-
-[discrete]
-=== searchable_snapshots
-[discrete]
-==== cache_stats
-Retrieve node-level cache statistics about searchable snapshots.
-
-{ref}/searchable-snapshots-apis.html[Endpoint documentation]
-[source,ts]
-----
-client.searchableSnapshots.cacheStats({ ... 
}) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`node_id` (Optional, string | string[])*: A list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes -** *`master_timeout` (Optional, string | -1 | 0)* - -[discrete] -==== clear_cache -Clear the cache of searchable snapshots. - -{ref}/searchable-snapshots-apis.html[Endpoint documentation] -[source,ts] ----- -client.searchableSnapshots.clearCache({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string | string[])*: A list of index names -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. -** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) -** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) -** *`pretty` (Optional, boolean)* -** *`human` (Optional, boolean)* - -[discrete] -==== mount -Mount a snapshot as a searchable index. - -{ref}/searchable-snapshots-api-mount-snapshot.html[Endpoint documentation] -[source,ts] ----- -client.searchableSnapshots.mount({ repository, snapshot, index }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`repository` (string)*: The name of the repository containing the snapshot of the index to mount -** *`snapshot` (string)*: The name of the snapshot of the index to mount -** *`index` (string)* -** *`renamed_index` (Optional, string)* -** *`index_settings` (Optional, Record<string, User-defined value>)* -** *`ignore_index_settings` (Optional, string[])* -** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node -** *`wait_for_completion` (Optional, boolean)*: Should this request wait until the operation has completed before returning -** *`storage` (Optional, string)*: Selects the kind of local storage used to accelerate searches. Experimental, and defaults to `full_copy` - -[discrete] -==== stats -Retrieve shard-level statistics about searchable snapshots. - -{ref}/searchable-snapshots-apis.html[Endpoint documentation] -[source,ts] ----- -client.searchableSnapshots.stats({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`index` (Optional, string | string[])*: A list of index names -** *`level` (Optional, Enum("cluster" | "indices" | "shards"))*: Return stats aggregated at cluster, index or shard level - -[discrete] -=== security -[discrete] -==== activate_user_profile -Activate a user profile. - -Create or update a user profile on behalf of another user. - -{ref}/security-api-activate-user-profile.html[Endpoint documentation] -[source,ts] ----- -client.security.activateUserProfile({ grant_type }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`grant_type` (Enum("password" | "access_token"))* -** *`access_token` (Optional, string)* -** *`password` (Optional, string)* -** *`username` (Optional, string)* - -[discrete] -==== authenticate -Authenticate a user. - -Authenticates a user and returns information about the authenticated user. 
-Include the user information in a [basic auth header](https://en.wikipedia.org/wiki/Basic_access_authentication). -A successful call returns a JSON structure that shows user information such as their username, the roles that are assigned to the user, any assigned metadata, and information about the realms that authenticated and authorized the user. -If the user cannot be authenticated, this API returns a 401 status code. - -{ref}/security-api-authenticate.html[Endpoint documentation] -[source,ts] ----- -client.security.authenticate() ----- - - -[discrete] -==== bulk_delete_role -Bulk delete roles. - -The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. -The bulk delete roles API cannot delete roles that are defined in roles files. - -{ref}/security-api-bulk-delete-role.html[Endpoint documentation] -[source,ts] ----- -client.security.bulkDeleteRole({ names }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`names` (string[])*: An array of role names to delete -** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - -[discrete] -==== bulk_put_role -Bulk create or update roles. - -The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. -The bulk create or update roles API cannot update roles that are defined in roles files. - -{ref}/security-api-bulk-put-role.html[Endpoint documentation] -[source,ts] ----- -client.security.bulkPutRole({ roles }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`roles` (Record<string, { cluster, indices, remote_indices, remote_cluster, global, applications, metadata, run_as, description, restriction, transient_metadata }>)*: A dictionary of role name to RoleDescriptor objects to add or update -** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - -[discrete] -==== bulk_update_api_keys -Updates the attributes of multiple existing API keys. - -{ref}/security-api-bulk-update-api-keys.html[Endpoint documentation] -[source,ts] ----- -client.security.bulkUpdateApiKeys() ----- - - -[discrete] -==== change_password -Change passwords. - -Change the passwords of users in the native realm and built-in users. - -{ref}/security-api-change-password.html[Endpoint documentation] -[source,ts] ----- -client.security.changePassword({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`username` (Optional, string)*: The user whose password you want to change. If you do not specify this -parameter, the password is changed for the current user. -** *`password` (Optional, string)*: The new password value. Passwords must be at least 6 characters long. -** *`password_hash` (Optional, string)*: A hash of the new password value. This must be produced using the same -hashing algorithm as has been configured for password storage. For more details, -see the explanation of the `xpack.security.authc.password_hashing.algorithm` -setting. 
-** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - -[discrete] -==== clear_api_key_cache -Clear the API key cache. - -Evict a subset of all entries from the API key cache. -The cache is also automatically cleared on state changes of the security index. - -{ref}/security-api-clear-api-key-cache.html[Endpoint documentation] -[source,ts] ----- -client.security.clearApiKeyCache({ ids }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`ids` (string | string[])*: List of API key IDs to evict from the API key cache. -To evict all API keys, use `*`. -Does not support other wildcard patterns. - -[discrete] -==== clear_cached_privileges -Clear the privileges cache. - -Evict privileges from the native application privilege cache. -The cache is also automatically cleared for applications that have their privileges updated. - -{ref}/security-api-clear-privilege-cache.html[Endpoint documentation] -[source,ts] ----- -client.security.clearCachedPrivileges({ application }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`application` (string)*: A list of application names - -[discrete] -==== clear_cached_realms -Clear the user cache. - -Evict users from the user cache. You can completely clear the cache or evict specific users. - -{ref}/security-api-clear-cache.html[Endpoint documentation] -[source,ts] ----- -client.security.clearCachedRealms({ realms }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`realms` (string | string[])*: List of realms to clear -** *`usernames` (Optional, string[])*: List of usernames to clear from the cache - -[discrete] -==== clear_cached_roles -Clear the roles cache. - -Evict roles from the native role cache. - -{ref}/security-api-clear-role-cache.html[Endpoint documentation] -[source,ts] ----- -client.security.clearCachedRoles({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string | string[])*: Role name - -[discrete] -==== clear_cached_service_tokens -Clear service account token caches. - -Evict a subset of all entries from the service account token caches. - -{ref}/security-api-clear-service-token-caches.html[Endpoint documentation] -[source,ts] ----- -client.security.clearCachedServiceTokens({ namespace, service, name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`namespace` (string)*: An identifier for the namespace -** *`service` (string)*: An identifier for the service name -** *`name` (string | string[])*: A list of service token names - -[discrete] -==== create_api_key -Create an API key. - -Create an API key for access without requiring basic authentication. -A successful request returns a JSON structure that contains the API key, its unique id, and its name. -If applicable, it also returns expiration information for the API key in milliseconds. -NOTE: By default, API keys never expire. You can specify expiration information when you create the API keys. - -{ref}/security-api-create-api-key.html[Endpoint documentation] -[source,ts] ----- -client.security.createApiKey({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`expiration` (Optional, string | -1 | 0)*: Expiration time for the API key. By default, API keys never expire. -** *`name` (Optional, string)*: Specifies the name for this API key. 
-** *`role_descriptors` (Optional, Record<string, { cluster, indices, remote_indices, remote_cluster, global, applications, metadata, run_as, description, restriction, transient_metadata }>)*: An array of role descriptors for this API key. This parameter is optional. When it is not specified or is an empty array, then the API key will have a point in time snapshot of permissions of the authenticated user. If you supply role descriptors then the resultant permissions would be an intersection of API keys permissions and authenticated user’s permissions thereby limiting the access scope for API keys. The structure of role descriptor is the same as the request for create role API. For more details, see create or update roles API. -** *`metadata` (Optional, Record<string, User-defined value>)*: Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. -** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - -[discrete] -==== create_cross_cluster_api_key -Create a cross-cluster API key. - -Create an API key of the `cross_cluster` type for the API key based remote cluster access. -A `cross_cluster` API key cannot be used to authenticate through the REST interface. - -IMPORTANT: To authenticate this request you must use a credential that is not an API key. Even if you use an API key that has the required privilege, the API returns an error. - -Cross-cluster API keys are created by the Elasticsearch API key service, which is automatically enabled. - -NOTE: Unlike REST API keys, a cross-cluster API key does not capture permissions of the authenticated user. The API key’s effective permission is exactly as specified with the `access` property. - -A successful request returns a JSON structure that contains the API key, its unique ID, and its name. If applicable, it also returns expiration information for the API key in milliseconds. - -By default, API keys never expire. You can specify expiration information when you create the API keys. - -Cross-cluster API keys can only be updated with the update cross-cluster API key API. -Attempting to update them with the update REST API key API or the bulk update REST API keys API will result in an error. - -{ref}/security-api-create-cross-cluster-api-key.html[Endpoint documentation] -[source,ts] ----- -client.security.createCrossClusterApiKey({ access, name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`access` ({ replication, search })*: The access to be granted to this API key. -The access is composed of permissions for cross-cluster search and cross-cluster replication. -At least one of them must be specified. - -NOTE: No explicit privileges should be specified for either search or replication access. -The creation process automatically converts the access specification to a role descriptor which has relevant privileges assigned accordingly. -** *`name` (string)*: Specifies the name for this API key. -** *`expiration` (Optional, string | -1 | 0)*: Expiration time for the API key. -By default, API keys never expire. -** *`metadata` (Optional, Record<string, User-defined value>)*: Arbitrary metadata that you want to associate with the API key. -It supports nested data structure. 
-Within the metadata object, keys beginning with `_` are reserved for system usage. - -[discrete] -==== create_service_token -Create a service account token. - -Create a service accounts token for access without requiring basic authentication. - -{ref}/security-api-create-service-token.html[Endpoint documentation] -[source,ts] ----- -client.security.createServiceToken({ namespace, service }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`namespace` (string)*: An identifier for the namespace -** *`service` (string)*: An identifier for the service name -** *`name` (Optional, string)*: An identifier for the token name -** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - -[discrete] -==== delete_privileges -Delete application privileges. - -{ref}/security-api-delete-privilege.html[Endpoint documentation] -[source,ts] ----- -client.security.deletePrivileges({ application, name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`application` (string)*: Application name -** *`name` (string | string[])*: Privilege name -** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - -[discrete] -==== delete_role -Delete roles. - -Delete roles in the native realm. - -{ref}/security-api-delete-role.html[Endpoint documentation] -[source,ts] ----- -client.security.deleteRole({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string)*: Role name -** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - -[discrete] -==== delete_role_mapping -Delete role mappings. - -{ref}/security-api-delete-role-mapping.html[Endpoint documentation] -[source,ts] ----- -client.security.deleteRoleMapping({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string)*: Role-mapping name -** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - -[discrete] -==== delete_service_token -Delete service account tokens. - -Delete service account tokens for a service in a specified namespace. 
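-
-For example, a minimal sketch that removes a single token for the `elastic/fleet-server` service account; `token1` is a hypothetical token name, and the token is assumed to exist:
-
-[source,ts]
-----
-// Delete one named token and wait for the change to be visible to searches.
-const response = await client.security.deleteServiceToken({
-  namespace: 'elastic',
-  service: 'fleet-server',
-  name: 'token1',
-  refresh: 'wait_for'
-})
-console.log(response)
-----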
- -{ref}/security-api-delete-service-token.html[Endpoint documentation] -[source,ts] ----- -client.security.deleteServiceToken({ namespace, service, name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`namespace` (string)*: An identifier for the namespace -** *`service` (string)*: An identifier for the service name -** *`name` (string)*: An identifier for the token name -** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - -[discrete] -==== delete_user -Delete users. - -Delete users from the native realm. - -{ref}/security-api-delete-user.html[Endpoint documentation] -[source,ts] ----- -client.security.deleteUser({ username }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`username` (string)*: username -** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - -[discrete] -==== disable_user -Disable users. - -Disable users in the native realm. - -{ref}/security-api-disable-user.html[Endpoint documentation] -[source,ts] ----- -client.security.disableUser({ username }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`username` (string)*: The username of the user to disable -** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - -[discrete] -==== disable_user_profile -Disable a user profile. - -Disable user profiles so that they are not visible in user profile searches. - -{ref}/security-api-disable-user-profile.html[Endpoint documentation] -[source,ts] ----- -client.security.disableUserProfile({ uid }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`uid` (string)*: Unique identifier for the user profile. -** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If 'true', Elasticsearch refreshes the affected shards to make this operation -visible to search, if 'wait_for' then wait for a refresh to make this operation -visible to search, if 'false' do nothing with refreshes. - -[discrete] -==== enable_user -Enable users. - -Enable users in the native realm. - -{ref}/security-api-enable-user.html[Endpoint documentation] -[source,ts] ----- -client.security.enableUser({ username }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`username` (string)*: The username of the user to enable -** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - -[discrete] -==== enable_user_profile -Enable a user profile. - -Enable user profiles to make them visible in user profile searches. 
- -{ref}/security-api-enable-user-profile.html[Endpoint documentation] -[source,ts] ----- -client.security.enableUserProfile({ uid }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`uid` (string)*: Unique identifier for the user profile. -** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If 'true', Elasticsearch refreshes the affected shards to make this operation -visible to search, if 'wait_for' then wait for a refresh to make this operation -visible to search, if 'false' do nothing with refreshes. - -[discrete] -==== enroll_kibana -Enroll Kibana. - -Enable a Kibana instance to configure itself for communication with a secured Elasticsearch cluster. - -{ref}/security-api-kibana-enrollment.html[Endpoint documentation] -[source,ts] ----- -client.security.enrollKibana() ----- - - -[discrete] -==== enroll_node -Enroll a node. - -Enroll a new node to allow it to join an existing cluster with security features enabled. - -{ref}/security-api-node-enrollment.html[Endpoint documentation] -[source,ts] ----- -client.security.enrollNode() ----- - - -[discrete] -==== get_api_key -Get API key information. - -Retrieves information for one or more API keys. -NOTE: If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own. -If you have `read_security`, `manage_api_key` or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership. - -{ref}/security-api-get-api-key.html[Endpoint documentation] -[source,ts] ----- -client.security.getApiKey({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (Optional, string)*: An API key id. -This parameter cannot be used with any of `name`, `realm_name` or `username`. -** *`name` (Optional, string)*: An API key name. -This parameter cannot be used with any of `id`, `realm_name` or `username`. -It supports prefix search with wildcard. -** *`owner` (Optional, boolean)*: A boolean flag that can be used to query API keys owned by the currently authenticated user. -The `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones. -** *`realm_name` (Optional, string)*: The name of an authentication realm. -This parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`. -** *`username` (Optional, string)*: The username of a user. -This parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`. -** *`with_limited_by` (Optional, boolean)*: Return the snapshot of the owner user's role descriptors -associated with the API key. An API key's actual -permission is the intersection of its assigned role -descriptors and the owner user's role descriptors. -** *`active_only` (Optional, boolean)*: A boolean flag that can be used to query API keys that are currently active. An API key is considered active if it is neither invalidated, nor expired at query time. You can specify this together with other parameters such as `owner` or `name`. If `active_only` is false, the response will include both active and inactive (expired or invalidated) keys. -** *`with_profile_uid` (Optional, boolean)*: Determines whether to also retrieve the profile uid, for the API key owner principal, if it exists. - -[discrete] -==== get_builtin_privileges -Get builtin privileges. - -Get the list of cluster privileges and index privileges that are available in this version of Elasticsearch. 
- -{ref}/security-api-get-builtin-privileges.html[Endpoint documentation] -[source,ts] ----- -client.security.getBuiltinPrivileges() ----- - - -[discrete] -==== get_privileges -Get application privileges. - -{ref}/security-api-get-privileges.html[Endpoint documentation] -[source,ts] ----- -client.security.getPrivileges({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`application` (Optional, string)*: Application name -** *`name` (Optional, string | string[])*: Privilege name - -[discrete] -==== get_role -Get roles. - -Get roles in the native realm. - -{ref}/security-api-get-role.html[Endpoint documentation] -[source,ts] ----- -client.security.getRole({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (Optional, string | string[])*: The name of the role. You can specify multiple roles as a list. If you do not specify this parameter, the API returns information about all roles. - -[discrete] -==== get_role_mapping -Get role mappings. - -Role mappings define which roles are assigned to each user. -The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. -The get role mappings API cannot retrieve role mappings that are defined in role mapping files. - -{ref}/security-api-get-role-mapping.html[Endpoint documentation] -[source,ts] ----- -client.security.getRoleMapping({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (Optional, string | string[])*: The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. You can specify multiple mapping names as a list. If you do not specify this parameter, the API returns information about all role mappings. - -[discrete] -==== get_service_accounts -Get service accounts. - -Get a list of service accounts that match the provided path parameters. - -{ref}/security-api-get-service-accounts.html[Endpoint documentation] -[source,ts] ----- -client.security.getServiceAccounts({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`namespace` (Optional, string)*: Name of the namespace. Omit this parameter to retrieve information about all service accounts. If you omit this parameter, you must also omit the `service` parameter. -** *`service` (Optional, string)*: Name of the service. Omit this parameter to retrieve information about all service accounts that belong to the specified `namespace`. - -[discrete] -==== get_service_credentials -Get service account credentials. - -{ref}/security-api-get-service-credentials.html[Endpoint documentation] -[source,ts] ----- -client.security.getServiceCredentials({ namespace, service }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`namespace` (string)*: Name of the namespace. -** *`service` (string)*: Name of the service. - -[discrete] -==== get_settings -Retrieve settings for the security system indices. - -{ref}/security-api-get-settings.html[Endpoint documentation] -[source,ts] ----- -client.security.getSettings() ----- - - -[discrete] -==== get_token -Get a token. - -Create a bearer token for access without requiring basic authentication. - -{ref}/security-api-get-token.html[Endpoint documentation] -[source,ts] ----- -client.security.getToken({ ...
}) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`grant_type` (Optional, Enum("password" | "client_credentials" | "_kerberos" | "refresh_token"))* -** *`scope` (Optional, string)* -** *`password` (Optional, string)* -** *`kerberos_ticket` (Optional, string)* -** *`refresh_token` (Optional, string)* -** *`username` (Optional, string)* - -[discrete] -==== get_user -Get users. - -Get information about users in the native realm and built-in users. - -{ref}/security-api-get-user.html[Endpoint documentation] -[source,ts] ----- -client.security.getUser({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`username` (Optional, string | string[])*: An identifier for the user. You can specify multiple usernames as a list. If you omit this parameter, the API retrieves information about all users. -** *`with_profile_uid` (Optional, boolean)*: If `true`, returns the user profile ID for each user, if one exists. - -[discrete] -==== get_user_privileges -Get user privileges. - -{ref}/security-api-get-user-privileges.html[Endpoint documentation] -[source,ts] ----- -client.security.getUserPrivileges({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`application` (Optional, string)*: The name of the application. Application privileges are always associated with exactly one application. If you do not specify this parameter, the API returns information about all privileges for all applications. -** *`priviledge` (Optional, string)*: The name of the privilege. If you do not specify this parameter, the API returns information about all privileges for the requested application. -** *`username` (Optional, string | null)* - -[discrete] -==== get_user_profile -Get a user profile. - -Get a user's profile using the unique profile ID. - -{ref}/security-api-get-user-profile.html[Endpoint documentation] -[source,ts] ----- -client.security.getUserProfile({ uid }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`uid` (string | string[])*: A unique identifier for the user profile. -** *`data` (Optional, string | string[])*: List of filters for the `data` field of the profile document. -To return all content use `data=*`. To return a subset of content -use `data=<key>` to retrieve content nested under the specified `<key>`. -By default returns no `data` content. - -[discrete] -==== grant_api_key -Grant an API key. - -Create an API key on behalf of another user. -This API is similar to the create API keys API; however, it creates the API key for a user that is different from the user that runs the API. -The caller must have authentication credentials (either an access token, or a username and password) for the user on whose behalf the API key will be created. -It is not possible to use this API to create an API key without that user’s credentials. -The user for whom the authentication credentials are provided can optionally "run as" (impersonate) another user. -In this case, the API key will be created on behalf of the impersonated user. - -This API is intended to be used by applications that need to create and manage API keys for end users, but cannot guarantee that those users have permission to create API keys on their own behalf. - -A successful grant API key API call returns a JSON structure that contains the API key, its unique id, and its name. -If applicable, it also returns expiration information for the API key in milliseconds. - -By default, API keys never expire. You can specify expiration information when you create the API keys.
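-
-For example, a minimal sketch of granting an API key on behalf of another user with the `password` grant type (the username, password and key name below are placeholders, not real values):
-
-[source,ts]
-----
-const granted = await client.security.grantApiKey({
-  grant_type: 'password',
-  username: 'end-user',          // placeholder: the user the key is created for
-  password: 'end-user-password', // placeholder: that user's credentials
-  api_key: {
-    name: 'granted-key'          // placeholder key name
-  }
-})
-// The response contains the new key's unique id and name and, if set, its expiration.
-----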
- -{ref}/security-api-grant-api-key.html[Endpoint documentation] -[source,ts] ----- -client.security.grantApiKey({ api_key, grant_type }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`api_key` ({ name, expiration, role_descriptors, metadata })*: Defines the API key. -** *`grant_type` (Enum("access_token" | "password"))*: The type of grant. Supported grant types are: `access_token`, `password`. -** *`access_token` (Optional, string)*: The user’s access token. -If you specify the `access_token` grant type, this parameter is required. -It is not valid with other grant types. -** *`username` (Optional, string)*: The user name that identifies the user. -If you specify the `password` grant type, this parameter is required. -It is not valid with other grant types. -** *`password` (Optional, string)*: The user’s password. If you specify the `password` grant type, this parameter is required. -It is not valid with other grant types. -** *`run_as` (Optional, string)*: The name of the user to be impersonated. - -[discrete] -==== has_privileges -Check user privileges. - -Determine whether the specified user has a specified list of privileges. - -{ref}/security-api-has-privileges.html[Endpoint documentation] -[source,ts] ----- -client.security.hasPrivileges({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`user` (Optional, string)*: Username -** *`application` (Optional, { application, privileges, resources }[])* -** *`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_stats" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_fleet_secrets" | "read_ilm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])*: A list of the cluster privileges that you want to check. -** *`index` (Optional, { names, privileges, allow_restricted_indices }[])* - -[discrete] -==== has_privileges_user_profile -Check user profile privileges. - -Determine whether the users associated with the specified user profile IDs have all the requested privileges. - -{ref}/security-api-has-privileges-user-profile.html[Endpoint documentation] -[source,ts] ----- -client.security.hasPrivilegesUserProfile({ uids, privileges }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`uids` (string[])*: A list of profile IDs. The privileges are checked for associated users of the profiles. -** *`privileges` ({ application, cluster, index })* - -[discrete] -==== invalidate_api_key -Invalidate API keys. 
- -This API invalidates API keys created by the create API key or grant API key APIs. -Invalidated API keys fail authentication, but they can still be viewed using the get API key information and query API key information APIs, for at least the configured retention period, until they are automatically deleted. -The `manage_api_key` privilege allows deleting any API keys. -The `manage_own_api_key` only allows deleting API keys that are owned by the user. -In addition, with the `manage_own_api_key` privilege, an invalidation request must be issued in one of the three formats: -- Set the parameter `owner=true`. -- Or, set both `username` and `realm_name` to match the user’s identity. -- Or, if the request is issued by an API key, that is to say an API key invalidates itself, specify its ID in the `ids` field. - -{ref}/security-api-invalidate-api-key.html[Endpoint documentation] -[source,ts] ----- -client.security.invalidateApiKey({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (Optional, string)* -** *`ids` (Optional, string[])*: A list of API key ids. -This parameter cannot be used with any of `name`, `realm_name`, or `username`. -** *`name` (Optional, string)*: An API key name. -This parameter cannot be used with any of `ids`, `realm_name` or `username`. -** *`owner` (Optional, boolean)*: Can be used to query API keys owned by the currently authenticated user. -The `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones. -** *`realm_name` (Optional, string)*: The name of an authentication realm. -This parameter cannot be used with either `ids` or `name`, or when `owner` flag is set to `true`. -** *`username` (Optional, string)*: The username of a user. -This parameter cannot be used with either `ids` or `name`, or when `owner` flag is set to `true`. - -[discrete] -==== invalidate_token -Invalidate a token. - -The access tokens returned by the get token API have a finite period of time for which they are valid. -After that time period, they can no longer be used. -The time period is defined by the `xpack.security.authc.token.timeout` setting. - -The refresh tokens returned by the get token API are only valid for 24 hours. They can also be used exactly once. -If you want to invalidate one or more access or refresh tokens immediately, use this invalidate token API. - -{ref}/security-api-invalidate-token.html[Endpoint documentation] -[source,ts] ----- -client.security.invalidateToken({ ... 
}) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`token` (Optional, string)* -** *`refresh_token` (Optional, string)* -** *`realm_name` (Optional, string)* -** *`username` (Optional, string)* - -[discrete] -==== oidc_authenticate -Exchanges an OpenID Connect authentication response message for an Elasticsearch access token and refresh token pair. - -{ref}/security-api-oidc-authenticate.html[Endpoint documentation] -[source,ts] ----- -client.security.oidcAuthenticate() ----- - - -[discrete] -==== oidc_logout -Invalidates a refresh token and access token that were generated from the OpenID Connect Authenticate API. - -{ref}/security-api-oidc-logout.html[Endpoint documentation] -[source,ts] ----- -client.security.oidcLogout() ----- - - -[discrete] -==== oidc_prepare_authentication -Creates an OAuth 2.0 authentication request as a URL string. - -{ref}/security-api-oidc-prepare-authentication.html[Endpoint documentation] -[source,ts] ----- -client.security.oidcPrepareAuthentication() ----- - - -[discrete] -==== put_privileges -Create or update application privileges. - -{ref}/security-api-put-privileges.html[Endpoint documentation] -[source,ts] ----- -client.security.putPrivileges({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`privileges` (Optional, Record<string, Record<string, { allocate, delete, downsample, freeze, forcemerge, migrate, readonly, rollover, set_priority, searchable_snapshot, shrink, unfollow, wait_for_snapshot }>>)* -** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - -[discrete] -==== put_role -Create or update roles. - -The role management APIs are generally the preferred way to manage roles in the native realm, rather than using file-based role management. -The create or update roles API cannot update roles that are defined in roles files. -File-based role management is not available in Elastic Serverless. - -{ref}/security-api-put-role.html[Endpoint documentation] -[source,ts] ----- -client.security.putRole({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string)*: The name of the role that is being created or updated. On Elasticsearch Serverless, the role name must begin with a letter or digit and can only contain letters, digits and the characters '_', '-', and '.'. Each role must have a unique name, as this will serve as the identifier for that role. -** *`applications` (Optional, { application, privileges, resources }[])*: A list of application privilege entries.
-** *`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_stats" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_fleet_secrets" | "read_ilm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])*: A list of cluster privileges. These privileges define the cluster-level actions for users with this role. -** *`global` (Optional, Record<string, User-defined value>)*: An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges. -** *`indices` (Optional, { field_security, names, privileges, query, allow_restricted_indices }[])*: A list of indices permissions entries. -** *`remote_indices` (Optional, { clusters, field_security, names, privileges, query, allow_restricted_indices }[])*: A list of remote indices permissions entries. -** *`remote_cluster` (Optional, { clusters, privileges }[])*: A list of remote cluster permissions entries. -** *`metadata` (Optional, Record<string, User-defined value>)*: Optional metadata. Within the metadata object, keys that begin with an underscore (`_`) are reserved for system use. -** *`run_as` (Optional, string[])*: A list of users that the owners of this role can impersonate. *Note*: in Serverless, the run-as feature is disabled. For API compatibility, you can still specify an empty `run_as` field, but a non-empty list will be rejected. -** *`description` (Optional, string)*: Optional description of the role descriptor -** *`transient_metadata` (Optional, Record<string, User-defined value>)*: Indicates roles that might be incompatible with the current cluster license, specifically roles with document and field level security. When the cluster license doesn’t allow certain features for a given role, this parameter is updated dynamically to list the incompatible features. If `enabled` is `false`, the role is ignored, but is still listed in the response from the authenticate API. -** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - -[discrete] -==== put_role_mapping -Create or update role mappings. - -Role mappings define which roles are assigned to each user. 
-Each mapping has rules that identify users and a list of roles that are granted to those users. -The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The create or update role mappings API cannot update role mappings that are defined in role mapping files. - -This API does not create roles. Rather, it maps users to existing roles. -Roles can be created by using the create or update roles API or roles files. - -{ref}/security-api-put-role-mapping.html[Endpoint documentation] -[source,ts] ----- -client.security.putRoleMapping({ name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (string)*: Role-mapping name -** *`enabled` (Optional, boolean)* -** *`metadata` (Optional, Record<string, User-defined value>)* -** *`roles` (Optional, string[])* -** *`role_templates` (Optional, { format, template }[])* -** *`rules` (Optional, { any, all, field, except })* -** *`run_as` (Optional, string[])* -** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - -[discrete] -==== put_user -Create or update users. - -A password is required for adding a new user but is optional when updating an existing user. -To change a user’s password without updating any other fields, use the change password API. - -{ref}/security-api-put-user.html[Endpoint documentation] -[source,ts] ----- -client.security.putUser({ username }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`username` (string)*: The username of the User -** *`email` (Optional, string | null)* -** *`full_name` (Optional, string | null)* -** *`metadata` (Optional, Record<string, User-defined value>)* -** *`password` (Optional, string)* -** *`password_hash` (Optional, string)* -** *`roles` (Optional, string[])* -** *`enabled` (Optional, boolean)* -** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - -[discrete] -==== query_api_keys -Find API keys with a query. - -Get a paginated list of API keys and their information. You can optionally filter the results with a query. - -{ref}/security-api-query-api-key.html[Endpoint documentation] -[source,ts] ----- -client.security.queryApiKeys({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`aggregations` (Optional, Record<string, { aggregations, meta, cardinality, composite, date_range, filter, filters, missing, range, terms, value_count }>)*: Any aggregations to run over the corpus of returned API keys. -Aggregations and queries work together. Aggregations are computed only on the API keys that match the query. -This supports only a subset of aggregation types, namely: `terms`, `range`, `date_range`, `missing`, -`cardinality`, `value_count`, `composite`, `filter`, and `filters`. -Additionally, aggregations only run over the same subset of fields that query works with. -** *`query` (Optional, { bool, exists, ids, match, match_all, prefix, range, simple_query_string, term, terms, wildcard })*: A query to filter which API keys to return. -If the query parameter is missing, it is equivalent to a `match_all` query. 
-The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, -`ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. -You can query the following public information associated with an API key: `id`, `type`, `name`, -`creation`, `expiration`, `invalidated`, `invalidation`, `username`, `realm`, and `metadata`. -** *`from` (Optional, number)*: Starting document offset. -By default, you cannot page through more than 10,000 hits using the from and size parameters. -To page through more hits, use the `search_after` parameter. -** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: Other than `id`, all public fields of an API key are eligible for sorting. -In addition, sort can also be applied to the `_doc` field to sort by index order. -** *`size` (Optional, number)*: The number of hits to return. -By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. -To page through more hits, use the `search_after` parameter. -** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*: Search after definition -** *`with_limited_by` (Optional, boolean)*: Return the snapshot of the owner user's role descriptors associated with the API key. -An API key's actual permission is the intersection of its assigned role descriptors and the owner user's role descriptors. -** *`with_profile_uid` (Optional, boolean)*: Determines whether to also retrieve the profile uid, for the API key owner principal, if it exists. -** *`typed_keys` (Optional, boolean)*: Determines whether aggregation names are prefixed by their respective types in the response. - -[discrete] -==== query_role -Find roles with a query. - -Get roles in a paginated manner. You can optionally filter the results with a query. - -{ref}/security-api-query-role.html[Endpoint documentation] -[source,ts] ----- -client.security.queryRole({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`query` (Optional, { bool, exists, ids, match, match_all, prefix, range, simple_query_string, term, terms, wildcard })*: A query to filter which roles to return. -If the query parameter is missing, it is equivalent to a `match_all` query. -The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, -`ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. -You can query the following information associated with roles: `name`, `description`, `metadata`, -`applications.application`, `applications.privileges`, `applications.resources`. -** *`from` (Optional, number)*: Starting document offset. -By default, you cannot page through more than 10,000 hits using the from and size parameters. -To page through more hits, use the `search_after` parameter. -** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: All public fields of a role are eligible for sorting. -In addition, sort can also be applied to the `_doc` field to sort by index order. -** *`size` (Optional, number)*: The number of hits to return. -By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. -To page through more hits, use the `search_after` parameter. 
-** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*: Search after definition - -[discrete] -==== query_user -Find users with a query. - -Get information for users in a paginated manner. -You can optionally filter the results with a query. - -{ref}/security-api-query-user.html[Endpoint documentation] -[source,ts] ----- -client.security.queryUser({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`query` (Optional, { ids, bool, exists, match, match_all, prefix, range, simple_query_string, term, terms, wildcard })*: A query to filter which users to return. -If the query parameter is missing, it is equivalent to a `match_all` query. -The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, -`ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. -You can query the following information associated with user: `username`, `roles`, `enabled` -** *`from` (Optional, number)*: Starting document offset. -By default, you cannot page through more than 10,000 hits using the from and size parameters. -To page through more hits, use the `search_after` parameter. -** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: Fields eligible for sorting are: username, roles, enabled -In addition, sort can also be applied to the `_doc` field to sort by index order. -** *`size` (Optional, number)*: The number of hits to return. -By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. -To page through more hits, use the `search_after` parameter. -** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*: Search after definition -** *`with_profile_uid` (Optional, boolean)*: If true will return the User Profile ID for the users in the query result, if any. - -[discrete] -==== saml_authenticate -Authenticate SAML. - -Submits a SAML response message to Elasticsearch for consumption. - -{ref}/security-api-saml-authenticate.html[Endpoint documentation] -[source,ts] ----- -client.security.samlAuthenticate({ content, ids }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`content` (string)*: The SAML response as it was sent by the user’s browser, usually a Base64 encoded XML document. -** *`ids` (string | string[])*: A json array with all the valid SAML Request Ids that the caller of the API has for the current user. -** *`realm` (Optional, string)*: The name of the realm that should authenticate the SAML response. Useful in cases where many SAML realms are defined. - -[discrete] -==== saml_complete_logout -Logout of SAML completely. - -Verifies the logout response sent from the SAML IdP. - -{ref}/security-api-saml-complete-logout.html[Endpoint documentation] -[source,ts] ----- -client.security.samlCompleteLogout({ realm, ids }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`realm` (string)*: The name of the SAML realm in Elasticsearch for which the configuration is used to verify the logout response. -** *`ids` (string | string[])*: A json array with all the valid SAML Request Ids that the caller of the API has for the current user. -** *`query_string` (Optional, string)*: If the SAML IdP sends the logout response with the HTTP-Redirect binding, this field must be set to the query string of the redirect URI. 
-** *`content` (Optional, string)*: If the SAML IdP sends the logout response with the HTTP-Post binding, this field must be set to the value of the SAMLResponse form parameter from the logout response. - -[discrete] -==== saml_invalidate -Invalidate SAML. - -Submits a SAML LogoutRequest message to Elasticsearch for consumption. - -{ref}/security-api-saml-invalidate.html[Endpoint documentation] -[source,ts] ----- -client.security.samlInvalidate({ query_string }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`query_string` (string)*: The query part of the URL that the user was redirected to by the SAML IdP to initiate the Single Logout. -This query should include a single parameter named SAMLRequest that contains a SAML logout request that is deflated and Base64 encoded. -If the SAML IdP has signed the logout request, the URL should include two extra parameters named SigAlg and Signature that contain the algorithm used for the signature and the signature value itself. -In order for Elasticsearch to be able to verify the IdP’s signature, the value of the query_string field must be an exact match to the string provided by the browser. -The client application must not attempt to parse or process the string in any way. -** *`acs` (Optional, string)*: The Assertion Consumer Service URL that matches the one configured for the SAML realm in Elasticsearch that should be used. You must specify either this parameter or the realm parameter. -** *`realm` (Optional, string)*: The name of the SAML realm in Elasticsearch whose configuration should be used. You must specify either this parameter or the acs parameter. - -[discrete] -==== saml_logout -Logout of SAML. - -Submits a request to invalidate an access token and refresh token. - -{ref}/security-api-saml-logout.html[Endpoint documentation] -[source,ts] ----- -client.security.samlLogout({ token }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`token` (string)*: The access token that was returned as a response to calling the SAML authenticate API. -Alternatively, the most recent token that was received after refreshing the original one by using a refresh_token. -** *`refresh_token` (Optional, string)*: The refresh token that was returned as a response to calling the SAML authenticate API. -Alternatively, the most recent refresh token that was received after refreshing the original access token. - -[discrete] -==== saml_prepare_authentication -Prepare SAML authentication. - -Creates a SAML authentication request (`<AuthnRequest>`) as a URL string, based on the configuration of the respective SAML realm in Elasticsearch. - -{ref}/security-api-saml-prepare-authentication.html[Endpoint documentation] -[source,ts] ----- -client.security.samlPrepareAuthentication({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`acs` (Optional, string)*: The Assertion Consumer Service URL that matches the one configured for one of the SAML realms in Elasticsearch. -The realm is used to generate the authentication request. You must specify either this parameter or the realm parameter. -** *`realm` (Optional, string)*: The name of the SAML realm in Elasticsearch for which the configuration is used to generate the authentication request. -You must specify either this parameter or the acs parameter. -** *`relay_state` (Optional, string)*: A string that will be included in the redirect URL that this API returns as the RelayState query parameter. -If the Authentication Request is signed, this value is used as part of the signature computation.
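-
-For example, a minimal sketch that prepares a SAML authentication request for a hypothetical realm named `saml1`:
-
-[source,ts]
-----
-// Ask Elasticsearch to build the SAML <AuthnRequest> URL for the "saml1" realm.
-const prepared = await client.security.samlPrepareAuthentication({ realm: 'saml1' })
-// Redirect the user's browser to the returned URL and keep the returned SAML request ID,
-// which can later be passed in the `ids` array of the SAML authenticate API.
-----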
- -[discrete] -==== saml_service_provider_metadata -Create SAML service provider metadata. - -Generate SAML metadata for a SAML 2.0 Service Provider. - -{ref}/security-api-saml-sp-metadata.html[Endpoint documentation] -[source,ts] ----- -client.security.samlServiceProviderMetadata({ realm_name }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`realm_name` (string)*: The name of the SAML realm in Elasticsearch. - -[discrete] -==== suggest_user_profiles -Suggest a user profile. - -Get suggestions for user profiles that match specified search criteria. - -{ref}/security-api-suggest-user-profile.html[Endpoint documentation] -[source,ts] ----- -client.security.suggestUserProfiles({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`name` (Optional, string)*: Query string used to match name-related fields in user profile documents. -Name-related fields are the user's `username`, `full_name`, and `email`. -** *`size` (Optional, number)*: Number of profiles to return. -** *`data` (Optional, string | string[])*: List of filters for the `data` field of the profile document. -To return all content use `data=*`. To return a subset of content -use `data=<key>` to retrieve content nested under the specified `<key>`. -By default returns no `data` content. -** *`hint` (Optional, { uids, labels })*: Extra search criteria to improve relevance of the suggestion result. -Profiles matching the specified hint are ranked higher in the response. -Profiles that do not match the hint are not excluded from the response, -as long as they match the `name` field query. - -[discrete] -==== update_api_key -Update an API key. - -Updates attributes of an existing API key. -Users can only update API keys that they created or that were granted to them. -Use this API to update API keys created by the create API Key or grant API Key APIs. -If you need to apply the same update to many API keys, you can use bulk update API Keys to reduce overhead. -It’s not possible to update expired API keys, or API keys that have been invalidated by invalidate API Key. -This API supports updates to an API key’s access scope and metadata. -The access scope of an API key is derived from the `role_descriptors` you specify in the request, and a snapshot of the owner user’s permissions at the time of the request. -The snapshot of the owner’s permissions is updated automatically on every call. -If you don’t specify `role_descriptors` in the request, a call to this API might still change the API key’s access scope. -This change can occur if the owner user’s permissions have changed since the API key was created or last modified. -To update another user’s API key, use the `run_as` feature to submit a request on behalf of another user. -IMPORTANT: It’s not possible to use an API key as the authentication credential for this API. -To update an API key, the owner user’s credentials are required. - -{ref}/security-api-update-api-key.html[Endpoint documentation] -[source,ts] ----- -client.security.updateApiKey({ id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: The ID of the API key to update. -** *`role_descriptors` (Optional, Record<string, { cluster, indices, remote_indices, remote_cluster, global, applications, metadata, run_as, description, restriction, transient_metadata }>)*: An array of role descriptors for this API key. This parameter is optional.
When it is not specified or is an empty array, then the API key will have a point in time snapshot of permissions of the authenticated user. If you supply role descriptors then the resultant permissions would be an intersection of API keys permissions and authenticated user’s permissions thereby limiting the access scope for API keys. The structure of role descriptor is the same as the request for create role API. For more details, see create or update roles API. -** *`metadata` (Optional, Record<string, User-defined value>)*: Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with _ are reserved for system usage. -** *`expiration` (Optional, string | -1 | 0)*: Expiration time for the API key. - -[discrete] -==== update_cross_cluster_api_key -Update a cross-cluster API key. - -Update the attributes of an existing cross-cluster API key, which is used for API key based remote cluster access. - -{ref}/security-api-update-cross-cluster-api-key.html[Endpoint documentation] -[source,ts] ----- -client.security.updateCrossClusterApiKey({ id, access }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: The ID of the cross-cluster API key to update. -** *`access` ({ replication, search })*: The access to be granted to this API key. -The access is composed of permissions for cross cluster search and cross cluster replication. -At least one of them must be specified. -When specified, the new access assignment fully replaces the previously assigned access. -** *`expiration` (Optional, string | -1 | 0)*: Expiration time for the API key. -By default, API keys never expire. This property can be omitted to leave the value unchanged. -** *`metadata` (Optional, Record<string, User-defined value>)*: Arbitrary metadata that you want to associate with the API key. -It supports nested data structure. -Within the metadata object, keys beginning with `_` are reserved for system usage. -When specified, this information fully replaces metadata previously associated with the API key. - -[discrete] -==== update_settings -Update settings for the security system index - -{ref}/security-api-update-settings.html[Endpoint documentation] -[source,ts] ----- -client.security.updateSettings() ----- - - -[discrete] -==== update_user_profile_data -Update user profile data. - -Update specific data for the user profile that is associated with a unique ID. - -{ref}/security-api-update-user-profile-data.html[Endpoint documentation] -[source,ts] ----- -client.security.updateUserProfileData({ uid }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`uid` (string)*: A unique identifier for the user profile. -** *`labels` (Optional, Record<string, User-defined value>)*: Searchable data that you want to associate with the user profile. This -field supports a nested data structure. -** *`data` (Optional, Record<string, User-defined value>)*: Non-searchable data that you want to associate with the user profile. -This field supports a nested data structure. -** *`if_seq_no` (Optional, number)*: Only perform the operation if the document has this sequence number. -** *`if_primary_term` (Optional, number)*: Only perform the operation if the document has this primary term. 
-** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If 'true', Elasticsearch refreshes the affected shards to make this operation -visible to search, if 'wait_for' then wait for a refresh to make this operation -visible to search, if 'false' do nothing with refreshes. - -[discrete] -=== shutdown -[discrete] -==== delete_node -Removes a node from the shutdown list. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. - -https://www.elastic.co/guide/en/elasticsearch/reference/current[Endpoint documentation] -[source,ts] ----- -client.shutdown.deleteNode({ node_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`node_id` (string)*: The node ID of the node to be removed from the shutdown state -** *`master_timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== get_node -Retrieve status of a node or nodes that are currently marked as shutting down. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. - -https://www.elastic.co/guide/en/elasticsearch/reference/current[Endpoint documentation] -[source,ts] ----- -client.shutdown.getNode({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`node_id` (Optional, string | string[])*: The node, or nodes, for which to retrieve the shutdown status -** *`master_timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== put_node -Adds a node to be shut down. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. - -https://www.elastic.co/guide/en/elasticsearch/reference/current[Endpoint documentation] -[source,ts] ----- -client.shutdown.putNode({ node_id, type, reason }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`node_id` (string)*: The node ID of the node to be shut down -** *`type` (Enum("restart" | "remove" | "replace"))*: Valid values are restart, remove, or replace. -Use restart when you need to temporarily shut down a node to perform an upgrade, make configuration changes, or perform other maintenance. -Because the node is expected to rejoin the cluster, data is not migrated off of the node. -Use remove when you need to permanently remove a node from the cluster. -The node is not marked ready for shutdown until data is migrated off of the node. Use replace to do a 1:1 replacement of a node with another node. -Certain allocation decisions will be ignored (such as disk watermarks) in the interest of true replacement of the source node with the target node. -During a replace-type shutdown, rollover and index creation may result in unassigned shards, and shrink may fail until the replacement is complete. -** *`reason` (string)*: A human-readable reason that the node is being shut down.
-This field provides information for other cluster operators; it does not affect the shut down process. -** *`allocation_delay` (Optional, string)*: Only valid if type is restart. -Controls how long Elasticsearch will wait for the node to restart and join the cluster before reassigning its shards to other nodes. -This works the same as delaying allocation with the index.unassigned.node_left.delayed_timeout setting. -If you specify both a restart allocation delay and an index-level allocation delay, the longer of the two is used. -** *`target_node_name` (Optional, string)*: Only valid if type is replace. -Specifies the name of the node that is replacing the node being shut down. -Shards from the shut down node are only allowed to be allocated to the target node, and no other data will be allocated to the target node. -During relocation of data certain allocation rules are ignored, such as disk watermarks or user attribute filtering rules. -** *`master_timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -=== simulate -[discrete] -==== ingest -Simulates running ingest with example documents. - -{ref}/simulate-ingest-api.html[Endpoint documentation] -[source,ts] ----- -client.simulate.ingest() ----- - - -[discrete] -=== slm -[discrete] -==== delete_lifecycle -Deletes an existing snapshot lifecycle policy. - -{ref}/slm-api-delete-policy.html[Endpoint documentation] -[source,ts] ----- -client.slm.deleteLifecycle({ policy_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`policy_id` (string)*: The id of the snapshot lifecycle policy to remove - -[discrete] -==== execute_lifecycle -Immediately creates a snapshot according to the lifecycle policy, without waiting for the scheduled time. - -{ref}/slm-api-execute-lifecycle.html[Endpoint documentation] -[source,ts] ----- -client.slm.executeLifecycle({ policy_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`policy_id` (string)*: The id of the snapshot lifecycle policy to be executed - -[discrete] -==== execute_retention -Deletes any snapshots that are expired according to the policy's retention rules. - -{ref}/slm-api-execute-retention.html[Endpoint documentation] -[source,ts] ----- -client.slm.executeRetention() ----- - - -[discrete] -==== get_lifecycle -Retrieves one or more snapshot lifecycle policy definitions and information about the latest snapshot attempts. - -{ref}/slm-api-get-policy.html[Endpoint documentation] -[source,ts] ----- -client.slm.getLifecycle({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`policy_id` (Optional, string | string[])*: List of snapshot lifecycle policies to retrieve - -[discrete] -==== get_stats -Returns global and policy-level statistics about actions taken by snapshot lifecycle management. - -{ref}/slm-api-get-stats.html[Endpoint documentation] -[source,ts] ----- -client.slm.getStats() ----- - - -[discrete] -==== get_status -Retrieves the status of snapshot lifecycle management (SLM). 
- -{ref}/slm-api-get-status.html[Endpoint documentation] -[source,ts] ----- -client.slm.getStatus() ----- - - -[discrete] -==== put_lifecycle -Creates or updates a snapshot lifecycle policy. - -{ref}/slm-api-put-policy.html[Endpoint documentation] -[source,ts] ----- -client.slm.putLifecycle({ policy_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`policy_id` (string)*: ID for the snapshot lifecycle policy you want to create or update. -** *`config` (Optional, { ignore_unavailable, indices, include_global_state, feature_states, metadata, partial })*: Configuration for each snapshot created by the policy. -** *`name` (Optional, string)*: Name automatically assigned to each snapshot created by the policy. Date math is supported. To prevent conflicting snapshot names, a UUID is automatically appended to each snapshot name. -** *`repository` (Optional, string)*: Repository used to store snapshots created by this policy. This repository must exist prior to the policy’s creation. You can create a repository using the snapshot repository API. -** *`retention` (Optional, { expire_after, max_count, min_count })*: Retention rules used to retain and delete snapshots created by the policy. -** *`schedule` (Optional, string)*: Periodic or absolute schedule at which the policy creates snapshots. SLM applies schedule changes immediately. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== start -Turns on snapshot lifecycle management (SLM). - -{ref}/slm-api-start.html[Endpoint documentation] -[source,ts] ----- -client.slm.start() ----- - - -[discrete] -==== stop -Turns off snapshot lifecycle management (SLM). - -{ref}/slm-api-stop.html[Endpoint documentation] -[source,ts] ----- -client.slm.stop() ----- - - -[discrete] -=== snapshot -[discrete] -==== cleanup_repository -Triggers the review of a snapshot repository’s contents and deletes any stale data not referenced by existing snapshots. - -{ref}/clean-up-snapshot-repo-api.html[Endpoint documentation] -[source,ts] ----- -client.snapshot.cleanupRepository({ repository }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`repository` (string)*: Snapshot repository to clean up. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. - -[discrete] -==== clone -Clones indices from one snapshot into another snapshot in the same repository. - -{ref}/modules-snapshots.html[Endpoint documentation] -[source,ts] ----- -client.snapshot.clone({ repository, snapshot, target_snapshot, indices }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`repository` (string)*: A repository name -** *`snapshot` (string)*: The name of the snapshot to clone from -** *`target_snapshot` (string)*: The name of the cloned snapshot to create -** *`indices` (string)* -** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node -** *`timeout` (Optional, string | -1 | 0)* - -[discrete] -==== create -Creates a snapshot in a repository. 
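-
-For example, a minimal sketch that snapshots selected indices into a hypothetical repository named `my_repository` and waits for the snapshot to complete (the repository, snapshot and index names are placeholders):
-
-[source,ts]
-----
-const response = await client.snapshot.create({
-  repository: 'my_repository', // placeholder: must be registered beforehand
-  snapshot: 'snapshot_1',      // placeholder: must be unique within the repository
-  indices: 'my-index-*',       // placeholder: multi-target syntax is supported
-  wait_for_completion: true    // respond only once the snapshot is complete
-})
-----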
- -{ref}/modules-snapshots.html[Endpoint documentation] -[source,ts] ----- -client.snapshot.create({ repository, snapshot }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`repository` (string)*: Repository for the snapshot. -** *`snapshot` (string)*: Name of the snapshot. Must be unique in the repository. -** *`ignore_unavailable` (Optional, boolean)*: If `true`, the request ignores data streams and indices in `indices` that are missing or closed. If `false`, the request returns an error for any data stream or index that is missing or closed. -** *`include_global_state` (Optional, boolean)*: If `true`, the current cluster state is included in the snapshot. The cluster state includes persistent cluster settings, composable index templates, legacy index templates, ingest pipelines, and ILM policies. It also includes data stored in system indices, such as Watches and task records (configurable via `feature_states`). -** *`indices` (Optional, string | string[])*: Data streams and indices to include in the snapshot. Supports multi-target syntax. Includes all data streams and indices by default. -** *`feature_states` (Optional, string[])*: Feature states to include in the snapshot. Each feature state includes one or more system indices containing related data. You can view a list of eligible features using the get features API. If `include_global_state` is `true`, all current feature states are included by default. If `include_global_state` is `false`, no feature states are included by default. -** *`metadata` (Optional, Record<string, User-defined value>)*: Optional metadata for the snapshot. May have any contents. Must be less than 1024 bytes. This map is not automatically generated by Elasticsearch. -** *`partial` (Optional, boolean)*: If `true`, allows restoring a partial snapshot of indices with unavailable shards. Only shards that were successfully included in the snapshot will be restored. All missing shards will be recreated as empty. If `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -** *`wait_for_completion` (Optional, boolean)*: If `true`, the request returns a response when the snapshot is complete. If `false`, the request returns a response when the snapshot initializes. - -[discrete] -==== create_repository -Creates a repository. - -{ref}/modules-snapshots.html[Endpoint documentation] -[source,ts] ----- -client.snapshot.createRepository({ repository }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`repository` (string)*: A repository name -** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node -** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout -** *`verify` (Optional, boolean)*: Whether to verify the repository after creation - -[discrete] -==== delete -Deletes one or more snapshots. 
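-
-A usage sketch, with placeholder repository and snapshot names and an existing `client` instance assumed:
-
-[source,ts]
-----
-// Delete several snapshots at once by passing a comma-separated list of names.
-await client.snapshot.delete({
-  repository: 'my_repository',
-  snapshot: 'nightly-001,nightly-002'
-})
-----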
- -{ref}/modules-snapshots.html[Endpoint documentation] -[source,ts] ----- -client.snapshot.delete({ repository, snapshot }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`repository` (string)*: A repository name -** *`snapshot` (string)*: A list of snapshot names -** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node - -[discrete] -==== delete_repository -Deletes a repository. - -{ref}/modules-snapshots.html[Endpoint documentation] -[source,ts] ----- -client.snapshot.deleteRepository({ repository }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`repository` (string | string[])*: Name of the snapshot repository to unregister. Wildcard (`*`) patterns are supported. -** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node -** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout - -[discrete] -==== get -Returns information about a snapshot. - -{ref}/modules-snapshots.html[Endpoint documentation] -[source,ts] ----- -client.snapshot.get({ repository, snapshot }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`repository` (string)*: List of snapshot repository names used to limit the request. Wildcard (*) expressions are supported. -** *`snapshot` (string | string[])*: List of snapshot names to retrieve. Also accepts wildcards (*). -- To get information about all snapshots in a registered repository, use a wildcard (*) or _all. -- To get information about any snapshots that are currently running, use _current. -** *`ignore_unavailable` (Optional, boolean)*: If false, the request returns an error for any snapshots that are unavailable. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -** *`verbose` (Optional, boolean)*: If true, returns additional information about each snapshot such as the version of Elasticsearch which took the snapshot, the start and end times of the snapshot, and the number of shards snapshotted. -** *`index_details` (Optional, boolean)*: If true, returns additional information about each index in the snapshot comprising the number of shards in the index, the total size of the index in bytes, and the maximum number of segments per shard in the index. Defaults to false, meaning that this information is omitted. -** *`index_names` (Optional, boolean)*: If true, returns the name of each index in each snapshot. -** *`include_repository` (Optional, boolean)*: If true, returns the repository name in each snapshot. -** *`sort` (Optional, Enum("start_time" | "duration" | "name" | "index_count" | "repository" | "shard_count" | "failed_shard_count"))*: Allows setting a sort order for the result. Defaults to start_time, i.e. sorting by snapshot start time stamp. -** *`size` (Optional, number)*: Maximum number of snapshots to return. Defaults to 0 which means return all that match the request without limit. -** *`order` (Optional, Enum("asc" | "desc"))*: Sort order. Valid values are asc for ascending and desc for descending order. Defaults to asc, meaning ascending order. -** *`after` (Optional, string)*: Offset identifier to start pagination from as returned by the next field in the response body. -** *`offset` (Optional, number)*: Numeric offset to start pagination from based on the snapshots matching this request. 
Using a non-zero value for this parameter is mutually exclusive with using the after parameter. Defaults to 0. -** *`from_sort_value` (Optional, string)*: Value of the current sort column at which to start retrieval. Can either be a string snapshot- or repository name when sorting by snapshot or repository name, a millisecond time value or a number when sorting by index- or shard count. -** *`slm_policy_filter` (Optional, string)*: Filter snapshots by a list of SLM policy names that snapshots belong to. Also accepts wildcards (*) and combinations of wildcards followed by exclude patterns starting with -. To include snapshots not created by an SLM policy you can use the special pattern _none that will match all snapshots without an SLM policy. - -[discrete] -==== get_repository -Returns information about a repository. - -{ref}/modules-snapshots.html[Endpoint documentation] -[source,ts] ----- -client.snapshot.getRepository({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`repository` (Optional, string | string[])*: A list of repository names -** *`local` (Optional, boolean)*: Return local information, do not retrieve the state from master node (default: false) -** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node - -[discrete] -==== repository_analyze -Analyzes a repository for correctness and performance - -{ref}/modules-snapshots.html[Endpoint documentation] -[source,ts] ----- -client.snapshot.repositoryAnalyze() ----- - - -[discrete] -==== restore -Restores a snapshot. - -{ref}/modules-snapshots.html[Endpoint documentation] -[source,ts] ----- -client.snapshot.restore({ repository, snapshot }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`repository` (string)*: A repository name -** *`snapshot` (string)*: A snapshot name -** *`feature_states` (Optional, string[])* -** *`ignore_index_settings` (Optional, string[])* -** *`ignore_unavailable` (Optional, boolean)* -** *`include_aliases` (Optional, boolean)* -** *`include_global_state` (Optional, boolean)* -** *`index_settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })* -** *`indices` (Optional, string | string[])* -** *`partial` (Optional, boolean)* -** *`rename_pattern` (Optional, string)* -** *`rename_replacement` (Optional, string)* -** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node -** *`wait_for_completion` (Optional, boolean)*: Should this request wait until the operation has completed before returning - -[discrete] -==== status -Returns information about the status of a snapshot. - -{ref}/modules-snapshots.html[Endpoint documentation] -[source,ts] ----- -client.snapshot.status({ ... 
}) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`repository` (Optional, string)*: A repository name -** *`snapshot` (Optional, string | string[])*: A list of snapshot names -** *`ignore_unavailable` (Optional, boolean)*: Whether to ignore unavailable snapshots, defaults to false which means a SnapshotMissingException is thrown -** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node - -[discrete] -==== verify_repository -Verifies a repository. - -{ref}/modules-snapshots.html[Endpoint documentation] -[source,ts] ----- -client.snapshot.verifyRepository({ repository }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`repository` (string)*: A repository name -** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node -** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout - -[discrete] -=== sql -[discrete] -==== clear_cursor -Clear an SQL search cursor. - -{ref}/clear-sql-cursor-api.html[Endpoint documentation] -[source,ts] ----- -client.sql.clearCursor({ cursor }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`cursor` (string)*: Cursor to clear. - -[discrete] -==== delete_async -Delete an async SQL search. -Delete an async SQL search or a stored synchronous SQL search. -If the search is still running, the API cancels it. - -{ref}/delete-async-sql-search-api.html[Endpoint documentation] -[source,ts] ----- -client.sql.deleteAsync({ id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: Identifier for the search. - -[discrete] -==== get_async -Get async SQL search results. -Get the current status and available results for an async SQL search or stored synchronous SQL search. - -{ref}/get-async-sql-search-api.html[Endpoint documentation] -[source,ts] ----- -client.sql.getAsync({ id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: Identifier for the search. -** *`delimiter` (Optional, string)*: Separator for CSV results. The API only supports this parameter for CSV responses. -** *`format` (Optional, string)*: Format for the response. You must specify a format using this parameter or the -Accept HTTP header. If you specify both, the API uses this parameter. -** *`keep_alive` (Optional, string | -1 | 0)*: Retention period for the search and its results. Defaults -to the `keep_alive` period for the original SQL search. -** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*: Period to wait for complete results. Defaults to no timeout, -meaning the request waits for complete search results. - -[discrete] -==== get_async_status -Get the async SQL search status. -Get the current status of an async SQL search or a stored synchronous SQL search. - -{ref}/get-async-sql-search-status-api.html[Endpoint documentation] -[source,ts] ----- -client.sql.getAsyncStatus({ id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: Identifier for the search. - -[discrete] -==== query -Get SQL search results. -Run an SQL request. - -{ref}/sql-search-api.html[Endpoint documentation] -[source,ts] ----- -client.sql.query({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`catalog` (Optional, string)*: Default catalog (cluster) for queries. If unspecified, the queries execute on the data in the local cluster only. 
-** *`columnar` (Optional, boolean)*: If true, the results are returned in a columnar fashion: one row represents all the values of a certain column from the current page of results.
-** *`cursor` (Optional, string)*: Cursor used to retrieve a set of paginated results.
-If you specify a cursor, the API only uses the `columnar` and `time_zone` request body parameters.
-It ignores other request body parameters.
-** *`fetch_size` (Optional, number)*: The maximum number of rows (or entries) to return in one response.
-** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Elasticsearch query DSL for additional filtering.
-** *`query` (Optional, string)*: SQL query to run.
-** *`request_timeout` (Optional, string | -1 | 0)*: The timeout before the request fails.
-** *`page_timeout` (Optional, string | -1 | 0)*: The timeout before a pagination request fails.
-** *`time_zone` (Optional, string)*: ISO-8601 time zone ID for the search.
-** *`field_multi_value_leniency` (Optional, boolean)*: Throw an exception when encountering multiple values for a field (default) or be lenient and return the first value from the list (without any guarantees of what that will be - typically the first in natural ascending order).
-** *`runtime_mappings` (Optional, Record<string, { fields, fetch_fields, format, input_field, target_field, target_index, script, type }>)*: Defines one or more runtime fields in the search request. These fields take
-precedence over mapped fields with the same name.
-** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*: Period to wait for complete results. Defaults to no timeout, meaning the request waits for complete search results. If the search doesn’t finish within this period, the search becomes async.
-** *`params` (Optional, Record<string, User-defined value>)*: Values for parameters in the query.
-** *`keep_alive` (Optional, string | -1 | 0)*: Retention period for an async or saved synchronous search.
-** *`keep_on_completion` (Optional, boolean)*: If true, Elasticsearch stores synchronous searches if you also specify the wait_for_completion_timeout parameter. If false, Elasticsearch only stores async searches that don’t finish before the wait_for_completion_timeout.
-** *`index_using_frozen` (Optional, boolean)*: If true, the search can run on frozen indices. Defaults to false.
-** *`format` (Optional, Enum("csv" | "json" | "tsv" | "txt" | "yaml" | "cbor" | "smile"))*: Format for the response.
-
-[discrete]
-==== translate
-Translate SQL into Elasticsearch queries.
-Translate an SQL search into a search API request containing Query DSL.
-
-{ref}/sql-translate-api.html[Endpoint documentation]
-[source,ts]
-----
-client.sql.translate({ query })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`query` (string)*: SQL query to run.
-** *`fetch_size` (Optional, number)*: The maximum number of rows (or entries) to return in one response.
-** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Elasticsearch query DSL for additional filtering.
-** *`time_zone` (Optional, string)*: ISO-8601 time zone ID for the search.
-
-[discrete]
-=== ssl
-[discrete]
-==== certificates
-Get SSL certificates.
-
-Get information about the X.509 certificates that are used to encrypt communications in the cluster.
-The API returns a list that includes certificates from all TLS contexts including:
-
-- Settings for transport and HTTP interfaces
-- TLS settings that are used within authentication realms
-- TLS settings for remote monitoring exporters
-
-The list includes certificates that are used for configuring trust, such as those configured in the `xpack.security.transport.ssl.truststore` and `xpack.security.transport.ssl.certificate_authorities` settings.
-It also includes certificates that are used for configuring server identity, such as the `xpack.security.http.ssl.keystore` and `xpack.security.http.ssl.certificate` settings.
-
-The list does not include certificates that are sourced from the default SSL context of the Java Runtime Environment (JRE), even if those certificates are in use within Elasticsearch.
-
-NOTE: When a PKCS#11 token is configured as the truststore of the JRE, the API returns all the certificates that are included in the PKCS#11 token irrespective of whether these are used in the Elasticsearch TLS configuration.
-
-If Elasticsearch is configured to use a keystore or truststore, the API output includes all certificates in that store, even though some of the certificates might not be in active use within the cluster.
-
-{ref}/security-api-ssl.html[Endpoint documentation]
-[source,ts]
-----
-client.ssl.certificates()
-----
-
-
-[discrete]
-=== synonyms
-[discrete]
-==== delete_synonym
-Delete a synonym set.
-
-{ref}/delete-synonyms-set.html[Endpoint documentation]
-[source,ts]
-----
-client.synonyms.deleteSynonym({ id })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`id` (string)*: The id of the synonyms set to be deleted
-
-[discrete]
-==== delete_synonym_rule
-Delete a synonym rule.
-Delete a synonym rule from a synonym set.
-
-{ref}/delete-synonym-rule.html[Endpoint documentation]
-[source,ts]
-----
-client.synonyms.deleteSynonymRule({ set_id, rule_id })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`set_id` (string)*: The id of the synonym set to be updated
-** *`rule_id` (string)*: The id of the synonym rule to be deleted
-
-[discrete]
-==== get_synonym
-Get a synonym set.
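-
-A sketch of paging through the rules of a set (the set id is a placeholder; an existing `client` instance is assumed):
-
-[source,ts]
-----
-// Retrieve the first ten rules of a synonym set.
-const page = await client.synonyms.getSynonym({
-  id: 'my-synonyms-set',
-  from: 0,
-  size: 10
-})
-console.log(page)
-----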
-
-{ref}/get-synonyms-set.html[Endpoint documentation]
-[source,ts]
-----
-client.synonyms.getSynonym({ id })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`id` (string)*: The id of the synonyms set to be retrieved
-** *`from` (Optional, number)*: Starting offset for synonym rules to be retrieved
-** *`size` (Optional, number)*: Specifies a max number of synonym rules to retrieve
-
-[discrete]
-==== get_synonym_rule
-Get a synonym rule.
-Get a synonym rule from a synonym set.
-
-{ref}/get-synonym-rule.html[Endpoint documentation]
-[source,ts]
-----
-client.synonyms.getSynonymRule({ set_id, rule_id })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`set_id` (string)*: The id of the synonym set to retrieve the synonym rule from
-** *`rule_id` (string)*: The id of the synonym rule to retrieve
-
-[discrete]
-==== get_synonyms_sets
-Get all synonym sets.
-Get a summary of all defined synonym sets.
-
-{ref}/list-synonyms-sets.html[Endpoint documentation]
-[source,ts]
-----
-client.synonyms.getSynonymsSets({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`from` (Optional, number)*: Starting offset
-** *`size` (Optional, number)*: Specifies a max number of results to get
-
-[discrete]
-==== put_synonym
-Create or update a synonym set.
-Synonyms sets are limited to a maximum of 10,000 synonym rules per set.
-If you need to manage more synonym rules, you can create multiple synonym sets.
-
-{ref}/put-synonyms-set.html[Endpoint documentation]
-[source,ts]
-----
-client.synonyms.putSynonym({ id, synonyms_set })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`id` (string)*: The id of the synonyms set to be created or updated
-** *`synonyms_set` ({ id, synonyms } | { id, synonyms }[])*: The synonym set information to update
-
-[discrete]
-==== put_synonym_rule
-Create or update a synonym rule.
-Create or update a synonym rule in a synonym set.
-
-{ref}/put-synonym-rule.html[Endpoint documentation]
-[source,ts]
-----
-client.synonyms.putSynonymRule({ set_id, rule_id, synonyms })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`set_id` (string)*: The id of the synonym set to be updated with the synonym rule
-** *`rule_id` (string)*: The id of the synonym rule to be updated or created
-** *`synonyms` (string)*
-
-[discrete]
-=== tasks
-[discrete]
-==== cancel
-Cancels a task, if it can be cancelled through an API.
-
-{ref}/tasks.html[Endpoint documentation]
-[source,ts]
-----
-client.tasks.cancel({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`task_id` (Optional, string | number)*: ID of the task.
-** *`actions` (Optional, string | string[])*: List or wildcard expression of actions used to limit the request.
-** *`nodes` (Optional, string[])*: List of node IDs or names used to limit the request.
-** *`parent_task_id` (Optional, string)*: Parent task ID used to limit the tasks.
-** *`wait_for_completion` (Optional, boolean)*: Should the request block until the cancellation of the task and its descendant tasks is completed. Defaults to false
-
-[discrete]
-==== get
-Get task information.
-Returns information about the tasks currently executing in the cluster.
-
-{ref}/tasks.html[Endpoint documentation]
-[source,ts]
-----
-client.tasks.get({ task_id })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`task_id` (string)*: ID of the task.
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response.
-If no response is received before the timeout expires, the request fails and returns an error.
-** *`wait_for_completion` (Optional, boolean)*: If `true`, the request blocks until the task has completed.
-
-[discrete]
-==== list
-The task management API returns information about tasks currently executing on one or more nodes in the cluster.
-
-{ref}/tasks.html[Endpoint documentation]
-[source,ts]
-----
-client.tasks.list({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`actions` (Optional, string | string[])*: List or wildcard expression of actions used to limit the request.
-** *`detailed` (Optional, boolean)*: If `true`, the response includes detailed information about shard recoveries.
-** *`group_by` (Optional, Enum("nodes" | "parents" | "none"))*: Key used to group tasks in the response.
-** *`nodes` (Optional, string | string[])*: List of node IDs or names used to limit returned information.
-** *`parent_task_id` (Optional, string)*: Parent task ID used to limit returned information. To return all tasks, omit this parameter or use a value of `-1`.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
-** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
-** *`wait_for_completion` (Optional, boolean)*: If `true`, the request blocks until the operation is complete.
-
-[discrete]
-=== text_structure
-[discrete]
-==== find_field_structure
-Finds the structure of a text field in an index.
-
-{ref}/find-field-structure.html[Endpoint documentation]
-[source,ts]
-----
-client.textStructure.findFieldStructure()
-----
-
-
-[discrete]
-==== find_message_structure
-Finds the structure of a list of messages. The messages must contain data that is suitable to be ingested into Elasticsearch.
-
-{ref}/find-message-structure.html[Endpoint documentation]
-[source,ts]
-----
-client.textStructure.findMessageStructure()
-----
-
-
-[discrete]
-==== find_structure
-Finds the structure of a text file. The text file must contain data that is suitable to be ingested into Elasticsearch.
-
-{ref}/find-structure.html[Endpoint documentation]
-[source,ts]
-----
-client.textStructure.findStructure({ ... })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`text_files` (Optional, TJsonDocument[])*
-** *`charset` (Optional, string)*: The text’s character set. It must be a character set that is supported by the JVM that Elasticsearch uses. For example, UTF-8, UTF-16LE, windows-1252, or EUC-JP. If this parameter is not specified, the structure finder chooses an appropriate character set.
-** *`column_names` (Optional, string)*: If you have set format to delimited, you can specify the column names in a list. If this parameter is not specified, the structure finder uses the column names from the header row of the text. If the text does not have a header row, columns are named "column1", "column2", "column3", etc.
-** *`delimiter` (Optional, string)*: If you have set format to delimited, you can specify the character used to delimit the values in each row. Only a single character is supported; the delimiter cannot have multiple characters. By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (|). In this default scenario, all rows must have the same number of fields for the delimited format to be detected. If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row.
-** *`ecs_compatibility` (Optional, string)*: The mode of compatibility with ECS compliant Grok patterns (disabled or v1, default: disabled).
-** *`explain` (Optional, boolean)*: If this parameter is set to true, the response includes a field named explanation, which is an array of strings that indicate how the structure finder produced its result.
-** *`format` (Optional, string)*: The high level structure of the text. Valid values are ndjson, xml, delimited, and semi_structured_text. By default, the API chooses the format. In this default scenario, all rows must have the same number of fields for a delimited format to be detected. If the format is set to delimited and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row.
-** *`grok_pattern` (Optional, string)*: If you have set format to semi_structured_text, you can specify a Grok pattern that is used to extract fields from every message in the text. The name of the timestamp field in the Grok pattern must match what is specified in the timestamp_field parameter. If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". If grok_pattern is not specified, the structure finder creates a Grok pattern.
-** *`has_header_row` (Optional, boolean)*: If you have set format to delimited, you can use this parameter to indicate whether the column names are in the first row of the text. If this parameter is not specified, the structure finder guesses based on the similarity of the first row of the text to other rows.
-** *`line_merge_size_limit` (Optional, number)*: The maximum number of characters in a message when lines are merged to form messages while analyzing semi-structured text. If you have extremely long messages you may need to increase this, but be aware that this may lead to very long processing times if the way to group lines into messages is misdetected.
-** *`lines_to_sample` (Optional, number)*: The number of lines to include in the structural analysis, starting from the beginning of the text. The minimum is 2; if the value of this parameter is greater than the number of lines in the text, the analysis proceeds (as long as there are at least two lines in the text) for all of the lines.
-** *`quote` (Optional, string)*: If you have set format to delimited, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. Only a single character is supported. If this parameter is not specified, the default value is a double quote ("). If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample.
-** *`should_trim_fields` (Optional, boolean)*: If you have set format to delimited, you can specify whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (|), the default value is true. Otherwise, the default value is false.
-** *`timeout` (Optional, string | -1 | 0)*: Sets the maximum amount of time that the structure analysis may take. If the analysis is still running when the timeout expires then it will be aborted.
-** *`timestamp_field` (Optional, string)*: Optional parameter to specify the timestamp field in the file -** *`timestamp_format` (Optional, string)*: The Java time format of the timestamp field in the text. - -[discrete] -==== test_grok_pattern -Tests a Grok pattern on some text. - -{ref}/test-grok-pattern.html[Endpoint documentation] -[source,ts] ----- -client.textStructure.testGrokPattern({ grok_pattern, text }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`grok_pattern` (string)*: Grok pattern to run on the text. -** *`text` (string[])*: Lines of text to run the Grok pattern on. -** *`ecs_compatibility` (Optional, string)*: The mode of compatibility with ECS compliant Grok patterns (disabled or v1, default: disabled). - -[discrete] -=== transform -[discrete] -==== delete_transform -Delete a transform. -Deletes a transform. - -{ref}/delete-transform.html[Endpoint documentation] -[source,ts] ----- -client.transform.deleteTransform({ transform_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`transform_id` (string)*: Identifier for the transform. -** *`force` (Optional, boolean)*: If this value is false, the transform must be stopped before it can be deleted. If true, the transform is -deleted regardless of its current state. -** *`delete_dest_index` (Optional, boolean)*: If this value is true, the destination index is deleted together with the transform. If false, the destination -index will not be deleted -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== get_node_stats -Retrieves transform usage information for transform nodes. -[source,ts] ----- -client.transform.getNodeStats() ----- - - -[discrete] -==== get_transform -Get transforms. -Retrieves configuration information for transforms. - -{ref}/get-transform.html[Endpoint documentation] -[source,ts] ----- -client.transform.getTransform({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`transform_id` (Optional, string | string[])*: Identifier for the transform. It can be a transform identifier or a -wildcard expression. You can get information for all transforms by using -`_all`, by specifying `*` as the `<transform_id>`, or by omitting the -`<transform_id>`. -** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: - -1. Contains wildcard expressions and there are no transforms that match. -2. Contains the _all string or no identifiers and there are no matches. -3. Contains wildcard expressions and there are only partial matches. - -If this parameter is false, the request returns a 404 status code when -there are no matches or only partial matches. -** *`from` (Optional, number)*: Skips the specified number of transforms. -** *`size` (Optional, number)*: Specifies the maximum number of transforms to obtain. -** *`exclude_generated` (Optional, boolean)*: Excludes fields that were automatically added when creating the -transform. This allows the configuration to be in an acceptable format to -be retrieved and then added to another cluster. - -[discrete] -==== get_transform_stats -Get transform stats. -Retrieves usage information for transforms. - -{ref}/get-transform-stats.html[Endpoint documentation] -[source,ts] ----- -client.transform.getTransformStats({ transform_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`transform_id` (string | string[])*: Identifier for the transform. 
It can be a transform identifier or a -wildcard expression. You can get information for all transforms by using -`_all`, by specifying `*` as the `<transform_id>`, or by omitting the -`<transform_id>`. -** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: - -1. Contains wildcard expressions and there are no transforms that match. -2. Contains the _all string or no identifiers and there are no matches. -3. Contains wildcard expressions and there are only partial matches. - -If this parameter is false, the request returns a 404 status code when -there are no matches or only partial matches. -** *`from` (Optional, number)*: Skips the specified number of transforms. -** *`size` (Optional, number)*: Specifies the maximum number of transforms to obtain. -** *`timeout` (Optional, string | -1 | 0)*: Controls the time to wait for the stats - -[discrete] -==== preview_transform -Preview a transform. -Generates a preview of the results that you will get when you create a transform with the same configuration. - -It returns a maximum of 100 results. The calculations are based on all the current data in the source index. It also -generates a list of mappings and settings for the destination index. These values are determined based on the field -types of the source index and the transform aggregations. - -{ref}/preview-transform.html[Endpoint documentation] -[source,ts] ----- -client.transform.previewTransform({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`transform_id` (Optional, string)*: Identifier for the transform to preview. If you specify this path parameter, you cannot provide transform -configuration details in the request body. -** *`dest` (Optional, { index, op_type, pipeline, routing, version_type })*: The destination for the transform. -** *`description` (Optional, string)*: Free text description of the transform. -** *`frequency` (Optional, string | -1 | 0)*: The interval between checks for changes in the source indices when the -transform is running continuously. Also determines the retry interval in -the event of transient failures while the transform is searching or -indexing. The minimum value is 1s and the maximum is 1h. -** *`pivot` (Optional, { aggregations, group_by })*: The pivot method transforms the data by aggregating and grouping it. -These objects define the group by fields and the aggregation to reduce -the data. -** *`source` (Optional, { index, query, remote, size, slice, sort, _source, runtime_mappings })*: The source of the data for the transform. -** *`settings` (Optional, { align_checkpoints, dates_as_epoch_millis, deduce_mappings, docs_per_second, max_page_search_size, unattended })*: Defines optional transform settings. -** *`sync` (Optional, { time })*: Defines the properties transforms require to run continuously. -** *`retention_policy` (Optional, { time })*: Defines a retention policy for the transform. Data that meets the defined -criteria is deleted from the destination index. -** *`latest` (Optional, { sort, unique_key })*: The latest method transforms the data by finding the latest document for -each unique key. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the -timeout expires, the request fails and returns an error. - -[discrete] -==== put_transform -Create a transform. -Creates a transform. - -A transform copies data from source indices, transforms it, and persists it into an entity-centric destination index. 
You can also think of the destination index as a two-dimensional tabular data structure (known as -a data frame). The ID for each document in the data frame is generated from a hash of the entity, so there is a -unique row per entity. - -You must choose either the latest or pivot method for your transform; you cannot use both in a single transform. If -you choose to use the pivot method for your transform, the entities are defined by the set of `group_by` fields in -the pivot object. If you choose to use the latest method, the entities are defined by the `unique_key` field values -in the latest object. - -You must have `create_index`, `index`, and `read` privileges on the destination index and `read` and -`view_index_metadata` privileges on the source indices. When Elasticsearch security features are enabled, the -transform remembers which roles the user that created it had at the time of creation and uses those same roles. If -those roles do not have the required privileges on the source and destination indices, the transform fails when it -attempts unauthorized operations. - -NOTE: You must use Kibana or this API to create a transform. Do not add a transform directly into any -`.transform-internal*` indices using the Elasticsearch index API. If Elasticsearch security features are enabled, do -not give users any privileges on `.transform-internal*` indices. If you used transforms prior to 7.5, also do not -give users any privileges on `.data-frame-internal*` indices. - -{ref}/put-transform.html[Endpoint documentation] -[source,ts] ----- -client.transform.putTransform({ transform_id, dest, source }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`transform_id` (string)*: Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), -hyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters. -** *`dest` ({ index, op_type, pipeline, routing, version_type })*: The destination for the transform. -** *`source` ({ index, query, remote, size, slice, sort, _source, runtime_mappings })*: The source of the data for the transform. -** *`description` (Optional, string)*: Free text description of the transform. -** *`frequency` (Optional, string | -1 | 0)*: The interval between checks for changes in the source indices when the transform is running continuously. Also -determines the retry interval in the event of transient failures while the transform is searching or indexing. -The minimum value is `1s` and the maximum is `1h`. -** *`latest` (Optional, { sort, unique_key })*: The latest method transforms the data by finding the latest document for each unique key. -** *`_meta` (Optional, Record<string, User-defined value>)*: Defines optional transform metadata. -** *`pivot` (Optional, { aggregations, group_by })*: The pivot method transforms the data by aggregating and grouping it. These objects define the group by fields -and the aggregation to reduce the data. -** *`retention_policy` (Optional, { time })*: Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the -destination index. -** *`settings` (Optional, { align_checkpoints, dates_as_epoch_millis, deduce_mappings, docs_per_second, max_page_search_size, unattended })*: Defines optional transform settings. -** *`sync` (Optional, { time })*: Defines the properties transforms require to run continuously. 
-** *`defer_validation` (Optional, boolean)*: When the transform is created, a series of validations occur to ensure its success. For example, there is a -check for the existence of the source indices and a check that the destination index is not part of the source -index pattern. You can use this parameter to skip the checks, for example when the source index does not exist -until after the transform is created. The validations are always run when you start the transform, however, with -the exception of privilege checks. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - -[discrete] -==== reset_transform -Reset a transform. -Resets a transform. -Before you can reset it, you must stop it; alternatively, use the `force` query parameter. -If the destination index was created by the transform, it is deleted. - -{ref}/reset-transform.html[Endpoint documentation] -[source,ts] ----- -client.transform.resetTransform({ transform_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`transform_id` (string)*: Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), -hyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters. -** *`force` (Optional, boolean)*: If this value is `true`, the transform is reset regardless of its current state. If it's `false`, the transform -must be stopped before it can be reset. - -[discrete] -==== schedule_now_transform -Schedule a transform to start now. -Instantly runs a transform to process data. - -If you _schedule_now a transform, it will process the new data instantly, -without waiting for the configured frequency interval. After _schedule_now API is called, -the transform will be processed again at now + frequency unless _schedule_now API -is called again in the meantime. - -{ref}/schedule-now-transform.html[Endpoint documentation] -[source,ts] ----- -client.transform.scheduleNowTransform({ transform_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`transform_id` (string)*: Identifier for the transform. -** *`timeout` (Optional, string | -1 | 0)*: Controls the time to wait for the scheduling to take place - -[discrete] -==== start_transform -Start a transform. -Starts a transform. - -When you start a transform, it creates the destination index if it does not already exist. The `number_of_shards` is -set to `1` and the `auto_expand_replicas` is set to `0-1`. If it is a pivot transform, it deduces the mapping -definitions for the destination index from the source indices and the transform aggregations. If fields in the -destination index are derived from scripts (as in the case of `scripted_metric` or `bucket_script` aggregations), -the transform uses dynamic mappings unless an index template exists. If it is a latest transform, it does not deduce -mapping definitions; it uses dynamic mappings. To use explicit mappings, create the destination index before you -start the transform. Alternatively, you can create an index template, though it does not affect the deduced mappings -in a pivot transform. - -When the transform starts, a series of validations occur to ensure its success. If you deferred validation when you -created the transform, they occur when you start the transform—with the exception of privilege checks. 
When -Elasticsearch security features are enabled, the transform remembers which roles the user that created it had at the -time of creation and uses those same roles. If those roles do not have the required privileges on the source and -destination indices, the transform fails when it attempts unauthorized operations. - -{ref}/start-transform.html[Endpoint documentation] -[source,ts] ----- -client.transform.startTransform({ transform_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`transform_id` (string)*: Identifier for the transform. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. -** *`from` (Optional, string)*: Restricts the set of transformed entities to those changed after this time. Relative times like now-30d are supported. Only applicable for continuous transforms. - -[discrete] -==== stop_transform -Stop transforms. -Stops one or more transforms. - -{ref}/stop-transform.html[Endpoint documentation] -[source,ts] ----- -client.transform.stopTransform({ transform_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`transform_id` (string)*: Identifier for the transform. To stop multiple transforms, use a list or a wildcard expression. -To stop all transforms, use `_all` or `*` as the identifier. -** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; -contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there -are only partial matches. - -If it is true, the API returns a successful acknowledgement message when there are no matches. When there are -only partial matches, the API stops the appropriate transforms. - -If it is false, the request returns a 404 status code when there are no matches or only partial matches. -** *`force` (Optional, boolean)*: If it is true, the API forcefully stops the transforms. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response when `wait_for_completion` is `true`. If no response is received before the -timeout expires, the request returns a timeout exception. However, the request continues processing and -eventually moves the transform to a STOPPED state. -** *`wait_for_checkpoint` (Optional, boolean)*: If it is true, the transform does not completely stop until the current checkpoint is completed. If it is false, -the transform stops as soon as possible. -** *`wait_for_completion` (Optional, boolean)*: If it is true, the API blocks until the indexer state completely stops. If it is false, the API returns -immediately and the indexer is stopped asynchronously in the background. - -[discrete] -==== update_transform -Update a transform. -Updates certain properties of a transform. - -All updated properties except `description` do not take effect until after the transform starts the next checkpoint, -thus there is data consistency in each checkpoint. To use this API, you must have `read` and `view_index_metadata` -privileges for the source indices. You must also have `index` and `read` privileges for the destination index. When -Elasticsearch security features are enabled, the transform remembers which roles the user who updated it had at the -time of update and runs with those privileges. 
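-
-For example, only the properties that should change need to be sent (a sketch with a placeholder transform id, assuming an existing `client` instance):
-
-[source,ts]
-----
-// Change the description and check frequency of an existing transform.
-await client.transform.updateTransform({
-  transform_id: 'my-transform',
-  description: 'Hourly rollup of web logs',
-  frequency: '5m'
-})
-----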
- -{ref}/update-transform.html[Endpoint documentation] -[source,ts] ----- -client.transform.updateTransform({ transform_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`transform_id` (string)*: Identifier for the transform. -** *`dest` (Optional, { index, op_type, pipeline, routing, version_type })*: The destination for the transform. -** *`description` (Optional, string)*: Free text description of the transform. -** *`frequency` (Optional, string | -1 | 0)*: The interval between checks for changes in the source indices when the -transform is running continuously. Also determines the retry interval in -the event of transient failures while the transform is searching or -indexing. The minimum value is 1s and the maximum is 1h. -** *`_meta` (Optional, Record<string, User-defined value>)*: Defines optional transform metadata. -** *`source` (Optional, { index, query, remote, size, slice, sort, _source, runtime_mappings })*: The source of the data for the transform. -** *`settings` (Optional, { align_checkpoints, dates_as_epoch_millis, deduce_mappings, docs_per_second, max_page_search_size, unattended })*: Defines optional transform settings. -** *`sync` (Optional, { time })*: Defines the properties transforms require to run continuously. -** *`retention_policy` (Optional, { time } | null)*: Defines a retention policy for the transform. Data that meets the defined -criteria is deleted from the destination index. -** *`defer_validation` (Optional, boolean)*: When true, deferrable validations are not run. This behavior may be -desired if the source index does not exist until after the transform is -created. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the -timeout expires, the request fails and returns an error. - -[discrete] -==== upgrade_transforms -Upgrades all transforms. -This API identifies transforms that have a legacy configuration format and upgrades them to the latest version. It -also cleans up the internal data structures that store the transform state and checkpoints. The upgrade does not -affect the source and destination indices. The upgrade also does not affect the roles that transforms use when -Elasticsearch security features are enabled; the role used to read source data and write to the destination index -remains unchanged. - -{ref}/upgrade-transforms.html[Endpoint documentation] -[source,ts] ----- -client.transform.upgradeTransforms({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`dry_run` (Optional, boolean)*: When true, the request checks for updates but does not run them. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and -returns an error. - -[discrete] -=== watcher -[discrete] -==== ack_watch -Acknowledges a watch, manually throttling the execution of the watch's actions. - -{ref}/watcher-api-ack-watch.html[Endpoint documentation] -[source,ts] ----- -client.watcher.ackWatch({ watch_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`watch_id` (string)*: Watch ID -** *`action_id` (Optional, string | string[])*: A list of the action ids to be acked - -[discrete] -==== activate_watch -Activates a currently inactive watch. 
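-
-A usage sketch, assuming an existing `client` instance and a placeholder watch id:
-
-[source,ts]
-----
-// Re-enable a watch that was previously deactivated.
-await client.watcher.activateWatch({ watch_id: 'my_watch' })
-----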
- -{ref}/watcher-api-activate-watch.html[Endpoint documentation] -[source,ts] ----- -client.watcher.activateWatch({ watch_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`watch_id` (string)*: Watch ID - -[discrete] -==== deactivate_watch -Deactivates a currently active watch. - -{ref}/watcher-api-deactivate-watch.html[Endpoint documentation] -[source,ts] ----- -client.watcher.deactivateWatch({ watch_id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`watch_id` (string)*: Watch ID - -[discrete] -==== delete_watch -Removes a watch from Watcher. - -{ref}/watcher-api-delete-watch.html[Endpoint documentation] -[source,ts] ----- -client.watcher.deleteWatch({ id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: Watch ID - -[discrete] -==== execute_watch -This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes. -For testing and debugging purposes, you also have fine-grained control on how the watch runs. You can execute the watch without executing all of its actions or alternatively by simulating them. You can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after execution. - -{ref}/watcher-api-execute-watch.html[Endpoint documentation] -[source,ts] ----- -client.watcher.executeWatch({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (Optional, string)*: Identifier for the watch. -** *`action_modes` (Optional, Record<string, Enum("simulate" | "force_simulate" | "execute" | "force_execute" | "skip")>)*: Determines how to handle the watch actions as part of the watch execution. -** *`alternative_input` (Optional, Record<string, User-defined value>)*: When present, the watch uses this object as a payload instead of executing its own input. -** *`ignore_condition` (Optional, boolean)*: When set to `true`, the watch execution uses the always condition. This can also be specified as an HTTP parameter. -** *`record_execution` (Optional, boolean)*: When set to `true`, the watch record representing the watch execution result is persisted to the `.watcher-history` index for the current time. In addition, the status of the watch is updated, possibly throttling subsequent executions. This can also be specified as an HTTP parameter. -** *`simulated_actions` (Optional, { actions, all, use_all })* -** *`trigger_data` (Optional, { scheduled_time, triggered_time })*: This structure is parsed as the data of the trigger event that will be used during the watch execution -** *`watch` (Optional, { actions, condition, input, metadata, status, throttle_period, throttle_period_in_millis, transform, trigger })*: When present, this watch is used instead of the one specified in the request. This watch is not persisted to the index and record_execution cannot be set. -** *`debug` (Optional, boolean)*: Defines whether the watch runs in debug mode. - -[discrete] -==== get_settings -Retrieve settings for the watcher system index - -{ref}/watcher-api-get-settings.html[Endpoint documentation] -[source,ts] ----- -client.watcher.getSettings() ----- - - -[discrete] -==== get_watch -Retrieves a watch by its ID. 
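-
-A sketch that fetches a watch and checks whether it is currently active (placeholder watch id, existing `client` instance assumed):
-
-[source,ts]
-----
-// Fetch a watch and inspect its activation state.
-const { found, status } = await client.watcher.getWatch({ id: 'my_watch' })
-if (found) {
-  console.log(status?.state?.active)
-}
-----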
- -{ref}/watcher-api-get-watch.html[Endpoint documentation] -[source,ts] ----- -client.watcher.getWatch({ id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: Watch ID - -[discrete] -==== put_watch -Creates a new watch, or updates an existing one. - -{ref}/watcher-api-put-watch.html[Endpoint documentation] -[source,ts] ----- -client.watcher.putWatch({ id }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`id` (string)*: Watch ID -** *`actions` (Optional, Record<string, { add_backing_index, remove_backing_index }>)* -** *`condition` (Optional, { always, array_compare, compare, never, script })* -** *`input` (Optional, { chain, http, search, simple })* -** *`metadata` (Optional, Record<string, User-defined value>)* -** *`throttle_period` (Optional, string)* -** *`transform` (Optional, { chain, script, search })* -** *`trigger` (Optional, { schedule })* -** *`active` (Optional, boolean)*: Specify whether the watch is in/active by default -** *`if_primary_term` (Optional, number)*: only update the watch if the last operation that has changed the watch has the specified primary term -** *`if_seq_no` (Optional, number)*: only update the watch if the last operation that has changed the watch has the specified sequence number -** *`version` (Optional, number)*: Explicit version number for concurrency control - -[discrete] -==== query_watches -Retrieves stored watches. - -{ref}/watcher-api-query-watches.html[Endpoint documentation] -[source,ts] ----- -client.watcher.queryWatches({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`from` (Optional, number)*: The offset from the first result to fetch. Needs to be non-negative. -** *`size` (Optional, number)*: The number of hits to return. Needs to be non-negative. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Optional, query filter watches to be returned. -** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: Optional sort definition. -** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])*: Optional search After to do pagination using last hit’s sort values. - -[discrete] -==== start -Starts Watcher if it is not already running. - -{ref}/watcher-api-start.html[Endpoint documentation] -[source,ts] ----- -client.watcher.start() ----- - - -[discrete] -==== stats -Retrieves the current Watcher metrics. - -{ref}/watcher-api-stats.html[Endpoint documentation] -[source,ts] ----- -client.watcher.stats({ ... 
}) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`metric` (Optional, Enum("_all" | "queued_watches" | "current_watches" | "pending_watches") | Enum("_all" | "queued_watches" | "current_watches" | "pending_watches")[])*: Defines which additional metrics are included in the response. -** *`emit_stacktraces` (Optional, boolean)*: Defines whether stack traces are generated for each watch that is running. - -[discrete] -==== stop -Stops Watcher if it is running. - -{ref}/watcher-api-stop.html[Endpoint documentation] -[source,ts] ----- -client.watcher.stop() ----- - - -[discrete] -==== update_settings -Update settings for the watcher system index - -{ref}/watcher-api-update-settings.html[Endpoint documentation] -[source,ts] ----- -client.watcher.updateSettings() ----- - - -[discrete] -=== xpack -[discrete] -==== info -Provides general information about the installed X-Pack features. - -{ref}/info-api.html[Endpoint documentation] -[source,ts] ----- -client.xpack.info({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`categories` (Optional, Enum("build" | "features" | "license")[])*: A list of the information categories to include in the response. For example, `build,license,features`. -** *`accept_enterprise` (Optional, boolean)*: If this param is used it must be set to true -** *`human` (Optional, boolean)*: Defines whether additional human-readable information is included in the response. In particular, it adds descriptions and a tag line. - -[discrete] -==== usage -This API provides information about which features are currently enabled and available under the current license and some usage statistics. - -{ref}/usage-api.html[Endpoint documentation] -[source,ts] ----- -client.xpack.usage({ ... }) ----- - -[discrete] -==== Arguments - -* *Request (object):* -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - diff --git a/docs/reference/async_search.asciidoc b/docs/reference/async_search.asciidoc new file mode 100644 index 000000000..b113daa72 --- /dev/null +++ b/docs/reference/async_search.asciidoc @@ -0,0 +1,250 @@ +[[reference-async_search]] +== client.asyncSearch + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.asyncSearch.delete]] +== `client.asyncSearch.delete()` + +Delete an async search. 
If the asynchronous search is still running, it is cancelled. Otherwise, the saved search results are deleted. If the Elasticsearch security features are enabled, the deletion of a specific async search is restricted to: the authenticated user that submitted the original search request; users that have the `cancel_task` cluster privilege. + +{ref}/async-search.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: AsyncSearchDeleteRequest, options?: TransportRequestOptions) => Promise<AsyncSearchDeleteResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface AsyncSearchDeleteRequest extends <<RequestBase>> { + id: <<Id>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type AsyncSearchDeleteResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.asyncSearch.get]] +== `client.asyncSearch.get()` + +Get async search results. Retrieve the results of a previously submitted asynchronous search request. If the Elasticsearch security features are enabled, access to the results of a specific async search is restricted to the user or API key that submitted it. + +{ref}/async-search.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: AsyncSearchGetRequest, options?: TransportRequestOptions) => Promise<AsyncSearchGetResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface AsyncSearchGetRequest extends <<RequestBase>> { + id: <<Id>> + keep_alive?: <<Duration>> + typed_keys?: boolean + wait_for_completion_timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type AsyncSearchGetResponse<TDocument = unknown, TAggregations = Record<<<AggregateName>>, <<AggregationsAggregate>>>> = <<AsyncSearchAsyncSearchDocumentResponseBase>><TDocument, TAggregations> + +---- + + +[discrete] +[[client.asyncSearch.status]] +== `client.asyncSearch.status()` + +Get the async search status. Get the status of a previously submitted async search request given its identifier, without retrieving search results. If the Elasticsearch security features are enabled, use of this API is restricted to the `monitoring_user` role. + +{ref}/async-search.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: AsyncSearchStatusRequest, options?: TransportRequestOptions) => Promise<AsyncSearchStatusResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface AsyncSearchStatusRequest extends <<RequestBase>> { + id: <<Id>> + keep_alive?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type AsyncSearchStatusResponse = AsyncSearchStatusStatusResponseBase + +---- + + +[discrete] +[[client.asyncSearch.submit]] +== `client.asyncSearch.submit()` + +Run an async search. When the primary sort of the results is an indexed field, shards get sorted based on minimum and maximum value that they hold for that field. Partial results become available following the sort criteria that was requested. Warning: Asynchronous search does not support scroll or search requests that include only the suggest section. By default, Elasticsearch does not allow you to store an async search response larger than 10Mb and an attempt to do this results in an error. The maximum allowed size for a stored async search response can be set by changing the `search.max_async_search_response_size` cluster level setting. 
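+
+As an illustrative sketch only (not generated from the specification), the following submits an async search and fetches the stored result. It assumes an existing `client` instance; the index pattern `my-index-*` and the one-second timeout are hypothetical:
+
+[source,ts]
+----
+const submitted = await client.asyncSearch.submit({
+  index: 'my-index-*',               // hypothetical index pattern
+  query: { match_all: {} },
+  wait_for_completion_timeout: '1s', // return whatever is ready after one second
+  keep_on_completion: true           // store the results so they can be fetched later
+})
+
+// an id is returned when the results are stored for later retrieval
+if (submitted.id != null) {
+  const results = await client.asyncSearch.get({ id: submitted.id })
+  console.log(results)
+}
+----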
+ +{ref}/async-search.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: AsyncSearchSubmitRequest, options?: TransportRequestOptions) => Promise<AsyncSearchSubmitResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface AsyncSearchSubmitRequest extends <<RequestBase>> { + index?: <<Indices>> + wait_for_completion_timeout?: <<Duration>> + keep_on_completion?: boolean + allow_no_indices?: boolean + allow_partial_search_results?: boolean + analyzer?: string + analyze_wildcard?: boolean + batched_reduce_size?: <<long>> + ccs_minimize_roundtrips?: boolean + default_operator?: <<QueryDslOperator>> + df?: string + expand_wildcards?: <<ExpandWildcards>> + ignore_throttled?: boolean + ignore_unavailable?: boolean + lenient?: boolean + max_concurrent_shard_requests?: <<long>> + preference?: string + request_cache?: boolean + routing?: <<Routing>> + search_type?: <<SearchType>> + suggest_field?: <<Field>> + suggest_mode?: <<SuggestMode>> + suggest_size?: <<long>> + suggest_text?: string + typed_keys?: boolean + rest_total_hits_as_int?: boolean + _source_excludes?: <<Fields>> + _source_includes?: <<Fields>> + q?: string + aggregations?: Record<string, <<AggregationsAggregationContainer>>> + pass:[/**] @alias aggregations */ + aggs?: Record<string, <<AggregationsAggregationContainer>>> + collapse?: <<SearchFieldCollapse>> + explain?: boolean + ext?: Record<string, any> + from?: <<integer>> + highlight?: <<SearchHighlight>> + track_total_hits?: <<SearchTrackHits>> + indices_boost?: Record<<<IndexName>>, <<double>>>[] + docvalue_fields?: (<<QueryDslFieldAndFormat>> | <<Field>>)[] + knn?: <<KnnSearch>> | <<KnnSearch>>[] + min_score?: <<double>> + post_filter?: <<QueryDslQueryContainer>> + profile?: boolean + query?: <<QueryDslQueryContainer>> + rescore?: <<SearchRescore>> | <<SearchRescore>>[] + script_fields?: Record<string, <<ScriptField>>> + search_after?: <<SortResults>> + size?: <<integer>> + slice?: <<SlicedScroll>> + sort?: <<Sort>> + _source?: <<SearchSourceConfig>> + fields?: (<<QueryDslFieldAndFormat>> | <<Field>>)[] + suggest?: <<SearchSuggester>> + terminate_after?: <<long>> + timeout?: string + track_scores?: boolean + version?: boolean + seq_no_primary_term?: boolean + stored_fields?: <<Fields>> + pit?: <<SearchPointInTimeReference>> + runtime_mappings?: <<MappingRuntimeFields>> + stats?: string[] +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type AsyncSearchSubmitResponse<TDocument = unknown, TAggregations = Record<<<AggregateName>>, <<AggregationsAggregate>>>> = <<AsyncSearchAsyncSearchDocumentResponseBase>><TDocument, TAggregations> + +---- + + diff --git a/docs/reference/autoscaling.asciidoc b/docs/reference/autoscaling.asciidoc new file mode 100644 index 000000000..a00108f46 --- /dev/null +++ b/docs/reference/autoscaling.asciidoc @@ -0,0 +1,192 @@ +[[reference-autoscaling]] +== client.autoscaling + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.autoscaling.deleteAutoscalingPolicy]] +== `client.autoscaling.deleteAutoscalingPolicy()` + +Delete an autoscaling policy. NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. + +{ref}/autoscaling-delete-autoscaling-policy.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptions) => Promise<AutoscalingDeleteAutoscalingPolicyResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface AutoscalingDeleteAutoscalingPolicyRequest extends <<RequestBase>> { + name: <<Name>> + master_timeout?: <<Duration>> + timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type AutoscalingDeleteAutoscalingPolicyResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.autoscaling.getAutoscalingCapacity]] +== `client.autoscaling.getAutoscalingCapacity()` + +Get the autoscaling capacity. NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. This API gets the current autoscaling capacity based on the configured autoscaling policy. It will return information to size the cluster appropriately to the current workload. The `required_capacity` is calculated as the maximum of the `required_capacity` result of all individual deciders that are enabled for the policy. The operator should verify that the `current_nodes` match the operator’s knowledge of the cluster to avoid making autoscaling decisions based on stale or incomplete information. The response contains decider-specific information you can use to diagnose how and why autoscaling determined a certain capacity was required. This information is provided for diagnosis only. Do not use this information to make autoscaling decisions. + +{ref}/autoscaling-get-autoscaling-capacity.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptions) => Promise<AutoscalingGetAutoscalingCapacityResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface AutoscalingGetAutoscalingCapacityRequest extends <<RequestBase>> { + master_timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface AutoscalingGetAutoscalingCapacityResponse { + policies: Record<string, AutoscalingGetAutoscalingCapacityAutoscalingDeciders> +} + +---- + + +[discrete] +[[client.autoscaling.getAutoscalingPolicy]] +== `client.autoscaling.getAutoscalingPolicy()` + +Get an autoscaling policy. NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. 
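+
+A minimal, illustrative sketch (not generated from the specification), assuming an existing `client` instance; the policy name is hypothetical:
+
+[source,ts]
+----
+const policy = await client.autoscaling.getAutoscalingPolicy({
+  name: 'my_autoscaling_policy' // hypothetical policy name
+})
+console.log(policy)
+----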
+ +{ref}/autoscaling-get-autoscaling-capacity.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptions) => Promise<AutoscalingGetAutoscalingPolicyResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface AutoscalingGetAutoscalingPolicyRequest extends <<RequestBase>> { + name: <<Name>> + master_timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type AutoscalingGetAutoscalingPolicyResponse = <<AutoscalingAutoscalingPolicy>> + +---- + + +[discrete] +[[client.autoscaling.putAutoscalingPolicy]] +== `client.autoscaling.putAutoscalingPolicy()` + +Create or update an autoscaling policy. NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. + +{ref}/autoscaling-put-autoscaling-policy.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptions) => Promise<AutoscalingPutAutoscalingPolicyResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface AutoscalingPutAutoscalingPolicyRequest extends <<RequestBase>> { + name: <<Name>> + master_timeout?: <<Duration>> + timeout?: <<Duration>> + policy?: <<AutoscalingAutoscalingPolicy>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type AutoscalingPutAutoscalingPolicyResponse = <<AcknowledgedResponseBase>> + +---- + + diff --git a/docs/reference/bulk.asciidoc b/docs/reference/bulk.asciidoc new file mode 100644 index 000000000..227ba43c1 --- /dev/null +++ b/docs/reference/bulk.asciidoc @@ -0,0 +1,88 @@ +[[reference-bulk]] +== client.bulk + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.bulk]] +== `client.bulk()` + +Bulk index or delete documents. Performs multiple indexing or delete operations in a single API call. This reduces overhead and can greatly increase indexing speed. 
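+
+A minimal, illustrative sketch (not generated from the specification), assuming an existing `client` instance; the index name and documents are hypothetical. Action metadata and document sources alternate in the `operations` array:
+
+[source,ts]
+----
+const response = await client.bulk({
+  refresh: true,
+  operations: [
+    { index: { _index: 'my-index', _id: '1' } }, // action line
+    { title: 'Document one' },                   // source for the index action above
+    { delete: { _index: 'my-index', _id: '2' } } // delete actions have no source line
+  ]
+})
+if (response.errors) console.log(response.items)
+----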
+ +{ref}/docs-bulk.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: BulkRequest, options?: TransportRequestOptions) => Promise<BulkResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface BulkRequest<TDocument = unknown, TPartialDocument = unknown> extends <<RequestBase>> { + index?: <<IndexName>> + pipeline?: string + refresh?: <<Refresh>> + routing?: <<Routing>> + _source?: <<SearchSourceConfigParam>> + _source_excludes?: <<Fields>> + _source_includes?: <<Fields>> + timeout?: <<Duration>> + wait_for_active_shards?: <<WaitForActiveShards>> + require_alias?: boolean + operations?: (<<BulkOperationContainer>> | <<BulkUpdateAction>><TDocument, TPartialDocument> | TDocument)[] +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface BulkResponse { + errors: boolean + items: Partial<Record<<<BulkOperationType>>, <<BulkResponseItem>>>>[] + took: <<long>> + ingest_took?: <<long>> +} + +---- + + diff --git a/docs/reference/cat.asciidoc b/docs/reference/cat.asciidoc new file mode 100644 index 000000000..37fbca03c --- /dev/null +++ b/docs/reference/cat.asciidoc @@ -0,0 +1,1047 @@ +[[reference-cat]] +== client.cat + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.cat.aliases]] +== `client.cat.aliases()` + +Get aliases. Retrieves the cluster’s index aliases, including filter and routing information. The API does not return data stream aliases. CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API. + +{ref}/cat-alias.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: CatAliasesRequest, options?: TransportRequestOptions) => Promise<CatAliasesResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface CatAliasesRequest extends <<CatCatRequestBase>> { + name?: <<Names>> + expand_wildcards?: <<ExpandWildcards>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type CatAliasesResponse = CatAliasesAliasesRecord[] + +---- + + +[discrete] +[[client.cat.allocation]] +== `client.cat.allocation()` + +Get shard allocation information. Get a snapshot of the number of shards allocated to each data node and their disk space. 
IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. + +{ref}/cat-allocation.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: CatAllocationRequest, options?: TransportRequestOptions) => Promise<CatAllocationResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface CatAllocationRequest extends <<CatCatRequestBase>> { + node_id?: <<NodeIds>> + bytes?: <<Bytes>> + local?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type CatAllocationResponse = CatAllocationAllocationRecord[] + +---- + + +[discrete] +[[client.cat.componentTemplates]] +== `client.cat.componentTemplates()` + +Get component templates. Returns information about component templates in a cluster. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get component template API. + +{ref}/cat-component-templates.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: CatComponentTemplatesRequest, options?: TransportRequestOptions) => Promise<CatComponentTemplatesResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface CatComponentTemplatesRequest extends <<CatCatRequestBase>> { + name?: string + local?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type CatComponentTemplatesResponse = CatComponentTemplatesComponentTemplate[] + +---- + + +[discrete] +[[client.cat.count]] +== `client.cat.count()` + +Get a document count. Provides quick access to a document count for a data stream, an index, or an entire cluster. The document count only includes live documents, not deleted documents which have not yet been removed by the merge process. CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the count API. + +{ref}/cat-count.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: CatCountRequest, options?: TransportRequestOptions) => Promise<CatCountResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface CatCountRequest extends <<CatCatRequestBase>> { + index?: <<Indices>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type CatCountResponse = CatCountCountRecord[] + +---- + + +[discrete] +[[client.cat.fielddata]] +== `client.cat.fielddata()` + +Get field data cache information. Get the amount of heap memory currently used by the field data cache on every data node in the cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes stats API. 
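+
+A minimal, illustrative sketch (not generated from the specification), assuming an existing `client` instance; the field name is hypothetical:
+
+[source,ts]
+----
+const records = await client.cat.fielddata({
+  fields: 'my_field', // hypothetical field name
+  bytes: 'mb'         // report sizes in megabytes
+})
+console.log(records)
+----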
+ +{ref}/cat-fielddata.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: CatFielddataRequest, options?: TransportRequestOptions) => Promise<CatFielddataResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface CatFielddataRequest extends <<CatCatRequestBase>> { + fields?: <<Fields>> + bytes?: <<Bytes>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type CatFielddataResponse = CatFielddataFielddataRecord[] + +---- + + +[discrete] +[[client.cat.health]] +== `client.cat.health()` + +Get the cluster health status. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the cluster health API. This API is often used to check malfunctioning clusters. To help you track cluster health alongside log files and alerting systems, the API returns timestamps in two formats: `HH:MM:SS`, which is human-readable but includes no date information; `Unix epoch time`, which is machine-sortable and includes date information. The latter format is useful for cluster recoveries that take multiple days. You can use the cat health API to verify cluster health across multiple nodes. You also can use the API to track the recovery of a large cluster over a longer period of time. + +{ref}/cat-health.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: CatHealthRequest, options?: TransportRequestOptions) => Promise<CatHealthResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface CatHealthRequest extends <<CatCatRequestBase>> { + time?: <<TimeUnit>> + ts?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type CatHealthResponse = CatHealthHealthRecord[] + +---- + + +[discrete] +[[client.cat.help]] +== `client.cat.help()` + +Get CAT help. Returns help for the CAT APIs. + +{ref}/cat.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: CatHelpRequest, options?: TransportRequestOptions) => Promise<CatHelpResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface CatHelpRequest extends <<CatCatRequestBase>> {} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type CatHelpResponse = CatHelpHelpRecord[] + +---- + + +[discrete] +[[client.cat.indices]] +== `client.cat.indices()` + +Get index information. Returns high-level information about indices in a cluster, including backing indices for data streams. Use this request to get the following information for each index in a cluster: - shard count - document count - deleted document count - primary store size - total store size of all shards, including shard replicas These metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents. To get an accurate count of Elasticsearch documents, use the cat count or count APIs. CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use an index endpoint. 
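+
+A minimal, illustrative sketch (not generated from the specification), assuming an existing `client` instance; the index pattern is hypothetical:
+
+[source,ts]
+----
+const records = await client.cat.indices({
+  index: 'my-index-*', // hypothetical index pattern
+  bytes: 'mb',         // report sizes in megabytes
+  health: 'yellow',    // only indices with yellow health
+  pri: true            // limit store metrics to primary shards
+})
+console.log(records)
+----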
+ +{ref}/cat-indices.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: CatIndicesRequest, options?: TransportRequestOptions) => Promise<CatIndicesResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface CatIndicesRequest extends <<CatCatRequestBase>> { + index?: <<Indices>> + bytes?: <<Bytes>> + expand_wildcards?: <<ExpandWildcards>> + health?: <<HealthStatus>> + include_unloaded_segments?: boolean + pri?: boolean + time?: <<TimeUnit>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type CatIndicesResponse = CatIndicesIndicesRecord[] + +---- + + +[discrete] +[[client.cat.master]] +== `client.cat.master()` + +Get master node information. Get information about the master node, including the ID, bound IP address, and name. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. + +{ref}/cat-master.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: CatMasterRequest, options?: TransportRequestOptions) => Promise<CatMasterResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface CatMasterRequest extends <<CatCatRequestBase>> { + local?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type CatMasterResponse = CatMasterMasterRecord[] + +---- + + +[discrete] +[[client.cat.mlDataFrameAnalytics]] +== `client.cat.mlDataFrameAnalytics()` + +Get data frame analytics jobs. Returns configuration and usage information about data frame analytics jobs. CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get data frame analytics jobs statistics API. + +{ref}/cat-dfanalytics.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptions) => Promise<CatMlDataFrameAnalyticsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface CatMlDataFrameAnalyticsRequest extends <<CatCatRequestBase>> { + id?: <<Id>> + allow_no_match?: boolean + bytes?: <<Bytes>> + h?: <<CatCatDfaColumns>> + s?: <<CatCatDfaColumns>> + time?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type CatMlDataFrameAnalyticsResponse = CatMlDataFrameAnalyticsDataFrameAnalyticsRecord[] + +---- + + +[discrete] +[[client.cat.mlDatafeeds]] +== `client.cat.mlDatafeeds()` + +Get datafeeds. Returns configuration and usage information about datafeeds. This API returns a maximum of 10,000 datafeeds. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get datafeed statistics API. 
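+
+A minimal, illustrative sketch (not generated from the specification), assuming an existing `client` instance:
+
+[source,ts]
+----
+const datafeeds = await client.cat.mlDatafeeds({
+  allow_no_match: true, // do not fail when no datafeeds match
+  time: 's'             // report time values in seconds
+})
+console.log(datafeeds)
+----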
+ +{ref}/cat-datafeeds.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: CatMlDatafeedsRequest, options?: TransportRequestOptions) => Promise<CatMlDatafeedsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface CatMlDatafeedsRequest extends <<CatCatRequestBase>> { + datafeed_id?: <<Id>> + allow_no_match?: boolean + h?: <<CatCatDatafeedColumns>> + s?: <<CatCatDatafeedColumns>> + time?: <<TimeUnit>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type CatMlDatafeedsResponse = CatMlDatafeedsDatafeedsRecord[] + +---- + + +[discrete] +[[client.cat.mlJobs]] +== `client.cat.mlJobs()` + +Get anomaly detection jobs. Returns configuration and usage information for anomaly detection jobs. This API returns a maximum of 10,000 jobs. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get anomaly detection job statistics API. + +{ref}/cat-anomaly-detectors.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: CatMlJobsRequest, options?: TransportRequestOptions) => Promise<CatMlJobsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface CatMlJobsRequest extends <<CatCatRequestBase>> { + job_id?: <<Id>> + allow_no_match?: boolean + bytes?: <<Bytes>> + h?: <<CatCatAnonalyDetectorColumns>> + s?: <<CatCatAnonalyDetectorColumns>> + time?: <<TimeUnit>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type CatMlJobsResponse = CatMlJobsJobsRecord[] + +---- + + +[discrete] +[[client.cat.mlTrainedModels]] +== `client.cat.mlTrainedModels()` + +Get trained models. Returns configuration and usage information about inference trained models. CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get trained models statistics API. + +{ref}/cat-trained-model.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: CatMlTrainedModelsRequest, options?: TransportRequestOptions) => Promise<CatMlTrainedModelsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface CatMlTrainedModelsRequest extends <<CatCatRequestBase>> { + model_id?: <<Id>> + allow_no_match?: boolean + bytes?: <<Bytes>> + h?: <<CatCatTrainedModelsColumns>> + s?: <<CatCatTrainedModelsColumns>> + from?: <<integer>> + size?: <<integer>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type CatMlTrainedModelsResponse = CatMlTrainedModelsTrainedModelsRecord[] + +---- + + +[discrete] +[[client.cat.nodeattrs]] +== `client.cat.nodeattrs()` + +Get node attribute information. Get information about custom node attributes. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. 
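+
+A minimal, illustrative sketch (not generated from the specification), assuming an existing `client` instance:
+
+[source,ts]
+----
+// list the custom attributes configured on each node
+const attributes = await client.cat.nodeattrs()
+console.log(attributes)
+----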
+ +{ref}/cat-nodeattrs.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: CatNodeattrsRequest, options?: TransportRequestOptions) => Promise<CatNodeattrsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface CatNodeattrsRequest extends <<CatCatRequestBase>> { + local?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type CatNodeattrsResponse = CatNodeattrsNodeAttributesRecord[] + +---- + + +[discrete] +[[client.cat.nodes]] +== `client.cat.nodes()` + +Get node information. Get information about the nodes in a cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. + +{ref}/cat-nodes.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: CatNodesRequest, options?: TransportRequestOptions) => Promise<CatNodesResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface CatNodesRequest extends <<CatCatRequestBase>> { + bytes?: <<Bytes>> + full_id?: boolean | string + include_unloaded_segments?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type CatNodesResponse = CatNodesNodesRecord[] + +---- + + +[discrete] +[[client.cat.pendingTasks]] +== `client.cat.pendingTasks()` + +Get pending task information. Get information about cluster-level changes that have not yet taken effect. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the pending cluster tasks API. + +{ref}/cat-pending-tasks.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: CatPendingTasksRequest, options?: TransportRequestOptions) => Promise<CatPendingTasksResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface CatPendingTasksRequest extends <<CatCatRequestBase>> { + local?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type CatPendingTasksResponse = CatPendingTasksPendingTasksRecord[] + +---- + + +[discrete] +[[client.cat.plugins]] +== `client.cat.plugins()` + +Get plugin information. Get a list of plugins running on each node of a cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. + +{ref}/cat-plugins.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: CatPluginsRequest, options?: TransportRequestOptions) => Promise<CatPluginsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface CatPluginsRequest extends <<CatCatRequestBase>> { + local?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type CatPluginsResponse = CatPluginsPluginsRecord[] + +---- + + +[discrete] +[[client.cat.recovery]] +== `client.cat.recovery()` + +Get shard recovery information. Get information about ongoing and completed shard recoveries. Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or syncing a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing. 
For data streams, the API returns information about the stream’s backing indices. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index recovery API. + +{ref}/cat-recovery.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: CatRecoveryRequest, options?: TransportRequestOptions) => Promise<CatRecoveryResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface CatRecoveryRequest extends <<CatCatRequestBase>> { + index?: <<Indices>> + active_only?: boolean + bytes?: <<Bytes>> + detailed?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type CatRecoveryResponse = CatRecoveryRecoveryRecord[] + +---- + + +[discrete] +[[client.cat.repositories]] +== `client.cat.repositories()` + +Get snapshot repository information. Get a list of snapshot repositories for a cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot repository API. + +{ref}/cat-repositories.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: CatRepositoriesRequest, options?: TransportRequestOptions) => Promise<CatRepositoriesResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface CatRepositoriesRequest extends <<CatCatRequestBase>> {} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type CatRepositoriesResponse = CatRepositoriesRepositoriesRecord[] + +---- + + +[discrete] +[[client.cat.segments]] +== `client.cat.segments()` + +Get segment information. Get low-level information about the Lucene segments in index shards. For data streams, the API returns information about the backing indices. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index segments API. + +{ref}/cat-segments.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: CatSegmentsRequest, options?: TransportRequestOptions) => Promise<CatSegmentsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface CatSegmentsRequest extends <<CatCatRequestBase>> { + index?: <<Indices>> + bytes?: <<Bytes>> + local?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type CatSegmentsResponse = CatSegmentsSegmentsRecord[] + +---- + + +[discrete] +[[client.cat.shards]] +== `client.cat.shards()` + +Get shard information. Get information about the shards in a cluster. For data streams, the API returns information about the backing indices. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. 
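+
+A minimal, illustrative sketch (not generated from the specification), assuming an existing `client` instance; the index pattern is hypothetical:
+
+[source,ts]
+----
+const shards = await client.cat.shards({
+  index: 'my-index-*', // hypothetical index pattern
+  bytes: 'gb'          // report store sizes in gigabytes
+})
+console.log(shards)
+----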
+ +{ref}/cat-shards.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: CatShardsRequest, options?: TransportRequestOptions) => Promise<CatShardsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface CatShardsRequest extends <<CatCatRequestBase>> { + index?: <<Indices>> + bytes?: <<Bytes>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type CatShardsResponse = CatShardsShardsRecord[] + +---- + + +[discrete] +[[client.cat.snapshots]] +== `client.cat.snapshots()` + +Get snapshot information Get information about the snapshots stored in one or more repositories. A snapshot is a backup of an index or running Elasticsearch cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API. + +{ref}/cat-snapshots.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: CatSnapshotsRequest, options?: TransportRequestOptions) => Promise<CatSnapshotsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface CatSnapshotsRequest extends <<CatCatRequestBase>> { + repository?: <<Names>> + ignore_unavailable?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type CatSnapshotsResponse = CatSnapshotsSnapshotsRecord[] + +---- + + +[discrete] +[[client.cat.tasks]] +== `client.cat.tasks()` + +Get task information. Get information about tasks currently running in the cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the task management API. + +{ref}/tasks.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: CatTasksRequest, options?: TransportRequestOptions) => Promise<CatTasksResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface CatTasksRequest extends <<CatCatRequestBase>> { + actions?: string[] + detailed?: boolean + node_id?: string[] + parent_task_id?: string +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type CatTasksResponse = CatTasksTasksRecord[] + +---- + + +[discrete] +[[client.cat.templates]] +== `client.cat.templates()` + +Get index template information. Get information about the index templates in a cluster. You can use index templates to apply index settings and field mappings to new indices at creation. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index template API. + +{ref}/cat-templates.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: CatTemplatesRequest, options?: TransportRequestOptions) => Promise<CatTemplatesResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface CatTemplatesRequest extends <<CatCatRequestBase>> { + name?: <<Name>> + local?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type CatTemplatesResponse = CatTemplatesTemplatesRecord[] + +---- + + +[discrete] +[[client.cat.threadPool]] +== `client.cat.threadPool()` + +Get thread pool statistics. Get thread pool statistics for each node in a cluster. 
Returned information includes all built-in thread pools and custom thread pools. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. + +{ref}/cat-thread-pool.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: CatThreadPoolRequest, options?: TransportRequestOptions) => Promise<CatThreadPoolResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface CatThreadPoolRequest extends <<CatCatRequestBase>> { + thread_pool_patterns?: <<Names>> + time?: <<TimeUnit>> + local?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type CatThreadPoolResponse = CatThreadPoolThreadPoolRecord[] + +---- + + +[discrete] +[[client.cat.transforms]] +== `client.cat.transforms()` + +Get transform information. Get configuration and usage information about transforms. CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get transform statistics API. + +{ref}/cat-transforms.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: CatTransformsRequest, options?: TransportRequestOptions) => Promise<CatTransformsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface CatTransformsRequest extends <<CatCatRequestBase>> { + transform_id?: <<Id>> + allow_no_match?: boolean + from?: <<integer>> + h?: <<CatCatTransformColumns>> + s?: <<CatCatTransformColumns>> + time?: <<TimeUnit>> + size?: <<integer>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type CatTransformsResponse = CatTransformsTransformsRecord[] + +---- + + diff --git a/docs/reference/ccr.asciidoc b/docs/reference/ccr.asciidoc new file mode 100644 index 000000000..2f118b898 --- /dev/null +++ b/docs/reference/ccr.asciidoc @@ -0,0 +1,574 @@ +[[reference-ccr]] +== client.ccr + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.ccr.deleteAutoFollowPattern]] +== `client.ccr.deleteAutoFollowPattern()` + +Deletes auto-follow patterns. 
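+
+A minimal, illustrative sketch (not generated from the specification), assuming an existing `client` instance; the pattern name is hypothetical:
+
+[source,ts]
+----
+const response = await client.ccr.deleteAutoFollowPattern({
+  name: 'my-auto-follow-pattern' // hypothetical pattern name
+})
+console.log(response.acknowledged)
+----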
+ +{ref}/ccr-delete-auto-follow-pattern.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptions) => Promise<CcrDeleteAutoFollowPatternResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface CcrDeleteAutoFollowPatternRequest extends <<RequestBase>> { + name: <<Name>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type CcrDeleteAutoFollowPatternResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.ccr.follow]] +== `client.ccr.follow()` + +Creates a new follower index configured to follow the referenced leader index. + +{ref}/ccr-put-follow.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: CcrFollowRequest, options?: TransportRequestOptions) => Promise<CcrFollowResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface CcrFollowRequest extends <<RequestBase>> { + index: <<IndexName>> + wait_for_active_shards?: <<WaitForActiveShards>> + data_stream_name?: string + leader_index: <<IndexName>> + max_outstanding_read_requests?: <<long>> + max_outstanding_write_requests?: <<integer>> + max_read_request_operation_count?: <<integer>> + max_read_request_size?: <<ByteSize>> + max_retry_delay?: <<Duration>> + max_write_buffer_count?: <<integer>> + max_write_buffer_size?: <<ByteSize>> + max_write_request_operation_count?: <<integer>> + max_write_request_size?: <<ByteSize>> + read_poll_timeout?: <<Duration>> + remote_cluster: string + settings?: <<IndicesIndexSettings>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface CcrFollowResponse { + follow_index_created: boolean + follow_index_shards_acked: boolean + index_following_started: boolean +} + +---- + + +[discrete] +[[client.ccr.followInfo]] +== `client.ccr.followInfo()` + +Retrieves information about all follower indices, including parameters and status for each follower index + +{ref}/ccr-get-follow-info.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: CcrFollowInfoRequest, options?: TransportRequestOptions) => Promise<CcrFollowInfoResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface CcrFollowInfoRequest extends <<RequestBase>> { + index: <<Indices>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface CcrFollowInfoResponse { + follower_indices: CcrFollowInfoFollowerIndex[] +} + +---- + + +[discrete] +[[client.ccr.followStats]] +== `client.ccr.followStats()` + +Retrieves follower stats. return shard-level stats about the following tasks associated with each shard for the specified indices. + +{ref}/ccr-get-follow-stats.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: CcrFollowStatsRequest, options?: TransportRequestOptions) => Promise<CcrFollowStatsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface CcrFollowStatsRequest extends <<RequestBase>> { + index: <<Indices>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface CcrFollowStatsResponse { + indices: <<CcrFollowIndexStats>>[] +} + +---- + + +[discrete] +[[client.ccr.forgetFollower]] +== `client.ccr.forgetFollower()` + +Removes the follower retention leases from the leader. 
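+
+A minimal, illustrative sketch (not generated from the specification). It is run against the cluster holding the leader index and assumes an existing `client` instance; all index and cluster names are hypothetical:
+
+[source,ts]
+----
+const response = await client.ccr.forgetFollower({
+  index: 'leader-index',                  // hypothetical leader index
+  follower_cluster: 'follower-cluster',   // hypothetical follower cluster name
+  follower_index: 'follower-index',       // hypothetical follower index
+  leader_remote_cluster: 'leader-cluster' // hypothetical remote-cluster alias used by the follower
+})
+console.log(response._shards)
+----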
+ +{ref}/ccr-post-forget-follower.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: CcrForgetFollowerRequest, options?: TransportRequestOptions) => Promise<CcrForgetFollowerResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface CcrForgetFollowerRequest extends <<RequestBase>> { + index: <<IndexName>> + follower_cluster?: string + follower_index?: <<IndexName>> + follower_index_uuid?: <<Uuid>> + leader_remote_cluster?: string +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface CcrForgetFollowerResponse { + _shards: <<ShardStatistics>> +} + +---- + + +[discrete] +[[client.ccr.getAutoFollowPattern]] +== `client.ccr.getAutoFollowPattern()` + +Gets configured auto-follow patterns. Returns the specified auto-follow pattern collection. + +{ref}/ccr-get-auto-follow-pattern.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: CcrGetAutoFollowPatternRequest, options?: TransportRequestOptions) => Promise<CcrGetAutoFollowPatternResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface CcrGetAutoFollowPatternRequest extends <<RequestBase>> { + name?: <<Name>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface CcrGetAutoFollowPatternResponse { + patterns: CcrGetAutoFollowPatternAutoFollowPattern[] +} + +---- + + +[discrete] +[[client.ccr.pauseAutoFollowPattern]] +== `client.ccr.pauseAutoFollowPattern()` + +Pauses an auto-follow pattern + +{ref}/ccr-pause-auto-follow-pattern.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptions) => Promise<CcrPauseAutoFollowPatternResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface CcrPauseAutoFollowPatternRequest extends <<RequestBase>> { + name: <<Name>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type CcrPauseAutoFollowPatternResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.ccr.pauseFollow]] +== `client.ccr.pauseFollow()` + +Pauses a follower index. The follower index will not fetch any additional operations from the leader index. + +{ref}/ccr-post-pause-follow.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: CcrPauseFollowRequest, options?: TransportRequestOptions) => Promise<CcrPauseFollowResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface CcrPauseFollowRequest extends <<RequestBase>> { + index: <<IndexName>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type CcrPauseFollowResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.ccr.putAutoFollowPattern]] +== `client.ccr.putAutoFollowPattern()` + +Creates a new named collection of auto-follow patterns against a specified remote cluster. Newly created indices on the remote cluster matching any of the specified patterns will be automatically configured as follower indices. 
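+
+A minimal, illustrative sketch (not generated from the specification), assuming an existing `client` instance and a remote cluster already registered under the hypothetical alias `leader-cluster`:
+
+[source,ts]
+----
+const response = await client.ccr.putAutoFollowPattern({
+  name: 'logs-pattern',              // hypothetical pattern name
+  remote_cluster: 'leader-cluster',  // hypothetical remote cluster alias
+  leader_index_patterns: ['logs-*']  // hypothetical leader index patterns to auto-follow
+})
+console.log(response.acknowledged)
+----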
+ +{ref}/ccr-put-auto-follow-pattern.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: CcrPutAutoFollowPatternRequest, options?: TransportRequestOptions) => Promise<CcrPutAutoFollowPatternResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface CcrPutAutoFollowPatternRequest extends <<RequestBase>> { + name: <<Name>> + remote_cluster: string + follow_index_pattern?: <<IndexPattern>> + leader_index_patterns?: <<IndexPatterns>> + leader_index_exclusion_patterns?: <<IndexPatterns>> + max_outstanding_read_requests?: <<integer>> + settings?: Record<string, any> + max_outstanding_write_requests?: <<integer>> + read_poll_timeout?: <<Duration>> + max_read_request_operation_count?: <<integer>> + max_read_request_size?: <<ByteSize>> + max_retry_delay?: <<Duration>> + max_write_buffer_count?: <<integer>> + max_write_buffer_size?: <<ByteSize>> + max_write_request_operation_count?: <<integer>> + max_write_request_size?: <<ByteSize>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type CcrPutAutoFollowPatternResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.ccr.resumeAutoFollowPattern]] +== `client.ccr.resumeAutoFollowPattern()` + +Resumes an auto-follow pattern that has been paused + +{ref}/ccr-resume-auto-follow-pattern.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptions) => Promise<CcrResumeAutoFollowPatternResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface CcrResumeAutoFollowPatternRequest extends <<RequestBase>> { + name: <<Name>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type CcrResumeAutoFollowPatternResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.ccr.resumeFollow]] +== `client.ccr.resumeFollow()` + +Resumes a follower index that has been paused + +{ref}/ccr-post-resume-follow.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: CcrResumeFollowRequest, options?: TransportRequestOptions) => Promise<CcrResumeFollowResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface CcrResumeFollowRequest extends <<RequestBase>> { + index: <<IndexName>> + max_outstanding_read_requests?: <<long>> + max_outstanding_write_requests?: <<long>> + max_read_request_operation_count?: <<long>> + max_read_request_size?: string + max_retry_delay?: <<Duration>> + max_write_buffer_count?: <<long>> + max_write_buffer_size?: string + max_write_request_operation_count?: <<long>> + max_write_request_size?: string + read_poll_timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type CcrResumeFollowResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.ccr.stats]] +== `client.ccr.stats()` + +Gets all stats related to cross-cluster replication. 
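+
+A minimal, illustrative sketch (not generated from the specification), assuming an existing `client` instance:
+
+[source,ts]
+----
+const stats = await client.ccr.stats()
+console.log(stats.auto_follow_stats)
+console.log(stats.follow_stats)
+----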
+ +{ref}/ccr-get-stats.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: CcrStatsRequest, options?: TransportRequestOptions) => Promise<CcrStatsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface CcrStatsRequest extends <<RequestBase>> {} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface CcrStatsResponse { + auto_follow_stats: CcrStatsAutoFollowStats + follow_stats: CcrStatsFollowStats +} + +---- + + +[discrete] +[[client.ccr.unfollow]] +== `client.ccr.unfollow()` + +Stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication. + +{ref}/ccr-post-unfollow.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: CcrUnfollowRequest, options?: TransportRequestOptions) => Promise<CcrUnfollowResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface CcrUnfollowRequest extends <<RequestBase>> { + index: <<IndexName>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type CcrUnfollowResponse = <<AcknowledgedResponseBase>> + +---- + + diff --git a/docs/reference/clear_scroll.asciidoc b/docs/reference/clear_scroll.asciidoc new file mode 100644 index 000000000..679e8c4a0 --- /dev/null +++ b/docs/reference/clear_scroll.asciidoc @@ -0,0 +1,76 @@ +[[reference-clear_scroll]] +== client.clearScroll + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.clearScroll]] +== `client.clearScroll()` + +Clear a scrolling search. Clear the search context and results for a scrolling search. 
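+
+A minimal, illustrative sketch (not generated from the specification) that clears a scroll once the caller is done with it. It assumes an existing `client` instance; the index name is hypothetical:
+
+[source,ts]
+----
+const response = await client.search({
+  index: 'my-index', // hypothetical index name
+  scroll: '30s',     // keep the search context alive for 30 seconds
+  query: { match_all: {} }
+})
+
+// ... page through further results with client.scroll() ...
+
+if (response._scroll_id != null) {
+  await client.clearScroll({ scroll_id: response._scroll_id })
+}
+----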
+ +{ref}/clear-scroll-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ClearScrollRequest, options?: TransportRequestOptions) => Promise<ClearScrollResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ClearScrollRequest extends <<RequestBase>> { + scroll_id?: <<ScrollIds>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface ClearScrollResponse { + succeeded: boolean + num_freed: <<integer>> +} + +---- + + diff --git a/docs/reference/close_point_in_time.asciidoc b/docs/reference/close_point_in_time.asciidoc new file mode 100644 index 000000000..a1c0f2dd0 --- /dev/null +++ b/docs/reference/close_point_in_time.asciidoc @@ -0,0 +1,76 @@ +[[reference-close_point_in_time]] +== client.closePointInTime + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.closePointInTime]] +== `client.closePointInTime()` + +Close a point in time. A point in time must be opened explicitly before being used in search requests. The `keep_alive` parameter tells Elasticsearch how long it should persist. A point in time is automatically closed when the `keep_alive` period has elapsed. However, keeping points in time has a cost; close them as soon as they are no longer required for search requests. 
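+
+A short sketch, assuming `pit.id` was returned by a prior `client.openPointInTime()` call:
+
+[source,js]
+----
+// Close the PIT once it is no longer needed for search requests
+const response = await client.closePointInTime({ id: pit.id })
+console.log(response.succeeded, response.num_freed)
+----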
+ +{ref}/point-in-time-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ClosePointInTimeRequest, options?: TransportRequestOptions) => Promise<ClosePointInTimeResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ClosePointInTimeRequest extends <<RequestBase>> { + id: <<Id>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface ClosePointInTimeResponse { + succeeded: boolean + num_freed: <<integer>> +} + +---- + + diff --git a/docs/reference/cluster.asciidoc b/docs/reference/cluster.asciidoc new file mode 100644 index 000000000..258069f09 --- /dev/null +++ b/docs/reference/cluster.asciidoc @@ -0,0 +1,731 @@ +[[reference-cluster]] +== client.cluster + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.cluster.allocationExplain]] +== `client.cluster.allocationExplain()` + +Provides explanations for shard allocations in the cluster. 
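+
+A hedged example that asks about the primary of shard 0 in a hypothetical index (the index name is a placeholder):
+
+[source,js]
+----
+const explanation = await client.cluster.allocationExplain({
+  index: 'my-index',
+  shard: 0,
+  primary: true
+})
+// allocate_explanation may be absent when the shard is already assigned
+console.log(explanation.allocate_explanation ?? explanation.current_node)
+----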
+ +{ref}/cluster-allocation-explain.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ClusterAllocationExplainRequest, options?: TransportRequestOptions) => Promise<ClusterAllocationExplainResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ClusterAllocationExplainRequest extends <<RequestBase>> { + include_disk_info?: boolean + include_yes_decisions?: boolean + current_node?: string + index?: <<IndexName>> + primary?: boolean + shard?: <<integer>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface ClusterAllocationExplainResponse { + allocate_explanation?: string + allocation_delay?: <<Duration>> + allocation_delay_in_millis?: <<DurationValue>><<<UnitMillis>>> + can_allocate?: ClusterAllocationExplainDecision + can_move_to_other_node?: ClusterAllocationExplainDecision + can_rebalance_cluster?: ClusterAllocationExplainDecision + can_rebalance_cluster_decisions?: ClusterAllocationExplainAllocationDecision[] + can_rebalance_to_other_node?: ClusterAllocationExplainDecision + can_remain_decisions?: ClusterAllocationExplainAllocationDecision[] + can_remain_on_current_node?: ClusterAllocationExplainDecision + cluster_info?: ClusterAllocationExplainClusterInfo + configured_delay?: <<Duration>> + configured_delay_in_millis?: <<DurationValue>><<<UnitMillis>>> + current_node?: ClusterAllocationExplainCurrentNode + current_state: string + index: <<IndexName>> + move_explanation?: string + node_allocation_decisions?: ClusterAllocationExplainNodeAllocationExplanation[] + primary: boolean + rebalance_explanation?: string + remaining_delay?: <<Duration>> + remaining_delay_in_millis?: <<DurationValue>><<<UnitMillis>>> + shard: <<integer>> + unassigned_info?: ClusterAllocationExplainUnassignedInformation + note?: string +} + +---- + + +[discrete] +[[client.cluster.deleteComponentTemplate]] +== `client.cluster.deleteComponentTemplate()` + +Delete component templates. Deletes component templates. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. + +{ref}/indices-component-template.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptions) => Promise<ClusterDeleteComponentTemplateResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ClusterDeleteComponentTemplateRequest extends <<RequestBase>> { + name: <<Names>> + master_timeout?: <<Duration>> + timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type ClusterDeleteComponentTemplateResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.cluster.deleteVotingConfigExclusions]] +== `client.cluster.deleteVotingConfigExclusions()` + +Clears cluster voting config exclusions. 
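+
+A minimal, illustrative call that clears the voting configuration exclusion list:
+
+[source,js]
+----
+const ok = await client.cluster.deleteVotingConfigExclusions({
+  wait_for_removal: true
+})
+console.log(ok) // boolean response
+----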
+ +{ref}/voting-config-exclusions.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptions) => Promise<ClusterDeleteVotingConfigExclusionsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ClusterDeleteVotingConfigExclusionsRequest extends <<RequestBase>> { + wait_for_removal?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type ClusterDeleteVotingConfigExclusionsResponse = boolean + +---- + + +[discrete] +[[client.cluster.existsComponentTemplate]] +== `client.cluster.existsComponentTemplate()` + +Check component templates. Returns information about whether a particular component template exists. + +{ref}/indices-component-template.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ClusterExistsComponentTemplateRequest, options?: TransportRequestOptions) => Promise<ClusterExistsComponentTemplateResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ClusterExistsComponentTemplateRequest extends <<RequestBase>> { + name: <<Names>> + master_timeout?: <<Duration>> + local?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type ClusterExistsComponentTemplateResponse = boolean + +---- + + +[discrete] +[[client.cluster.getComponentTemplate]] +== `client.cluster.getComponentTemplate()` + +Get component templates. Retrieves information about component templates. + +{ref}/indices-component-template.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ClusterGetComponentTemplateRequest, options?: TransportRequestOptions) => Promise<ClusterGetComponentTemplateResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ClusterGetComponentTemplateRequest extends <<RequestBase>> { + name?: <<Name>> + flat_settings?: boolean + include_defaults?: boolean + local?: boolean + master_timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface ClusterGetComponentTemplateResponse { + component_templates: <<ClusterComponentTemplate>>[] +} + +---- + + +[discrete] +[[client.cluster.getSettings]] +== `client.cluster.getSettings()` + +Returns cluster-wide settings. By default, it returns only settings that have been explicitly defined. + +{ref}/cluster-get-settings.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ClusterGetSettingsRequest, options?: TransportRequestOptions) => Promise<ClusterGetSettingsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ClusterGetSettingsRequest extends <<RequestBase>> { + flat_settings?: boolean + include_defaults?: boolean + master_timeout?: <<Duration>> + timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface ClusterGetSettingsResponse { + persistent: Record<string, any> + transient: Record<string, any> + defaults?: Record<string, any> +} + +---- + + +[discrete] +[[client.cluster.health]] +== `client.cluster.health()` + +The cluster health API returns a simple status on the health of the cluster. You can also use the API to get the health status of only specified data streams and indices. For data streams, the API retrieves the health status of the stream’s backing indices. The cluster health status is: green, yellow or red. 
On the shard level, a red status indicates that the specific shard is not allocated in the cluster, yellow means that the primary shard is allocated but replicas are not, and green means that all shards are allocated. The index level status is controlled by the worst shard status. The cluster status is controlled by the worst index status. + +{ref}/cluster-health.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ClusterHealthRequest, options?: TransportRequestOptions) => Promise<ClusterHealthResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ClusterHealthRequest extends <<RequestBase>> { + index?: <<Indices>> + expand_wildcards?: <<ExpandWildcards>> + level?: <<Level>> + local?: boolean + master_timeout?: <<Duration>> + timeout?: <<Duration>> + wait_for_active_shards?: <<WaitForActiveShards>> + wait_for_events?: <<WaitForEvents>> + wait_for_nodes?: string | <<integer>> + wait_for_no_initializing_shards?: boolean + wait_for_no_relocating_shards?: boolean + wait_for_status?: <<HealthStatus>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type ClusterHealthResponse = ClusterHealthHealthResponseBody + +---- + + +[discrete] +[[client.cluster.info]] +== `client.cluster.info()` + +Get cluster info. Returns basic information about the cluster. + +{ref}/cluster-info.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ClusterInfoRequest, options?: TransportRequestOptions) => Promise<ClusterInfoResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ClusterInfoRequest extends <<RequestBase>> { + target: <<ClusterInfoTargets>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface ClusterInfoResponse { + cluster_name: <<Name>> + http?: <<NodesHttp>> + ingest?: <<NodesIngest>> + thread_pool?: Record<string, <<NodesThreadCount>>> + script?: <<NodesScripting>> +} + +---- + + +[discrete] +[[client.cluster.pendingTasks]] +== `client.cluster.pendingTasks()` + +Returns cluster-level changes (such as create index, update mapping, allocate or fail shard) that have not yet been executed. NOTE: This API returns a list of any pending updates to the cluster state. These are distinct from the tasks reported by the Task Management API which include periodic tasks and tasks initiated by the user, such as node stats, search queries, or create index requests. However, if a user-initiated task such as a create index command causes a cluster state update, the activity of this task might be reported by both task api and pending cluster tasks API. + +{ref}/cluster-pending.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ClusterPendingTasksRequest, options?: TransportRequestOptions) => Promise<ClusterPendingTasksResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ClusterPendingTasksRequest extends <<RequestBase>> { + local?: boolean + master_timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface ClusterPendingTasksResponse { + tasks: ClusterPendingTasksPendingTask[] +} + +---- + + +[discrete] +[[client.cluster.postVotingConfigExclusions]] +== `client.cluster.postVotingConfigExclusions()` + +Updates the cluster voting config exclusions by node ids or node names. 
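+
+A brief sketch that excludes a single node by name before decommissioning it (the node name is a placeholder; `node_ids` can be used instead):
+
+[source,js]
+----
+const ok = await client.cluster.postVotingConfigExclusions({
+  node_names: 'node-1'
+})
+console.log(ok) // boolean response
+----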
+ +{ref}/voting-config-exclusions.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptions) => Promise<ClusterPostVotingConfigExclusionsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ClusterPostVotingConfigExclusionsRequest extends <<RequestBase>> { + node_names?: <<Names>> + node_ids?: <<Ids>> + timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type ClusterPostVotingConfigExclusionsResponse = boolean + +---- + + +[discrete] +[[client.cluster.putComponentTemplate]] +== `client.cluster.putComponentTemplate()` + +Create or update a component template. Creates or updates a component template. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. An index template can be composed of multiple component templates. To use a component template, specify it in an index template’s `composed_of` list. Component templates are only applied to new data streams and indices as part of a matching index template. Settings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template. Component templates are only used during index creation. For data streams, this includes data stream creation and the creation of a stream’s backing indices. Changes to component templates do not affect existing indices, including a stream’s backing indices. You can use C-style `/* *\/` block comments in component templates. You can include comments anywhere in the request body except before the opening curly bracket. + +{ref}/indices-component-template.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ClusterPutComponentTemplateRequest, options?: TransportRequestOptions) => Promise<ClusterPutComponentTemplateResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ClusterPutComponentTemplateRequest extends <<RequestBase>> { + name: <<Name>> + create?: boolean + master_timeout?: <<Duration>> + template: <<IndicesIndexState>> + version?: <<VersionNumber>> + _meta?: <<Metadata>> + deprecated?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type ClusterPutComponentTemplateResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.cluster.putSettings]] +== `client.cluster.putSettings()` + +Updates the cluster settings. + +{ref}/cluster-update-settings.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ClusterPutSettingsRequest, options?: TransportRequestOptions) => Promise<ClusterPutSettingsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ClusterPutSettingsRequest extends <<RequestBase>> { + flat_settings?: boolean + master_timeout?: <<Duration>> + timeout?: <<Duration>> + persistent?: Record<string, any> + transient?: Record<string, any> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface ClusterPutSettingsResponse { + acknowledged: boolean + persistent: Record<string, any> + transient: Record<string, any> +} + +---- + + +[discrete] +[[client.cluster.remoteInfo]] +== `client.cluster.remoteInfo()` + +The cluster remote info API allows you to retrieve all of the configured remote cluster information. 
It returns connection and endpoint information keyed by the configured remote cluster alias. + +{ref}/cluster-remote-info.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ClusterRemoteInfoRequest, options?: TransportRequestOptions) => Promise<ClusterRemoteInfoResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ClusterRemoteInfoRequest extends <<RequestBase>> {} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type ClusterRemoteInfoResponse = Record<string, ClusterRemoteInfoClusterRemoteInfo> + +---- + + +[discrete] +[[client.cluster.reroute]] +== `client.cluster.reroute()` + +Allows to manually change the allocation of individual shards in the cluster. + +{ref}/cluster-reroute.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ClusterRerouteRequest, options?: TransportRequestOptions) => Promise<ClusterRerouteResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ClusterRerouteRequest extends <<RequestBase>> { + dry_run?: boolean + explain?: boolean + metric?: <<Metrics>> + retry_failed?: boolean + master_timeout?: <<Duration>> + timeout?: <<Duration>> + commands?: ClusterRerouteCommand[] +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface ClusterRerouteResponse { + acknowledged: boolean + explanations?: ClusterRerouteRerouteExplanation[] + state?: any +} + +---- + + +[discrete] +[[client.cluster.state]] +== `client.cluster.state()` + +Returns a comprehensive information about the state of the cluster. + +{ref}/cluster-state.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ClusterStateRequest, options?: TransportRequestOptions) => Promise<ClusterStateResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ClusterStateRequest extends <<RequestBase>> { + metric?: <<Metrics>> + index?: <<Indices>> + allow_no_indices?: boolean + expand_wildcards?: <<ExpandWildcards>> + flat_settings?: boolean + ignore_unavailable?: boolean + local?: boolean + master_timeout?: <<Duration>> + wait_for_metadata_version?: <<VersionNumber>> + wait_for_timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type ClusterStateResponse = any + +---- + + +[discrete] +[[client.cluster.stats]] +== `client.cluster.stats()` + +Returns cluster statistics. It returns basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins). 
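+
+A minimal usage sketch, assuming an already instantiated `client`:
+
+[source,js]
+----
+const stats = await client.cluster.stats()
+console.log(stats)
+----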
+ +{ref}/cluster-stats.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ClusterStatsRequest, options?: TransportRequestOptions) => Promise<ClusterStatsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ClusterStatsRequest extends <<RequestBase>> { + node_id?: <<NodeIds>> + include_remotes?: boolean + timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type ClusterStatsResponse = ClusterStatsStatsResponseBase + +---- + + diff --git a/docs/reference/connector.asciidoc b/docs/reference/connector.asciidoc new file mode 100644 index 000000000..52ca46d68 --- /dev/null +++ b/docs/reference/connector.asciidoc @@ -0,0 +1,1077 @@ +[[reference-connector]] +== client.connector + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.connector.checkIn]] +== `client.connector.checkIn()` + +Check in a connector. Update the `last_seen` field in the connector and set it to the current timestamp. + +{ref}/check-in-connector-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ConnectorCheckInRequest, options?: TransportRequestOptions) => Promise<ConnectorCheckInResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ConnectorCheckInRequest extends <<RequestBase>> { + connector_id: <<Id>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface ConnectorCheckInResponse { + result: <<Result>> +} + +---- + + +[discrete] +[[client.connector.delete]] +== `client.connector.delete()` + +Delete a connector. Removes a connector and associated sync jobs. This is a destructive action that is not recoverable. NOTE: This action doesn’t delete any API keys, ingest pipelines, or data indices associated with the connector. These need to be removed manually. 
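+
+An illustrative call (the connector ID is a placeholder); as noted above, API keys, pipelines, and indices tied to the connector must be cleaned up separately:
+
+[source,js]
+----
+const response = await client.connector.delete({
+  connector_id: 'my-connector-id',
+  delete_sync_jobs: true
+})
+console.log(response.acknowledged)
+----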
+ +{ref}/delete-connector-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ConnectorDeleteRequest, options?: TransportRequestOptions) => Promise<ConnectorDeleteResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ConnectorDeleteRequest extends <<RequestBase>> { + connector_id: <<Id>> + delete_sync_jobs?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type ConnectorDeleteResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.connector.get]] +== `client.connector.get()` + +Get a connector. Get the details about a connector. + +{ref}/get-connector-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ConnectorGetRequest, options?: TransportRequestOptions) => Promise<ConnectorGetResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ConnectorGetRequest extends <<RequestBase>> { + connector_id: <<Id>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type ConnectorGetResponse = <<ConnectorConnector>> + +---- + + +[discrete] +[[client.connector.list]] +== `client.connector.list()` + +Get all connectors. Get information about all connectors. + +{ref}/list-connector-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ConnectorListRequest, options?: TransportRequestOptions) => Promise<ConnectorListResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ConnectorListRequest extends <<RequestBase>> { + from?: <<integer>> + size?: <<integer>> + index_name?: <<Indices>> + connector_name?: <<Names>> + service_type?: <<Names>> + query?: string +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface ConnectorListResponse { + count: <<long>> + results: <<ConnectorConnector>>[] +} + +---- + + +[discrete] +[[client.connector.post]] +== `client.connector.post()` + +Create a connector. Connectors are Elasticsearch integrations that bring content from third-party data sources, which can be deployed on Elastic Cloud or hosted on your own infrastructure. Elastic managed connectors (Native connectors) are a managed service on Elastic Cloud. Self-managed connectors (Connector clients) are self-managed on your infrastructure. + +{ref}/create-connector-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ConnectorPostRequest, options?: TransportRequestOptions) => Promise<ConnectorPostResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ConnectorPostRequest extends <<RequestBase>> { + description?: string + index_name?: <<IndexName>> + is_native?: boolean + language?: string + name?: string + service_type?: string +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface ConnectorPostResponse { + result: <<Result>> + id: <<Id>> +} + +---- + + +[discrete] +[[client.connector.put]] +== `client.connector.put()` + +Create or update a connector. 
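+
+A hedged sketch that registers a connector; every value is a placeholder:
+
+[source,js]
+----
+const response = await client.connector.put({
+  connector_id: 'my-connector-id',
+  index_name: 'search-my-content',
+  name: 'My content connector',
+  service_type: 'google_drive' // placeholder service type
+})
+console.log(response.result, response.id)
+----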
+ +{ref}/create-connector-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ConnectorPutRequest, options?: TransportRequestOptions) => Promise<ConnectorPutResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ConnectorPutRequest extends <<RequestBase>> { + connector_id?: <<Id>> + description?: string + index_name?: <<IndexName>> + is_native?: boolean + language?: string + name?: string + service_type?: string +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface ConnectorPutResponse { + result: <<Result>> + id: <<Id>> +} + +---- + + +[discrete] +[[client.connector.syncJobCancel]] +== `client.connector.syncJobCancel()` + +Cancel a connector sync job. Cancel a connector sync job, which sets the status to cancelling and updates `cancellation_requested_at` to the current time. The connector service is then responsible for setting the status of connector sync jobs to cancelled. + +{ref}/cancel-connector-sync-job-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ConnectorSyncJobCancelRequest, options?: TransportRequestOptions) => Promise<ConnectorSyncJobCancelResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ConnectorSyncJobCancelRequest extends <<RequestBase>> { + connector_sync_job_id: <<Id>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface ConnectorSyncJobCancelResponse { + result: <<Result>> +} + +---- + + +[discrete] +[[client.connector.syncJobCheckIn]] +== `client.connector.syncJobCheckIn()` + +Checks in a connector sync job (refreshes 'last_seen'). + +{ref}/check-in-connector-sync-job-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ConnectorSyncJobCheckInRequest, options?: TransportRequestOptions) => Promise<ConnectorSyncJobCheckInResponse> +---- + +[discrete] +[[client.connector.syncJobClaim]] +== `client.connector.syncJobClaim()` + +Claims a connector sync job. +[discrete] +=== Function signature + +[source,ts] +---- +(request: ConnectorSyncJobClaimRequest, options?: TransportRequestOptions) => Promise<ConnectorSyncJobClaimResponse> +---- + +[discrete] +[[client.connector.syncJobDelete]] +== `client.connector.syncJobDelete()` + +Delete a connector sync job. Remove a connector sync job and its associated data. This is a destructive action that is not recoverable. + +{ref}/delete-connector-sync-job-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ConnectorSyncJobDeleteRequest, options?: TransportRequestOptions) => Promise<ConnectorSyncJobDeleteResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ConnectorSyncJobDeleteRequest extends <<RequestBase>> { + connector_sync_job_id: <<Id>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type ConnectorSyncJobDeleteResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.connector.syncJobError]] +== `client.connector.syncJobError()` + +Sets an error for a connector sync job. + +{ref}/set-connector-sync-job-error-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ConnectorSyncJobErrorRequest, options?: TransportRequestOptions) => Promise<ConnectorSyncJobErrorResponse> +---- + +[discrete] +[[client.connector.syncJobGet]] +== `client.connector.syncJobGet()` + +Get a connector sync job. 
+ +{ref}/get-connector-sync-job-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ConnectorSyncJobGetRequest, options?: TransportRequestOptions) => Promise<ConnectorSyncJobGetResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ConnectorSyncJobGetRequest extends <<RequestBase>> { + connector_sync_job_id: <<Id>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type ConnectorSyncJobGetResponse = <<ConnectorConnectorSyncJob>> + +---- + + +[discrete] +[[client.connector.syncJobList]] +== `client.connector.syncJobList()` + +Get all connector sync jobs. Get information about all stored connector sync jobs listed by their creation date in ascending order. + +{ref}/list-connector-sync-jobs-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ConnectorSyncJobListRequest, options?: TransportRequestOptions) => Promise<ConnectorSyncJobListResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ConnectorSyncJobListRequest extends <<RequestBase>> { + from?: <<integer>> + size?: <<integer>> + status?: <<ConnectorSyncStatus>> + connector_id?: <<Id>> + job_type?: <<ConnectorSyncJobType>> | <<ConnectorSyncJobType>>[] +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface ConnectorSyncJobListResponse { + count: <<long>> + results: <<ConnectorConnectorSyncJob>>[] +} + +---- + + +[discrete] +[[client.connector.syncJobPost]] +== `client.connector.syncJobPost()` + +Create a connector sync job. Create a connector sync job document in the internal index and initialize its counters and timestamps with default values. + +{ref}/create-connector-sync-job-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ConnectorSyncJobPostRequest, options?: TransportRequestOptions) => Promise<ConnectorSyncJobPostResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ConnectorSyncJobPostRequest extends <<RequestBase>> { + id: <<Id>> + job_type?: <<ConnectorSyncJobType>> + trigger_method?: <<ConnectorSyncJobTriggerMethod>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface ConnectorSyncJobPostResponse { + id: <<Id>> +} + +---- + + +[discrete] +[[client.connector.syncJobUpdateStats]] +== `client.connector.syncJobUpdateStats()` + +Updates the stats fields in the connector sync job document. + +{ref}/set-connector-sync-job-stats-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ConnectorSyncJobUpdateStatsRequest, options?: TransportRequestOptions) => Promise<ConnectorSyncJobUpdateStatsResponse> +---- + +[discrete] +[[client.connector.updateActiveFiltering]] +== `client.connector.updateActiveFiltering()` + +Activate the connector draft filter. Activates the valid draft filtering for a connector. 
+ +{ref}/update-connector-filtering-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptions) => Promise<ConnectorUpdateActiveFilteringResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ConnectorUpdateActiveFilteringRequest extends <<RequestBase>> { + connector_id: <<Id>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface ConnectorUpdateActiveFilteringResponse { + result: <<Result>> +} + +---- + + +[discrete] +[[client.connector.updateApiKeyId]] +== `client.connector.updateApiKeyId()` + +Update the connector API key ID. Update the `api_key_id` and `api_key_secret_id` fields of a connector. You can specify the ID of the API key used for authorization and the ID of the connector secret where the API key is stored. The connector secret ID is required only for Elastic managed (native) connectors. Self-managed connectors (connector clients) do not use this field. + +{ref}/update-connector-api-key-id-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ConnectorUpdateApiKeyIdRequest, options?: TransportRequestOptions) => Promise<ConnectorUpdateApiKeyIdResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ConnectorUpdateApiKeyIdRequest extends <<RequestBase>> { + connector_id: <<Id>> + api_key_id?: string + api_key_secret_id?: string +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface ConnectorUpdateApiKeyIdResponse { + result: <<Result>> +} + +---- + + +[discrete] +[[client.connector.updateConfiguration]] +== `client.connector.updateConfiguration()` + +Update the connector configuration. Update the configuration field in the connector document. + +{ref}/update-connector-configuration-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ConnectorUpdateConfigurationRequest, options?: TransportRequestOptions) => Promise<ConnectorUpdateConfigurationResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ConnectorUpdateConfigurationRequest extends <<RequestBase>> { + connector_id: <<Id>> + configuration?: <<ConnectorConnectorConfiguration>> + values?: Record<string, any> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface ConnectorUpdateConfigurationResponse { + result: <<Result>> +} + +---- + + +[discrete] +[[client.connector.updateError]] +== `client.connector.updateError()` + +Update the connector error field. Set the error field for the connector. If the error provided in the request body is non-null, the connector’s status is updated to error. Otherwise, if the error is reset to null, the connector status is updated to connected. 
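+
+A short sketch; per the description above, passing `null` clears the error and returns the connector to the connected status (the connector ID is a placeholder):
+
+[source,js]
+----
+await client.connector.updateError({
+  connector_id: 'my-connector-id',
+  error: null
+})
+----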
+ +{ref}/update-connector-error-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ConnectorUpdateErrorRequest, options?: TransportRequestOptions) => Promise<ConnectorUpdateErrorResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ConnectorUpdateErrorRequest extends <<RequestBase>> { + connector_id: <<Id>> + error: <<SpecUtilsWithNullValue>><string> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface ConnectorUpdateErrorResponse { + result: <<Result>> +} + +---- + + +[discrete] +[[client.connector.updateFeatures]] +== `client.connector.updateFeatures()` + +Updates the connector features in the connector document. + +{ref}/update-connector-features-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ConnectorUpdateFeaturesRequest, options?: TransportRequestOptions) => Promise<ConnectorUpdateFeaturesResponse> +---- + +[discrete] +[[client.connector.updateFiltering]] +== `client.connector.updateFiltering()` + +Update the connector filtering. Update the draft filtering configuration of a connector and marks the draft validation state as edited. The filtering draft is activated once validated by the running Elastic connector service. The filtering property is used to configure sync rules (both basic and advanced) for a connector. + +{ref}/update-connector-filtering-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ConnectorUpdateFilteringRequest, options?: TransportRequestOptions) => Promise<ConnectorUpdateFilteringResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ConnectorUpdateFilteringRequest extends <<RequestBase>> { + connector_id: <<Id>> + filtering?: <<ConnectorFilteringConfig>>[] + rules?: <<ConnectorFilteringRule>>[] + advanced_snippet?: <<ConnectorFilteringAdvancedSnippet>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface ConnectorUpdateFilteringResponse { + result: <<Result>> +} + +---- + + +[discrete] +[[client.connector.updateFilteringValidation]] +== `client.connector.updateFilteringValidation()` + +Update the connector draft filtering validation. Update the draft filtering validation info for a connector. +[discrete] +=== Function signature + +[source,ts] +---- +(request: ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptions) => Promise<ConnectorUpdateFilteringValidationResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ConnectorUpdateFilteringValidationRequest extends <<RequestBase>> { + connector_id: <<Id>> + validation: <<ConnectorFilteringRulesValidation>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface ConnectorUpdateFilteringValidationResponse { + result: <<Result>> +} + +---- + + +[discrete] +[[client.connector.updateIndexName]] +== `client.connector.updateIndexName()` + +Update the connector index name. Update the `index_name` field of a connector, specifying the index where the data ingested by the connector is stored. 
+ +{ref}/update-connector-index-name-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ConnectorUpdateIndexNameRequest, options?: TransportRequestOptions) => Promise<ConnectorUpdateIndexNameResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ConnectorUpdateIndexNameRequest extends <<RequestBase>> { + connector_id: <<Id>> + index_name: <<SpecUtilsWithNullValue>><<<IndexName>>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface ConnectorUpdateIndexNameResponse { + result: <<Result>> +} + +---- + + +[discrete] +[[client.connector.updateName]] +== `client.connector.updateName()` + +Update the connector name and description. + +{ref}/update-connector-name-description-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ConnectorUpdateNameRequest, options?: TransportRequestOptions) => Promise<ConnectorUpdateNameResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ConnectorUpdateNameRequest extends <<RequestBase>> { + connector_id: <<Id>> + name?: string + description?: string +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface ConnectorUpdateNameResponse { + result: <<Result>> +} + +---- + + +[discrete] +[[client.connector.updateNative]] +== `client.connector.updateNative()` + +Update the connector is_native flag. +[discrete] +=== Function signature + +[source,ts] +---- +(request: ConnectorUpdateNativeRequest, options?: TransportRequestOptions) => Promise<ConnectorUpdateNativeResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ConnectorUpdateNativeRequest extends <<RequestBase>> { + connector_id: <<Id>> + is_native: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface ConnectorUpdateNativeResponse { + result: <<Result>> +} + +---- + + +[discrete] +[[client.connector.updatePipeline]] +== `client.connector.updatePipeline()` + +Update the connector pipeline. When you create a new connector, the configuration of an ingest pipeline is populated with default settings. + +{ref}/update-connector-pipeline-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ConnectorUpdatePipelineRequest, options?: TransportRequestOptions) => Promise<ConnectorUpdatePipelineResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ConnectorUpdatePipelineRequest extends <<RequestBase>> { + connector_id: <<Id>> + pipeline: <<ConnectorIngestPipelineParams>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface ConnectorUpdatePipelineResponse { + result: <<Result>> +} + +---- + + +[discrete] +[[client.connector.updateScheduling]] +== `client.connector.updateScheduling()` + +Update the connector scheduling. 
+ +{ref}/update-connector-scheduling-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ConnectorUpdateSchedulingRequest, options?: TransportRequestOptions) => Promise<ConnectorUpdateSchedulingResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ConnectorUpdateSchedulingRequest extends <<RequestBase>> { + connector_id: <<Id>> + scheduling: <<ConnectorSchedulingConfiguration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface ConnectorUpdateSchedulingResponse { + result: <<Result>> +} + +---- + + +[discrete] +[[client.connector.updateServiceType]] +== `client.connector.updateServiceType()` + +Update the connector service type. + +{ref}/update-connector-service-type-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ConnectorUpdateServiceTypeRequest, options?: TransportRequestOptions) => Promise<ConnectorUpdateServiceTypeResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ConnectorUpdateServiceTypeRequest extends <<RequestBase>> { + connector_id: <<Id>> + service_type: string +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface ConnectorUpdateServiceTypeResponse { + result: <<Result>> +} + +---- + + +[discrete] +[[client.connector.updateStatus]] +== `client.connector.updateStatus()` + +Update the connector status. + +{ref}/update-connector-status-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ConnectorUpdateStatusRequest, options?: TransportRequestOptions) => Promise<ConnectorUpdateStatusResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ConnectorUpdateStatusRequest extends <<RequestBase>> { + connector_id: <<Id>> + status: <<ConnectorConnectorStatus>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface ConnectorUpdateStatusResponse { + result: <<Result>> +} + +---- + + diff --git a/docs/reference/count.asciidoc b/docs/reference/count.asciidoc new file mode 100644 index 000000000..1c5c8c273 --- /dev/null +++ b/docs/reference/count.asciidoc @@ -0,0 +1,91 @@ +[[reference-count]] +== client.count + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.count]] +== `client.count()` + +Count search results. Get the number of documents matching a query. 
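+
+A minimal usage sketch (the index and field values are placeholders):
+
+[source,js]
+----
+const response = await client.count({
+  index: 'my-index',
+  query: {
+    match: { 'user.id': 'elkbee' }
+  }
+})
+console.log(response.count)
+----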
+ +{ref}/search-count.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: CountRequest, options?: TransportRequestOptions) => Promise<CountResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface CountRequest extends <<RequestBase>> { + index?: <<Indices>> + allow_no_indices?: boolean + analyzer?: string + analyze_wildcard?: boolean + default_operator?: <<QueryDslOperator>> + df?: string + expand_wildcards?: <<ExpandWildcards>> + ignore_throttled?: boolean + ignore_unavailable?: boolean + lenient?: boolean + min_score?: <<double>> + preference?: string + routing?: <<Routing>> + terminate_after?: <<long>> + q?: string + query?: <<QueryDslQueryContainer>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface CountResponse { + count: <<long>> + _shards: <<ShardStatistics>> +} + +---- + + diff --git a/docs/reference/create.asciidoc b/docs/reference/create.asciidoc new file mode 100644 index 000000000..e438ebd81 --- /dev/null +++ b/docs/reference/create.asciidoc @@ -0,0 +1,82 @@ +[[reference-create]] +== client.create + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.create]] +== `client.create()` + +Index a document. Adds a JSON document to the specified data stream or index and makes it searchable. If the target is an index and the document already exists, the request updates the document and increments its version. 
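+
+A minimal usage sketch (index, ID, and document contents are placeholders); note that `id` is required here:
+
+[source,js]
+----
+const response = await client.create({
+  index: 'my-index',
+  id: '1',
+  document: {
+    title: 'Hello world',
+    published: true
+  }
+})
+console.log(response.result)
+----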
+ +{ref}/docs-index_.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: CreateRequest, options?: TransportRequestOptions) => Promise<CreateResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface CreateRequest<TDocument = unknown> extends <<RequestBase>> { + id: <<Id>> + index: <<IndexName>> + pipeline?: string + refresh?: <<Refresh>> + routing?: <<Routing>> + timeout?: <<Duration>> + version?: <<VersionNumber>> + version_type?: <<VersionType>> + wait_for_active_shards?: <<WaitForActiveShards>> + document?: TDocument +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type CreateResponse = <<WriteResponseBase>> + +---- + + diff --git a/docs/reference/dangling_indices.asciidoc b/docs/reference/dangling_indices.asciidoc new file mode 100644 index 000000000..7ce35d062 --- /dev/null +++ b/docs/reference/dangling_indices.asciidoc @@ -0,0 +1,153 @@ +[[reference-dangling_indices]] +== client.danglingIndices + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.danglingIndices.deleteDanglingIndex]] +== `client.danglingIndices.deleteDanglingIndex()` + +Delete a dangling index. If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. + +{ref}/modules-gateway-dangling-indices.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptions) => Promise<DanglingIndicesDeleteDanglingIndexResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface DanglingIndicesDeleteDanglingIndexRequest extends <<RequestBase>> { + index_uuid: <<Uuid>> + accept_data_loss: boolean + master_timeout?: <<Duration>> + timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type DanglingIndicesDeleteDanglingIndexResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.danglingIndices.importDanglingIndex]] +== `client.danglingIndices.importDanglingIndex()` + +Import a dangling index. If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. 
For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. + +{ref}/modules-gateway-dangling-indices.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptions) => Promise<DanglingIndicesImportDanglingIndexResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface DanglingIndicesImportDanglingIndexRequest extends <<RequestBase>> { + index_uuid: <<Uuid>> + accept_data_loss: boolean + master_timeout?: <<Duration>> + timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type DanglingIndicesImportDanglingIndexResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.danglingIndices.listDanglingIndices]] +== `client.danglingIndices.listDanglingIndices()` + +Get the dangling indices. If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. Use this API to list dangling indices, which you can then import or delete. + +{ref}/modules-gateway-dangling-indices.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptions) => Promise<DanglingIndicesListDanglingIndicesResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface DanglingIndicesListDanglingIndicesRequest extends <<RequestBase>> {} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface DanglingIndicesListDanglingIndicesResponse { + dangling_indices: DanglingIndicesListDanglingIndicesDanglingIndex[] +} + +---- + + diff --git a/docs/reference/delete.asciidoc b/docs/reference/delete.asciidoc new file mode 100644 index 000000000..6e330d353 --- /dev/null +++ b/docs/reference/delete.asciidoc @@ -0,0 +1,82 @@ +[[reference-delete]] +== client.delete + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.delete]] +== `client.delete()` + +Delete a document. Removes a JSON document from the specified index. 
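+
+A minimal usage sketch (index and document ID are placeholders):
+
+[source,js]
+----
+const response = await client.delete({
+  index: 'my-index',
+  id: '1'
+})
+console.log(response.result)
+----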
+ +{ref}/docs-delete.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: DeleteRequest, options?: TransportRequestOptions) => Promise<DeleteResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface DeleteRequest extends <<RequestBase>> { + id: <<Id>> + index: <<IndexName>> + if_primary_term?: <<long>> + if_seq_no?: <<SequenceNumber>> + refresh?: <<Refresh>> + routing?: <<Routing>> + timeout?: <<Duration>> + version?: <<VersionNumber>> + version_type?: <<VersionType>> + wait_for_active_shards?: <<WaitForActiveShards>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type DeleteResponse = <<WriteResponseBase>> + +---- + + diff --git a/docs/reference/delete_by_query.asciidoc b/docs/reference/delete_by_query.asciidoc new file mode 100644 index 000000000..01043593f --- /dev/null +++ b/docs/reference/delete_by_query.asciidoc @@ -0,0 +1,121 @@ +[[reference-delete_by_query]] +== client.deleteByQuery + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.deleteByQuery]] +== `client.deleteByQuery()` + +Delete documents. Deletes documents that match the specified query. 
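+
+A brief sketch that removes every document matching a query (index and field values are placeholders):
+
+[source,js]
+----
+const response = await client.deleteByQuery({
+  index: 'my-index',
+  query: {
+    match: { 'user.id': 'elkbee' }
+  }
+})
+console.log(response.deleted, response.took)
+----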
+ +{ref}/docs-delete-by-query.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: DeleteByQueryRequest, options?: TransportRequestOptions) => Promise<DeleteByQueryResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface DeleteByQueryRequest extends <<RequestBase>> { + index: <<Indices>> + allow_no_indices?: boolean + analyzer?: string + analyze_wildcard?: boolean + conflicts?: <<Conflicts>> + default_operator?: <<QueryDslOperator>> + df?: string + expand_wildcards?: <<ExpandWildcards>> + from?: <<long>> + ignore_unavailable?: boolean + lenient?: boolean + preference?: string + refresh?: boolean + request_cache?: boolean + requests_per_second?: <<float>> + routing?: <<Routing>> + q?: string + scroll?: <<Duration>> + scroll_size?: <<long>> + search_timeout?: <<Duration>> + search_type?: <<SearchType>> + slices?: <<Slices>> + sort?: string[] + stats?: string[] + terminate_after?: <<long>> + timeout?: <<Duration>> + version?: boolean + wait_for_active_shards?: <<WaitForActiveShards>> + wait_for_completion?: boolean + max_docs?: <<long>> + query?: <<QueryDslQueryContainer>> + slice?: <<SlicedScroll>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface DeleteByQueryResponse { + batches?: <<long>> + deleted?: <<long>> + failures?: <<BulkIndexByScrollFailure>>[] + noops?: <<long>> + requests_per_second?: <<float>> + retries?: <<Retries>> + slice_id?: <<integer>> + task?: <<TaskId>> + throttled?: <<Duration>> + throttled_millis?: <<DurationValue>><<<UnitMillis>>> + throttled_until?: <<Duration>> + throttled_until_millis?: <<DurationValue>><<<UnitMillis>>> + timed_out?: boolean + took?: <<DurationValue>><<<UnitMillis>>> + total?: <<long>> + version_conflicts?: <<long>> +} + +---- + + diff --git a/docs/reference/delete_by_query_rethrottle.asciidoc b/docs/reference/delete_by_query_rethrottle.asciidoc new file mode 100644 index 000000000..b3e91cf68 --- /dev/null +++ b/docs/reference/delete_by_query_rethrottle.asciidoc @@ -0,0 +1,74 @@ +[[reference-delete_by_query_rethrottle]] +== client.deleteByQueryRethrottle + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.deleteByQueryRethrottle]] +== `client.deleteByQueryRethrottle()` + +Throttle a delete by query operation. Change the number of requests per second for a particular delete by query operation. 
Rethrottling that speeds up the query takes effect immediately but rethrotting that slows down the query takes effect after completing the current batch to prevent scroll timeouts. + +{ref}/docs-delete-by-query.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: DeleteByQueryRethrottleRequest, options?: TransportRequestOptions) => Promise<DeleteByQueryRethrottleResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface DeleteByQueryRethrottleRequest extends <<RequestBase>> { + task_id: <<TaskId>> + requests_per_second?: <<float>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type DeleteByQueryRethrottleResponse = <<TasksTaskListResponseBase>> + +---- + + diff --git a/docs/reference/delete_script.asciidoc b/docs/reference/delete_script.asciidoc new file mode 100644 index 000000000..9f41d0850 --- /dev/null +++ b/docs/reference/delete_script.asciidoc @@ -0,0 +1,75 @@ +[[reference-delete_script]] +== client.deleteScript + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.deleteScript]] +== `client.deleteScript()` + +Delete a script or search template. Deletes a stored script or search template. 
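+
+A minimal sketch, assuming a stored script with the placeholder ID `my-stored-script` and a configured `client` instance:
+
+[source,ts]
+----
+const response = await client.deleteScript({
+  id: 'my-stored-script' // placeholder stored script ID
+})
+console.log(response.acknowledged) // true when the script was removed
+----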
+ +{ref}/modules-scripting.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: DeleteScriptRequest, options?: TransportRequestOptions) => Promise<DeleteScriptResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface DeleteScriptRequest extends <<RequestBase>> { + id: <<Id>> + master_timeout?: <<Duration>> + timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type DeleteScriptResponse = <<AcknowledgedResponseBase>> + +---- + + diff --git a/docs/reference/enrich.asciidoc b/docs/reference/enrich.asciidoc new file mode 100644 index 000000000..cee45125e --- /dev/null +++ b/docs/reference/enrich.asciidoc @@ -0,0 +1,232 @@ +[[reference-enrich]] +== client.enrich + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.enrich.deletePolicy]] +== `client.enrich.deletePolicy()` + +Delete an enrich policy. Deletes an existing enrich policy and its enrich index. + +{ref}/delete-enrich-policy-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: EnrichDeletePolicyRequest, options?: TransportRequestOptions) => Promise<EnrichDeletePolicyResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface EnrichDeletePolicyRequest extends <<RequestBase>> { + name: <<Name>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type EnrichDeletePolicyResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.enrich.executePolicy]] +== `client.enrich.executePolicy()` + +Run an enrich policy. Create the enrich index for an existing enrich policy. + +{ref}/execute-enrich-policy-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: EnrichExecutePolicyRequest, options?: TransportRequestOptions) => Promise<EnrichExecutePolicyResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface EnrichExecutePolicyRequest extends <<RequestBase>> { + name: <<Name>> + wait_for_completion?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface EnrichExecutePolicyResponse { + status?: EnrichExecutePolicyExecuteEnrichPolicyStatus + task_id?: <<TaskId>> +} + +---- + + +[discrete] +[[client.enrich.getPolicy]] +== `client.enrich.getPolicy()` + +Get an enrich policy. Returns information about an enrich policy. 
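+
+For example, fetching a single policy by name might look like this sketch (the policy name is a placeholder; omit `name` to list all policies, and `client` is assumed to be a configured instance):
+
+[source,ts]
+----
+const response = await client.enrich.getPolicy({
+  name: 'my-enrich-policy' // placeholder policy name
+})
+console.log(response.policies)
+----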
+ +{ref}/get-enrich-policy-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: EnrichGetPolicyRequest, options?: TransportRequestOptions) => Promise<EnrichGetPolicyResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface EnrichGetPolicyRequest extends <<RequestBase>> { + name?: <<Names>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface EnrichGetPolicyResponse { + policies: <<EnrichSummary>>[] +} + +---- + + +[discrete] +[[client.enrich.putPolicy]] +== `client.enrich.putPolicy()` + +Create an enrich policy. Creates an enrich policy. + +{ref}/put-enrich-policy-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: EnrichPutPolicyRequest, options?: TransportRequestOptions) => Promise<EnrichPutPolicyResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface EnrichPutPolicyRequest extends <<RequestBase>> { + name: <<Name>> + geo_match?: <<EnrichPolicy>> + match?: <<EnrichPolicy>> + range?: <<EnrichPolicy>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type EnrichPutPolicyResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.enrich.stats]] +== `client.enrich.stats()` + +Get enrich stats. Returns enrich coordinator statistics and information about enrich policies that are currently executing. + +{ref}/enrich-stats-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: EnrichStatsRequest, options?: TransportRequestOptions) => Promise<EnrichStatsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface EnrichStatsRequest extends <<RequestBase>> {} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface EnrichStatsResponse { + coordinator_stats: EnrichStatsCoordinatorStats[] + executing_policies: EnrichStatsExecutingPolicy[] + cache_stats?: EnrichStatsCacheStats[] +} + +---- + + diff --git a/docs/reference/eql.asciidoc b/docs/reference/eql.asciidoc new file mode 100644 index 000000000..0fd421af5 --- /dev/null +++ b/docs/reference/eql.asciidoc @@ -0,0 +1,211 @@ +[[reference-eql]] +== client.eql + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.eql.delete]] +== `client.eql.delete()` + +Delete an async EQL search. Delete an async EQL search or a stored synchronous EQL search. 
The API also deletes results for the search. + +{ref}/eql-search-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: EqlDeleteRequest, options?: TransportRequestOptions) => Promise<EqlDeleteResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface EqlDeleteRequest extends <<RequestBase>> { + id: <<Id>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type EqlDeleteResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.eql.get]] +== `client.eql.get()` + +Get async EQL search results. Get the current status and available results for an async EQL search or a stored synchronous EQL search. + +{ref}/get-async-eql-search-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: EqlGetRequest, options?: TransportRequestOptions) => Promise<EqlGetResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface EqlGetRequest extends <<RequestBase>> { + id: <<Id>> + keep_alive?: <<Duration>> + wait_for_completion_timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type EqlGetResponse<TEvent = unknown> = <<EqlEqlSearchResponseBase>><TEvent> + +---- + + +[discrete] +[[client.eql.getStatus]] +== `client.eql.getStatus()` + +Get the async EQL status. Get the current status for an async EQL search or a stored synchronous EQL search without returning results. + +{ref}/get-async-eql-status-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: EqlGetStatusRequest, options?: TransportRequestOptions) => Promise<EqlGetStatusResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface EqlGetStatusRequest extends <<RequestBase>> { + id: <<Id>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface EqlGetStatusResponse { + id: <<Id>> + is_partial: boolean + is_running: boolean + start_time_in_millis?: <<EpochTime>><<<UnitMillis>>> + expiration_time_in_millis?: <<EpochTime>><<<UnitMillis>>> + completion_status?: <<integer>> +} + +---- + + +[discrete] +[[client.eql.search]] +== `client.eql.search()` + +Get EQL search results. Returns search results for an Event Query Language (EQL) query. EQL assumes each document in a data stream or index corresponds to an event. 
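+
+A small sketch of an EQL search over a placeholder index, assuming a configured `client` instance; the query string is only an illustrative EQL expression:
+
+[source,ts]
+----
+const response = await client.eql.search({
+  index: 'my-logs-index',                                // placeholder index name
+  query: 'process where process.name == "regsvr32.exe"'  // illustrative EQL query
+})
+console.log(response.hits.events)
+----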
+ +{ref}/eql-search-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: EqlSearchRequest, options?: TransportRequestOptions) => Promise<EqlSearchResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface EqlSearchRequest extends <<RequestBase>> { + index: <<Indices>> + allow_no_indices?: boolean + expand_wildcards?: <<ExpandWildcards>> + ignore_unavailable?: boolean + query: string + case_sensitive?: boolean + event_category_field?: <<Field>> + tiebreaker_field?: <<Field>> + timestamp_field?: <<Field>> + fetch_size?: <<uint>> + filter?: <<QueryDslQueryContainer>> | <<QueryDslQueryContainer>>[] + keep_alive?: <<Duration>> + keep_on_completion?: boolean + wait_for_completion_timeout?: <<Duration>> + size?: <<uint>> + fields?: <<QueryDslFieldAndFormat>> | <<Field>> | (<<QueryDslFieldAndFormat>> | <<Field>>)[] + result_position?: EqlSearchResultPosition + runtime_mappings?: <<MappingRuntimeFields>> + max_samples_per_key?: <<integer>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type EqlSearchResponse<TEvent = unknown> = <<EqlEqlSearchResponseBase>><TEvent> + +---- + + diff --git a/docs/reference/esql.asciidoc b/docs/reference/esql.asciidoc new file mode 100644 index 000000000..e0f822dbe --- /dev/null +++ b/docs/reference/esql.asciidoc @@ -0,0 +1,112 @@ +[[reference-esql]] +== client.esql + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.esql.asyncQuery]] +== `client.esql.asyncQuery()` + +Executes an ESQL request asynchronously + +{ref}/esql-async-query-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: EsqlAsyncQueryRequest, options?: TransportRequestOptions) => Promise<EsqlAsyncQueryResponse> +---- + +[discrete] +[[client.esql.asyncQueryGet]] +== `client.esql.asyncQueryGet()` + +Retrieves the results of a previously submitted async query request given its ID. + +{ref}/esql-async-query-get-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: EsqlAsyncQueryGetRequest, options?: TransportRequestOptions) => Promise<EsqlAsyncQueryGetResponse> +---- + +[discrete] +[[client.esql.query]] +== `client.esql.query()` + +Run an ES|QL query. Get search results for an ES|QL (Elasticsearch query language) query. 
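+
+As an illustration, a simple ES|QL query might be sent like this (the index name and query text are placeholders, and `client` is assumed to be configured):
+
+[source,ts]
+----
+const response = await client.esql.query({
+  query: 'FROM my-index | WHERE user.id == "alice" | LIMIT 10' // placeholder ES|QL query
+})
+console.log(response) // shape depends on the requested `format`
+----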
+ +{ref}/esql-rest.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: EsqlQueryRequest, options?: TransportRequestOptions) => Promise<EsqlQueryResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface EsqlQueryRequest extends <<RequestBase>> { + format?: EsqlQueryEsqlFormat + delimiter?: string + drop_null_columns?: boolean + columnar?: boolean + filter?: <<QueryDslQueryContainer>> + locale?: string + params?: <<FieldValue>>[] + profile?: boolean + query: string + tables?: Record<string, Record<string, <<EsqlTableValuesContainer>>>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type EsqlQueryResponse = <<EsqlColumns>> + +---- + + diff --git a/docs/reference/exists.asciidoc b/docs/reference/exists.asciidoc new file mode 100644 index 000000000..75dd40842 --- /dev/null +++ b/docs/reference/exists.asciidoc @@ -0,0 +1,84 @@ +[[reference-exists]] +== client.exists + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.exists]] +== `client.exists()` + +Check a document. Checks if a specified document exists. 
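+
+A minimal existence check might look like the following sketch (placeholder index and ID, configured `client` assumed):
+
+[source,ts]
+----
+const exists = await client.exists({
+  index: 'my-index',    // placeholder index name
+  id: 'my-document-id'  // placeholder document ID
+})
+console.log(exists) // true or false
+----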
+ +{ref}/docs-get.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ExistsRequest, options?: TransportRequestOptions) => Promise<ExistsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ExistsRequest extends <<RequestBase>> { + id: <<Id>> + index: <<IndexName>> + preference?: string + realtime?: boolean + refresh?: boolean + routing?: <<Routing>> + _source?: <<SearchSourceConfigParam>> + _source_excludes?: <<Fields>> + _source_includes?: <<Fields>> + stored_fields?: <<Fields>> + version?: <<VersionNumber>> + version_type?: <<VersionType>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type ExistsResponse = boolean + +---- + + diff --git a/docs/reference/exists_source.asciidoc b/docs/reference/exists_source.asciidoc new file mode 100644 index 000000000..fbe9f7382 --- /dev/null +++ b/docs/reference/exists_source.asciidoc @@ -0,0 +1,83 @@ +[[reference-exists_source]] +== client.existsSource + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.existsSource]] +== `client.existsSource()` + +Check for a document source. Checks if a document's `_source` is stored. 
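+
+For example, checking whether a document's source is available might look like this sketch (placeholder names, configured `client` assumed):
+
+[source,ts]
+----
+const hasSource = await client.existsSource({
+  index: 'my-index',    // placeholder index name
+  id: 'my-document-id'  // placeholder document ID
+})
+console.log(hasSource) // false if `_source` is disabled or excluded
+----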
+ +{ref}/docs-get.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ExistsSourceRequest, options?: TransportRequestOptions) => Promise<ExistsSourceResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ExistsSourceRequest extends <<RequestBase>> { + id: <<Id>> + index: <<IndexName>> + preference?: string + realtime?: boolean + refresh?: boolean + routing?: <<Routing>> + _source?: <<SearchSourceConfigParam>> + _source_excludes?: <<Fields>> + _source_includes?: <<Fields>> + version?: <<VersionNumber>> + version_type?: <<VersionType>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type ExistsSourceResponse = boolean + +---- + + diff --git a/docs/reference/explain.asciidoc b/docs/reference/explain.asciidoc new file mode 100644 index 000000000..f7368ac72 --- /dev/null +++ b/docs/reference/explain.asciidoc @@ -0,0 +1,93 @@ +[[reference-explain]] +== client.explain + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.explain]] +== `client.explain()` + +Explain a document match result. Returns information about why a specific document matches, or doesn’t match, a query. 
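+
+A sketch of asking why a document does or does not match a query (all names and the query are placeholders, configured `client` assumed):
+
+[source,ts]
+----
+const response = await client.explain({
+  index: 'my-index',                           // placeholder index name
+  id: 'my-document-id',                        // placeholder document ID
+  query: { match: { title: 'elasticsearch' } } // placeholder query
+})
+console.log(response.matched, response.explanation)
+----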
+ +{ref}/search-explain.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ExplainRequest, options?: TransportRequestOptions) => Promise<ExplainResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ExplainRequest extends <<RequestBase>> { + id: <<Id>> + index: <<IndexName>> + analyzer?: string + analyze_wildcard?: boolean + default_operator?: <<QueryDslOperator>> + df?: string + lenient?: boolean + preference?: string + routing?: <<Routing>> + _source?: <<SearchSourceConfigParam>> + _source_excludes?: <<Fields>> + _source_includes?: <<Fields>> + stored_fields?: <<Fields>> + q?: string + query?: <<QueryDslQueryContainer>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface ExplainResponse<TDocument = unknown> { + _index: <<IndexName>> + _id: <<Id>> + matched: boolean + explanation?: <<ExplainExplanationDetail>> + get?: <<InlineGet>><TDocument> +} + +---- + + diff --git a/docs/reference/features.asciidoc b/docs/reference/features.asciidoc new file mode 100644 index 000000000..c7c047ca5 --- /dev/null +++ b/docs/reference/features.asciidoc @@ -0,0 +1,110 @@ +[[reference-features]] +== client.features + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.features.getFeatures]] +== `client.features.getFeatures()` + +Gets a list of features which can be included in snapshots using the feature_states field when creating a snapshot + +{ref}/get-features-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: FeaturesGetFeaturesRequest, options?: TransportRequestOptions) => Promise<FeaturesGetFeaturesResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface FeaturesGetFeaturesRequest extends <<RequestBase>> {} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface FeaturesGetFeaturesResponse { + features: <<FeaturesFeature>>[] +} + +---- + + +[discrete] +[[client.features.resetFeatures]] +== `client.features.resetFeatures()` + +Resets the internal state of features, usually by deleting system indices + +{ref}/modules-snapshots.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: FeaturesResetFeaturesRequest, options?: TransportRequestOptions) => Promise<FeaturesResetFeaturesResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface FeaturesResetFeaturesRequest extends <<RequestBase>> {} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface FeaturesResetFeaturesResponse { + features: <<FeaturesFeature>>[] +} + +---- + + diff --git a/docs/reference/field_caps.asciidoc b/docs/reference/field_caps.asciidoc new file mode 100644 index 000000000..b08e196b5 --- /dev/null +++ b/docs/reference/field_caps.asciidoc @@ -0,0 +1,86 @@ +[[reference-field_caps]] +== client.fieldCaps + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.fieldCaps]] +== `client.fieldCaps()` + +Get the field capabilities. Get information about the capabilities of fields among multiple indices. For data streams, the API returns field capabilities among the stream’s backing indices. 
It returns runtime fields like any other field. For example, a runtime field with a type of keyword is returned the same as any other field that belongs to the `keyword` family. + +{ref}/search-field-caps.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: FieldCapsRequest, options?: TransportRequestOptions) => Promise<FieldCapsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface FieldCapsRequest extends <<RequestBase>> { + index?: <<Indices>> + allow_no_indices?: boolean + expand_wildcards?: <<ExpandWildcards>> + ignore_unavailable?: boolean + include_unmapped?: boolean + filters?: string + types?: string[] + include_empty_fields?: boolean + fields?: <<Fields>> + index_filter?: <<QueryDslQueryContainer>> + runtime_mappings?: <<MappingRuntimeFields>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface FieldCapsResponse { + indices: <<Indices>> + fields: Record<<<Field>>, Record<string, <<FieldCapsFieldCapability>>>> +} + +---- + + diff --git a/docs/reference/fleet.asciidoc b/docs/reference/fleet.asciidoc new file mode 100644 index 000000000..f4027b44d --- /dev/null +++ b/docs/reference/fleet.asciidoc @@ -0,0 +1,243 @@ +[[reference-fleet]] +== client.fleet + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.fleet.globalCheckpoints]] +== `client.fleet.globalCheckpoints()` + +Get global checkpoints. Get the current global checkpoints for an index. This API is designed for internal use by the Fleet server project. + +{ref}/get-global-checkpoints.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: FleetGlobalCheckpointsRequest, options?: TransportRequestOptions) => Promise<FleetGlobalCheckpointsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface FleetGlobalCheckpointsRequest extends <<RequestBase>> { + index: <<IndexName>> | <<IndexAlias>> + wait_for_advance?: boolean + wait_for_index?: boolean + checkpoints?: <<FleetCheckpoint>>[] + timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface FleetGlobalCheckpointsResponse { + global_checkpoints: <<FleetCheckpoint>>[] + timed_out: boolean +} + +---- + + +[discrete] +[[client.fleet.msearch]] +== `client.fleet.msearch()` + +Run multiple Fleet searches. Run several Fleet searches with a single API request. 
The API follows the same structure as the multi search API. However, similar to the Fleet search API, it supports the `wait_for_checkpoints` parameter. +[discrete] +=== Function signature + +[source,ts] +---- +(request: FleetMsearchRequest, options?: TransportRequestOptions) => Promise<FleetMsearchResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface FleetMsearchRequest extends <<RequestBase>> { + index?: <<IndexName>> | <<IndexAlias>> + allow_no_indices?: boolean + ccs_minimize_roundtrips?: boolean + expand_wildcards?: <<ExpandWildcards>> + ignore_throttled?: boolean + ignore_unavailable?: boolean + max_concurrent_searches?: <<long>> + max_concurrent_shard_requests?: <<long>> + pre_filter_shard_size?: <<long>> + search_type?: <<SearchType>> + rest_total_hits_as_int?: boolean + typed_keys?: boolean + wait_for_checkpoints?: <<FleetCheckpoint>>[] + allow_partial_search_results?: boolean + searches?: <<MsearchRequestItem>>[] +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface FleetMsearchResponse<TDocument = unknown> { + docs: <<MsearchResponseItem>><TDocument>[] +} + +---- + + +[discrete] +[[client.fleet.search]] +== `client.fleet.search()` + +Run a Fleet search. The purpose of the Fleet search API is to provide an API where the search will be run only after the provided checkpoint has been processed and is visible for searches inside of Elasticsearch. +[discrete] +=== Function signature + +[source,ts] +---- +(request: FleetSearchRequest, options?: TransportRequestOptions) => Promise<FleetSearchResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface FleetSearchRequest extends <<RequestBase>> { + index: <<IndexName>> | <<IndexAlias>> + allow_no_indices?: boolean + analyzer?: string + analyze_wildcard?: boolean + batched_reduce_size?: <<long>> + ccs_minimize_roundtrips?: boolean + default_operator?: <<QueryDslOperator>> + df?: string + expand_wildcards?: <<ExpandWildcards>> + ignore_throttled?: boolean + ignore_unavailable?: boolean + lenient?: boolean + max_concurrent_shard_requests?: <<long>> + preference?: string + pre_filter_shard_size?: <<long>> + request_cache?: boolean + routing?: <<Routing>> + scroll?: <<Duration>> + search_type?: <<SearchType>> + suggest_field?: <<Field>> + suggest_mode?: <<SuggestMode>> + suggest_size?: <<long>> + suggest_text?: string + typed_keys?: boolean + rest_total_hits_as_int?: boolean + _source_excludes?: <<Fields>> + _source_includes?: <<Fields>> + q?: string + wait_for_checkpoints?: <<FleetCheckpoint>>[] + allow_partial_search_results?: boolean + aggregations?: Record<string, <<AggregationsAggregationContainer>>> + pass:[/**] @alias aggregations */ + aggs?: Record<string, <<AggregationsAggregationContainer>>> + collapse?: <<SearchFieldCollapse>> + explain?: boolean + ext?: Record<string, any> + from?: <<integer>> + highlight?: <<SearchHighlight>> + track_total_hits?: <<SearchTrackHits>> + indices_boost?: Record<<<IndexName>>, <<double>>>[] + docvalue_fields?: (<<QueryDslFieldAndFormat>> | <<Field>>)[] + min_score?: <<double>> + post_filter?: <<QueryDslQueryContainer>> + profile?: boolean + query?: <<QueryDslQueryContainer>> + rescore?: <<SearchRescore>> | <<SearchRescore>>[] + script_fields?: Record<string, <<ScriptField>>> + search_after?: <<SortResults>> + size?: <<integer>> + slice?: <<SlicedScroll>> + sort?: <<Sort>> + _source?: <<SearchSourceConfig>> + fields?: (<<QueryDslFieldAndFormat>> | <<Field>>)[] + suggest?: <<SearchSuggester>> + terminate_after?: 
<<long>> + timeout?: string + track_scores?: boolean + version?: boolean + seq_no_primary_term?: boolean + stored_fields?: <<Fields>> + pit?: <<SearchPointInTimeReference>> + runtime_mappings?: <<MappingRuntimeFields>> + stats?: string[] +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface FleetSearchResponse<TDocument = unknown> { + took: <<long>> + timed_out: boolean + _shards: <<ShardStatistics>> + hits: <<SearchHitsMetadata>><TDocument> + aggregations?: Record<<<AggregateName>>, <<AggregationsAggregate>>> + _clusters?: <<ClusterStatistics>> + fields?: Record<string, any> + max_score?: <<double>> + num_reduce_phases?: <<long>> + profile?: <<SearchProfile>> + pit_id?: <<Id>> + _scroll_id?: <<ScrollId>> + suggest?: Record<<<SuggestionName>>, <<SearchSuggest>><TDocument>[]> + terminated_early?: boolean +} + +---- + + diff --git a/docs/reference/get.asciidoc b/docs/reference/get.asciidoc new file mode 100644 index 000000000..a9f708e8a --- /dev/null +++ b/docs/reference/get.asciidoc @@ -0,0 +1,85 @@ +[[reference-get]] +== client.get + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.get]] +== `client.get()` + +Get a document by its ID. Retrieves the document with the specified ID from an index. 
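+
+A minimal typed get might look like the sketch below; the document interface, index name, and ID are placeholders, and `client` is assumed to be a configured `Client` instance:
+
+[source,ts]
+----
+interface MyDocument { title: string } // placeholder document shape
+
+const result = await client.get<MyDocument>({
+  index: 'my-index',    // placeholder index name
+  id: 'my-document-id'  // placeholder document ID
+})
+if (result.found) console.log(result._source?.title)
+----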
+ +{ref}/docs-get.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: GetRequest, options?: TransportRequestOptions) => Promise<GetResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface GetRequest extends <<RequestBase>> { + id: <<Id>> + index: <<IndexName>> + force_synthetic_source?: boolean + preference?: string + realtime?: boolean + refresh?: boolean + routing?: <<Routing>> + _source?: <<SearchSourceConfigParam>> + _source_excludes?: <<Fields>> + _source_includes?: <<Fields>> + stored_fields?: <<Fields>> + version?: <<VersionNumber>> + version_type?: <<VersionType>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type GetResponse<TDocument = unknown> = <<GetGetResult>><TDocument> + +---- + + diff --git a/docs/reference/get_script.asciidoc b/docs/reference/get_script.asciidoc new file mode 100644 index 000000000..51b82c41c --- /dev/null +++ b/docs/reference/get_script.asciidoc @@ -0,0 +1,78 @@ +[[reference-get_script]] +== client.getScript + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.getScript]] +== `client.getScript()` + +Get a script or search template. Retrieves a stored script or search template. 
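+
+For instance, retrieving a stored script by a placeholder ID might look like this sketch (configured `client` assumed):
+
+[source,ts]
+----
+const response = await client.getScript({
+  id: 'my-stored-script' // placeholder stored script ID
+})
+if (response.found) console.log(response.script?.source)
+----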
+ +{ref}/modules-scripting.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: GetScriptRequest, options?: TransportRequestOptions) => Promise<GetScriptResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface GetScriptRequest extends <<RequestBase>> { + id: <<Id>> + master_timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface GetScriptResponse { + _id: <<Id>> + found: boolean + script?: <<StoredScript>> +} + +---- + + diff --git a/docs/reference/get_script_context.asciidoc b/docs/reference/get_script_context.asciidoc new file mode 100644 index 000000000..26702689c --- /dev/null +++ b/docs/reference/get_script_context.asciidoc @@ -0,0 +1,73 @@ +[[reference-get_script_context]] +== client.getScriptContext + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.getScriptContext]] +== `client.getScriptContext()` + +Get script contexts. Get a list of supported script contexts and their methods. + +{painless}/painless-contexts.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: GetScriptContextRequest, options?: TransportRequestOptions) => Promise<GetScriptContextResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface GetScriptContextRequest extends <<RequestBase>> {} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface GetScriptContextResponse { + contexts: <<GetScriptContextContext>>[] +} + +---- + + diff --git a/docs/reference/get_script_languages.asciidoc b/docs/reference/get_script_languages.asciidoc new file mode 100644 index 000000000..918a2b728 --- /dev/null +++ b/docs/reference/get_script_languages.asciidoc @@ -0,0 +1,74 @@ +[[reference-get_script_languages]] +== client.getScriptLanguages + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.getScriptLanguages]] +== `client.getScriptLanguages()` + +Get script languages. Get a list of available script types, languages, and contexts. + +{ref}/modules-scripting.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: GetScriptLanguagesRequest, options?: TransportRequestOptions) => Promise<GetScriptLanguagesResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface GetScriptLanguagesRequest extends <<RequestBase>> {} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface GetScriptLanguagesResponse { + language_contexts: <<GetScriptLanguagesLanguageContext>>[] + types_allowed: string[] +} + +---- + + diff --git a/docs/reference/get_source.asciidoc b/docs/reference/get_source.asciidoc new file mode 100644 index 000000000..ba729f2a0 --- /dev/null +++ b/docs/reference/get_source.asciidoc @@ -0,0 +1,84 @@ +[[reference-get_source]] +== client.getSource + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.getSource]] +== `client.getSource()` + +Get a document's source. Returns the source of a document. 
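+
+A small sketch that fetches only the source of a document (placeholder type, index, and ID; configured `client` assumed):
+
+[source,ts]
+----
+interface MyDocument { title: string } // placeholder document shape
+
+const source = await client.getSource<MyDocument>({
+  index: 'my-index',    // placeholder index name
+  id: 'my-document-id'  // placeholder document ID
+})
+console.log(source.title)
+----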
+ +{ref}/docs-get.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: GetSourceRequest, options?: TransportRequestOptions) => Promise<GetSourceResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface GetSourceRequest extends <<RequestBase>> { + id: <<Id>> + index: <<IndexName>> + preference?: string + realtime?: boolean + refresh?: boolean + routing?: <<Routing>> + _source?: <<SearchSourceConfigParam>> + _source_excludes?: <<Fields>> + _source_includes?: <<Fields>> + stored_fields?: <<Fields>> + version?: <<VersionNumber>> + version_type?: <<VersionType>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type GetSourceResponse<TDocument = unknown> = TDocument + +---- + + diff --git a/docs/reference/graph.asciidoc b/docs/reference/graph.asciidoc new file mode 100644 index 000000000..fdb70353b --- /dev/null +++ b/docs/reference/graph.asciidoc @@ -0,0 +1,85 @@ +[[reference-graph]] +== client.graph + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.graph.explore]] +== `client.graph.explore()` + +Explore graph analytics. Extract and summarize information about the documents and terms in an Elasticsearch data stream or index. The easiest way to understand the behavior of this API is to use the Graph UI to explore connections. An initial request to the `_explore` API contains a seed query that identifies the documents of interest and specifies the fields that define the vertices and connections you want to include in the graph. Subsequent requests enable you to spider out from one more vertices of interest. You can exclude vertices that have already been returned. 
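+
+As a rough sketch, an initial explore request with a seed query and a single vertex field might look like this (the index, field, and query values are placeholders, configured `client` assumed):
+
+[source,ts]
+----
+const response = await client.graph.explore({
+  index: 'my-index',                         // placeholder index name
+  query: { match: { 'user.id': 'alice' } },  // placeholder seed query
+  vertices: [{ field: 'tags' }]              // placeholder vertex field
+})
+console.log(response.vertices, response.connections)
+----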
+ +{ref}/graph-explore-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: GraphExploreRequest, options?: TransportRequestOptions) => Promise<GraphExploreResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface GraphExploreRequest extends <<RequestBase>> { + index: <<Indices>> + routing?: <<Routing>> + timeout?: <<Duration>> + connections?: <<GraphHop>> + controls?: <<GraphExploreControls>> + query?: <<QueryDslQueryContainer>> + vertices?: <<GraphVertexDefinition>>[] +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface GraphExploreResponse { + connections: <<GraphConnection>>[] + failures: <<ShardFailure>>[] + timed_out: boolean + took: <<long>> + vertices: <<GraphVertex>>[] +} + +---- + + diff --git a/docs/reference/health_report.asciidoc b/docs/reference/health_report.asciidoc new file mode 100644 index 000000000..a335933be --- /dev/null +++ b/docs/reference/health_report.asciidoc @@ -0,0 +1,80 @@ +[[reference-health_report]] +== client.healthReport + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.healthReport]] +== `client.healthReport()` + +Returns the health of the cluster. 
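+
+A minimal call might look like this sketch (configured `client` assumed; a `feature` parameter can narrow the report to specific indicators):
+
+[source,ts]
+----
+const health = await client.healthReport()
+console.log(health.status, Object.keys(health.indicators))
+----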
+ +{ref}/health-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: HealthReportRequest, options?: TransportRequestOptions) => Promise<HealthReportResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface HealthReportRequest extends <<RequestBase>> { + feature?: string | string[] + timeout?: <<Duration>> + verbose?: boolean + size?: <<integer>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface HealthReportResponse { + cluster_name: string + indicators: <<HealthReportIndicators>> + status?: <<HealthReportIndicatorHealthStatus>> +} + +---- + + diff --git a/docs/reference/ilm.asciidoc b/docs/reference/ilm.asciidoc new file mode 100644 index 000000000..b7426b698 --- /dev/null +++ b/docs/reference/ilm.asciidoc @@ -0,0 +1,473 @@ +[[reference-ilm]] +== client.ilm + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.ilm.deleteLifecycle]] +== `client.ilm.deleteLifecycle()` + +Deletes the specified lifecycle policy definition. You cannot delete policies that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error. + +{ref}/ilm-delete-lifecycle.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IlmDeleteLifecycleRequest, options?: TransportRequestOptions) => Promise<IlmDeleteLifecycleResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IlmDeleteLifecycleRequest extends <<RequestBase>> { + name: <<Name>> + master_timeout?: <<Duration>> + timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IlmDeleteLifecycleResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.ilm.explainLifecycle]] +== `client.ilm.explainLifecycle()` + +Retrieves information about the index’s current lifecycle state, such as the currently executing phase, action, and step. Shows when the index entered each one, the definition of the running phase, and information about any failures. 
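+
+For example, inspecting the lifecycle state of a placeholder index might look like this sketch (configured `client` assumed):
+
+[source,ts]
+----
+const response = await client.ilm.explainLifecycle({
+  index: 'my-index' // placeholder index name
+})
+console.log(response.indices)
+----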
+ +{ref}/ilm-explain-lifecycle.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IlmExplainLifecycleRequest, options?: TransportRequestOptions) => Promise<IlmExplainLifecycleResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IlmExplainLifecycleRequest extends <<RequestBase>> { + index: <<IndexName>> + only_errors?: boolean + only_managed?: boolean + master_timeout?: <<Duration>> + timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface IlmExplainLifecycleResponse { + indices: Record<<<IndexName>>, IlmExplainLifecycleLifecycleExplain> +} + +---- + + +[discrete] +[[client.ilm.getLifecycle]] +== `client.ilm.getLifecycle()` + +Retrieves a lifecycle policy. + +{ref}/ilm-get-lifecycle.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IlmGetLifecycleRequest, options?: TransportRequestOptions) => Promise<IlmGetLifecycleResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IlmGetLifecycleRequest extends <<RequestBase>> { + name?: <<Name>> + master_timeout?: <<Duration>> + timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IlmGetLifecycleResponse = Record<string, IlmGetLifecycleLifecycle> + +---- + + +[discrete] +[[client.ilm.getStatus]] +== `client.ilm.getStatus()` + +Retrieves the current index lifecycle management (ILM) status. + +{ref}/ilm-get-status.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IlmGetStatusRequest, options?: TransportRequestOptions) => Promise<IlmGetStatusResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IlmGetStatusRequest extends <<RequestBase>> {} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface IlmGetStatusResponse { + operation_mode: <<LifecycleOperationMode>> +} + +---- + + +[discrete] +[[client.ilm.migrateToDataTiers]] +== `client.ilm.migrateToDataTiers()` + +Switches the indices, ILM policies, and legacy, composable and component templates from using custom node attributes and attribute-based allocation filters to using data tiers, and optionally deletes one legacy index template.+ Using node roles enables ILM to automatically move the indices between data tiers. + +{ref}/ilm-migrate-to-data-tiers.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IlmMigrateToDataTiersRequest, options?: TransportRequestOptions) => Promise<IlmMigrateToDataTiersResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IlmMigrateToDataTiersRequest extends <<RequestBase>> { + dry_run?: boolean + legacy_template_to_delete?: string + node_attribute?: string +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface IlmMigrateToDataTiersResponse { + dry_run: boolean + removed_legacy_template: string + migrated_ilm_policies: string[] + migrated_indices: <<Indices>> + migrated_legacy_templates: string[] + migrated_composable_templates: string[] + migrated_component_templates: string[] +} + +---- + + +[discrete] +[[client.ilm.moveToStep]] +== `client.ilm.moveToStep()` + +Manually moves an index into the specified step and executes that step. 
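+
+A short usage sketch, assuming a configured `client` instance; the index name and step keys are illustrative and must match the index's actual current step:
+
+[source,ts]
+----
+// Move the index from the "new" phase directly to the warm phase's forcemerge step
+await client.ilm.moveToStep({
+  index: 'my-index-000001',
+  current_step: { phase: 'new', action: 'complete', name: 'complete' },
+  next_step: { phase: 'warm', action: 'forcemerge', name: 'forcemerge' }
+})
+----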
+ +{ref}/ilm-move-to-step.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IlmMoveToStepRequest, options?: TransportRequestOptions) => Promise<IlmMoveToStepResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IlmMoveToStepRequest extends <<RequestBase>> { + index: <<IndexName>> + current_step: IlmMoveToStepStepKey + next_step: IlmMoveToStepStepKey +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IlmMoveToStepResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.ilm.putLifecycle]] +== `client.ilm.putLifecycle()` + +Creates a lifecycle policy. If the specified policy exists, the policy is replaced and the policy version is incremented. + +{ref}/ilm-put-lifecycle.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IlmPutLifecycleRequest, options?: TransportRequestOptions) => Promise<IlmPutLifecycleResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IlmPutLifecycleRequest extends <<RequestBase>> { + name: <<Name>> + master_timeout?: <<Duration>> + timeout?: <<Duration>> + policy?: <<IlmPolicy>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IlmPutLifecycleResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.ilm.removePolicy]] +== `client.ilm.removePolicy()` + +Removes the assigned lifecycle policy and stops managing the specified index + +{ref}/ilm-remove-policy.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IlmRemovePolicyRequest, options?: TransportRequestOptions) => Promise<IlmRemovePolicyResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IlmRemovePolicyRequest extends <<RequestBase>> { + index: <<IndexName>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface IlmRemovePolicyResponse { + failed_indexes: <<IndexName>>[] + has_failures: boolean +} + +---- + + +[discrete] +[[client.ilm.retry]] +== `client.ilm.retry()` + +Retries executing the policy for an index that is in the ERROR step. + +{ref}/ilm-retry-policy.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IlmRetryRequest, options?: TransportRequestOptions) => Promise<IlmRetryResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IlmRetryRequest extends <<RequestBase>> { + index: <<IndexName>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IlmRetryResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.ilm.start]] +== `client.ilm.start()` + +Start the index lifecycle management (ILM) plugin. 
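+
+A short usage sketch, assuming a configured `client` instance:
+
+[source,ts]
+----
+// Restart ILM after a previous client.ilm.stop() and confirm acknowledgement
+const response = await client.ilm.start()
+console.log(response.acknowledged)
+----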
+ +{ref}/ilm-start.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IlmStartRequest, options?: TransportRequestOptions) => Promise<IlmStartResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IlmStartRequest extends <<RequestBase>> { + master_timeout?: <<Duration>> + timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IlmStartResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.ilm.stop]] +== `client.ilm.stop()` + +Halts all lifecycle management operations and stops the index lifecycle management (ILM) plugin + +{ref}/ilm-stop.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IlmStopRequest, options?: TransportRequestOptions) => Promise<IlmStopResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IlmStopRequest extends <<RequestBase>> { + master_timeout?: <<Duration>> + timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IlmStopResponse = <<AcknowledgedResponseBase>> + +---- + + diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc new file mode 100644 index 000000000..153163cf3 --- /dev/null +++ b/docs/reference/index.asciidoc @@ -0,0 +1,86 @@ +[[reference-index]] +== client.index + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.index]] +== `client.index()` + +Index a document. Adds a JSON document to the specified data stream or index and makes it searchable. If the target is an index and the document already exists, the request updates the document and increments its version. 
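+
+A short usage sketch, assuming a configured `client` instance; the index name and document are illustrative:
+
+[source,ts]
+----
+// Index a single document and let Elasticsearch generate the document ID
+const result = await client.index({
+  index: 'my-index',
+  document: { title: 'Hello world', published: new Date() }
+})
+console.log(result.result) // 'created' or 'updated'
+----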
+ +{ref}/docs-index_.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndexRequest, options?: TransportRequestOptions) => Promise<IndexResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndexRequest<TDocument = unknown> extends <<RequestBase>> { + id?: <<Id>> + index: <<IndexName>> + if_primary_term?: <<long>> + if_seq_no?: <<SequenceNumber>> + op_type?: <<OpType>> + pipeline?: string + refresh?: <<Refresh>> + routing?: <<Routing>> + timeout?: <<Duration>> + version?: <<VersionNumber>> + version_type?: <<VersionType>> + wait_for_active_shards?: <<WaitForActiveShards>> + require_alias?: boolean + document?: TDocument +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IndexResponse = <<WriteResponseBase>> + +---- + + diff --git a/docs/reference/indices.asciidoc b/docs/reference/indices.asciidoc new file mode 100644 index 000000000..5414518d5 --- /dev/null +++ b/docs/reference/indices.asciidoc @@ -0,0 +1,2605 @@ +[[reference-indices]] +== client.indices + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.indices.addBlock]] +== `client.indices.addBlock()` + +Add an index block. Limits the operations allowed on an index by blocking specific operation types. + +{ref}/index-modules-blocks.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesAddBlockRequest, options?: TransportRequestOptions) => Promise<IndicesAddBlockResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesAddBlockRequest extends <<RequestBase>> { + index: <<IndexName>> + block: IndicesAddBlockIndicesBlockOptions + allow_no_indices?: boolean + expand_wildcards?: <<ExpandWildcards>> + ignore_unavailable?: boolean + master_timeout?: <<Duration>> + timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface IndicesAddBlockResponse { + acknowledged: boolean + shards_acknowledged: boolean + indices: IndicesAddBlockIndicesBlockStatus[] +} + +---- + + +[discrete] +[[client.indices.analyze]] +== `client.indices.analyze()` + +Get tokens from text analysis. The analyze API performs [analysis](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis.html) on a text string and returns the resulting tokens. 
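+
+For example, assuming a configured `client` instance (no index is required when naming a built-in analyzer):
+
+[source,ts]
+----
+// Tokenize a string with the standard analyzer
+const response = await client.indices.analyze({
+  analyzer: 'standard',
+  text: 'The quick brown fox'
+})
+console.log(response.tokens?.map(t => t.token))
+----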
+ +{ref}/indices-analyze.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesAnalyzeRequest, options?: TransportRequestOptions) => Promise<IndicesAnalyzeResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesAnalyzeRequest extends <<RequestBase>> { + index?: <<IndexName>> + analyzer?: string + attributes?: string[] + char_filter?: <<AnalysisCharFilter>>[] + explain?: boolean + field?: <<Field>> + filter?: <<AnalysisTokenFilter>>[] + normalizer?: string + text?: IndicesAnalyzeTextToAnalyze + tokenizer?: <<AnalysisTokenizer>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface IndicesAnalyzeResponse { + detail?: IndicesAnalyzeAnalyzeDetail + tokens?: IndicesAnalyzeAnalyzeToken[] +} + +---- + + +[discrete] +[[client.indices.clearCache]] +== `client.indices.clearCache()` + +Clears the caches of one or more indices. For data streams, the API clears the caches of the stream’s backing indices. + +{ref}/indices-clearcache.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesClearCacheRequest, options?: TransportRequestOptions) => Promise<IndicesClearCacheResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesClearCacheRequest extends <<RequestBase>> { + index?: <<Indices>> + allow_no_indices?: boolean + expand_wildcards?: <<ExpandWildcards>> + fielddata?: boolean + fields?: <<Fields>> + ignore_unavailable?: boolean + query?: boolean + request?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IndicesClearCacheResponse = <<ShardsOperationResponseBase>> + +---- + + +[discrete] +[[client.indices.clone]] +== `client.indices.clone()` + +Clones an existing index. + +{ref}/indices-clone-index.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesCloneRequest, options?: TransportRequestOptions) => Promise<IndicesCloneResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesCloneRequest extends <<RequestBase>> { + index: <<IndexName>> + target: <<Name>> + master_timeout?: <<Duration>> + timeout?: <<Duration>> + wait_for_active_shards?: <<WaitForActiveShards>> + aliases?: Record<<<IndexName>>, <<IndicesAlias>>> + settings?: Record<string, any> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface IndicesCloneResponse { + acknowledged: boolean + index: <<IndexName>> + shards_acknowledged: boolean +} + +---- + + +[discrete] +[[client.indices.close]] +== `client.indices.close()` + +Closes an index. 
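+
+A short usage sketch, assuming a configured `client` instance and an illustrative index name:
+
+[source,ts]
+----
+// Close the index; it stays on disk but rejects read/write operations until reopened
+const response = await client.indices.close({ index: 'my-index' })
+console.log(response.acknowledged, response.shards_acknowledged)
+----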
+ +{ref}/indices-close.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesCloseRequest, options?: TransportRequestOptions) => Promise<IndicesCloseResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesCloseRequest extends <<RequestBase>> { + index: <<Indices>> + allow_no_indices?: boolean + expand_wildcards?: <<ExpandWildcards>> + ignore_unavailable?: boolean + master_timeout?: <<Duration>> + timeout?: <<Duration>> + wait_for_active_shards?: <<WaitForActiveShards>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface IndicesCloseResponse { + acknowledged: boolean + indices: Record<<<IndexName>>, IndicesCloseCloseIndexResult> + shards_acknowledged: boolean +} + +---- + + +[discrete] +[[client.indices.create]] +== `client.indices.create()` + +Create an index. Creates a new index. + +{ref}/indices-create-index.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesCreateRequest, options?: TransportRequestOptions) => Promise<IndicesCreateResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesCreateRequest extends <<RequestBase>> { + index: <<IndexName>> + master_timeout?: <<Duration>> + timeout?: <<Duration>> + wait_for_active_shards?: <<WaitForActiveShards>> + aliases?: Record<<<Name>>, <<IndicesAlias>>> + mappings?: <<MappingTypeMapping>> + settings?: <<IndicesIndexSettings>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface IndicesCreateResponse { + index: <<IndexName>> + shards_acknowledged: boolean + acknowledged: boolean +} + +---- + + +[discrete] +[[client.indices.createDataStream]] +== `client.indices.createDataStream()` + +Create a data stream. Creates a data stream. You must have a matching index template with data stream enabled. + +{ref}/data-streams.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesCreateDataStreamRequest, options?: TransportRequestOptions) => Promise<IndicesCreateDataStreamResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesCreateDataStreamRequest extends <<RequestBase>> { + name: <<DataStreamName>> + master_timeout?: <<Duration>> + timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IndicesCreateDataStreamResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.indices.dataStreamsStats]] +== `client.indices.dataStreamsStats()` + +Get data stream stats. Retrieves statistics for one or more data streams. + +{ref}/data-streams.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesDataStreamsStatsRequest, options?: TransportRequestOptions) => Promise<IndicesDataStreamsStatsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesDataStreamsStatsRequest extends <<RequestBase>> { + name?: <<IndexName>> + expand_wildcards?: <<ExpandWildcards>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface IndicesDataStreamsStatsResponse { + _shards: <<ShardStatistics>> + backing_indices: <<integer>> + data_stream_count: <<integer>> + data_streams: IndicesDataStreamsStatsDataStreamsStatsItem[] + total_store_sizes?: <<ByteSize>> + total_store_size_bytes: <<long>> +} + +---- + + +[discrete] +[[client.indices.delete]] +== `client.indices.delete()` + +Delete indices. 
Deletes one or more indices. + +{ref}/indices-delete-index.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesDeleteRequest, options?: TransportRequestOptions) => Promise<IndicesDeleteResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesDeleteRequest extends <<RequestBase>> { + index: <<Indices>> + allow_no_indices?: boolean + expand_wildcards?: <<ExpandWildcards>> + ignore_unavailable?: boolean + master_timeout?: <<Duration>> + timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IndicesDeleteResponse = <<IndicesResponseBase>> + +---- + + +[discrete] +[[client.indices.deleteAlias]] +== `client.indices.deleteAlias()` + +Delete an alias. Removes a data stream or index from an alias. + +{ref}/indices-aliases.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesDeleteAliasRequest, options?: TransportRequestOptions) => Promise<IndicesDeleteAliasResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesDeleteAliasRequest extends <<RequestBase>> { + index: <<Indices>> + name: <<Names>> + master_timeout?: <<Duration>> + timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IndicesDeleteAliasResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.indices.deleteDataLifecycle]] +== `client.indices.deleteDataLifecycle()` + +Delete data stream lifecycles. Removes the data stream lifecycle from a data stream, rendering it not managed by the data stream lifecycle. + +{ref}/data-streams-delete-lifecycle.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptions) => Promise<IndicesDeleteDataLifecycleResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesDeleteDataLifecycleRequest extends <<RequestBase>> { + name: <<DataStreamNames>> + expand_wildcards?: <<ExpandWildcards>> + master_timeout?: <<Duration>> + timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IndicesDeleteDataLifecycleResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.indices.deleteDataStream]] +== `client.indices.deleteDataStream()` + +Delete data streams. Deletes one or more data streams and their backing indices. + +{ref}/data-streams.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesDeleteDataStreamRequest, options?: TransportRequestOptions) => Promise<IndicesDeleteDataStreamResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesDeleteDataStreamRequest extends <<RequestBase>> { + name: <<DataStreamNames>> + master_timeout?: <<Duration>> + expand_wildcards?: <<ExpandWildcards>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IndicesDeleteDataStreamResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.indices.deleteIndexTemplate]] +== `client.indices.deleteIndexTemplate()` + +Delete an index template. The provided <index-template> may contain multiple template names separated by a comma. If multiple template names are specified then there is no wildcard support and the provided names should match completely with existing templates. 
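+
+A short usage sketch, assuming a configured `client` instance; the template name is illustrative:
+
+[source,ts]
+----
+// Delete a single composable index template by name
+await client.indices.deleteIndexTemplate({ name: 'my-template' })
+----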
+ +{ref}/indices-delete-template.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptions) => Promise<IndicesDeleteIndexTemplateResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesDeleteIndexTemplateRequest extends <<RequestBase>> { + name: <<Names>> + master_timeout?: <<Duration>> + timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IndicesDeleteIndexTemplateResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.indices.deleteTemplate]] +== `client.indices.deleteTemplate()` + +Deletes a legacy index template. + +{ref}/indices-delete-template-v1.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesDeleteTemplateRequest, options?: TransportRequestOptions) => Promise<IndicesDeleteTemplateResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesDeleteTemplateRequest extends <<RequestBase>> { + name: <<Name>> + master_timeout?: <<Duration>> + timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IndicesDeleteTemplateResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.indices.diskUsage]] +== `client.indices.diskUsage()` + +Analyzes the disk usage of each field of an index or data stream. + +{ref}/indices-disk-usage.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesDiskUsageRequest, options?: TransportRequestOptions) => Promise<IndicesDiskUsageResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesDiskUsageRequest extends <<RequestBase>> { + index: <<Indices>> + allow_no_indices?: boolean + expand_wildcards?: <<ExpandWildcards>> + flush?: boolean + ignore_unavailable?: boolean + run_expensive_tasks?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IndicesDiskUsageResponse = any + +---- + + +[discrete] +[[client.indices.downsample]] +== `client.indices.downsample()` + +Aggregates a time series (TSDS) index and stores pre-computed statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped by a configured time interval. + +{ref}/indices-downsample-data-stream.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesDownsampleRequest, options?: TransportRequestOptions) => Promise<IndicesDownsampleResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesDownsampleRequest extends <<RequestBase>> { + index: <<IndexName>> + target_index: <<IndexName>> + config?: <<IndicesDownsampleConfig>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IndicesDownsampleResponse = any + +---- + + +[discrete] +[[client.indices.exists]] +== `client.indices.exists()` + +Check indices. Checks if one or more indices, index aliases, or data streams exist. 
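+
+For example, assuming a configured `client` instance; the index name is illustrative:
+
+[source,ts]
+----
+// Returns a plain boolean instead of throwing when the index is missing
+const exists = await client.indices.exists({ index: 'my-index' })
+if (!exists) {
+  await client.indices.create({ index: 'my-index' })
+}
+----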
+ +{ref}/indices-exists.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesExistsRequest, options?: TransportRequestOptions) => Promise<IndicesExistsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesExistsRequest extends <<RequestBase>> { + index: <<Indices>> + allow_no_indices?: boolean + expand_wildcards?: <<ExpandWildcards>> + flat_settings?: boolean + ignore_unavailable?: boolean + include_defaults?: boolean + local?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IndicesExistsResponse = boolean + +---- + + +[discrete] +[[client.indices.existsAlias]] +== `client.indices.existsAlias()` + +Check aliases. Checks if one or more data stream or index aliases exist. + +{ref}/indices-aliases.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesExistsAliasRequest, options?: TransportRequestOptions) => Promise<IndicesExistsAliasResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesExistsAliasRequest extends <<RequestBase>> { + name: <<Names>> + index?: <<Indices>> + allow_no_indices?: boolean + expand_wildcards?: <<ExpandWildcards>> + ignore_unavailable?: boolean + master_timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IndicesExistsAliasResponse = boolean + +---- + + +[discrete] +[[client.indices.existsIndexTemplate]] +== `client.indices.existsIndexTemplate()` + +Check index templates. Check whether index templates exist. + +{ref}/index-templates.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesExistsIndexTemplateRequest, options?: TransportRequestOptions) => Promise<IndicesExistsIndexTemplateResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesExistsIndexTemplateRequest extends <<RequestBase>> { + name: <<Name>> + master_timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IndicesExistsIndexTemplateResponse = boolean + +---- + + +[discrete] +[[client.indices.existsTemplate]] +== `client.indices.existsTemplate()` + +Check existence of index templates. Returns information about whether a particular index template exists. + +{ref}/indices-template-exists-v1.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesExistsTemplateRequest, options?: TransportRequestOptions) => Promise<IndicesExistsTemplateResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesExistsTemplateRequest extends <<RequestBase>> { + name: <<Names>> + flat_settings?: boolean + local?: boolean + master_timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IndicesExistsTemplateResponse = boolean + +---- + + +[discrete] +[[client.indices.explainDataLifecycle]] +== `client.indices.explainDataLifecycle()` + +Get the status for a data stream lifecycle. Retrieves information about an index or data stream’s current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution. 
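+
+A short usage sketch, assuming a configured `client` instance; the backing index name is illustrative:
+
+[source,ts]
+----
+// Inspect the data stream lifecycle state of a backing index, including default settings
+const response = await client.indices.explainDataLifecycle({
+  index: '.ds-my-data-stream-2024.01.01-000001',
+  include_defaults: true
+})
+console.log(response.indices)
+----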
+ +{ref}/data-streams-explain-lifecycle.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesExplainDataLifecycleRequest, options?: TransportRequestOptions) => Promise<IndicesExplainDataLifecycleResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesExplainDataLifecycleRequest extends <<RequestBase>> { + index: <<Indices>> + include_defaults?: boolean + master_timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface IndicesExplainDataLifecycleResponse { + indices: Record<<<IndexName>>, IndicesExplainDataLifecycleDataStreamLifecycleExplain> +} + +---- + + +[discrete] +[[client.indices.fieldUsageStats]] +== `client.indices.fieldUsageStats()` + +Returns field usage information for each shard and field of an index. + +{ref}/field-usage-stats.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesFieldUsageStatsRequest, options?: TransportRequestOptions) => Promise<IndicesFieldUsageStatsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesFieldUsageStatsRequest extends <<RequestBase>> { + index: <<Indices>> + allow_no_indices?: boolean + expand_wildcards?: <<ExpandWildcards>> + ignore_unavailable?: boolean + fields?: <<Fields>> + master_timeout?: <<Duration>> + timeout?: <<Duration>> + wait_for_active_shards?: <<WaitForActiveShards>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IndicesFieldUsageStatsResponse = IndicesFieldUsageStatsFieldsUsageBody + +---- + + +[discrete] +[[client.indices.flush]] +== `client.indices.flush()` + +Flushes one or more data streams or indices. + +{ref}/indices-flush.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesFlushRequest, options?: TransportRequestOptions) => Promise<IndicesFlushResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesFlushRequest extends <<RequestBase>> { + index?: <<Indices>> + allow_no_indices?: boolean + expand_wildcards?: <<ExpandWildcards>> + force?: boolean + ignore_unavailable?: boolean + wait_if_ongoing?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IndicesFlushResponse = <<ShardsOperationResponseBase>> + +---- + + +[discrete] +[[client.indices.forcemerge]] +== `client.indices.forcemerge()` + +Performs the force merge operation on one or more indices. + +{ref}/indices-forcemerge.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesForcemergeRequest, options?: TransportRequestOptions) => Promise<IndicesForcemergeResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesForcemergeRequest extends <<RequestBase>> { + index?: <<Indices>> + allow_no_indices?: boolean + expand_wildcards?: <<ExpandWildcards>> + flush?: boolean + ignore_unavailable?: boolean + max_num_segments?: <<long>> + only_expunge_deletes?: boolean + wait_for_completion?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IndicesForcemergeResponse = IndicesForcemergeForceMergeResponseBody + +---- + + +[discrete] +[[client.indices.get]] +== `client.indices.get()` + +Get index information. Returns information about one or more indices. For data streams, the API returns information about the stream’s backing indices. 
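+
+A short usage sketch, assuming a configured `client` instance and an illustrative index name:
+
+[source,ts]
+----
+// Fetch the mappings, settings and aliases of a single index
+const response = await client.indices.get({ index: 'my-index' })
+console.log(response['my-index'].mappings)
+----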
+ +{ref}/indices-get-index.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesGetRequest, options?: TransportRequestOptions) => Promise<IndicesGetResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesGetRequest extends <<RequestBase>> { + index: <<Indices>> + allow_no_indices?: boolean + expand_wildcards?: <<ExpandWildcards>> + flat_settings?: boolean + ignore_unavailable?: boolean + include_defaults?: boolean + local?: boolean + master_timeout?: <<Duration>> + features?: IndicesGetFeatures +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IndicesGetResponse = Record<<<IndexName>>, <<IndicesIndexState>>> + +---- + + +[discrete] +[[client.indices.getAlias]] +== `client.indices.getAlias()` + +Get aliases. Retrieves information for one or more data stream or index aliases. + +{ref}/indices-aliases.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesGetAliasRequest, options?: TransportRequestOptions) => Promise<IndicesGetAliasResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesGetAliasRequest extends <<RequestBase>> { + name?: <<Names>> + index?: <<Indices>> + allow_no_indices?: boolean + expand_wildcards?: <<ExpandWildcards>> + ignore_unavailable?: boolean + master_timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IndicesGetAliasResponse = Record<<<IndexName>>, IndicesGetAliasIndexAliases> + +---- + + +[discrete] +[[client.indices.getDataLifecycle]] +== `client.indices.getDataLifecycle()` + +Get data stream lifecycles. Retrieves the data stream lifecycle configuration of one or more data streams. + +{ref}/data-streams-get-lifecycle.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesGetDataLifecycleRequest, options?: TransportRequestOptions) => Promise<IndicesGetDataLifecycleResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesGetDataLifecycleRequest extends <<RequestBase>> { + name: <<DataStreamNames>> + expand_wildcards?: <<ExpandWildcards>> + include_defaults?: boolean + master_timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface IndicesGetDataLifecycleResponse { + data_streams: IndicesGetDataLifecycleDataStreamWithLifecycle[] +} + +---- + + +[discrete] +[[client.indices.getDataStream]] +== `client.indices.getDataStream()` + +Get data streams. Retrieves information about one or more data streams. + +{ref}/data-streams.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesGetDataStreamRequest, options?: TransportRequestOptions) => Promise<IndicesGetDataStreamResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesGetDataStreamRequest extends <<RequestBase>> { + name?: <<DataStreamNames>> + expand_wildcards?: <<ExpandWildcards>> + include_defaults?: boolean + master_timeout?: <<Duration>> + verbose?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface IndicesGetDataStreamResponse { + data_streams: <<IndicesDataStream>>[] +} + +---- + + +[discrete] +[[client.indices.getFieldMapping]] +== `client.indices.getFieldMapping()` + +Get mapping definitions. Retrieves mapping definitions for one or more fields. 
For data streams, the API retrieves field mappings for the stream’s backing indices. + +{ref}/indices-get-field-mapping.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesGetFieldMappingRequest, options?: TransportRequestOptions) => Promise<IndicesGetFieldMappingResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesGetFieldMappingRequest extends <<RequestBase>> { + fields: <<Fields>> + index?: <<Indices>> + allow_no_indices?: boolean + expand_wildcards?: <<ExpandWildcards>> + ignore_unavailable?: boolean + include_defaults?: boolean + local?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IndicesGetFieldMappingResponse = Record<<<IndexName>>, IndicesGetFieldMappingTypeFieldMappings> + +---- + + +[discrete] +[[client.indices.getIndexTemplate]] +== `client.indices.getIndexTemplate()` + +Get index templates. Returns information about one or more index templates. + +{ref}/indices-get-template.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesGetIndexTemplateRequest, options?: TransportRequestOptions) => Promise<IndicesGetIndexTemplateResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesGetIndexTemplateRequest extends <<RequestBase>> { + name?: <<Name>> + local?: boolean + flat_settings?: boolean + master_timeout?: <<Duration>> + include_defaults?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface IndicesGetIndexTemplateResponse { + index_templates: IndicesGetIndexTemplateIndexTemplateItem[] +} + +---- + + +[discrete] +[[client.indices.getMapping]] +== `client.indices.getMapping()` + +Get mapping definitions. Retrieves mapping definitions for one or more indices. For data streams, the API retrieves mappings for the stream’s backing indices. + +{ref}/indices-get-mapping.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesGetMappingRequest, options?: TransportRequestOptions) => Promise<IndicesGetMappingResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesGetMappingRequest extends <<RequestBase>> { + index?: <<Indices>> + allow_no_indices?: boolean + expand_wildcards?: <<ExpandWildcards>> + ignore_unavailable?: boolean + local?: boolean + master_timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IndicesGetMappingResponse = Record<<<IndexName>>, IndicesGetMappingIndexMappingRecord> + +---- + + +[discrete] +[[client.indices.getSettings]] +== `client.indices.getSettings()` + +Get index settings. Returns setting information for one or more indices. For data streams, returns setting information for the stream’s backing indices. 
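+
+A short usage sketch, assuming a configured `client` instance; the index name is illustrative:
+
+[source,ts]
+----
+// Read the index settings, including defaults that were never set explicitly
+const response = await client.indices.getSettings({
+  index: 'my-index',
+  include_defaults: true
+})
+console.log(response['my-index'].settings)
+----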
+ +{ref}/indices-get-settings.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesGetSettingsRequest, options?: TransportRequestOptions) => Promise<IndicesGetSettingsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesGetSettingsRequest extends <<RequestBase>> { + index?: <<Indices>> + name?: <<Names>> + allow_no_indices?: boolean + expand_wildcards?: <<ExpandWildcards>> + flat_settings?: boolean + ignore_unavailable?: boolean + include_defaults?: boolean + local?: boolean + master_timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IndicesGetSettingsResponse = Record<<<IndexName>>, <<IndicesIndexState>>> + +---- + + +[discrete] +[[client.indices.getTemplate]] +== `client.indices.getTemplate()` + +Get index templates. Retrieves information about one or more index templates. + +{ref}/indices-get-template-v1.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesGetTemplateRequest, options?: TransportRequestOptions) => Promise<IndicesGetTemplateResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesGetTemplateRequest extends <<RequestBase>> { + name?: <<Names>> + flat_settings?: boolean + local?: boolean + master_timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IndicesGetTemplateResponse = Record<string, <<IndicesTemplateMapping>>> + +---- + + +[discrete] +[[client.indices.migrateToDataStream]] +== `client.indices.migrateToDataStream()` + +Convert an index alias to a data stream. Converts an index alias to a data stream. You must have a matching index template that is data stream enabled. The alias must meet the following criteria: The alias must have a write index; All indices for the alias must have a `@timestamp` field mapping of a `date` or `date_nanos` field type; The alias must not have any filters; The alias must not use custom routing. If successful, the request removes the alias and creates a data stream with the same name. The indices for the alias become hidden backing indices for the stream. The write index for the alias becomes the write index for the stream. + +{ref}/data-streams.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesMigrateToDataStreamRequest, options?: TransportRequestOptions) => Promise<IndicesMigrateToDataStreamResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesMigrateToDataStreamRequest extends <<RequestBase>> { + name: <<IndexName>> + master_timeout?: <<Duration>> + timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IndicesMigrateToDataStreamResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.indices.modifyDataStream]] +== `client.indices.modifyDataStream()` + +Update data streams. Performs one or more data stream modification actions in a single atomic operation. 
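+
+A short usage sketch, assuming a configured `client` instance; the data stream and backing index names are placeholders:
+
+[source,ts]
+----
+// Atomically remove a backing index from a data stream
+await client.indices.modifyDataStream({
+  actions: [
+    {
+      remove_backing_index: {
+        data_stream: 'my-data-stream',
+        index: '.ds-my-data-stream-2024.01.01-000001'
+      }
+    }
+  ]
+})
+----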
+ +{ref}/data-streams.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesModifyDataStreamRequest, options?: TransportRequestOptions) => Promise<IndicesModifyDataStreamResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesModifyDataStreamRequest extends <<RequestBase>> { + actions: IndicesModifyDataStreamAction[] +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IndicesModifyDataStreamResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.indices.open]] +== `client.indices.open()` + +Opens a closed index. For data streams, the API opens any closed backing indices. + +{ref}/indices-open-close.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesOpenRequest, options?: TransportRequestOptions) => Promise<IndicesOpenResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesOpenRequest extends <<RequestBase>> { + index: <<Indices>> + allow_no_indices?: boolean + expand_wildcards?: <<ExpandWildcards>> + ignore_unavailable?: boolean + master_timeout?: <<Duration>> + timeout?: <<Duration>> + wait_for_active_shards?: <<WaitForActiveShards>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface IndicesOpenResponse { + acknowledged: boolean + shards_acknowledged: boolean +} + +---- + + +[discrete] +[[client.indices.promoteDataStream]] +== `client.indices.promoteDataStream()` + +Promotes a data stream from a replicated data stream managed by CCR to a regular data stream + +{ref}/data-streams.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesPromoteDataStreamRequest, options?: TransportRequestOptions) => Promise<IndicesPromoteDataStreamResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesPromoteDataStreamRequest extends <<RequestBase>> { + name: <<IndexName>> + master_timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IndicesPromoteDataStreamResponse = any + +---- + + +[discrete] +[[client.indices.putAlias]] +== `client.indices.putAlias()` + +Create or update an alias. Adds a data stream or index to an alias. + +{ref}/indices-aliases.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesPutAliasRequest, options?: TransportRequestOptions) => Promise<IndicesPutAliasResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesPutAliasRequest extends <<RequestBase>> { + index: <<Indices>> + name: <<Name>> + master_timeout?: <<Duration>> + timeout?: <<Duration>> + filter?: <<QueryDslQueryContainer>> + index_routing?: <<Routing>> + is_write_index?: boolean + routing?: <<Routing>> + search_routing?: <<Routing>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IndicesPutAliasResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.indices.putDataLifecycle]] +== `client.indices.putDataLifecycle()` + +Update data stream lifecycles. Update the data stream lifecycle of the specified data streams. 
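+
+A short usage sketch, assuming a configured `client` instance; the data stream name and retention period are illustrative:
+
+[source,ts]
+----
+// Retain documents in the data stream for roughly seven days
+await client.indices.putDataLifecycle({
+  name: 'my-data-stream',
+  lifecycle: { data_retention: '7d' }
+})
+----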
+ +{ref}/data-streams-put-lifecycle.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesPutDataLifecycleRequest, options?: TransportRequestOptions) => Promise<IndicesPutDataLifecycleResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesPutDataLifecycleRequest extends <<RequestBase>> { + name: <<DataStreamNames>> + expand_wildcards?: <<ExpandWildcards>> + master_timeout?: <<Duration>> + timeout?: <<Duration>> + lifecycle?: <<IndicesDataStreamLifecycle>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IndicesPutDataLifecycleResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.indices.putIndexTemplate]] +== `client.indices.putIndexTemplate()` + +Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. + +{ref}/indices-put-template.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesPutIndexTemplateRequest, options?: TransportRequestOptions) => Promise<IndicesPutIndexTemplateResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesPutIndexTemplateRequest extends <<RequestBase>> { + name: <<Name>> + create?: boolean + master_timeout?: <<Duration>> + cause?: string + index_patterns?: <<Indices>> + composed_of?: <<Name>>[] + template?: IndicesPutIndexTemplateIndexTemplateMapping + data_stream?: <<IndicesDataStreamVisibility>> + priority?: <<long>> + version?: <<VersionNumber>> + _meta?: <<Metadata>> + allow_auto_create?: boolean + ignore_missing_component_templates?: string[] + deprecated?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IndicesPutIndexTemplateResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.indices.putMapping]] +== `client.indices.putMapping()` + +Update field mappings. Adds new fields to an existing data stream or index. You can also use this API to change the search settings of existing fields. For data streams, these changes are applied to all backing indices by default. + +{ref}/indices-put-mapping.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesPutMappingRequest, options?: TransportRequestOptions) => Promise<IndicesPutMappingResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesPutMappingRequest extends <<RequestBase>> { + index: <<Indices>> + allow_no_indices?: boolean + expand_wildcards?: <<ExpandWildcards>> + ignore_unavailable?: boolean + master_timeout?: <<Duration>> + timeout?: <<Duration>> + write_index_only?: boolean + date_detection?: boolean + dynamic?: <<MappingDynamicMapping>> + dynamic_date_formats?: string[] + dynamic_templates?: Record<string, <<MappingDynamicTemplate>>> | Record<string, <<MappingDynamicTemplate>>>[] + _field_names?: <<MappingFieldNamesField>> + _meta?: <<Metadata>> + numeric_detection?: boolean + properties?: Record<<<PropertyName>>, <<MappingProperty>>> + _routing?: <<MappingRoutingField>> + _source?: <<MappingSourceField>> + runtime?: <<MappingRuntimeFields>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IndicesPutMappingResponse = <<IndicesResponseBase>> + +---- + + +[discrete] +[[client.indices.putSettings]] +== `client.indices.putSettings()` + +Update index settings. Changes dynamic index settings in real time. 
For data streams, index setting changes are applied to all backing indices by default. + +{ref}/indices-update-settings.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesPutSettingsRequest, options?: TransportRequestOptions) => Promise<IndicesPutSettingsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesPutSettingsRequest extends <<RequestBase>> { + index?: <<Indices>> + allow_no_indices?: boolean + expand_wildcards?: <<ExpandWildcards>> + flat_settings?: boolean + ignore_unavailable?: boolean + master_timeout?: <<Duration>> + preserve_existing?: boolean + timeout?: <<Duration>> + settings?: <<IndicesIndexSettings>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IndicesPutSettingsResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.indices.putTemplate]] +== `client.indices.putTemplate()` + +Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. + +{ref}/indices-templates-v1.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesPutTemplateRequest, options?: TransportRequestOptions) => Promise<IndicesPutTemplateResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesPutTemplateRequest extends <<RequestBase>> { + name: <<Name>> + create?: boolean + master_timeout?: <<Duration>> + cause?: string + aliases?: Record<<<IndexName>>, <<IndicesAlias>>> + index_patterns?: string | string[] + mappings?: <<MappingTypeMapping>> + order?: <<integer>> + settings?: <<IndicesIndexSettings>> + version?: <<VersionNumber>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IndicesPutTemplateResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.indices.recovery]] +== `client.indices.recovery()` + +Returns information about ongoing and completed shard recoveries for one or more indices. For data streams, the API returns information for the stream’s backing indices. + +{ref}/indices-recovery.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesRecoveryRequest, options?: TransportRequestOptions) => Promise<IndicesRecoveryResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesRecoveryRequest extends <<RequestBase>> { + index?: <<Indices>> + active_only?: boolean + detailed?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IndicesRecoveryResponse = Record<<<IndexName>>, IndicesRecoveryRecoveryStatus> + +---- + + +[discrete] +[[client.indices.refresh]] +== `client.indices.refresh()` + +Refresh an index. A refresh makes recent operations performed on one or more indices available for search. For data streams, the API runs the refresh operation on the stream’s backing indices. 
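+
+A short usage sketch, assuming a configured `client` instance and an illustrative index name:
+
+[source,ts]
+----
+// Make recently indexed documents searchable without waiting for the next automatic refresh
+await client.indices.refresh({ index: 'my-index' })
+----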
+ +{ref}/indices-refresh.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesRefreshRequest, options?: TransportRequestOptions) => Promise<IndicesRefreshResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesRefreshRequest extends <<RequestBase>> { + index?: <<Indices>> + allow_no_indices?: boolean + expand_wildcards?: <<ExpandWildcards>> + ignore_unavailable?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IndicesRefreshResponse = <<ShardsOperationResponseBase>> + +---- + + +[discrete] +[[client.indices.reloadSearchAnalyzers]] +== `client.indices.reloadSearchAnalyzers()` + +Reloads an index's search analyzers and their resources. + +{ref}/indices-reload-analyzers.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptions) => Promise<IndicesReloadSearchAnalyzersResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesReloadSearchAnalyzersRequest extends <<RequestBase>> { + index: <<Indices>> + allow_no_indices?: boolean + expand_wildcards?: <<ExpandWildcards>> + ignore_unavailable?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IndicesReloadSearchAnalyzersResponse = IndicesReloadSearchAnalyzersReloadResult + +---- + + +[discrete] +[[client.indices.resolveCluster]] +== `client.indices.resolveCluster()` + +Resolves the specified index expressions to return information about each cluster, including the local cluster, if included. Multiple patterns and remote clusters are supported. + +{ref}/indices-resolve-cluster-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesResolveClusterRequest, options?: TransportRequestOptions) => Promise<IndicesResolveClusterResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesResolveClusterRequest extends <<RequestBase>> { + name: <<Names>> + allow_no_indices?: boolean + expand_wildcards?: <<ExpandWildcards>> + ignore_throttled?: boolean + ignore_unavailable?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IndicesResolveClusterResponse = Record<<<ClusterAlias>>, IndicesResolveClusterResolveClusterInfo> + +---- + + +[discrete] +[[client.indices.resolveIndex]] +== `client.indices.resolveIndex()` + +Resolve indices. Resolve the names and/or index patterns for indices, aliases, and data streams. Multiple patterns and remote clusters are supported. + +{ref}/indices-resolve-index-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesResolveIndexRequest, options?: TransportRequestOptions) => Promise<IndicesResolveIndexResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesResolveIndexRequest extends <<RequestBase>> { + name: <<Names>> + expand_wildcards?: <<ExpandWildcards>> + ignore_unavailable?: boolean + allow_no_indices?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface IndicesResolveIndexResponse { + indices: IndicesResolveIndexResolveIndexItem[] + aliases: IndicesResolveIndexResolveIndexAliasItem[] + data_streams: IndicesResolveIndexResolveIndexDataStreamsItem[] +} + +---- + + +[discrete] +[[client.indices.rollover]] +== `client.indices.rollover()` + +Roll over to a new index. 
Creates a new index for a data stream or index alias. + +{ref}/indices-rollover-index.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesRolloverRequest, options?: TransportRequestOptions) => Promise<IndicesRolloverResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesRolloverRequest extends <<RequestBase>> { + alias: <<IndexAlias>> + new_index?: <<IndexName>> + dry_run?: boolean + master_timeout?: <<Duration>> + timeout?: <<Duration>> + wait_for_active_shards?: <<WaitForActiveShards>> + aliases?: Record<<<IndexName>>, <<IndicesAlias>>> + conditions?: IndicesRolloverRolloverConditions + mappings?: <<MappingTypeMapping>> + settings?: Record<string, any> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface IndicesRolloverResponse { + acknowledged: boolean + conditions: Record<string, boolean> + dry_run: boolean + new_index: string + old_index: string + rolled_over: boolean + shards_acknowledged: boolean +} + +---- + + +[discrete] +[[client.indices.segments]] +== `client.indices.segments()` + +Returns low-level information about the Lucene segments in index shards. For data streams, the API returns information about the stream’s backing indices. + +{ref}/indices-segments.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesSegmentsRequest, options?: TransportRequestOptions) => Promise<IndicesSegmentsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesSegmentsRequest extends <<RequestBase>> { + index?: <<Indices>> + allow_no_indices?: boolean + expand_wildcards?: <<ExpandWildcards>> + ignore_unavailable?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface IndicesSegmentsResponse { + indices: Record<string, IndicesSegmentsIndexSegment> + _shards: <<ShardStatistics>> +} + +---- + + +[discrete] +[[client.indices.shardStores]] +== `client.indices.shardStores()` + +Retrieves store information about replica shards in one or more indices. For data streams, the API retrieves store information for the stream’s backing indices. + +{ref}/indices-shards-stores.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesShardStoresRequest, options?: TransportRequestOptions) => Promise<IndicesShardStoresResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesShardStoresRequest extends <<RequestBase>> { + index?: <<Indices>> + allow_no_indices?: boolean + expand_wildcards?: <<ExpandWildcards>> + ignore_unavailable?: boolean + status?: IndicesShardStoresShardStoreStatus | IndicesShardStoresShardStoreStatus[] +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface IndicesShardStoresResponse { + indices: Record<<<IndexName>>, IndicesShardStoresIndicesShardStores> +} + +---- + + +[discrete] +[[client.indices.shrink]] +== `client.indices.shrink()` + +Shrinks an existing index into a new index with fewer primary shards. 
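+
+A short usage sketch, assuming a configured `client` instance; the index names are placeholders, and the source index must already be read-only with a copy of every shard on one node:
+
+[source,ts]
+----
+// Shrink the source index down to a single primary shard
+await client.indices.shrink({
+  index: 'my-index',
+  target: 'my-index-shrunk',
+  settings: { 'index.number_of_shards': 1 }
+})
+----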
+ +{ref}/indices-shrink-index.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesShrinkRequest, options?: TransportRequestOptions) => Promise<IndicesShrinkResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesShrinkRequest extends <<RequestBase>> { + index: <<IndexName>> + target: <<IndexName>> + master_timeout?: <<Duration>> + timeout?: <<Duration>> + wait_for_active_shards?: <<WaitForActiveShards>> + aliases?: Record<<<IndexName>>, <<IndicesAlias>>> + settings?: Record<string, any> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface IndicesShrinkResponse { + acknowledged: boolean + shards_acknowledged: boolean + index: <<IndexName>> +} + +---- + + +[discrete] +[[client.indices.simulateIndexTemplate]] +== `client.indices.simulateIndexTemplate()` + +Simulate an index. Returns the index configuration that would be applied to the specified index from an existing index template. + +{ref}/indices-simulate-index.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptions) => Promise<IndicesSimulateIndexTemplateResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesSimulateIndexTemplateRequest extends <<RequestBase>> { + name: <<Name>> + master_timeout?: <<Duration>> + include_defaults?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface IndicesSimulateIndexTemplateResponse { + overlapping?: IndicesSimulateTemplateOverlapping[] + template: IndicesSimulateTemplateTemplate +} + +---- + + +[discrete] +[[client.indices.simulateTemplate]] +== `client.indices.simulateTemplate()` + +Simulate an index template. Returns the index configuration that would be applied by a particular index template. + +{ref}/indices-simulate-template.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesSimulateTemplateRequest, options?: TransportRequestOptions) => Promise<IndicesSimulateTemplateResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesSimulateTemplateRequest extends <<RequestBase>> { + name?: <<Name>> + create?: boolean + master_timeout?: <<Duration>> + include_defaults?: boolean + allow_auto_create?: boolean + index_patterns?: <<Indices>> + composed_of?: <<Name>>[] + template?: IndicesPutIndexTemplateIndexTemplateMapping + data_stream?: <<IndicesDataStreamVisibility>> + priority?: <<long>> + version?: <<VersionNumber>> + _meta?: <<Metadata>> + ignore_missing_component_templates?: string[] + deprecated?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface IndicesSimulateTemplateResponse { + overlapping?: IndicesSimulateTemplateOverlapping[] + template: IndicesSimulateTemplateTemplate +} + +---- + + +[discrete] +[[client.indices.split]] +== `client.indices.split()` + +Splits an existing index into a new index with more primary shards. 
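+
+A corresponding split call might look like this sketch (names, the node URL, and shard counts are illustrative; the target shard count must be a multiple of the source index's primary shard count):
+
+[source,js]
+----
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({ node: 'http://localhost:9200' }) // illustrative endpoint
+
+async function run () {
+  const response = await client.indices.split({
+    index: 'my-index',         // source index, e.g. 1 primary shard
+    target: 'my-split-index',  // target index with more primaries
+    settings: { 'index.number_of_shards': 4 }
+  })
+  console.log(response.acknowledged)
+}
+
+run().catch(console.log)
+----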
+ +{ref}/indices-split-index.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesSplitRequest, options?: TransportRequestOptions) => Promise<IndicesSplitResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesSplitRequest extends <<RequestBase>> { + index: <<IndexName>> + target: <<IndexName>> + master_timeout?: <<Duration>> + timeout?: <<Duration>> + wait_for_active_shards?: <<WaitForActiveShards>> + aliases?: Record<<<IndexName>>, <<IndicesAlias>>> + settings?: Record<string, any> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface IndicesSplitResponse { + acknowledged: boolean + shards_acknowledged: boolean + index: <<IndexName>> +} + +---- + + +[discrete] +[[client.indices.stats]] +== `client.indices.stats()` + +Returns statistics for one or more indices. For data streams, the API retrieves statistics for the stream’s backing indices. + +{ref}/indices-stats.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesStatsRequest, options?: TransportRequestOptions) => Promise<IndicesStatsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesStatsRequest extends <<RequestBase>> { + metric?: <<Metrics>> + index?: <<Indices>> + completion_fields?: <<Fields>> + expand_wildcards?: <<ExpandWildcards>> + fielddata_fields?: <<Fields>> + fields?: <<Fields>> + forbid_closed_indices?: boolean + groups?: string | string[] + include_segment_file_sizes?: boolean + include_unloaded_segments?: boolean + level?: <<Level>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface IndicesStatsResponse { + indices?: Record<string, IndicesStatsIndicesStats> + _shards: <<ShardStatistics>> + _all: IndicesStatsIndicesStats +} + +---- + + +[discrete] +[[client.indices.unfreeze]] +== `client.indices.unfreeze()` + +Unfreezes an index. + +{ref}/unfreeze-index-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesUnfreezeRequest, options?: TransportRequestOptions) => Promise<IndicesUnfreezeResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesUnfreezeRequest extends <<RequestBase>> { + index: <<IndexName>> + allow_no_indices?: boolean + expand_wildcards?: <<ExpandWildcards>> + ignore_unavailable?: boolean + master_timeout?: <<Duration>> + timeout?: <<Duration>> + wait_for_active_shards?: string +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface IndicesUnfreezeResponse { + acknowledged: boolean + shards_acknowledged: boolean +} + +---- + + +[discrete] +[[client.indices.updateAliases]] +== `client.indices.updateAliases()` + +Create or update an alias. Adds a data stream or index to an alias. 
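+
+For instance, swapping an alias from an old index to a new one in a single request could be sketched as follows (index and alias names and the node URL are illustrative):
+
+[source,js]
+----
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({ node: 'http://localhost:9200' }) // illustrative endpoint
+
+async function run () {
+  const response = await client.indices.updateAliases({
+    actions: [
+      { remove: { index: 'logs-2023', alias: 'logs-current' } },
+      { add: { index: 'logs-2024', alias: 'logs-current' } }
+    ]
+  })
+  console.log(response.acknowledged)
+}
+
+run().catch(console.log)
+----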
+ +{ref}/indices-aliases.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesUpdateAliasesRequest, options?: TransportRequestOptions) => Promise<IndicesUpdateAliasesResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesUpdateAliasesRequest extends <<RequestBase>> { + master_timeout?: <<Duration>> + timeout?: <<Duration>> + actions?: IndicesUpdateAliasesAction[] +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IndicesUpdateAliasesResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.indices.validateQuery]] +== `client.indices.validateQuery()` + +Validate a query. Validates a query without running it. + +{ref}/search-validate.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IndicesValidateQueryRequest, options?: TransportRequestOptions) => Promise<IndicesValidateQueryResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IndicesValidateQueryRequest extends <<RequestBase>> { + index?: <<Indices>> + allow_no_indices?: boolean + all_shards?: boolean + analyzer?: string + analyze_wildcard?: boolean + default_operator?: <<QueryDslOperator>> + df?: string + expand_wildcards?: <<ExpandWildcards>> + explain?: boolean + ignore_unavailable?: boolean + lenient?: boolean + rewrite?: boolean + q?: string + query?: <<QueryDslQueryContainer>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface IndicesValidateQueryResponse { + explanations?: IndicesValidateQueryIndicesValidationExplanation[] + _shards?: <<ShardStatistics>> + valid: boolean + error?: string +} + +---- + + diff --git a/docs/reference/inference.asciidoc b/docs/reference/inference.asciidoc new file mode 100644 index 000000000..f2890045e --- /dev/null +++ b/docs/reference/inference.asciidoc @@ -0,0 +1,210 @@ +[[reference-inference]] +== client.inference + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.inference.delete]] +== `client.inference.delete()` + +Delete an inference endpoint + +{ref}/delete-inference-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: InferenceDeleteRequest, options?: TransportRequestOptions) => Promise<InferenceDeleteResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface InferenceDeleteRequest extends <<RequestBase>> { + task_type?: <<InferenceTaskType>> + inference_id: <<Id>> + dry_run?: boolean + force?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type InferenceDeleteResponse = <<InferenceDeleteInferenceEndpointResult>> + +---- + + +[discrete] +[[client.inference.get]] +== `client.inference.get()` + +Get an inference endpoint + +{ref}/get-inference-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: InferenceGetRequest, options?: TransportRequestOptions) => Promise<InferenceGetResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface InferenceGetRequest extends <<RequestBase>> { + task_type?: <<InferenceTaskType>> + inference_id?: <<Id>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface InferenceGetResponse { + endpoints: <<InferenceInferenceEndpointInfo>>[] +} + +---- + + +[discrete] +[[client.inference.inference]] +== `client.inference.inference()` + +Perform inference on the service + +{ref}/post-inference-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: InferenceInferenceRequest, options?: TransportRequestOptions) => Promise<InferenceInferenceResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface InferenceInferenceRequest extends <<RequestBase>> { + task_type?: <<InferenceTaskType>> + inference_id: <<Id>> + timeout?: <<Duration>> + query?: string + input: string | string[] + task_settings?: <<InferenceTaskSettings>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type InferenceInferenceResponse = <<InferenceInferenceResult>> + +---- + + +[discrete] +[[client.inference.put]] +== `client.inference.put()` + +Create an inference endpoint + +{ref}/put-inference-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: InferencePutRequest, options?: TransportRequestOptions) => Promise<InferencePutResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface InferencePutRequest extends <<RequestBase>> { + task_type?: <<InferenceTaskType>> + inference_id: <<Id>> + inference_config?: <<InferenceInferenceEndpoint>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type InferencePutResponse = <<InferenceInferenceEndpointInfo>> + +---- + + +[discrete] +[[client.inference.streamInference]] +== `client.inference.streamInference()` + +Perform streaming inference +[discrete] +=== Function signature + +[source,ts] 
+---- +(request: InferenceStreamInferenceRequest, options?: TransportRequestOptions) => Promise<InferenceStreamInferenceResponse> +---- + diff --git a/docs/reference/info.asciidoc b/docs/reference/info.asciidoc new file mode 100644 index 000000000..7efb58698 --- /dev/null +++ b/docs/reference/info.asciidoc @@ -0,0 +1,77 @@ +[[reference-info]] +== client.info + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.info]] +== `client.info()` + +Get cluster info. Returns basic information about the cluster. + +{ref}/index.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: InfoRequest, options?: TransportRequestOptions) => Promise<InfoResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface InfoRequest extends <<RequestBase>> {} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface InfoResponse { + cluster_name: <<Name>> + cluster_uuid: <<Uuid>> + name: <<Name>> + tagline: string + version: <<ElasticsearchVersionInfo>> +} + +---- + + diff --git a/docs/reference/ingest.asciidoc b/docs/reference/ingest.asciidoc new file mode 100644 index 000000000..b2d7699ab --- /dev/null +++ b/docs/reference/ingest.asciidoc @@ -0,0 +1,442 @@ +[[reference-ingest]] +== client.ingest + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.ingest.deleteGeoipDatabase]] +== `client.ingest.deleteGeoipDatabase()` + +Delete GeoIP database configurations. Delete one or more IP geolocation database configurations. + +{ref}/delete-geoip-database-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptions) => Promise<IngestDeleteGeoipDatabaseResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IngestDeleteGeoipDatabaseRequest extends <<RequestBase>> { + id: <<Ids>> + master_timeout?: <<Duration>> + timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IngestDeleteGeoipDatabaseResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.ingest.deleteIpLocationDatabase]] +== `client.ingest.deleteIpLocationDatabase()` + +Deletes an ip location database configuration + +{ref}/delete-ip-location-database-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IngestDeleteIpLocationDatabaseRequest, options?: TransportRequestOptions) => Promise<IngestDeleteIpLocationDatabaseResponse> +---- + +[discrete] +[[client.ingest.deletePipeline]] +== `client.ingest.deletePipeline()` + +Delete pipelines. Delete one or more ingest pipelines. + +{ref}/delete-pipeline-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IngestDeletePipelineRequest, options?: TransportRequestOptions) => Promise<IngestDeletePipelineResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IngestDeletePipelineRequest extends <<RequestBase>> { + id: <<Id>> + master_timeout?: <<Duration>> + timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IngestDeletePipelineResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.ingest.geoIpStats]] +== `client.ingest.geoIpStats()` + +Get GeoIP statistics. Get download statistics for GeoIP2 databases that are used with the GeoIP processor. + +{ref}/geoip-processor.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IngestGeoIpStatsRequest, options?: TransportRequestOptions) => Promise<IngestGeoIpStatsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IngestGeoIpStatsRequest extends <<RequestBase>> {} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface IngestGeoIpStatsResponse { + stats: IngestGeoIpStatsGeoIpDownloadStatistics + nodes: Record<<<Id>>, IngestGeoIpStatsGeoIpNodeDatabases> +} + +---- + + +[discrete] +[[client.ingest.getGeoipDatabase]] +== `client.ingest.getGeoipDatabase()` + +Get GeoIP database configurations. Get information about one or more IP geolocation database configurations. 
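+
+A minimal lookup might look like this sketch (the configuration ID and node URL are illustrative; omit `id` to list all configurations):
+
+[source,js]
+----
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({ node: 'http://localhost:9200' }) // illustrative endpoint
+
+async function run () {
+  const response = await client.ingest.getGeoipDatabase({ id: 'my-geoip-config' })
+  console.log(response.databases)
+}
+
+run().catch(console.log)
+----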
+ +{ref}/get-geoip-database-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IngestGetGeoipDatabaseRequest, options?: TransportRequestOptions) => Promise<IngestGetGeoipDatabaseResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IngestGetGeoipDatabaseRequest extends <<RequestBase>> { + id?: <<Ids>> + master_timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface IngestGetGeoipDatabaseResponse { + databases: IngestGetGeoipDatabaseDatabaseConfigurationMetadata[] +} + +---- + + +[discrete] +[[client.ingest.getIpLocationDatabase]] +== `client.ingest.getIpLocationDatabase()` + +Returns the specified ip location database configuration + +{ref}/get-ip-location-database-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IngestGetIpLocationDatabaseRequest, options?: TransportRequestOptions) => Promise<IngestGetIpLocationDatabaseResponse> +---- + +[discrete] +[[client.ingest.getPipeline]] +== `client.ingest.getPipeline()` + +Get pipelines. Get information about one or more ingest pipelines. This API returns a local reference of the pipeline. + +{ref}/get-pipeline-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IngestGetPipelineRequest, options?: TransportRequestOptions) => Promise<IngestGetPipelineResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IngestGetPipelineRequest extends <<RequestBase>> { + id?: <<Id>> + master_timeout?: <<Duration>> + summary?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IngestGetPipelineResponse = Record<string, <<IngestPipeline>>> + +---- + + +[discrete] +[[client.ingest.processorGrok]] +== `client.ingest.processorGrok()` + +Run a grok processor. Extract structured fields out of a single text field within a document. You must choose which field to extract matched fields from, as well as the grok pattern you expect will match. A grok pattern is like a regular expression that supports aliased expressions that can be reused. + +{ref}/grok-processor.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IngestProcessorGrokRequest, options?: TransportRequestOptions) => Promise<IngestProcessorGrokResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IngestProcessorGrokRequest extends <<RequestBase>> {} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface IngestProcessorGrokResponse { + patterns: Record<string, string> +} + +---- + + +[discrete] +[[client.ingest.putGeoipDatabase]] +== `client.ingest.putGeoipDatabase()` + +Create or update GeoIP database configurations. Create or update IP geolocation database configurations. 
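+
+A sketch of creating such a configuration follows. The configuration ID, database name, and node URL are illustrative, and the shape of the `maxmind` settings shown here (an `account_id` field) is an assumption about the MaxMind downloader settings; check the linked documentation for the exact fields in your version.
+
+[source,js]
+----
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({ node: 'http://localhost:9200' }) // illustrative endpoint
+
+async function run () {
+  const response = await client.ingest.putGeoipDatabase({
+    id: 'my-geoip-config',             // illustrative configuration ID
+    name: 'GeoIP2-City',               // database to download
+    maxmind: { account_id: '123456' }  // assumed shape of the MaxMind settings
+  })
+  console.log(response.acknowledged)
+}
+
+run().catch(console.log)
+----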
+ +{ref}/put-geoip-database-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IngestPutGeoipDatabaseRequest, options?: TransportRequestOptions) => Promise<IngestPutGeoipDatabaseResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IngestPutGeoipDatabaseRequest extends <<RequestBase>> { + id: <<Id>> + master_timeout?: <<Duration>> + timeout?: <<Duration>> + name: <<Name>> + maxmind: <<IngestMaxmind>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IngestPutGeoipDatabaseResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.ingest.putIpLocationDatabase]] +== `client.ingest.putIpLocationDatabase()` + +Puts the configuration for a ip location database to be downloaded + +{ref}/put-ip-location-database-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IngestPutIpLocationDatabaseRequest, options?: TransportRequestOptions) => Promise<IngestPutIpLocationDatabaseResponse> +---- + +[discrete] +[[client.ingest.putPipeline]] +== `client.ingest.putPipeline()` + +Create or update a pipeline. Changes made using this API take effect immediately. + +{ref}/ingest.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IngestPutPipelineRequest, options?: TransportRequestOptions) => Promise<IngestPutPipelineResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IngestPutPipelineRequest extends <<RequestBase>> { + id: <<Id>> + master_timeout?: <<Duration>> + timeout?: <<Duration>> + if_version?: <<VersionNumber>> + _meta?: <<Metadata>> + description?: string + on_failure?: <<IngestProcessorContainer>>[] + processors?: <<IngestProcessorContainer>>[] + version?: <<VersionNumber>> + deprecated?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type IngestPutPipelineResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.ingest.simulate]] +== `client.ingest.simulate()` + +Simulate a pipeline. Run an ingest pipeline against a set of provided documents. You can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request. 
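+
+For example, simulating an inline pipeline against a couple of test documents could look like this sketch (the field names, processor, and node URL are illustrative). The same `description`/`processors` shape is what `client.ingest.putPipeline()` accepts when storing a pipeline.
+
+[source,js]
+----
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({ node: 'http://localhost:9200' }) // illustrative endpoint
+
+async function run () {
+  const response = await client.ingest.simulate({
+    pipeline: {
+      description: 'add an ingest timestamp',
+      processors: [
+        { set: { field: 'ingested_at', value: '{{_ingest.timestamp}}' } }
+      ]
+    },
+    docs: [
+      { _index: 'my-index', _id: '1', _source: { message: 'hello' } },
+      { _index: 'my-index', _id: '2', _source: { message: 'world' } }
+    ]
+  })
+  console.log(response.docs)
+}
+
+run().catch(console.log)
+----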
+ +{ref}/simulate-pipeline-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: IngestSimulateRequest, options?: TransportRequestOptions) => Promise<IngestSimulateResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface IngestSimulateRequest extends <<RequestBase>> { + id?: <<Id>> + verbose?: boolean + docs: IngestSimulateDocument[] + pipeline?: <<IngestPipeline>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface IngestSimulateResponse { + docs: IngestSimulateSimulateDocumentResult[] +} + +---- + + diff --git a/docs/reference/knn_search.asciidoc b/docs/reference/knn_search.asciidoc new file mode 100644 index 000000000..6b2506b4b --- /dev/null +++ b/docs/reference/knn_search.asciidoc @@ -0,0 +1,87 @@ +[[reference-knn_search]] +== client.knnSearch + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.knnSearch]] +== `client.knnSearch()` + +Run a knn search. NOTE: The kNN search API has been replaced by the `knn` option in the search API. Perform a k-nearest neighbor (kNN) search on a dense_vector field and return the matching documents. Given a query vector, the API finds the k closest vectors and returns those documents as search hits. Elasticsearch uses the HNSW algorithm to support efficient kNN search. Like most kNN algorithms, HNSW is an approximate method that sacrifices result accuracy for improved search speed. This means the results returned are not always the true k closest neighbors. The kNN search API supports restricting the search using a filter. The search will return the top k documents that also match the filter query. 
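+
+A minimal sketch of the standalone endpoint follows (the index, field name, vector values, and node URL are illustrative, and the target field is assumed to be mapped as `dense_vector`); as noted above, the `knn` option of the search API is the preferred approach for new code.
+
+[source,js]
+----
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({ node: 'http://localhost:9200' }) // illustrative endpoint
+
+async function run () {
+  const response = await client.knnSearch({
+    index: 'my-image-index',
+    knn: {
+      field: 'image_vector',             // dense_vector field
+      query_vector: [0.12, -0.3, 0.91],  // must match the field's dimension count
+      k: 10,
+      num_candidates: 100
+    },
+    _source: ['title']
+  })
+  console.log(response.hits.hits)
+}
+
+run().catch(console.log)
+----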
+ +{ref}/search-search.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: KnnSearchRequest, options?: TransportRequestOptions) => Promise<KnnSearchResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface KnnSearchRequest extends <<RequestBase>> { + index: <<Indices>> + routing?: <<Routing>> + _source?: <<SearchSourceConfig>> + docvalue_fields?: (<<QueryDslFieldAndFormat>> | <<Field>>)[] + stored_fields?: <<Fields>> + fields?: <<Fields>> + filter?: <<QueryDslQueryContainer>> | <<QueryDslQueryContainer>>[] + knn: <<KnnSearchQuery>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface KnnSearchResponse<TDocument = unknown> { + took: <<long>> + timed_out: boolean + _shards: <<ShardStatistics>> + hits: <<SearchHitsMetadata>><TDocument> + fields?: Record<string, any> + max_score?: <<double>> +} + +---- + + diff --git a/docs/reference/license.asciidoc b/docs/reference/license.asciidoc new file mode 100644 index 000000000..1bf0de8bf --- /dev/null +++ b/docs/reference/license.asciidoc @@ -0,0 +1,314 @@ +[[reference-license]] +== client.license + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.license.delete]] +== `client.license.delete()` + +Deletes licensing information for the cluster + +{ref}/delete-license.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: LicenseDeleteRequest, options?: TransportRequestOptions) => Promise<LicenseDeleteResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface LicenseDeleteRequest extends <<RequestBase>> {} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type LicenseDeleteResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.license.get]] +== `client.license.get()` + +Get license information. Returns information about your Elastic license, including its type, its status, when it was issued, and when it expires. For more information about the different types of licenses, refer to [Elastic Stack subscriptions](https://www.elastic.co/subscriptions). 
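+
+Reading the current license is a single call with no required parameters; a sketch (the node URL is illustrative):
+
+[source,js]
+----
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({ node: 'http://localhost:9200' }) // illustrative endpoint
+
+async function run () {
+  const response = await client.license.get()
+  console.log(response.license.type, response.license.status)
+}
+
+run().catch(console.log)
+----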
+ +{ref}/get-license.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: LicenseGetRequest, options?: TransportRequestOptions) => Promise<LicenseGetResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface LicenseGetRequest extends <<RequestBase>> { + accept_enterprise?: boolean + local?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface LicenseGetResponse { + license: LicenseGetLicenseInformation +} + +---- + + +[discrete] +[[client.license.getBasicStatus]] +== `client.license.getBasicStatus()` + +Retrieves information about the status of the basic license. + +{ref}/get-basic-status.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: LicenseGetBasicStatusRequest, options?: TransportRequestOptions) => Promise<LicenseGetBasicStatusResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface LicenseGetBasicStatusRequest extends <<RequestBase>> {} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface LicenseGetBasicStatusResponse { + eligible_to_start_basic: boolean +} + +---- + + +[discrete] +[[client.license.getTrialStatus]] +== `client.license.getTrialStatus()` + +Retrieves information about the status of the trial license. + +{ref}/get-trial-status.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: LicenseGetTrialStatusRequest, options?: TransportRequestOptions) => Promise<LicenseGetTrialStatusResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface LicenseGetTrialStatusRequest extends <<RequestBase>> {} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface LicenseGetTrialStatusResponse { + eligible_to_start_trial: boolean +} + +---- + + +[discrete] +[[client.license.post]] +== `client.license.post()` + +Updates the license for the cluster. + +{ref}/update-license.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: LicensePostRequest, options?: TransportRequestOptions) => Promise<LicensePostResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface LicensePostRequest extends <<RequestBase>> { + acknowledge?: boolean + license?: <<LicenseLicense>> + licenses?: <<LicenseLicense>>[] +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface LicensePostResponse { + acknowledge?: LicensePostAcknowledgement + acknowledged: boolean + license_status: <<LicenseLicenseStatus>> +} + +---- + + +[discrete] +[[client.license.postStartBasic]] +== `client.license.postStartBasic()` + +The start basic API enables you to initiate an indefinite basic license, which gives access to all the basic features. If the basic license does not support all of the features that are available with your current license, however, you are notified in the response. You must then re-submit the API request with the acknowledge parameter set to true. To check the status of your basic license, use the following API: [Get basic status](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-basic-status.html). 
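+
+A sketch of starting a basic license while pre-acknowledging the feature changes described above (the node URL is illustrative):
+
+[source,js]
+----
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({ node: 'http://localhost:9200' }) // illustrative endpoint
+
+async function run () {
+  const response = await client.license.postStartBasic({ acknowledge: true })
+  console.log(response.acknowledged, response.basic_was_started)
+}
+
+run().catch(console.log)
+----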
+ +{ref}/start-basic.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: LicensePostStartBasicRequest, options?: TransportRequestOptions) => Promise<LicensePostStartBasicResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface LicensePostStartBasicRequest extends <<RequestBase>> { + acknowledge?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface LicensePostStartBasicResponse { + acknowledged: boolean + basic_was_started: boolean + error_message?: string + type?: <<LicenseLicenseType>> + acknowledge?: Record<string, string | string[]> +} + +---- + + +[discrete] +[[client.license.postStartTrial]] +== `client.license.postStartTrial()` + +The start trial API enables you to start a 30-day trial, which gives access to all subscription features. + +{ref}/start-trial.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: LicensePostStartTrialRequest, options?: TransportRequestOptions) => Promise<LicensePostStartTrialResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface LicensePostStartTrialRequest extends <<RequestBase>> { + acknowledge?: boolean + type_query_string?: string +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface LicensePostStartTrialResponse { + acknowledged: boolean + error_message?: string + trial_was_started: boolean + type?: <<LicenseLicenseType>> +} + +---- + + diff --git a/docs/reference/logstash.asciidoc b/docs/reference/logstash.asciidoc new file mode 100644 index 000000000..da1b724da --- /dev/null +++ b/docs/reference/logstash.asciidoc @@ -0,0 +1,148 @@ +[[reference-logstash]] +== client.logstash + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.logstash.deletePipeline]] +== `client.logstash.deletePipeline()` + +Deletes a pipeline used for Logstash Central Management. 
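+
+A minimal sketch (the pipeline ID and node URL are illustrative):
+
+[source,js]
+----
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({ node: 'http://localhost:9200' }) // illustrative endpoint
+
+async function run () {
+  await client.logstash.deletePipeline({ id: 'my-logstash-pipeline' })
+  console.log('pipeline deleted')
+}
+
+run().catch(console.log)
+----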
+ +{ref}/logstash-api-delete-pipeline.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: LogstashDeletePipelineRequest, options?: TransportRequestOptions) => Promise<LogstashDeletePipelineResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface LogstashDeletePipelineRequest extends <<RequestBase>> { + id: <<Id>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type LogstashDeletePipelineResponse = boolean + +---- + + +[discrete] +[[client.logstash.getPipeline]] +== `client.logstash.getPipeline()` + +Retrieves pipelines used for Logstash Central Management. + +{ref}/logstash-api-get-pipeline.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: LogstashGetPipelineRequest, options?: TransportRequestOptions) => Promise<LogstashGetPipelineResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface LogstashGetPipelineRequest extends <<RequestBase>> { + id?: <<Ids>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type LogstashGetPipelineResponse = Record<<<Id>>, <<LogstashPipeline>>> + +---- + + +[discrete] +[[client.logstash.putPipeline]] +== `client.logstash.putPipeline()` + +Creates or updates a pipeline used for Logstash Central Management. + +{ref}/logstash-api-put-pipeline.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: LogstashPutPipelineRequest, options?: TransportRequestOptions) => Promise<LogstashPutPipelineResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface LogstashPutPipelineRequest extends <<RequestBase>> { + id: <<Id>> + pipeline?: <<LogstashPipeline>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type LogstashPutPipelineResponse = boolean + +---- + + diff --git a/docs/reference/main.asciidoc b/docs/reference/main.asciidoc new file mode 100644 index 000000000..92fbf672a --- /dev/null +++ b/docs/reference/main.asciidoc @@ -0,0 +1,209 @@ +[[api-reference]] + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + + += API Reference + +* <<reference-async_search,client.asyncSearch>> +* <<reference-autoscaling,client.autoscaling>> +* <<reference-bulk,client.bulk>> +* <<reference-cat,client.cat>> +* <<reference-ccr,client.ccr>> +* <<reference-clear_scroll,client.clearScroll>> +* <<reference-close_point_in_time,client.closePointInTime>> +* <<reference-cluster,client.cluster>> +* <<reference-connector,client.connector>> +* <<reference-count,client.count>> +* <<reference-create,client.create>> +* <<reference-dangling_indices,client.danglingIndices>> +* <<reference-delete,client.delete>> +* <<reference-delete_by_query,client.deleteByQuery>> +* <<reference-delete_by_query_rethrottle,client.deleteByQueryRethrottle>> +* <<reference-delete_script,client.deleteScript>> +* <<reference-enrich,client.enrich>> +* <<reference-eql,client.eql>> +* <<reference-esql,client.esql>> +* <<reference-exists,client.exists>> +* <<reference-exists_source,client.existsSource>> +* <<reference-explain,client.explain>> +* <<reference-features,client.features>> +* <<reference-field_caps,client.fieldCaps>> +* <<reference-fleet,client.fleet>> +* <<reference-get,client.get>> +* <<reference-get_script,client.getScript>> +* <<reference-get_script_context,client.getScriptContext>> +* <<reference-get_script_languages,client.getScriptLanguages>> +* <<reference-get_source,client.getSource>> +* <<reference-graph,client.graph>> +* <<reference-health_report,client.healthReport>> +* <<reference-ilm,client.ilm>> +* <<reference-index,client.index>> +* <<reference-indices,client.indices>> +* <<reference-inference,client.inference>> +* <<reference-info,client.info>> +* <<reference-ingest,client.ingest>> +* <<reference-knn_search,client.knnSearch>> +* <<reference-license,client.license>> +* <<reference-logstash,client.logstash>> +* <<reference-mget,client.mget>> +* <<reference-migration,client.migration>> +* <<reference-ml,client.ml>> +* <<reference-monitoring,client.monitoring>> +* <<reference-msearch,client.msearch>> +* <<reference-msearch_template,client.msearchTemplate>> +* <<reference-mtermvectors,client.mtermvectors>> +* <<reference-nodes,client.nodes>> +* <<reference-open_point_in_time,client.openPointInTime>> +* <<reference-ping,client.ping>> +* <<reference-put_script,client.putScript>> +* <<reference-query_rules,client.queryRules>> +* <<reference-rank_eval,client.rankEval>> +* <<reference-reindex,client.reindex>> +* <<reference-reindex_rethrottle,client.reindexRethrottle>> +* <<reference-render_search_template,client.renderSearchTemplate>> +* <<reference-rollup,client.rollup>> +* <<reference-scripts_painless_execute,client.scriptsPainlessExecute>> +* <<reference-scroll,client.scroll>> +* <<reference-search,client.search>> +* <<reference-search_application,client.searchApplication>> +* <<reference-search_mvt,client.searchMvt>> +* <<reference-search_shards,client.searchShards>> +* <<reference-search_template,client.searchTemplate>> +* <<reference-searchable_snapshots,client.searchableSnapshots>> +* 
<<reference-security,client.security>> +* <<reference-shutdown,client.shutdown>> +* <<reference-simulate,client.simulate>> +* <<reference-slm,client.slm>> +* <<reference-snapshot,client.snapshot>> +* <<reference-sql,client.sql>> +* <<reference-ssl,client.ssl>> +* <<reference-synonyms,client.synonyms>> +* <<reference-tasks,client.tasks>> +* <<reference-terms_enum,client.termsEnum>> +* <<reference-termvectors,client.termvectors>> +* <<reference-text_structure,client.textStructure>> +* <<reference-transform,client.transform>> +* <<reference-update,client.update>> +* <<reference-update_by_query,client.updateByQuery>> +* <<reference-update_by_query_rethrottle,client.updateByQueryRethrottle>> +* <<reference-watcher,client.watcher>> +* <<reference-xpack,client.xpack>> + + +include::async_search.asciidoc[] +include::autoscaling.asciidoc[] +include::bulk.asciidoc[] +include::cat.asciidoc[] +include::ccr.asciidoc[] +include::clear_scroll.asciidoc[] +include::close_point_in_time.asciidoc[] +include::cluster.asciidoc[] +include::connector.asciidoc[] +include::count.asciidoc[] +include::create.asciidoc[] +include::dangling_indices.asciidoc[] +include::delete.asciidoc[] +include::delete_by_query.asciidoc[] +include::delete_by_query_rethrottle.asciidoc[] +include::delete_script.asciidoc[] +include::enrich.asciidoc[] +include::eql.asciidoc[] +include::esql.asciidoc[] +include::exists.asciidoc[] +include::exists_source.asciidoc[] +include::explain.asciidoc[] +include::features.asciidoc[] +include::field_caps.asciidoc[] +include::fleet.asciidoc[] +include::get.asciidoc[] +include::get_script.asciidoc[] +include::get_script_context.asciidoc[] +include::get_script_languages.asciidoc[] +include::get_source.asciidoc[] +include::graph.asciidoc[] +include::health_report.asciidoc[] +include::ilm.asciidoc[] +include::index.asciidoc[] +include::indices.asciidoc[] +include::inference.asciidoc[] +include::info.asciidoc[] +include::ingest.asciidoc[] +include::knn_search.asciidoc[] +include::license.asciidoc[] +include::logstash.asciidoc[] +include::mget.asciidoc[] +include::migration.asciidoc[] +include::ml.asciidoc[] +include::monitoring.asciidoc[] +include::msearch.asciidoc[] +include::msearch_template.asciidoc[] +include::mtermvectors.asciidoc[] +include::nodes.asciidoc[] +include::open_point_in_time.asciidoc[] +include::ping.asciidoc[] +include::put_script.asciidoc[] +include::query_rules.asciidoc[] +include::rank_eval.asciidoc[] +include::reindex.asciidoc[] +include::reindex_rethrottle.asciidoc[] +include::render_search_template.asciidoc[] +include::rollup.asciidoc[] +include::scripts_painless_execute.asciidoc[] +include::scroll.asciidoc[] +include::search.asciidoc[] +include::search_application.asciidoc[] +include::search_mvt.asciidoc[] +include::search_shards.asciidoc[] +include::search_template.asciidoc[] +include::searchable_snapshots.asciidoc[] +include::security.asciidoc[] +include::shutdown.asciidoc[] +include::simulate.asciidoc[] +include::slm.asciidoc[] +include::snapshot.asciidoc[] +include::sql.asciidoc[] +include::ssl.asciidoc[] +include::synonyms.asciidoc[] +include::tasks.asciidoc[] +include::terms_enum.asciidoc[] +include::termvectors.asciidoc[] +include::text_structure.asciidoc[] +include::transform.asciidoc[] +include::update.asciidoc[] +include::update_by_query.asciidoc[] +include::update_by_query_rethrottle.asciidoc[] +include::watcher.asciidoc[] +include::xpack.asciidoc[] +include::shared-types/index.asciidoc[] diff --git a/docs/reference/mget.asciidoc b/docs/reference/mget.asciidoc new file 
mode 100644 index 000000000..91c8b52be --- /dev/null +++ b/docs/reference/mget.asciidoc @@ -0,0 +1,86 @@ +[[reference-mget]] +== client.mget + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.mget]] +== `client.mget()` + +Get multiple documents. Get multiple JSON documents by ID from one or more indices. If you specify an index in the request URI, you only need to specify the document IDs in the request body. To ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail. + +{ref}/docs-multi-get.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MgetRequest, options?: TransportRequestOptions) => Promise<MgetResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MgetRequest extends <<RequestBase>> { + index?: <<IndexName>> + force_synthetic_source?: boolean + preference?: string + realtime?: boolean + refresh?: boolean + routing?: <<Routing>> + _source?: <<SearchSourceConfigParam>> + _source_excludes?: <<Fields>> + _source_includes?: <<Fields>> + stored_fields?: <<Fields>> + docs?: <<MgetOperation>>[] + ids?: <<Ids>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MgetResponse<TDocument = unknown> { + docs: <<MgetResponseItem>><TDocument>[] +} + +---- + + diff --git a/docs/reference/migration.asciidoc b/docs/reference/migration.asciidoc new file mode 100644 index 000000000..e9d4825b1 --- /dev/null +++ b/docs/reference/migration.asciidoc @@ -0,0 +1,155 @@ +[[reference-migration]] +== client.migration + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.migration.deprecations]] +== `client.migration.deprecations()` + +Retrieves information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version. + +{ref}/migration-api-deprecation.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MigrationDeprecationsRequest, options?: TransportRequestOptions) => Promise<MigrationDeprecationsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MigrationDeprecationsRequest extends <<RequestBase>> { + index?: <<IndexName>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MigrationDeprecationsResponse { + cluster_settings: MigrationDeprecationsDeprecation[] + index_settings: Record<string, MigrationDeprecationsDeprecation[]> + data_streams: Record<string, MigrationDeprecationsDeprecation[]> + node_settings: MigrationDeprecationsDeprecation[] + ml_settings: MigrationDeprecationsDeprecation[] +} + +---- + + +[discrete] +[[client.migration.getFeatureUpgradeStatus]] +== `client.migration.getFeatureUpgradeStatus()` + +Find out whether system features need to be upgraded or not + +{ref}/migration-api-feature-upgrade.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptions) => Promise<MigrationGetFeatureUpgradeStatusResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MigrationGetFeatureUpgradeStatusRequest extends <<RequestBase>> {} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MigrationGetFeatureUpgradeStatusResponse { + features: MigrationGetFeatureUpgradeStatusMigrationFeature[] + migration_status: MigrationGetFeatureUpgradeStatusMigrationStatus +} + +---- + + +[discrete] +[[client.migration.postFeatureUpgrade]] +== `client.migration.postFeatureUpgrade()` + +Begin upgrades for system features + +{ref}/migration-api-feature-upgrade.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptions) => Promise<MigrationPostFeatureUpgradeResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MigrationPostFeatureUpgradeRequest extends <<RequestBase>> {} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MigrationPostFeatureUpgradeResponse { + accepted: boolean + features: MigrationPostFeatureUpgradeMigrationFeature[] +} + +---- + + diff --git a/docs/reference/ml.asciidoc b/docs/reference/ml.asciidoc new file mode 100644 index 000000000..3070edee5 --- /dev/null +++ b/docs/reference/ml.asciidoc @@ -0,0 +1,3200 @@ +[[reference-ml]] +== client.ml + +//////// +=========================================================================================================================== +|| || +|| || +|| || 
+|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.ml.clearTrainedModelDeploymentCache]] +== `client.ml.clearTrainedModelDeploymentCache()` + +Clear trained model deployment cache. Cache will be cleared on all nodes where the trained model is assigned. A trained model deployment may have an inference cache enabled. As requests are handled by each allocated node, their responses may be cached on that individual node. Calling this API clears the caches without restarting the deployment. + +{ref}/clear-trained-model-deployment-cache.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptions) => Promise<MlClearTrainedModelDeploymentCacheResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlClearTrainedModelDeploymentCacheRequest extends <<RequestBase>> { + model_id: <<Id>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlClearTrainedModelDeploymentCacheResponse { + cleared: boolean +} + +---- + + +[discrete] +[[client.ml.closeJob]] +== `client.ml.closeJob()` + +Close anomaly detection jobs. A job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results. When you close a job, it runs housekeeping tasks such as pruning the model history, flushing buffers, calculating final results and persisting the model snapshots. Depending upon the size of the job, it could take several minutes to close and the equivalent time to re-open. After it is closed, the job has a minimal overhead on the cluster except for maintaining its meta data. Therefore it is a best practice to close jobs that are no longer required to process data. If you close an anomaly detection job whose datafeed is running, the request first tries to stop the datafeed. This behavior is equivalent to calling stop datafeed API with the same timeout and force parameters as the close job request. When a datafeed that has a specified end date stops, it automatically closes its associated job. 
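+
+A sketch of closing a job once it is no longer needed (the job ID and node URL are illustrative):
+
+[source,js]
+----
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({ node: 'http://localhost:9200' }) // illustrative endpoint
+
+async function run () {
+  const response = await client.ml.closeJob({ job_id: 'my-anomaly-job', timeout: '30s' })
+  console.log(response.closed)
+}
+
+run().catch(console.log)
+----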
+ +{ref}/ml-close-job.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlCloseJobRequest, options?: TransportRequestOptions) => Promise<MlCloseJobResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlCloseJobRequest extends <<RequestBase>> { + job_id: <<Id>> + allow_no_match?: boolean + force?: boolean + timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlCloseJobResponse { + closed: boolean +} + +---- + + +[discrete] +[[client.ml.deleteCalendar]] +== `client.ml.deleteCalendar()` + +Delete a calendar. Removes all scheduled events from a calendar, then deletes it. + +{ref}/ml-delete-calendar.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlDeleteCalendarRequest, options?: TransportRequestOptions) => Promise<MlDeleteCalendarResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlDeleteCalendarRequest extends <<RequestBase>> { + calendar_id: <<Id>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type MlDeleteCalendarResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.ml.deleteCalendarEvent]] +== `client.ml.deleteCalendarEvent()` + +Delete events from a calendar. + +{ref}/ml-delete-calendar-event.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlDeleteCalendarEventRequest, options?: TransportRequestOptions) => Promise<MlDeleteCalendarEventResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlDeleteCalendarEventRequest extends <<RequestBase>> { + calendar_id: <<Id>> + event_id: <<Id>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type MlDeleteCalendarEventResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.ml.deleteCalendarJob]] +== `client.ml.deleteCalendarJob()` + +Delete anomaly jobs from a calendar. + +{ref}/ml-delete-calendar-job.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlDeleteCalendarJobRequest, options?: TransportRequestOptions) => Promise<MlDeleteCalendarJobResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlDeleteCalendarJobRequest extends <<RequestBase>> { + calendar_id: <<Id>> + job_id: <<Ids>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlDeleteCalendarJobResponse { + calendar_id: <<Id>> + description?: string + job_ids: <<Ids>> +} + +---- + + +[discrete] +[[client.ml.deleteDataFrameAnalytics]] +== `client.ml.deleteDataFrameAnalytics()` + +Delete a data frame analytics job. + +{ref}/delete-dfanalytics.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptions) => Promise<MlDeleteDataFrameAnalyticsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlDeleteDataFrameAnalyticsRequest extends <<RequestBase>> { + id: <<Id>> + force?: boolean + timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type MlDeleteDataFrameAnalyticsResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.ml.deleteDatafeed]] +== `client.ml.deleteDatafeed()` + +Delete a datafeed. 
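+
+A minimal sketch (the datafeed ID and node URL are illustrative; pass `force: true` to delete a datafeed that is still started):
+
+[source,js]
+----
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({ node: 'http://localhost:9200' }) // illustrative endpoint
+
+async function run () {
+  const response = await client.ml.deleteDatafeed({ datafeed_id: 'my-datafeed' })
+  console.log(response.acknowledged)
+}
+
+run().catch(console.log)
+----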
+ +{ref}/ml-delete-datafeed.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlDeleteDatafeedRequest, options?: TransportRequestOptions) => Promise<MlDeleteDatafeedResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlDeleteDatafeedRequest extends <<RequestBase>> { + datafeed_id: <<Id>> + force?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type MlDeleteDatafeedResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.ml.deleteExpiredData]] +== `client.ml.deleteExpiredData()` + +Delete expired ML data. Deletes all job results, model snapshots and forecast data that have exceeded their retention days period. Machine learning state documents that are not associated with any job are also deleted. You can limit the request to a single or set of anomaly detection jobs by using a job identifier, a group name, a list of jobs, or a wildcard expression. You can delete expired data for all anomaly detection jobs by using _all, by specifying * as the <job_id>, or by omitting the <job_id>. + +{ref}/ml-delete-expired-data.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlDeleteExpiredDataRequest, options?: TransportRequestOptions) => Promise<MlDeleteExpiredDataResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlDeleteExpiredDataRequest extends <<RequestBase>> { + job_id?: <<Id>> + requests_per_second?: <<float>> + timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlDeleteExpiredDataResponse { + deleted: boolean +} + +---- + + +[discrete] +[[client.ml.deleteFilter]] +== `client.ml.deleteFilter()` + +Delete a filter. If an anomaly detection job references the filter, you cannot delete the filter. You must update or delete the job before you can delete the filter. + +{ref}/ml-delete-filter.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlDeleteFilterRequest, options?: TransportRequestOptions) => Promise<MlDeleteFilterResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlDeleteFilterRequest extends <<RequestBase>> { + filter_id: <<Id>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type MlDeleteFilterResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.ml.deleteForecast]] +== `client.ml.deleteForecast()` + +Delete forecasts from a job. By default, forecasts are retained for 14 days. You can specify a different retention period with the `expires_in` parameter in the forecast jobs API. The delete forecast API enables you to delete one or more forecasts before they expire. + +{ref}/ml-delete-forecast.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlDeleteForecastRequest, options?: TransportRequestOptions) => Promise<MlDeleteForecastResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlDeleteForecastRequest extends <<RequestBase>> { + job_id: <<Id>> + forecast_id?: <<Id>> + allow_no_forecasts?: boolean + timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type MlDeleteForecastResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.ml.deleteJob]] +== `client.ml.deleteJob()` + +Delete an anomaly detection job. 
All job configuration, model state and results are deleted. It is not currently possible to delete multiple jobs using wildcards or a comma separated list. If you delete a job that has a datafeed, the request first tries to delete the datafeed. This behavior is equivalent to calling the delete datafeed API with the same timeout and force parameters as the delete job request. + +{ref}/ml-delete-job.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlDeleteJobRequest, options?: TransportRequestOptions) => Promise<MlDeleteJobResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlDeleteJobRequest extends <<RequestBase>> { + job_id: <<Id>> + force?: boolean + delete_user_annotations?: boolean + wait_for_completion?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type MlDeleteJobResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.ml.deleteModelSnapshot]] +== `client.ml.deleteModelSnapshot()` + +Delete a model snapshot. You cannot delete the active model snapshot. To delete that snapshot, first revert to a different one. To identify the active model snapshot, refer to the `model_snapshot_id` in the results from the get jobs API. + +{ref}/ml-delete-snapshot.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlDeleteModelSnapshotRequest, options?: TransportRequestOptions) => Promise<MlDeleteModelSnapshotResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlDeleteModelSnapshotRequest extends <<RequestBase>> { + job_id: <<Id>> + snapshot_id: <<Id>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type MlDeleteModelSnapshotResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.ml.deleteTrainedModel]] +== `client.ml.deleteTrainedModel()` + +Delete an unreferenced trained model. The request deletes a trained inference model that is not referenced by an ingest pipeline. + +{ref}/delete-trained-models.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlDeleteTrainedModelRequest, options?: TransportRequestOptions) => Promise<MlDeleteTrainedModelResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlDeleteTrainedModelRequest extends <<RequestBase>> { + model_id: <<Id>> + force?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type MlDeleteTrainedModelResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.ml.deleteTrainedModelAlias]] +== `client.ml.deleteTrainedModelAlias()` + +Delete a trained model alias. This API deletes an existing model alias that refers to a trained model. If the model alias is missing or refers to a model other than the one identified by the `model_id`, this API returns an error. 
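+
+For example (a sketch; `client` is assumed to be a configured `Client` instance and both names are placeholders):
+
+[source,ts]
+----
+// Remove the alias from the model it currently points to.
+const response = await client.ml.deleteTrainedModelAlias({
+  model_alias: 'flight-delay-model',
+  model_id: 'flight-delay-model-v1'
+})
+console.log(response.acknowledged)
+----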
+ +{ref}/delete-trained-models-aliases.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptions) => Promise<MlDeleteTrainedModelAliasResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlDeleteTrainedModelAliasRequest extends <<RequestBase>> { + model_alias: <<Name>> + model_id: <<Id>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type MlDeleteTrainedModelAliasResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.ml.estimateModelMemory]] +== `client.ml.estimateModelMemory()` + +Estimate job model memory usage. Makes an estimation of the memory usage for an anomaly detection job model. It is based on analysis configuration details for the job and cardinality estimates for the fields it references. + +{ref}/ml-apis.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlEstimateModelMemoryRequest, options?: TransportRequestOptions) => Promise<MlEstimateModelMemoryResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlEstimateModelMemoryRequest extends <<RequestBase>> { + analysis_config?: <<MlAnalysisConfig>> + max_bucket_cardinality?: Record<<<Field>>, <<long>>> + overall_cardinality?: Record<<<Field>>, <<long>>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlEstimateModelMemoryResponse { + model_memory_estimate: string +} + +---- + + +[discrete] +[[client.ml.evaluateDataFrame]] +== `client.ml.evaluateDataFrame()` + +Evaluate data frame analytics. The API packages together commonly used evaluation metrics for various types of machine learning features. This has been designed for use on indexes created by data frame analytics. Evaluation requires both a ground truth field and an analytics result field to be present. + +{ref}/evaluate-dfanalytics.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlEvaluateDataFrameRequest, options?: TransportRequestOptions) => Promise<MlEvaluateDataFrameResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlEvaluateDataFrameRequest extends <<RequestBase>> { + evaluation: <<MlDataframeEvaluationContainer>> + index: <<IndexName>> + query?: <<QueryDslQueryContainer>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlEvaluateDataFrameResponse { + classification?: MlEvaluateDataFrameDataframeClassificationSummary + outlier_detection?: MlEvaluateDataFrameDataframeOutlierDetectionSummary + regression?: MlEvaluateDataFrameDataframeRegressionSummary +} + +---- + + +[discrete] +[[client.ml.explainDataFrameAnalytics]] +== `client.ml.explainDataFrameAnalytics()` + +Explain data frame analytics config. This API provides explanations for a data frame analytics config that either exists already or one that has not been created yet. The following explanations are provided: * which fields are included or not in the analysis and why, * how much memory is estimated to be required. The estimate can be used when deciding the appropriate value for model_memory_limit setting later on. If you have object fields or fields that are excluded via source filtering, they are not included in the explanation. 
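+
+One possible call, sketched for a config that has not been created yet (assuming a configured `client`; the index name is a placeholder):
+
+[source,ts]
+----
+// Ask which fields would be analyzed and how much memory the job would need.
+const explanation = await client.ml.explainDataFrameAnalytics({
+  source: { index: 'my-source-index' },
+  analysis: { outlier_detection: {} }
+})
+console.log(explanation.memory_estimation)
+console.log(explanation.field_selection)
+----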
+ +{ref}/explain-dfanalytics.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptions) => Promise<MlExplainDataFrameAnalyticsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlExplainDataFrameAnalyticsRequest extends <<RequestBase>> { + id?: <<Id>> + source?: <<MlDataframeAnalyticsSource>> + dest?: <<MlDataframeAnalyticsDestination>> + analysis?: <<MlDataframeAnalysisContainer>> + description?: string + model_memory_limit?: string + max_num_threads?: <<integer>> + analyzed_fields?: <<MlDataframeAnalysisAnalyzedFields>> | string[] + allow_lazy_start?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlExplainDataFrameAnalyticsResponse { + field_selection: <<MlDataframeAnalyticsFieldSelection>>[] + memory_estimation: <<MlDataframeAnalyticsMemoryEstimation>> +} + +---- + + +[discrete] +[[client.ml.flushJob]] +== `client.ml.flushJob()` + +Force buffered data to be processed. The flush jobs API is only applicable when sending data for analysis using the post data API. Depending on the content of the buffer, it might additionally calculate new results. Both flush and close operations are similar, however the flush is more efficient if you are expecting to send more data for analysis. When flushing, the job remains open and is available to continue analyzing data. A close operation additionally prunes and persists the model state to disk and the job must be opened again before analyzing further data. + +{ref}/ml-flush-job.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlFlushJobRequest, options?: TransportRequestOptions) => Promise<MlFlushJobResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlFlushJobRequest extends <<RequestBase>> { + job_id: <<Id>> + advance_time?: <<DateTime>> + calc_interim?: boolean + end?: <<DateTime>> + skip_time?: <<DateTime>> + start?: <<DateTime>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlFlushJobResponse { + flushed: boolean + last_finalized_bucket_end?: <<integer>> +} + +---- + + +[discrete] +[[client.ml.forecast]] +== `client.ml.forecast()` + +Predict future behavior of a time series. Forecasts are not supported for jobs that perform population analysis; an error occurs if you try to create a forecast for a job that has an `over_field_name` in its configuration. Forecasts predict future behavior based on historical data. + +{ref}/ml-forecast.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlForecastRequest, options?: TransportRequestOptions) => Promise<MlForecastResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlForecastRequest extends <<RequestBase>> { + job_id: <<Id>> + duration?: <<Duration>> + expires_in?: <<Duration>> + max_model_memory?: string +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlForecastResponse { + acknowledged: boolean + forecast_id: <<Id>> +} + +---- + + +[discrete] +[[client.ml.getBuckets]] +== `client.ml.getBuckets()` + +Get anomaly detection job results for buckets. The API presents a chronological view of the records, grouped by bucket.
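+
+A hedged sketch of fetching the most anomalous buckets (assuming a configured `client`; the job ID and score threshold are placeholders):
+
+[source,ts]
+----
+// Return up to ten buckets with an anomaly score of at least 75, highest first.
+const response = await client.ml.getBuckets({
+  job_id: 'my-anomaly-job',
+  anomaly_score: 75,
+  sort: 'anomaly_score',
+  desc: true,
+  size: 10
+})
+console.log(response.count, response.buckets)
+----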
+ +{ref}/ml-get-bucket.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlGetBucketsRequest, options?: TransportRequestOptions) => Promise<MlGetBucketsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlGetBucketsRequest extends <<RequestBase>> { + job_id: <<Id>> + timestamp?: <<DateTime>> + from?: <<integer>> + size?: <<integer>> + anomaly_score?: <<double>> + desc?: boolean + end?: <<DateTime>> + exclude_interim?: boolean + expand?: boolean + page?: <<MlPage>> + sort?: <<Field>> + start?: <<DateTime>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlGetBucketsResponse { + buckets: <<MlBucketSummary>>[] + count: <<long>> +} + +---- + + +[discrete] +[[client.ml.getCalendarEvents]] +== `client.ml.getCalendarEvents()` + +Get info about events in calendars. + +{ref}/ml-get-calendar-event.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlGetCalendarEventsRequest, options?: TransportRequestOptions) => Promise<MlGetCalendarEventsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlGetCalendarEventsRequest extends <<RequestBase>> { + calendar_id: <<Id>> + end?: <<DateTime>> + from?: <<integer>> + job_id?: <<Id>> + size?: <<integer>> + start?: <<DateTime>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlGetCalendarEventsResponse { + count: <<long>> + events: <<MlCalendarEvent>>[] +} + +---- + + +[discrete] +[[client.ml.getCalendars]] +== `client.ml.getCalendars()` + +Get calendar configuration info. + +{ref}/ml-get-calendar.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlGetCalendarsRequest, options?: TransportRequestOptions) => Promise<MlGetCalendarsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlGetCalendarsRequest extends <<RequestBase>> { + calendar_id?: <<Id>> + from?: <<integer>> + size?: <<integer>> + page?: <<MlPage>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlGetCalendarsResponse { + calendars: MlGetCalendarsCalendar[] + count: <<long>> +} + +---- + + +[discrete] +[[client.ml.getCategories]] +== `client.ml.getCategories()` + +Get anomaly detection job results for categories. + +{ref}/ml-get-category.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlGetCategoriesRequest, options?: TransportRequestOptions) => Promise<MlGetCategoriesResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlGetCategoriesRequest extends <<RequestBase>> { + job_id: <<Id>> + category_id?: <<CategoryId>> + from?: <<integer>> + partition_field_value?: string + size?: <<integer>> + page?: <<MlPage>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlGetCategoriesResponse { + categories: <<MlCategory>>[] + count: <<long>> +} + +---- + + +[discrete] +[[client.ml.getDataFrameAnalytics]] +== `client.ml.getDataFrameAnalytics()` + +Get data frame analytics job configuration info. You can get information for multiple data frame analytics jobs in a single API request by using a list of data frame analytics jobs or a wildcard expression. 
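+
+For example, listing jobs that match a wildcard pattern might look like this sketch (assuming a configured `client`; `'sales-*'` is a placeholder pattern):
+
+[source,ts]
+----
+const response = await client.ml.getDataFrameAnalytics({
+  id: 'sales-*',
+  allow_no_match: true,
+  size: 100
+})
+for (const job of response.data_frame_analytics) {
+  console.log(job.id)
+}
+----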
+ +{ref}/get-dfanalytics.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptions) => Promise<MlGetDataFrameAnalyticsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlGetDataFrameAnalyticsRequest extends <<RequestBase>> { + id?: <<Id>> + allow_no_match?: boolean + from?: <<integer>> + size?: <<integer>> + exclude_generated?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlGetDataFrameAnalyticsResponse { + count: <<integer>> + data_frame_analytics: <<MlDataframeAnalyticsSummary>>[] +} + +---- + + +[discrete] +[[client.ml.getDataFrameAnalyticsStats]] +== `client.ml.getDataFrameAnalyticsStats()` + +Get data frame analytics jobs usage info. + +{ref}/get-dfanalytics-stats.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptions) => Promise<MlGetDataFrameAnalyticsStatsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlGetDataFrameAnalyticsStatsRequest extends <<RequestBase>> { + id?: <<Id>> + allow_no_match?: boolean + from?: <<integer>> + size?: <<integer>> + verbose?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlGetDataFrameAnalyticsStatsResponse { + count: <<long>> + data_frame_analytics: <<MlDataframeAnalytics>>[] +} + +---- + + +[discrete] +[[client.ml.getDatafeedStats]] +== `client.ml.getDatafeedStats()` + +Get datafeeds usage info. You can get statistics for multiple datafeeds in a single API request by using a list of datafeeds or a wildcard expression. You can get statistics for all datafeeds by using `_all`, by specifying `*` as the `<feed_id>`, or by omitting the `<feed_id>`. If the datafeed is stopped, the only information you receive is the `datafeed_id` and the `state`. This API returns a maximum of 10,000 datafeeds. + +{ref}/ml-get-datafeed-stats.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlGetDatafeedStatsRequest, options?: TransportRequestOptions) => Promise<MlGetDatafeedStatsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlGetDatafeedStatsRequest extends <<RequestBase>> { + datafeed_id?: <<Ids>> + allow_no_match?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlGetDatafeedStatsResponse { + count: <<long>> + datafeeds: <<MlDatafeedStats>>[] +} + +---- + + +[discrete] +[[client.ml.getDatafeeds]] +== `client.ml.getDatafeeds()` + +Get datafeeds configuration info. You can get information for multiple datafeeds in a single API request by using a list of datafeeds or a wildcard expression. You can get information for all datafeeds by using `_all`, by specifying `*` as the `<feed_id>`, or by omitting the `<feed_id>`. This API returns a maximum of 10,000 datafeeds. 
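+
+A minimal sketch, assuming a configured `client`; omitting `datafeed_id` returns all datafeeds:
+
+[source,ts]
+----
+const response = await client.ml.getDatafeeds({ exclude_generated: true })
+console.log(`${response.count} datafeeds configured`)
+for (const datafeed of response.datafeeds) {
+  console.log(datafeed.datafeed_id, datafeed.indices)
+}
+----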
+ +{ref}/ml-get-datafeed.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlGetDatafeedsRequest, options?: TransportRequestOptions) => Promise<MlGetDatafeedsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlGetDatafeedsRequest extends <<RequestBase>> { + datafeed_id?: <<Ids>> + allow_no_match?: boolean + exclude_generated?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlGetDatafeedsResponse { + count: <<long>> + datafeeds: <<MlDatafeed>>[] +} + +---- + + +[discrete] +[[client.ml.getFilters]] +== `client.ml.getFilters()` + +Get filters. You can get a single filter or all filters. + +{ref}/ml-get-filter.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlGetFiltersRequest, options?: TransportRequestOptions) => Promise<MlGetFiltersResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlGetFiltersRequest extends <<RequestBase>> { + filter_id?: <<Ids>> + from?: <<integer>> + size?: <<integer>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlGetFiltersResponse { + count: <<long>> + filters: <<MlFilter>>[] +} + +---- + + +[discrete] +[[client.ml.getInfluencers]] +== `client.ml.getInfluencers()` + +Get anomaly detection job results for influencers. Influencers are the entities that have contributed to, or are to blame for, the anomalies. Influencer results are available only if an `influencer_field_name` is specified in the job configuration. + +{ref}/ml-get-influencer.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlGetInfluencersRequest, options?: TransportRequestOptions) => Promise<MlGetInfluencersResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlGetInfluencersRequest extends <<RequestBase>> { + job_id: <<Id>> + desc?: boolean + end?: <<DateTime>> + exclude_interim?: boolean + influencer_score?: <<double>> + from?: <<integer>> + size?: <<integer>> + sort?: <<Field>> + start?: <<DateTime>> + page?: <<MlPage>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlGetInfluencersResponse { + count: <<long>> + influencers: <<MlInfluencer>>[] +} + +---- + + +[discrete] +[[client.ml.getJobStats]] +== `client.ml.getJobStats()` + +Get anomaly detection jobs usage info. + +{ref}/ml-get-job-stats.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlGetJobStatsRequest, options?: TransportRequestOptions) => Promise<MlGetJobStatsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlGetJobStatsRequest extends <<RequestBase>> { + job_id?: <<Id>> + allow_no_match?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlGetJobStatsResponse { + count: <<long>> + jobs: <<MlJobStats>>[] +} + +---- + + +[discrete] +[[client.ml.getJobs]] +== `client.ml.getJobs()` + +Get anomaly detection jobs configuration info. You can get information for multiple anomaly detection jobs in a single API request by using a group name, a list of jobs, or a wildcard expression. You can get information for all anomaly detection jobs by using `_all`, by specifying `*` as the `<job_id>`, or by omitting the `<job_id>`. 
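+
+For example (a sketch, assuming a configured `client`):
+
+[source,ts]
+----
+// `_all` is equivalent to omitting `job_id` entirely.
+const response = await client.ml.getJobs({ job_id: '_all', allow_no_match: true })
+console.log(`${response.count} anomaly detection jobs configured`)
+----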
+ +{ref}/ml-get-job.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlGetJobsRequest, options?: TransportRequestOptions) => Promise<MlGetJobsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlGetJobsRequest extends <<RequestBase>> { + job_id?: <<Ids>> + allow_no_match?: boolean + exclude_generated?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlGetJobsResponse { + count: <<long>> + jobs: <<MlJob>>[] +} + +---- + + +[discrete] +[[client.ml.getMemoryStats]] +== `client.ml.getMemoryStats()` + +Get machine learning memory usage info. Get information about how machine learning jobs and trained models are using memory, on each node, both within the JVM heap, and natively, outside of the JVM. + +{ref}/get-ml-memory.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlGetMemoryStatsRequest, options?: TransportRequestOptions) => Promise<MlGetMemoryStatsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlGetMemoryStatsRequest extends <<RequestBase>> { + node_id?: <<Id>> + human?: boolean + master_timeout?: <<Duration>> + timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlGetMemoryStatsResponse { + _nodes: <<NodeStatistics>> + cluster_name: <<Name>> + nodes: Record<<<Id>>, MlGetMemoryStatsMemory> +} + +---- + + +[discrete] +[[client.ml.getModelSnapshotUpgradeStats]] +== `client.ml.getModelSnapshotUpgradeStats()` + +Get anomaly detection job model snapshot upgrade usage info. + +{ref}/ml-get-job-model-snapshot-upgrade-stats.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptions) => Promise<MlGetModelSnapshotUpgradeStatsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlGetModelSnapshotUpgradeStatsRequest extends <<RequestBase>> { + job_id: <<Id>> + snapshot_id: <<Id>> + allow_no_match?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlGetModelSnapshotUpgradeStatsResponse { + count: <<long>> + model_snapshot_upgrades: <<MlModelSnapshotUpgrade>>[] +} + +---- + + +[discrete] +[[client.ml.getModelSnapshots]] +== `client.ml.getModelSnapshots()` + +Get model snapshots info. + +{ref}/ml-get-snapshot.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlGetModelSnapshotsRequest, options?: TransportRequestOptions) => Promise<MlGetModelSnapshotsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlGetModelSnapshotsRequest extends <<RequestBase>> { + job_id: <<Id>> + snapshot_id?: <<Id>> + from?: <<integer>> + size?: <<integer>> + desc?: boolean + end?: <<DateTime>> + page?: <<MlPage>> + sort?: <<Field>> + start?: <<DateTime>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlGetModelSnapshotsResponse { + count: <<long>> + model_snapshots: <<MlModelSnapshot>>[] +} + +---- + + +[discrete] +[[client.ml.getOverallBuckets]] +== `client.ml.getOverallBuckets()` + +Get overall bucket results. Retrieves overall bucket results that summarize the bucket results of multiple anomaly detection jobs. The `overall_score` is calculated by combining the scores of all the buckets within the overall bucket span.
First, the maximum `anomaly_score` per anomaly detection job in the overall bucket is calculated. Then the `top_n` of those scores are averaged to result in the `overall_score`. This means that you can fine-tune the `overall_score` so that it is more or less sensitive to the number of jobs that detect an anomaly at the same time. For example, if you set `top_n` to `1`, the `overall_score` is the maximum bucket score in the overall bucket. Alternatively, if you set `top_n` to the number of jobs, the `overall_score` is high only when all jobs detect anomalies in that overall bucket. If you set the `bucket_span` parameter (to a value greater than its default), the `overall_score` is the maximum `overall_score` of the overall buckets that have a span equal to the jobs' largest bucket span. + +{ref}/ml-get-overall-buckets.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlGetOverallBucketsRequest, options?: TransportRequestOptions) => Promise<MlGetOverallBucketsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlGetOverallBucketsRequest extends <<RequestBase>> { + job_id: <<Id>> + allow_no_match?: boolean + bucket_span?: <<Duration>> + end?: <<DateTime>> + exclude_interim?: boolean + overall_score?: <<double>> | string + start?: <<DateTime>> + top_n?: <<integer>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlGetOverallBucketsResponse { + count: <<long>> + overall_buckets: <<MlOverallBucket>>[] +} + +---- + + +[discrete] +[[client.ml.getRecords]] +== `client.ml.getRecords()` + +Get anomaly records for an anomaly detection job. Records contain the detailed analytical results. They describe the anomalous activity that has been identified in the input data based on the detector configuration. There can be many anomaly records depending on the characteristics and size of the input data. In practice, there are often too many to be able to manually process them. The machine learning features therefore perform a sophisticated aggregation of the anomaly records into buckets. The number of record results depends on the number of anomalies found in each bucket, which relates to the number of time series being modeled and the number of detectors. + +{ref}/ml-get-record.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlGetRecordsRequest, options?: TransportRequestOptions) => Promise<MlGetRecordsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlGetRecordsRequest extends <<RequestBase>> { + job_id: <<Id>> + from?: <<integer>> + size?: <<integer>> + desc?: boolean + end?: <<DateTime>> + exclude_interim?: boolean + page?: <<MlPage>> + record_score?: <<double>> + sort?: <<Field>> + start?: <<DateTime>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlGetRecordsResponse { + count: <<long>> + records: <<MlAnomaly>>[] +} + +---- + + +[discrete] +[[client.ml.getTrainedModels]] +== `client.ml.getTrainedModels()` + +Get trained model configuration info. 
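+
+A usage sketch, assuming a configured `client`:
+
+[source,ts]
+----
+// List up to 50 trained model configurations without generated fields.
+const response = await client.ml.getTrainedModels({
+  allow_no_match: true,
+  exclude_generated: true,
+  size: 50
+})
+for (const model of response.trained_model_configs) {
+  console.log(model.model_id)
+}
+----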
+ +{ref}/get-trained-models.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlGetTrainedModelsRequest, options?: TransportRequestOptions) => Promise<MlGetTrainedModelsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlGetTrainedModelsRequest extends <<RequestBase>> { + model_id?: <<Ids>> + allow_no_match?: boolean + decompress_definition?: boolean + exclude_generated?: boolean + from?: <<integer>> + include?: <<MlInclude>> + size?: <<integer>> + tags?: string | string[] +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlGetTrainedModelsResponse { + count: <<integer>> + trained_model_configs: <<MlTrainedModelConfig>>[] +} + +---- + + +[discrete] +[[client.ml.getTrainedModelsStats]] +== `client.ml.getTrainedModelsStats()` + +Get trained models usage info. You can get usage information for multiple trained models in a single API request by using a list of model IDs or a wildcard expression. + +{ref}/get-trained-models-stats.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlGetTrainedModelsStatsRequest, options?: TransportRequestOptions) => Promise<MlGetTrainedModelsStatsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlGetTrainedModelsStatsRequest extends <<RequestBase>> { + model_id?: <<Ids>> + allow_no_match?: boolean + from?: <<integer>> + size?: <<integer>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlGetTrainedModelsStatsResponse { + count: <<integer>> + trained_model_stats: <<MlTrainedModelStats>>[] +} + +---- + + +[discrete] +[[client.ml.inferTrainedModel]] +== `client.ml.inferTrainedModel()` + +Evaluate a trained model. + +{ref}/infer-trained-model.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlInferTrainedModelRequest, options?: TransportRequestOptions) => Promise<MlInferTrainedModelResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlInferTrainedModelRequest extends <<RequestBase>> { + model_id: <<Id>> + timeout?: <<Duration>> + docs: Record<string, any>[] + inference_config?: <<MlInferenceConfigUpdateContainer>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlInferTrainedModelResponse { + inference_results: <<MlInferenceResponseResult>>[] +} + +---- + + +[discrete] +[[client.ml.info]] +== `client.ml.info()` + +Return ML defaults and limits. Returns defaults and limits used by machine learning. This endpoint is designed to be used by a user interface that needs to fully understand machine learning configurations where some options are not specified, meaning that the defaults should be used. This endpoint may be used to find out what those defaults are. It also provides information about the maximum size of machine learning jobs that could run in the current cluster configuration. 
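+
+The request takes no required parameters, so a sketch (assuming a configured `client`) is simply:
+
+[source,ts]
+----
+const info = await client.ml.info()
+console.log(info.limits)
+console.log(info.upgrade_mode)
+----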
+ +{ref}/get-ml-info.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlInfoRequest, options?: TransportRequestOptions) => Promise<MlInfoResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlInfoRequest extends <<RequestBase>> {} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlInfoResponse { + defaults: MlInfoDefaults + limits: MlInfoLimits + upgrade_mode: boolean + native_code: MlInfoNativeCode +} + +---- + + +[discrete] +[[client.ml.openJob]] +== `client.ml.openJob()` + +Open anomaly detection jobs. An anomaly detection job must be opened to be ready to receive and analyze data. It can be opened and closed multiple times throughout its lifecycle. When you open a new job, it starts with an empty model. When you open an existing job, the most recent model state is automatically loaded. The job is ready to resume its analysis from where it left off, once new data is received. + +{ref}/ml-open-job.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlOpenJobRequest, options?: TransportRequestOptions) => Promise<MlOpenJobResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlOpenJobRequest extends <<RequestBase>> { + job_id: <<Id>> + timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlOpenJobResponse { + opened: boolean + node: <<NodeId>> +} + +---- + + +[discrete] +[[client.ml.postCalendarEvents]] +== `client.ml.postCalendarEvents()` + +Add scheduled events to the calendar. + +{ref}/ml-post-calendar-event.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlPostCalendarEventsRequest, options?: TransportRequestOptions) => Promise<MlPostCalendarEventsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlPostCalendarEventsRequest extends <<RequestBase>> { + calendar_id: <<Id>> + events: <<MlCalendarEvent>>[] +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlPostCalendarEventsResponse { + events: <<MlCalendarEvent>>[] +} + +---- + + +[discrete] +[[client.ml.postData]] +== `client.ml.postData()` + +Send data to an anomaly detection job for analysis. IMPORTANT: For each job, data can be accepted from only a single connection at a time. It is not currently possible to post data to multiple jobs using wildcards or a list. 
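+
+A sketch of posting two records to an open job (assuming a configured `client`; the job ID and document fields are placeholders):
+
+[source,ts]
+----
+const response = await client.ml.postData({
+  job_id: 'my-anomaly-job',
+  data: [
+    { timestamp: 1712102400000, bytes: 512 },
+    { timestamp: 1712102460000, bytes: 2048 }
+  ]
+})
+console.log(response.processed_record_count)
+----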
+ +{ref}/ml-post-data.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlPostDataRequest, options?: TransportRequestOptions) => Promise<MlPostDataResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlPostDataRequest<TData = unknown> extends <<RequestBase>> { + job_id: <<Id>> + reset_end?: <<DateTime>> + reset_start?: <<DateTime>> + data?: TData[] +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlPostDataResponse { + bucket_count: <<long>> + earliest_record_timestamp: <<long>> + empty_bucket_count: <<long>> + input_bytes: <<long>> + input_field_count: <<long>> + input_record_count: <<long>> + invalid_date_count: <<long>> + job_id: <<Id>> + last_data_time: <<integer>> + latest_record_timestamp: <<long>> + missing_field_count: <<long>> + out_of_order_timestamp_count: <<long>> + processed_field_count: <<long>> + processed_record_count: <<long>> + sparse_bucket_count: <<long>> +} + +---- + + +[discrete] +[[client.ml.previewDataFrameAnalytics]] +== `client.ml.previewDataFrameAnalytics()` + +Preview features used by data frame analytics. Previews the extracted features used by a data frame analytics config. + +{ref}/preview-dfanalytics.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptions) => Promise<MlPreviewDataFrameAnalyticsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlPreviewDataFrameAnalyticsRequest extends <<RequestBase>> { + id?: <<Id>> + config?: MlPreviewDataFrameAnalyticsDataframePreviewConfig +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlPreviewDataFrameAnalyticsResponse { + feature_values: Record<<<Field>>, string>[] +} + +---- + + +[discrete] +[[client.ml.previewDatafeed]] +== `client.ml.previewDatafeed()` + +Preview a datafeed. This API returns the first "page" of search results from a datafeed. You can preview an existing datafeed or provide configuration details for a datafeed and anomaly detection job in the API. The preview shows the structure of the data that will be passed to the anomaly detection engine. IMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that called the API. However, when the datafeed starts it uses the roles of the last user that created or updated the datafeed. To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials. You can also use secondary authorization headers to supply the credentials. + +{ref}/ml-preview-datafeed.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlPreviewDatafeedRequest, options?: TransportRequestOptions) => Promise<MlPreviewDatafeedResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlPreviewDatafeedRequest extends <<RequestBase>> { + datafeed_id?: <<Id>> + start?: <<DateTime>> + end?: <<DateTime>> + datafeed_config?: <<MlDatafeedConfig>> + job_config?: <<MlJobConfig>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type MlPreviewDatafeedResponse<TDocument = unknown> = TDocument[] + +---- + + +[discrete] +[[client.ml.putCalendar]] +== `client.ml.putCalendar()` + +Create a calendar. 
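+
+For example (a sketch, assuming a configured `client`; the calendar ID and description are placeholders):
+
+[source,ts]
+----
+const calendar = await client.ml.putCalendar({
+  calendar_id: 'planned-outages',
+  description: 'Known maintenance windows to exclude from anomaly detection'
+})
+console.log(calendar.calendar_id)
+----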
+ +{ref}/ml-put-calendar.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlPutCalendarRequest, options?: TransportRequestOptions) => Promise<MlPutCalendarResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlPutCalendarRequest extends <<RequestBase>> { + calendar_id: <<Id>> + job_ids?: <<Id>>[] + description?: string +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlPutCalendarResponse { + calendar_id: <<Id>> + description?: string + job_ids: <<Ids>> +} + +---- + + +[discrete] +[[client.ml.putCalendarJob]] +== `client.ml.putCalendarJob()` + +Add anomaly detection job to calendar. + +{ref}/ml-put-calendar-job.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlPutCalendarJobRequest, options?: TransportRequestOptions) => Promise<MlPutCalendarJobResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlPutCalendarJobRequest extends <<RequestBase>> { + calendar_id: <<Id>> + job_id: <<Ids>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlPutCalendarJobResponse { + calendar_id: <<Id>> + description?: string + job_ids: <<Ids>> +} + +---- + + +[discrete] +[[client.ml.putDataFrameAnalytics]] +== `client.ml.putDataFrameAnalytics()` + +Create a data frame analytics job. This API creates a data frame analytics job that performs an analysis on the source indices and stores the outcome in a destination index. + +{ref}/put-dfanalytics.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptions) => Promise<MlPutDataFrameAnalyticsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlPutDataFrameAnalyticsRequest extends <<RequestBase>> { + id: <<Id>> + allow_lazy_start?: boolean + analysis: <<MlDataframeAnalysisContainer>> + analyzed_fields?: <<MlDataframeAnalysisAnalyzedFields>> | string[] + description?: string + dest: <<MlDataframeAnalyticsDestination>> + max_num_threads?: <<integer>> + model_memory_limit?: string + source: <<MlDataframeAnalyticsSource>> + headers?: <<HttpHeaders>> + version?: <<VersionString>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlPutDataFrameAnalyticsResponse { + authorization?: <<MlDataframeAnalyticsAuthorization>> + allow_lazy_start: boolean + analysis: <<MlDataframeAnalysisContainer>> + analyzed_fields?: <<MlDataframeAnalysisAnalyzedFields>> | string[] + create_time: <<EpochTime>><<<UnitMillis>>> + description?: string + dest: <<MlDataframeAnalyticsDestination>> + id: <<Id>> + max_num_threads: <<integer>> + model_memory_limit: string + source: <<MlDataframeAnalyticsSource>> + version: <<VersionString>> +} + +---- + + +[discrete] +[[client.ml.putDatafeed]] +== `client.ml.putDatafeed()` + +Create a datafeed. Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job. You can associate only one datafeed with each anomaly detection job. The datafeed contains a query that runs at a defined interval (`frequency`). If you are concerned about delayed data, you can add a delay (`query_delay`) at each interval. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. 
If you provide secondary authorization headers, those credentials are used instead. You must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed directly to the `.ml-config` index. Do not give users `write` privileges on the `.ml-config` index. + +{ref}/ml-put-datafeed.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlPutDatafeedRequest, options?: TransportRequestOptions) => Promise<MlPutDatafeedResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlPutDatafeedRequest extends <<RequestBase>> { + datafeed_id: <<Id>> + allow_no_indices?: boolean + expand_wildcards?: <<ExpandWildcards>> + ignore_throttled?: boolean + ignore_unavailable?: boolean + aggregations?: Record<string, <<AggregationsAggregationContainer>>> + chunking_config?: <<MlChunkingConfig>> + delayed_data_check_config?: <<MlDelayedDataCheckConfig>> + frequency?: <<Duration>> + indices?: <<Indices>> + pass:[/**] @alias indices */ + indexes?: <<Indices>> + indices_options?: <<IndicesOptions>> + job_id?: <<Id>> + max_empty_searches?: <<integer>> + query?: <<QueryDslQueryContainer>> + query_delay?: <<Duration>> + runtime_mappings?: <<MappingRuntimeFields>> + script_fields?: Record<string, <<ScriptField>>> + scroll_size?: <<integer>> + headers?: <<HttpHeaders>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlPutDatafeedResponse { + aggregations?: Record<string, <<AggregationsAggregationContainer>>> + authorization?: <<MlDatafeedAuthorization>> + chunking_config: <<MlChunkingConfig>> + delayed_data_check_config?: <<MlDelayedDataCheckConfig>> + datafeed_id: <<Id>> + frequency?: <<Duration>> + indices: string[] + job_id: <<Id>> + indices_options?: <<IndicesOptions>> + max_empty_searches?: <<integer>> + query: <<QueryDslQueryContainer>> + query_delay: <<Duration>> + runtime_mappings?: <<MappingRuntimeFields>> + script_fields?: Record<string, <<ScriptField>>> + scroll_size: <<integer>> +} + +---- + + +[discrete] +[[client.ml.putFilter]] +== `client.ml.putFilter()` + +Create a filter. A filter contains a list of strings. It can be used by one or more anomaly detection jobs. Specifically, filters are referenced in the `custom_rules` property of detector configuration objects. + +{ref}/ml-put-filter.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlPutFilterRequest, options?: TransportRequestOptions) => Promise<MlPutFilterResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlPutFilterRequest extends <<RequestBase>> { + filter_id: <<Id>> + description?: string + items?: string[] +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlPutFilterResponse { + description: string + filter_id: <<Id>> + items: string[] +} + +---- + + +[discrete] +[[client.ml.putJob]] +== `client.ml.putJob()` + +Create an anomaly detection job. If you include a `datafeed_config`, you must have read index privileges on the source index. 
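+
+A minimal sketch of creating a metric job (assuming a configured `client`; the job ID, field names and bucket span are placeholders):
+
+[source,ts]
+----
+const job = await client.ml.putJob({
+  job_id: 'my-anomaly-job',
+  analysis_config: {
+    bucket_span: '15m',
+    detectors: [{ function: 'mean', field_name: 'bytes' }]
+  },
+  data_description: { time_field: 'timestamp' }
+})
+console.log(job.job_id, job.create_time)
+----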
+ +{ref}/ml-put-job.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlPutJobRequest, options?: TransportRequestOptions) => Promise<MlPutJobResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlPutJobRequest extends <<RequestBase>> { + job_id: <<Id>> + allow_lazy_open?: boolean + analysis_config: <<MlAnalysisConfig>> + analysis_limits?: <<MlAnalysisLimits>> + background_persist_interval?: <<Duration>> + custom_settings?: <<MlCustomSettings>> + daily_model_snapshot_retention_after_days?: <<long>> + data_description: <<MlDataDescription>> + datafeed_config?: <<MlDatafeedConfig>> + description?: string + groups?: string[] + model_plot_config?: <<MlModelPlotConfig>> + model_snapshot_retention_days?: <<long>> + renormalization_window_days?: <<long>> + results_index_name?: <<IndexName>> + results_retention_days?: <<long>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlPutJobResponse { + allow_lazy_open: boolean + analysis_config: <<MlAnalysisConfigRead>> + analysis_limits: <<MlAnalysisLimits>> + background_persist_interval?: <<Duration>> + create_time: <<DateTime>> + custom_settings?: <<MlCustomSettings>> + daily_model_snapshot_retention_after_days: <<long>> + data_description: <<MlDataDescription>> + datafeed_config?: <<MlDatafeed>> + description?: string + groups?: string[] + job_id: <<Id>> + job_type: string + job_version: string + model_plot_config?: <<MlModelPlotConfig>> + model_snapshot_id?: <<Id>> + model_snapshot_retention_days: <<long>> + renormalization_window_days?: <<long>> + results_index_name: string + results_retention_days?: <<long>> +} + +---- + + +[discrete] +[[client.ml.putTrainedModel]] +== `client.ml.putTrainedModel()` + +Create a trained model. Enables you to supply a trained model that is not created by data frame analytics. + +{ref}/put-trained-models.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlPutTrainedModelRequest, options?: TransportRequestOptions) => Promise<MlPutTrainedModelResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlPutTrainedModelRequest extends <<RequestBase>> { + model_id: <<Id>> + defer_definition_decompression?: boolean + wait_for_completion?: boolean + compressed_definition?: string + definition?: MlPutTrainedModelDefinition + description?: string + inference_config?: <<MlInferenceConfigCreateContainer>> + input?: MlPutTrainedModelInput + metadata?: any + model_type?: <<MlTrainedModelType>> + model_size_bytes?: <<long>> + platform_architecture?: string + tags?: string[] + prefix_strings?: <<MlTrainedModelPrefixStrings>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type MlPutTrainedModelResponse = <<MlTrainedModelConfig>> + +---- + + +[discrete] +[[client.ml.putTrainedModelAlias]] +== `client.ml.putTrainedModelAlias()` + +Create or update a trained model alias. A trained model alias is a logical name used to reference a single trained model. You can use aliases instead of trained model identifiers to make it easier to reference your models. For example, you can use aliases in inference aggregations and processors. An alias must be unique and refer to only a single trained model. However, you can have multiple aliases for each trained model. If you use this API to update an alias such that it references a different trained model ID and the model uses a different type of data frame analytics, an error occurs.
For example, this situation occurs if you have a trained model for regression analysis and a trained model for classification analysis; you cannot reassign an alias from one type of trained model to another. If you use this API to update an alias and there are very few input fields in common between the old and new trained models for the model alias, the API returns a warning. + +{ref}/put-trained-models-aliases.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlPutTrainedModelAliasRequest, options?: TransportRequestOptions) => Promise<MlPutTrainedModelAliasResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlPutTrainedModelAliasRequest extends <<RequestBase>> { + model_alias: <<Name>> + model_id: <<Id>> + reassign?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type MlPutTrainedModelAliasResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.ml.putTrainedModelDefinitionPart]] +== `client.ml.putTrainedModelDefinitionPart()` + +Create part of a trained model definition. + +{ref}/put-trained-model-definition-part.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptions) => Promise<MlPutTrainedModelDefinitionPartResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlPutTrainedModelDefinitionPartRequest extends <<RequestBase>> { + model_id: <<Id>> + part: <<integer>> + definition: string + total_definition_length: <<long>> + total_parts: <<integer>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type MlPutTrainedModelDefinitionPartResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.ml.putTrainedModelVocabulary]] +== `client.ml.putTrainedModelVocabulary()` + +Create a trained model vocabulary. This API is supported only for natural language processing (NLP) models. The vocabulary is stored in the index as described in `inference_config.*.vocabulary` of the trained model definition. + +{ref}/put-trained-model-vocabulary.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptions) => Promise<MlPutTrainedModelVocabularyResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlPutTrainedModelVocabularyRequest extends <<RequestBase>> { + model_id: <<Id>> + vocabulary: string[] + merges?: string[] + scores?: <<double>>[] +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type MlPutTrainedModelVocabularyResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.ml.resetJob]] +== `client.ml.resetJob()` + +Reset an anomaly detection job. All model state and results are deleted. The job is ready to start over as if it had just been created. It is not currently possible to reset multiple jobs using wildcards or a comma separated list. 
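+
+A sketch, assuming a configured `client` and a placeholder job ID:
+
+[source,ts]
+----
+// The job must be closed before it can be reset.
+const response = await client.ml.resetJob({
+  job_id: 'my-anomaly-job',
+  wait_for_completion: true
+})
+console.log(response.acknowledged)
+----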
+ +{ref}/ml-reset-job.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlResetJobRequest, options?: TransportRequestOptions) => Promise<MlResetJobResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlResetJobRequest extends <<RequestBase>> { + job_id: <<Id>> + wait_for_completion?: boolean + delete_user_annotations?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type MlResetJobResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.ml.revertModelSnapshot]] +== `client.ml.revertModelSnapshot()` + +Revert to a snapshot. The machine learning features react quickly to anomalous input, learning new behaviors in data. Highly anomalous input increases the variance in the models whilst the system learns whether this is a new step-change in behavior or a one-off event. In the case where this anomalous input is known to be a one-off, then it might be appropriate to reset the model state to a time before this event. For example, you might consider reverting to a saved snapshot after Black Friday or a critical system failure. + +{ref}/ml-revert-snapshot.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlRevertModelSnapshotRequest, options?: TransportRequestOptions) => Promise<MlRevertModelSnapshotResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlRevertModelSnapshotRequest extends <<RequestBase>> { + job_id: <<Id>> + snapshot_id: <<Id>> + delete_intervening_results?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlRevertModelSnapshotResponse { + model: <<MlModelSnapshot>> +} + +---- + + +[discrete] +[[client.ml.setUpgradeMode]] +== `client.ml.setUpgradeMode()` + +Set upgrade_mode for ML indices. Sets a cluster wide upgrade_mode setting that prepares machine learning indices for an upgrade. When upgrading your cluster, in some circumstances you must restart your nodes and reindex your machine learning indices. In those circumstances, there must be no machine learning jobs running. You can close the machine learning jobs, do the upgrade, then open all the jobs again. Alternatively, you can use this API to temporarily halt tasks associated with the jobs and datafeeds and prevent new jobs from opening. You can also use this API during upgrades that do not require you to reindex your machine learning indices, though stopping jobs is not a requirement in that case. You can see the current value for the upgrade_mode setting by using the get machine learning info API. + +{ref}/ml-set-upgrade-mode.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlSetUpgradeModeRequest, options?: TransportRequestOptions) => Promise<MlSetUpgradeModeResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlSetUpgradeModeRequest extends <<RequestBase>> { + enabled?: boolean + timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type MlSetUpgradeModeResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.ml.startDataFrameAnalytics]] +== `client.ml.startDataFrameAnalytics()` + +Start a data frame analytics job. A data frame analytics job can be started and stopped multiple times throughout its lifecycle. If the destination index does not exist, it is created automatically the first time you start the data frame analytics job. 
The `index.number_of_shards` and `index.number_of_replicas` settings for the destination index are copied from the source index. If there are multiple source indices, the destination index copies the highest setting values. The mappings for the destination index are also copied from the source indices. If there are any mapping conflicts, the job fails to start. If the destination index exists, it is used as is. You can therefore set up the destination index in advance with custom settings and mappings. + +{ref}/start-dfanalytics.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptions) => Promise<MlStartDataFrameAnalyticsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlStartDataFrameAnalyticsRequest extends <<RequestBase>> { + id: <<Id>> + timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlStartDataFrameAnalyticsResponse { + acknowledged: boolean + node: <<NodeId>> +} + +---- + + +[discrete] +[[client.ml.startDatafeed]] +== `client.ml.startDatafeed()` + +Start datafeeds. A datafeed must be started in order to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle. Before you can start a datafeed, the anomaly detection job must be open. Otherwise, an error occurs. If you restart a stopped datafeed, it continues processing input data from the next millisecond after it was stopped. If new data was indexed for that exact millisecond between stopping and starting, it will be ignored. When Elasticsearch security features are enabled, your datafeed remembers which roles the last user to create or update it had at the time of creation or update and runs the query using those same roles. If you provided secondary authorization headers when you created or updated the datafeed, those credentials are used instead. + +{ref}/ml-start-datafeed.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlStartDatafeedRequest, options?: TransportRequestOptions) => Promise<MlStartDatafeedResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlStartDatafeedRequest extends <<RequestBase>> { + datafeed_id: <<Id>> + end?: <<DateTime>> + start?: <<DateTime>> + timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlStartDatafeedResponse { + node: <<NodeIds>> + started: boolean +} + +---- + + +[discrete] +[[client.ml.startTrainedModelDeployment]] +== `client.ml.startTrainedModelDeployment()` + +Start a trained model deployment. It allocates the model to every machine learning node. 
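+
+One possible call, sketched with placeholder values (assuming a configured `client`):
+
+[source,ts]
+----
+const response = await client.ml.startTrainedModelDeployment({
+  model_id: 'my-nlp-model',
+  wait_for: 'started',
+  number_of_allocations: 1,
+  threads_per_allocation: 1
+})
+console.log(response.assignment)
+----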
+ +{ref}/start-trained-model-deployment.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptions) => Promise<MlStartTrainedModelDeploymentResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlStartTrainedModelDeploymentRequest extends <<RequestBase>> { + model_id: <<Id>> + cache_size?: <<ByteSize>> + deployment_id?: string + number_of_allocations?: <<integer>> + priority?: <<MlTrainingPriority>> + queue_capacity?: <<integer>> + threads_per_allocation?: <<integer>> + timeout?: <<Duration>> + wait_for?: <<MlDeploymentAllocationState>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlStartTrainedModelDeploymentResponse { + assignment: <<MlTrainedModelAssignment>> +} + +---- + + +[discrete] +[[client.ml.stopDataFrameAnalytics]] +== `client.ml.stopDataFrameAnalytics()` + +Stop data frame analytics jobs. A data frame analytics job can be started and stopped multiple times throughout its lifecycle. + +{ref}/stop-dfanalytics.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptions) => Promise<MlStopDataFrameAnalyticsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlStopDataFrameAnalyticsRequest extends <<RequestBase>> { + id: <<Id>> + allow_no_match?: boolean + force?: boolean + timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlStopDataFrameAnalyticsResponse { + stopped: boolean +} + +---- + + +[discrete] +[[client.ml.stopDatafeed]] +== `client.ml.stopDatafeed()` + +Stop datafeeds. A datafeed that is stopped ceases to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle. + +{ref}/ml-stop-datafeed.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlStopDatafeedRequest, options?: TransportRequestOptions) => Promise<MlStopDatafeedResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlStopDatafeedRequest extends <<RequestBase>> { + datafeed_id: <<Id>> + allow_no_match?: boolean + force?: boolean + timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlStopDatafeedResponse { + stopped: boolean +} + +---- + + +[discrete] +[[client.ml.stopTrainedModelDeployment]] +== `client.ml.stopTrainedModelDeployment()` + +Stop a trained model deployment. + +{ref}/stop-trained-model-deployment.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptions) => Promise<MlStopTrainedModelDeploymentResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlStopTrainedModelDeploymentRequest extends <<RequestBase>> { + model_id: <<Id>> + allow_no_match?: boolean + force?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlStopTrainedModelDeploymentResponse { + stopped: boolean +} + +---- + + +[discrete] +[[client.ml.updateDataFrameAnalytics]] +== `client.ml.updateDataFrameAnalytics()` + +Update a data frame analytics job. 
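+
+A small, illustrative sketch (assuming an already-instantiated `client` and a placeholder job ID) that raises the memory limit of an existing job:
+
+[source,ts]
+----
+const updated = await client.ml.updateDataFrameAnalytics({
+  id: 'my-dfa-job', // placeholder job ID
+  model_memory_limit: '256mb',
+  allow_lazy_start: true
+})
+console.log(updated.model_memory_limit)
+----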
+ +{ref}/update-dfanalytics.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptions) => Promise<MlUpdateDataFrameAnalyticsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlUpdateDataFrameAnalyticsRequest extends <<RequestBase>> { + id: <<Id>> + description?: string + model_memory_limit?: string + max_num_threads?: <<integer>> + allow_lazy_start?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlUpdateDataFrameAnalyticsResponse { + authorization?: <<MlDataframeAnalyticsAuthorization>> + allow_lazy_start: boolean + analysis: <<MlDataframeAnalysisContainer>> + analyzed_fields?: <<MlDataframeAnalysisAnalyzedFields>> | string[] + create_time: <<long>> + description?: string + dest: <<MlDataframeAnalyticsDestination>> + id: <<Id>> + max_num_threads: <<integer>> + model_memory_limit: string + source: <<MlDataframeAnalyticsSource>> + version: <<VersionString>> +} + +---- + + +[discrete] +[[client.ml.updateDatafeed]] +== `client.ml.updateDatafeed()` + +Update a datafeed. You must stop and start the datafeed for the changes to be applied. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who updated it had at the time of the update and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. + +{ref}/ml-update-datafeed.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlUpdateDatafeedRequest, options?: TransportRequestOptions) => Promise<MlUpdateDatafeedResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlUpdateDatafeedRequest extends <<RequestBase>> { + datafeed_id: <<Id>> + allow_no_indices?: boolean + expand_wildcards?: <<ExpandWildcards>> + ignore_throttled?: boolean + ignore_unavailable?: boolean + aggregations?: Record<string, <<AggregationsAggregationContainer>>> + chunking_config?: <<MlChunkingConfig>> + delayed_data_check_config?: <<MlDelayedDataCheckConfig>> + frequency?: <<Duration>> + indices?: string[] + pass:[/**] @alias indices */ + indexes?: string[] + indices_options?: <<IndicesOptions>> + job_id?: <<Id>> + max_empty_searches?: <<integer>> + query?: <<QueryDslQueryContainer>> + query_delay?: <<Duration>> + runtime_mappings?: <<MappingRuntimeFields>> + script_fields?: Record<string, <<ScriptField>>> + scroll_size?: <<integer>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlUpdateDatafeedResponse { + authorization?: <<MlDatafeedAuthorization>> + aggregations?: Record<string, <<AggregationsAggregationContainer>>> + chunking_config: <<MlChunkingConfig>> + delayed_data_check_config?: <<MlDelayedDataCheckConfig>> + datafeed_id: <<Id>> + frequency?: <<Duration>> + indices: string[] + indices_options?: <<IndicesOptions>> + job_id: <<Id>> + max_empty_searches?: <<integer>> + query: <<QueryDslQueryContainer>> + query_delay: <<Duration>> + runtime_mappings?: <<MappingRuntimeFields>> + script_fields?: Record<string, <<ScriptField>>> + scroll_size: <<integer>> +} + +---- + + +[discrete] +[[client.ml.updateFilter]] +== `client.ml.updateFilter()` + +Update a filter. Updates the description of a filter, adds items, or removes items from the list. 
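+
+For example, a hedged sketch (placeholder filter ID and items, `client` assumed to be already configured) that adds and removes items in a single call:
+
+[source,ts]
+----
+const filter = await client.ml.updateFilter({
+  filter_id: 'safe_domains', // placeholder filter ID
+  description: 'Allowed domains, reviewed quarterly',
+  add_items: ['*.example.org'],
+  remove_items: ['old.example.org']
+})
+console.log(filter.items) // the full item list after the update
+----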
+ +{ref}/ml-update-filter.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlUpdateFilterRequest, options?: TransportRequestOptions) => Promise<MlUpdateFilterResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlUpdateFilterRequest extends <<RequestBase>> { + filter_id: <<Id>> + add_items?: string[] + description?: string + remove_items?: string[] +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlUpdateFilterResponse { + description: string + filter_id: <<Id>> + items: string[] +} + +---- + + +[discrete] +[[client.ml.updateJob]] +== `client.ml.updateJob()` + +Update an anomaly detection job. Updates certain properties of an anomaly detection job. + +{ref}/ml-update-job.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlUpdateJobRequest, options?: TransportRequestOptions) => Promise<MlUpdateJobResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlUpdateJobRequest extends <<RequestBase>> { + job_id: <<Id>> + allow_lazy_open?: boolean + analysis_limits?: <<MlAnalysisMemoryLimit>> + background_persist_interval?: <<Duration>> + custom_settings?: Record<string, any> + categorization_filters?: string[] + description?: string + model_plot_config?: <<MlModelPlotConfig>> + model_prune_window?: <<Duration>> + daily_model_snapshot_retention_after_days?: <<long>> + model_snapshot_retention_days?: <<long>> + renormalization_window_days?: <<long>> + results_retention_days?: <<long>> + groups?: string[] + detectors?: <<MlDetector>>[] + per_partition_categorization?: <<MlPerPartitionCategorization>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlUpdateJobResponse { + allow_lazy_open: boolean + analysis_config: <<MlAnalysisConfigRead>> + analysis_limits: <<MlAnalysisLimits>> + background_persist_interval?: <<Duration>> + create_time: <<EpochTime>><<<UnitMillis>>> + finished_time?: <<EpochTime>><<<UnitMillis>>> + custom_settings?: Record<string, string> + daily_model_snapshot_retention_after_days: <<long>> + data_description: <<MlDataDescription>> + datafeed_config?: <<MlDatafeed>> + description?: string + groups?: string[] + job_id: <<Id>> + job_type: string + job_version: <<VersionString>> + model_plot_config?: <<MlModelPlotConfig>> + model_snapshot_id?: <<Id>> + model_snapshot_retention_days: <<long>> + renormalization_window_days?: <<long>> + results_index_name: <<IndexName>> + results_retention_days?: <<long>> +} + +---- + + +[discrete] +[[client.ml.updateModelSnapshot]] +== `client.ml.updateModelSnapshot()` + +Update a snapshot. Updates certain properties of a snapshot. + +{ref}/ml-update-snapshot.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlUpdateModelSnapshotRequest, options?: TransportRequestOptions) => Promise<MlUpdateModelSnapshotResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlUpdateModelSnapshotRequest extends <<RequestBase>> { + job_id: <<Id>> + snapshot_id: <<Id>> + description?: string + retain?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlUpdateModelSnapshotResponse { + acknowledged: boolean + model: <<MlModelSnapshot>> +} + +---- + + +[discrete] +[[client.ml.updateTrainedModelDeployment]] +== `client.ml.updateTrainedModelDeployment()` + +Update a trained model deployment. 
+ +{ref}/update-trained-model-deployment.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptions) => Promise<MlUpdateTrainedModelDeploymentResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlUpdateTrainedModelDeploymentRequest extends <<RequestBase>> { + model_id: <<Id>> + number_of_allocations?: <<integer>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlUpdateTrainedModelDeploymentResponse { + assignment: <<MlTrainedModelAssignment>> +} + +---- + + +[discrete] +[[client.ml.upgradeJobSnapshot]] +== `client.ml.upgradeJobSnapshot()` + +Upgrade a snapshot. Upgrades an anomaly detection model snapshot to the latest major version. Over time, older snapshot formats are deprecated and removed. Anomaly detection jobs support only snapshots that are from the current or previous major version. This API provides a means to upgrade a snapshot to the current major version. This aids in preparing the cluster for an upgrade to the next major version. Only one snapshot per anomaly detection job can be upgraded at a time and the upgraded snapshot cannot be the current snapshot of the anomaly detection job. + +{ref}/ml-upgrade-job-model-snapshot.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MlUpgradeJobSnapshotRequest, options?: TransportRequestOptions) => Promise<MlUpgradeJobSnapshotResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MlUpgradeJobSnapshotRequest extends <<RequestBase>> { + job_id: <<Id>> + snapshot_id: <<Id>> + wait_for_completion?: boolean + timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MlUpgradeJobSnapshotResponse { + node: <<NodeId>> + completed: boolean +} + +---- + + diff --git a/docs/reference/monitoring.asciidoc b/docs/reference/monitoring.asciidoc new file mode 100644 index 000000000..1fa760822 --- /dev/null +++ b/docs/reference/monitoring.asciidoc @@ -0,0 +1,82 @@ +[[reference-monitoring]] +== client.monitoring + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.monitoring.bulk]] +== `client.monitoring.bulk()` + +Used by the monitoring features to send monitoring data. 
+ +{ref}/monitor-elasticsearch-cluster.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MonitoringBulkRequest, options?: TransportRequestOptions) => Promise<MonitoringBulkResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MonitoringBulkRequest<TDocument = unknown, TPartialDocument = unknown> extends <<RequestBase>> { + type?: string + system_id: string + system_api_version: string + interval: <<Duration>> + operations?: (<<BulkOperationContainer>> | <<BulkUpdateAction>><TDocument, TPartialDocument> | TDocument)[] +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MonitoringBulkResponse { + error?: <<ErrorCause>> + errors: boolean + ignored: boolean + took: <<long>> +} + +---- + + diff --git a/docs/reference/msearch.asciidoc b/docs/reference/msearch.asciidoc new file mode 100644 index 000000000..027fb192e --- /dev/null +++ b/docs/reference/msearch.asciidoc @@ -0,0 +1,87 @@ +[[reference-msearch]] +== client.msearch + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.msearch]] +== `client.msearch()` + +Run multiple searches. The format of the request is similar to the bulk API format and makes use of the newline delimited JSON (NDJSON) format. The structure is as follows: ``` header\n body\n header\n body\n ``` This structure is specifically optimized to reduce parsing if a specific search ends up redirected to another node. IMPORTANT: The final line of data must end with a newline character `\n`. Each newline character may be preceded by a carriage return `\r`. When sending requests to this endpoint the `Content-Type` header should be set to `application/x-ndjson`. 
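+
+With this client you pass the header/body pairs as a flat `searches` array and the NDJSON serialization and `Content-Type` handling are taken care of for you. An illustrative sketch (index names and queries are placeholders, `client` assumed to exist):
+
+[source,ts]
+----
+const { took, responses } = await client.msearch({
+  searches: [
+    { index: 'my-index' },                            // header for search 1
+    { query: { match: { title: 'elasticsearch' } } }, // body for search 1
+    { index: 'my-other-index' },                      // header for search 2
+    { query: { match_all: {} } }                      // body for search 2
+  ]
+})
+console.log(took, responses.length) // one response entry per search
+----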
+ +{ref}/search-multi-search.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MsearchRequest, options?: TransportRequestOptions) => Promise<MsearchResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MsearchRequest extends <<RequestBase>> { + index?: <<Indices>> + allow_no_indices?: boolean + ccs_minimize_roundtrips?: boolean + expand_wildcards?: <<ExpandWildcards>> + ignore_throttled?: boolean + ignore_unavailable?: boolean + include_named_queries_score?: boolean + max_concurrent_searches?: <<long>> + max_concurrent_shard_requests?: <<long>> + pre_filter_shard_size?: <<long>> + rest_total_hits_as_int?: boolean + routing?: <<Routing>> + search_type?: <<SearchType>> + typed_keys?: boolean + searches?: <<MsearchRequestItem>>[] +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type MsearchResponse<TDocument = unknown, TAggregations = Record<<<AggregateName>>, <<AggregationsAggregate>>>> = <<MsearchMultiSearchResult>><TDocument, TAggregations> + +---- + + diff --git a/docs/reference/msearch_template.asciidoc b/docs/reference/msearch_template.asciidoc new file mode 100644 index 000000000..f6e06b63f --- /dev/null +++ b/docs/reference/msearch_template.asciidoc @@ -0,0 +1,79 @@ +[[reference-msearch_template]] +== client.msearchTemplate + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.msearchTemplate]] +== `client.msearchTemplate()` + +Run multiple templated searches. 
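+
+An illustrative sketch (the stored template ID and parameters are placeholders) that runs one templated search per header/body pair in `search_templates`:
+
+[source,ts]
+----
+const { responses } = await client.msearchTemplate({
+  search_templates: [
+    { index: 'my-index' },                                           // header
+    { id: 'my-search-template', params: { query_string: 'hello' } } // body referencing a stored template
+  ]
+})
+console.log(responses.length)
+----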
+ +{ref}/search-multi-search.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MsearchTemplateRequest, options?: TransportRequestOptions) => Promise<MsearchTemplateResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MsearchTemplateRequest extends <<RequestBase>> { + index?: <<Indices>> + ccs_minimize_roundtrips?: boolean + max_concurrent_searches?: <<long>> + search_type?: <<SearchType>> + rest_total_hits_as_int?: boolean + typed_keys?: boolean + search_templates?: <<MsearchTemplateRequestItem>>[] +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type MsearchTemplateResponse<TDocument = unknown, TAggregations = Record<<<AggregateName>>, <<AggregationsAggregate>>>> = <<MsearchMultiSearchResult>><TDocument, TAggregations> + +---- + + diff --git a/docs/reference/mtermvectors.asciidoc b/docs/reference/mtermvectors.asciidoc new file mode 100644 index 000000000..b24113821 --- /dev/null +++ b/docs/reference/mtermvectors.asciidoc @@ -0,0 +1,88 @@ +[[reference-mtermvectors]] +== client.mtermvectors + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.mtermvectors]] +== `client.mtermvectors()` + +Get multiple term vectors. You can specify existing documents by index and ID or provide artificial documents in the body of the request. You can specify the index in the request body or request URI. The response contains a `docs` array with all the fetched termvectors. Each element has the structure provided by the termvectors API. 
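+
+For example, a minimal sketch (placeholder index, document IDs, and field names) that fetches term vectors for two existing documents:
+
+[source,ts]
+----
+const { docs } = await client.mtermvectors({
+  index: 'my-index',
+  ids: ['1', '2'],   // existing documents, fetched by ID
+  fields: ['text'],
+  term_statistics: true
+})
+console.log(docs.length) // one entry per requested document
+----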
+ +{ref}/docs-multi-termvectors.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: MtermvectorsRequest, options?: TransportRequestOptions) => Promise<MtermvectorsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface MtermvectorsRequest extends <<RequestBase>> { + index?: <<IndexName>> + fields?: <<Fields>> + field_statistics?: boolean + offsets?: boolean + payloads?: boolean + positions?: boolean + preference?: string + realtime?: boolean + routing?: <<Routing>> + term_statistics?: boolean + version?: <<VersionNumber>> + version_type?: <<VersionType>> + docs?: <<MtermvectorsOperation>>[] + ids?: <<Id>>[] +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface MtermvectorsResponse { + docs: <<MtermvectorsTermVectorsResult>>[] +} + +---- + + diff --git a/docs/reference/nodes.asciidoc b/docs/reference/nodes.asciidoc new file mode 100644 index 000000000..e9fc7cebe --- /dev/null +++ b/docs/reference/nodes.asciidoc @@ -0,0 +1,324 @@ +[[reference-nodes]] +== client.nodes + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.nodes.clearRepositoriesMeteringArchive]] +== `client.nodes.clearRepositoriesMeteringArchive()` + +You can use this API to clear the archived repositories metering information in the cluster. + +{ref}/clear-repositories-metering-archive-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptions) => Promise<NodesClearRepositoriesMeteringArchiveResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface NodesClearRepositoriesMeteringArchiveRequest extends <<RequestBase>> { + node_id: <<NodeIds>> + max_archive_version: <<long>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type NodesClearRepositoriesMeteringArchiveResponse = NodesClearRepositoriesMeteringArchiveResponseBase + +---- + + +[discrete] +[[client.nodes.getRepositoriesMeteringInfo]] +== `client.nodes.getRepositoriesMeteringInfo()` + +You can use the cluster repositories metering API to retrieve repositories metering information in a cluster. This API exposes monotonically non-decreasing counters and it’s expected that clients would durably store the information needed to compute aggregations over a period of time. 
Additionally, the information exposed by this API is volatile, meaning that it won’t be present after node restarts. + +{ref}/get-repositories-metering-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptions) => Promise<NodesGetRepositoriesMeteringInfoResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface NodesGetRepositoriesMeteringInfoRequest extends <<RequestBase>> { + node_id: <<NodeIds>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type NodesGetRepositoriesMeteringInfoResponse = NodesGetRepositoriesMeteringInfoResponseBase + +---- + + +[discrete] +[[client.nodes.hotThreads]] +== `client.nodes.hotThreads()` + +This API yields a breakdown of the hot threads on each selected node in the cluster. The output is plain text with a breakdown of each node’s top hot threads. + +{ref}/cluster-nodes-hot-threads.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: NodesHotThreadsRequest, options?: TransportRequestOptions) => Promise<NodesHotThreadsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface NodesHotThreadsRequest extends <<RequestBase>> { + node_id?: <<NodeIds>> + ignore_idle_threads?: boolean + interval?: <<Duration>> + snapshots?: <<long>> + master_timeout?: <<Duration>> + threads?: <<long>> + timeout?: <<Duration>> + type?: <<ThreadType>> + sort?: <<ThreadType>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface NodesHotThreadsResponse {} + +---- + + +[discrete] +[[client.nodes.info]] +== `client.nodes.info()` + +Returns cluster nodes information. + +{ref}/cluster-nodes-info.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: NodesInfoRequest, options?: TransportRequestOptions) => Promise<NodesInfoResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface NodesInfoRequest extends <<RequestBase>> { + node_id?: <<NodeIds>> + metric?: <<Metrics>> + flat_settings?: boolean + master_timeout?: <<Duration>> + timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type NodesInfoResponse = NodesInfoResponseBase + +---- + + +[discrete] +[[client.nodes.reloadSecureSettings]] +== `client.nodes.reloadSecureSettings()` + +Reloads the keystore on nodes in the cluster. + +{ref}/secure-settings.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: NodesReloadSecureSettingsRequest, options?: TransportRequestOptions) => Promise<NodesReloadSecureSettingsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface NodesReloadSecureSettingsRequest extends <<RequestBase>> { + node_id?: <<NodeIds>> + timeout?: <<Duration>> + secure_settings_password?: <<Password>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type NodesReloadSecureSettingsResponse = NodesReloadSecureSettingsResponseBase + +---- + + +[discrete] +[[client.nodes.stats]] +== `client.nodes.stats()` + +Returns cluster nodes statistics. 
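+
+A short, illustrative sketch (assuming an already-configured `client`) that limits the response to JVM and operating system statistics:
+
+[source,ts]
+----
+const stats = await client.nodes.stats({ metric: ['jvm', 'os'] })
+for (const [nodeId, node] of Object.entries(stats.nodes)) {
+  console.log(nodeId, node.jvm?.mem?.heap_used_in_bytes)
+}
+----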
+ +{ref}/cluster-nodes-stats.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: NodesStatsRequest, options?: TransportRequestOptions) => Promise<NodesStatsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface NodesStatsRequest extends <<RequestBase>> { + node_id?: <<NodeIds>> + metric?: <<Metrics>> + index_metric?: <<Metrics>> + completion_fields?: <<Fields>> + fielddata_fields?: <<Fields>> + fields?: <<Fields>> + groups?: boolean + include_segment_file_sizes?: boolean + level?: <<Level>> + master_timeout?: <<Duration>> + timeout?: <<Duration>> + types?: string[] + include_unloaded_segments?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type NodesStatsResponse = NodesStatsResponseBase + +---- + + +[discrete] +[[client.nodes.usage]] +== `client.nodes.usage()` + +Returns information on the usage of features. + +{ref}/cluster-nodes-usage.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: NodesUsageRequest, options?: TransportRequestOptions) => Promise<NodesUsageResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface NodesUsageRequest extends <<RequestBase>> { + node_id?: <<NodeIds>> + metric?: <<Metrics>> + timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type NodesUsageResponse = NodesUsageResponseBase + +---- + + diff --git a/docs/reference/open_point_in_time.asciidoc b/docs/reference/open_point_in_time.asciidoc new file mode 100644 index 000000000..ca9325fd8 --- /dev/null +++ b/docs/reference/open_point_in_time.asciidoc @@ -0,0 +1,83 @@ +[[reference-open_point_in_time]] +== client.openPointInTime + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.openPointInTime]] +== `client.openPointInTime()` + +Open a point in time. A search request by default runs against the most recent visible data of the target indices, which is called point in time. Elasticsearch pit (point in time) is a lightweight view into the state of the data as it existed when initiated. In some cases, it’s preferred to perform multiple search requests using the same point in time. For example, if refreshes happen between `search_after` requests, then the results of those requests might not be consistent as changes happening between searches are only visible to the more recent point in time. 
A point in time must be opened explicitly before being used in search requests. The `keep_alive` parameter tells Elasticsearch how long it should persist. + +{ref}/point-in-time-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: OpenPointInTimeRequest, options?: TransportRequestOptions) => Promise<OpenPointInTimeResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface OpenPointInTimeRequest extends <<RequestBase>> { + index: <<Indices>> + keep_alive: <<Duration>> + ignore_unavailable?: boolean + preference?: string + routing?: <<Routing>> + expand_wildcards?: <<ExpandWildcards>> + allow_partial_search_results?: boolean + index_filter?: <<QueryDslQueryContainer>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface OpenPointInTimeResponse { + _shards: <<ShardStatistics>> + id: <<Id>> +} + +---- + + diff --git a/docs/reference/ping.asciidoc b/docs/reference/ping.asciidoc new file mode 100644 index 000000000..229c65d2e --- /dev/null +++ b/docs/reference/ping.asciidoc @@ -0,0 +1,71 @@ +[[reference-ping]] +== client.ping + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.ping]] +== `client.ping()` + +Ping the cluster. Returns whether the cluster is running. 
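+
+A minimal sketch, assuming a configured `client`; the promise resolves to `true` when the cluster answers and rejects when it cannot be reached:
+
+[source,ts]
+----
+const alive = await client.ping()
+console.log(alive) // true
+----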
+ +{ref}/index.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: PingRequest, options?: TransportRequestOptions) => Promise<PingResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface PingRequest extends <<RequestBase>> {} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type PingResponse = boolean + +---- + + diff --git a/docs/reference/put_script.asciidoc b/docs/reference/put_script.asciidoc new file mode 100644 index 000000000..90948ad15 --- /dev/null +++ b/docs/reference/put_script.asciidoc @@ -0,0 +1,77 @@ +[[reference-put_script]] +== client.putScript + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.putScript]] +== `client.putScript()` + +Create or update a script or search template. Creates or updates a stored script or search template. + +{ref}/modules-scripting.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: PutScriptRequest, options?: TransportRequestOptions) => Promise<PutScriptResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface PutScriptRequest extends <<RequestBase>> { + id: <<Id>> + context?: <<Name>> + master_timeout?: <<Duration>> + timeout?: <<Duration>> + script: <<StoredScript>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type PutScriptResponse = <<AcknowledgedResponseBase>> + +---- + + diff --git a/docs/reference/query_rules.asciidoc b/docs/reference/query_rules.asciidoc new file mode 100644 index 000000000..74810f338 --- /dev/null +++ b/docs/reference/query_rules.asciidoc @@ -0,0 +1,352 @@ +[[reference-query_rules]] +== client.queryRules + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.queryRules.deleteRule]] +== `client.queryRules.deleteRule()` + +Delete a query rule. Delete a query rule within a query ruleset. + +{ref}/delete-query-rule.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: QueryRulesDeleteRuleRequest, options?: TransportRequestOptions) => Promise<QueryRulesDeleteRuleResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface QueryRulesDeleteRuleRequest extends <<RequestBase>> { + ruleset_id: <<Id>> + rule_id: <<Id>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type QueryRulesDeleteRuleResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.queryRules.deleteRuleset]] +== `client.queryRules.deleteRuleset()` + +Delete a query ruleset. + +{ref}/delete-query-ruleset.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: QueryRulesDeleteRulesetRequest, options?: TransportRequestOptions) => Promise<QueryRulesDeleteRulesetResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface QueryRulesDeleteRulesetRequest extends <<RequestBase>> { + ruleset_id: <<Id>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type QueryRulesDeleteRulesetResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.queryRules.getRule]] +== `client.queryRules.getRule()` + +Get a query rule. Get details about a query rule within a query ruleset. + +{ref}/get-query-rule.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: QueryRulesGetRuleRequest, options?: TransportRequestOptions) => Promise<QueryRulesGetRuleResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface QueryRulesGetRuleRequest extends <<RequestBase>> { + ruleset_id: <<Id>> + rule_id: <<Id>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type QueryRulesGetRuleResponse = <<QueryRulesQueryRule>> + +---- + + +[discrete] +[[client.queryRules.getRuleset]] +== `client.queryRules.getRuleset()` + +Get a query ruleset. Get details about a query ruleset. + +{ref}/get-query-ruleset.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: QueryRulesGetRulesetRequest, options?: TransportRequestOptions) => Promise<QueryRulesGetRulesetResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface QueryRulesGetRulesetRequest extends <<RequestBase>> { + ruleset_id: <<Id>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type QueryRulesGetRulesetResponse = <<QueryRulesQueryRuleset>> + +---- + + +[discrete] +[[client.queryRules.listRulesets]] +== `client.queryRules.listRulesets()` + +Get all query rulesets. Get summarized information about the query rulesets. 
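+
+An illustrative sketch (assuming a `client` connected to a cluster version that includes query rules) that pages through ruleset summaries:
+
+[source,ts]
+----
+const { count, results } = await client.queryRules.listRulesets({ from: 0, size: 20 })
+console.log(count)
+for (const ruleset of results) {
+  console.log(ruleset.ruleset_id)
+}
+----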
+ +{ref}/list-query-rulesets.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: QueryRulesListRulesetsRequest, options?: TransportRequestOptions) => Promise<QueryRulesListRulesetsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface QueryRulesListRulesetsRequest extends <<RequestBase>> { + from?: <<integer>> + size?: <<integer>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface QueryRulesListRulesetsResponse { + count: <<long>> + results: QueryRulesListRulesetsQueryRulesetListItem[] +} + +---- + + +[discrete] +[[client.queryRules.putRule]] +== `client.queryRules.putRule()` + +Create or update a query rule. Create or update a query rule within a query ruleset. + +{ref}/put-query-rule.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: QueryRulesPutRuleRequest, options?: TransportRequestOptions) => Promise<QueryRulesPutRuleResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface QueryRulesPutRuleRequest extends <<RequestBase>> { + ruleset_id: <<Id>> + rule_id: <<Id>> + type: <<QueryRulesQueryRuleType>> + criteria: <<QueryRulesQueryRuleCriteria>> | <<QueryRulesQueryRuleCriteria>>[] + actions: <<QueryRulesQueryRuleActions>> + priority?: <<integer>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface QueryRulesPutRuleResponse { + result: <<Result>> +} + +---- + + +[discrete] +[[client.queryRules.putRuleset]] +== `client.queryRules.putRuleset()` + +Create or update a query ruleset. + +{ref}/put-query-ruleset.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: QueryRulesPutRulesetRequest, options?: TransportRequestOptions) => Promise<QueryRulesPutRulesetResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface QueryRulesPutRulesetRequest extends <<RequestBase>> { + ruleset_id: <<Id>> + rules: <<QueryRulesQueryRule>> | <<QueryRulesQueryRule>>[] +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface QueryRulesPutRulesetResponse { + result: <<Result>> +} + +---- + + +[discrete] +[[client.queryRules.test]] +== `client.queryRules.test()` + +Test a query ruleset. Evaluate match criteria against a query ruleset to identify the rules that would match that criteria. 
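+
+For example, a hedged sketch (placeholder ruleset ID; the `match_criteria` keys must line up with the criteria metadata defined in your own rules):
+
+[source,ts]
+----
+const result = await client.queryRules.test({
+  ruleset_id: 'my-ruleset', // placeholder ruleset ID
+  match_criteria: { user_query: 'puggles' }
+})
+console.log(result.total_matched_rules, result.matched_rules)
+----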
+ +{ref}/test-query-ruleset.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: QueryRulesTestRequest, options?: TransportRequestOptions) => Promise<QueryRulesTestResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface QueryRulesTestRequest extends <<RequestBase>> { + ruleset_id: <<Id>> + match_criteria: Record<string, any> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface QueryRulesTestResponse { + total_matched_rules: <<integer>> + matched_rules: QueryRulesTestQueryRulesetMatchedRule[] +} + +---- + + diff --git a/docs/reference/rank_eval.asciidoc b/docs/reference/rank_eval.asciidoc new file mode 100644 index 000000000..4e7afde21 --- /dev/null +++ b/docs/reference/rank_eval.asciidoc @@ -0,0 +1,83 @@ +[[reference-rank_eval]] +== client.rankEval + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.rankEval]] +== `client.rankEval()` + +Evaluate ranked search results. Evaluate the quality of ranked search results over a set of typical search queries. 
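+
+An illustrative sketch (placeholder index, query, and ratings) that scores one query with the precision-at-k metric:
+
+[source,ts]
+----
+const evaluation = await client.rankEval({
+  index: 'my-index',
+  requests: [
+    {
+      id: 'query-1',
+      request: { query: { match: { title: 'elasticsearch' } } },
+      ratings: [
+        { _index: 'my-index', _id: '1', rating: 3 },
+        { _index: 'my-index', _id: '7', rating: 0 }
+      ]
+    }
+  ],
+  metric: { precision: { k: 5, relevant_rating_threshold: 1 } }
+})
+console.log(evaluation.metric_score)
+----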
+ +{ref}/search-rank-eval.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: RankEvalRequest, options?: TransportRequestOptions) => Promise<RankEvalResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface RankEvalRequest extends <<RequestBase>> { + index?: <<Indices>> + allow_no_indices?: boolean + expand_wildcards?: <<ExpandWildcards>> + ignore_unavailable?: boolean + search_type?: string + requests: <<RankEvalRankEvalRequestItem>>[] + metric?: <<RankEvalRankEvalMetric>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface RankEvalResponse { + metric_score: <<double>> + details: Record<<<Id>>, <<RankEvalRankEvalMetricDetail>>> + failures: Record<string, any> +} + +---- + + diff --git a/docs/reference/reindex.asciidoc b/docs/reference/reindex.asciidoc new file mode 100644 index 000000000..5778daa78 --- /dev/null +++ b/docs/reference/reindex.asciidoc @@ -0,0 +1,103 @@ +[[reference-reindex]] +== client.reindex + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.reindex]] +== `client.reindex()` + +Reindex documents. Copies documents from a source to a destination. The source can be any existing index, alias, or data stream. The destination must differ from the source. For example, you cannot reindex a data stream into itself. 
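+
+A minimal, illustrative sketch (placeholder index names) that starts the copy asynchronously and returns a task ID to poll:
+
+[source,ts]
+----
+const response = await client.reindex({
+  source: { index: 'old-index' },
+  dest: { index: 'new-index' },
+  wait_for_completion: false // do not block; track progress through the returned task instead
+})
+console.log(response.task)
+----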
+ +{ref}/docs-reindex.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ReindexRequest, options?: TransportRequestOptions) => Promise<ReindexResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ReindexRequest extends <<RequestBase>> { + refresh?: boolean + requests_per_second?: <<float>> + scroll?: <<Duration>> + slices?: <<Slices>> + timeout?: <<Duration>> + wait_for_active_shards?: <<WaitForActiveShards>> + wait_for_completion?: boolean + require_alias?: boolean + conflicts?: <<Conflicts>> + dest: <<ReindexDestination>> + max_docs?: <<long>> + script?: <<Script>> | string + size?: <<long>> + source: <<ReindexSource>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface ReindexResponse { + batches?: <<long>> + created?: <<long>> + deleted?: <<long>> + failures?: <<BulkIndexByScrollFailure>>[] + noops?: <<long>> + retries?: <<Retries>> + requests_per_second?: <<float>> + slice_id?: <<integer>> + task?: <<TaskId>> + throttled_millis?: <<EpochTime>><<<UnitMillis>>> + throttled_until_millis?: <<EpochTime>><<<UnitMillis>>> + timed_out?: boolean + took?: <<DurationValue>><<<UnitMillis>>> + total?: <<long>> + updated?: <<long>> + version_conflicts?: <<long>> +} + +---- + + diff --git a/docs/reference/reindex_rethrottle.asciidoc b/docs/reference/reindex_rethrottle.asciidoc new file mode 100644 index 000000000..5273b6bce --- /dev/null +++ b/docs/reference/reindex_rethrottle.asciidoc @@ -0,0 +1,76 @@ +[[reference-reindex_rethrottle]] +== client.reindexRethrottle + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.reindexRethrottle]] +== `client.reindexRethrottle()` + +Throttle a reindex operation. Change the number of requests per second for a particular reindex operation. 
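+
+For example, a short sketch (the task ID is a placeholder taken from a previous asynchronous reindex response):
+
+[source,ts]
+----
+await client.reindexRethrottle({
+  task_id: 'r1A2WoRbTwKZ516z6NEs5A:36619', // placeholder task ID
+  requests_per_second: 500 // use -1 to remove throttling entirely
+})
+----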
+ +{ref}/docs-reindex.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ReindexRethrottleRequest, options?: TransportRequestOptions) => Promise<ReindexRethrottleResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ReindexRethrottleRequest extends <<RequestBase>> { + task_id: <<Id>> + requests_per_second?: <<float>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface ReindexRethrottleResponse { + nodes: Record<string, <<ReindexRethrottleReindexNode>>> +} + +---- + + diff --git a/docs/reference/render_search_template.asciidoc b/docs/reference/render_search_template.asciidoc new file mode 100644 index 000000000..ecf898676 --- /dev/null +++ b/docs/reference/render_search_template.asciidoc @@ -0,0 +1,78 @@ +[[reference-render_search_template]] +== client.renderSearchTemplate + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.renderSearchTemplate]] +== `client.renderSearchTemplate()` + +Render a search template. Render a search template as a search request body. 
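+
+An illustrative sketch (inline template and parameters are placeholders) that renders a template without running the search:
+
+[source,ts]
+----
+const rendered = await client.renderSearchTemplate({
+  source: '{ "query": { "match": { "message": "{{query_string}}" } } }',
+  params: { query_string: 'hello world' }
+})
+console.log(rendered.template_output)
+----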
+ +{ref}/render-search-template-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: RenderSearchTemplateRequest, options?: TransportRequestOptions) => Promise<RenderSearchTemplateResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface RenderSearchTemplateRequest extends <<RequestBase>> { + id?: <<Id>> + file?: string + params?: Record<string, any> + source?: string +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface RenderSearchTemplateResponse { + template_output: Record<string, any> +} + +---- + + diff --git a/docs/reference/rollup.asciidoc b/docs/reference/rollup.asciidoc new file mode 100644 index 000000000..a7dee9e94 --- /dev/null +++ b/docs/reference/rollup.asciidoc @@ -0,0 +1,365 @@ +[[reference-rollup]] +== client.rollup + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.rollup.deleteJob]] +== `client.rollup.deleteJob()` + +Deletes an existing rollup job. + +{ref}/rollup-delete-job.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: RollupDeleteJobRequest, options?: TransportRequestOptions) => Promise<RollupDeleteJobResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface RollupDeleteJobRequest extends <<RequestBase>> { + id: <<Id>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface RollupDeleteJobResponse { + acknowledged: boolean + task_failures?: <<TaskFailure>>[] +} + +---- + + +[discrete] +[[client.rollup.getJobs]] +== `client.rollup.getJobs()` + +Retrieves the configuration, stats, and status of rollup jobs. + +{ref}/rollup-get-job.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: RollupGetJobsRequest, options?: TransportRequestOptions) => Promise<RollupGetJobsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface RollupGetJobsRequest extends <<RequestBase>> { + id?: <<Id>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface RollupGetJobsResponse { + jobs: RollupGetJobsRollupJob[] +} + +---- + + +[discrete] +[[client.rollup.getRollupCaps]] +== `client.rollup.getRollupCaps()` + +Returns the capabilities of any rollup jobs that have been configured for a specific index or index pattern. 
+ +{ref}/rollup-get-rollup-caps.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: RollupGetRollupCapsRequest, options?: TransportRequestOptions) => Promise<RollupGetRollupCapsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface RollupGetRollupCapsRequest extends <<RequestBase>> { + id?: <<Id>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type RollupGetRollupCapsResponse = Record<<<IndexName>>, RollupGetRollupCapsRollupCapabilities> + +---- + + +[discrete] +[[client.rollup.getRollupIndexCaps]] +== `client.rollup.getRollupIndexCaps()` + +Returns the rollup capabilities of all jobs inside of a rollup index (for example, the index where rollup data is stored). + +{ref}/rollup-get-rollup-index-caps.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: RollupGetRollupIndexCapsRequest, options?: TransportRequestOptions) => Promise<RollupGetRollupIndexCapsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface RollupGetRollupIndexCapsRequest extends <<RequestBase>> { + index: <<Ids>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type RollupGetRollupIndexCapsResponse = Record<<<IndexName>>, RollupGetRollupIndexCapsIndexCapabilities> + +---- + + +[discrete] +[[client.rollup.putJob]] +== `client.rollup.putJob()` + +Creates a rollup job. + +{ref}/rollup-put-job.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: RollupPutJobRequest, options?: TransportRequestOptions) => Promise<RollupPutJobResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface RollupPutJobRequest extends <<RequestBase>> { + id: <<Id>> + cron: string + groups: <<RollupGroupings>> + index_pattern: string + metrics?: <<RollupFieldMetric>>[] + page_size: <<integer>> + rollup_index: <<IndexName>> + timeout?: <<Duration>> + headers?: <<HttpHeaders>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type RollupPutJobResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.rollup.rollupSearch]] +== `client.rollup.rollupSearch()` + +Enables searching rolled-up data using the standard Query DSL. + +{ref}/rollup-search.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: RollupRollupSearchRequest, options?: TransportRequestOptions) => Promise<RollupRollupSearchResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface RollupRollupSearchRequest extends <<RequestBase>> { + index: <<Indices>> + rest_total_hits_as_int?: boolean + typed_keys?: boolean + aggregations?: Record<string, <<AggregationsAggregationContainer>>> + pass:[/**] @alias aggregations */ + aggs?: Record<string, <<AggregationsAggregationContainer>>> + query?: <<QueryDslQueryContainer>> + size?: <<integer>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface RollupRollupSearchResponse<TDocument = unknown, TAggregations = Record<<<AggregateName>>, <<AggregationsAggregate>>>> { + took: <<long>> + timed_out: boolean + terminated_early?: boolean + _shards: <<ShardStatistics>> + hits: <<SearchHitsMetadata>><TDocument> + aggregations?: TAggregations +} + +---- + + +[discrete] +[[client.rollup.startJob]] +== `client.rollup.startJob()` + +Starts an existing, stopped rollup job. 
+ +{ref}/rollup-start-job.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: RollupStartJobRequest, options?: TransportRequestOptions) => Promise<RollupStartJobResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface RollupStartJobRequest extends <<RequestBase>> { + id: <<Id>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface RollupStartJobResponse { + started: boolean +} + +---- + + +[discrete] +[[client.rollup.stopJob]] +== `client.rollup.stopJob()` + +Stops an existing, started rollup job. + +{ref}/rollup-stop-job.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: RollupStopJobRequest, options?: TransportRequestOptions) => Promise<RollupStopJobResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface RollupStopJobRequest extends <<RequestBase>> { + id: <<Id>> + timeout?: <<Duration>> + wait_for_completion?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface RollupStopJobResponse { + stopped: boolean +} + +---- + + diff --git a/docs/reference/scripts_painless_execute.asciidoc b/docs/reference/scripts_painless_execute.asciidoc new file mode 100644 index 000000000..fef4cad05 --- /dev/null +++ b/docs/reference/scripts_painless_execute.asciidoc @@ -0,0 +1,77 @@ +[[reference-scripts_painless_execute]] +== client.scriptsPainlessExecute + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.scriptsPainlessExecute]] +== `client.scriptsPainlessExecute()` + +Run a script. Runs a script and returns a result. 
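+
+As an illustrative sketch, a stand-alone script can be evaluated like this (the script source and params are arbitrary examples):
+
+[source,ts]
+----
+// assumes `client` is a configured @elastic/elasticsearch Client instance
+const response = await client.scriptsPainlessExecute({
+  script: {
+    source: 'params.count / params.total',
+    params: { count: 100.0, total: 1000.0 }
+  }
+})
+console.log(response.result) // the value computed by the script
+----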
+ +{painless}/painless-execute-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ScriptsPainlessExecuteRequest, options?: TransportRequestOptions) => Promise<ScriptsPainlessExecuteResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ScriptsPainlessExecuteRequest extends <<RequestBase>> { + context?: string + context_setup?: <<ScriptsPainlessExecutePainlessContextSetup>> + script?: <<Script>> | string +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface ScriptsPainlessExecuteResponse<TResult = unknown> { + result: TResult +} + +---- + + diff --git a/docs/reference/scroll.asciidoc b/docs/reference/scroll.asciidoc new file mode 100644 index 000000000..91051bc1b --- /dev/null +++ b/docs/reference/scroll.asciidoc @@ -0,0 +1,75 @@ +[[reference-scroll]] +== client.scroll + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.scroll]] +== `client.scroll()` + +Run a scrolling search. IMPORTANT: The scroll API is no longer recommend for deep pagination. If you need to preserve the index state while paging through more than 10,000 hits, use the `search_after` parameter with a point in time (PIT). The scroll API gets large sets of results from a single scrolling search request. To get the necessary scroll ID, submit a search API request that includes an argument for the `scroll` query parameter. The `scroll` parameter indicates how long Elasticsearch should retain the search context for the request. The search response returns a scroll ID in the `_scroll_id` response body parameter. You can then use the scroll ID with the scroll API to retrieve the next batch of results for the request. If the Elasticsearch security features are enabled, the access to the results of a specific scroll ID is restricted to the user or API key that submitted the search. You can also use the scroll API to specify a new scroll parameter that extends or shortens the retention period for the search context. IMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests. 
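+
+As a sketch of the workflow described above (the index name, scroll duration, and page size are placeholders):
+
+[source,ts]
+----
+import { Client } from '@elastic/elasticsearch'
+
+const client = new Client({ node: 'http://localhost:9200' }) // placeholder node address
+
+// 1. Start a scrolling search and keep the search context alive for 30 seconds
+let response = await client.search({
+  index: 'my-index',
+  scroll: '30s',
+  size: 100,
+  query: { match_all: {} }
+})
+
+// 2. Keep passing the most recent _scroll_id back to the scroll API until no hits remain
+while (response.hits.hits.length > 0) {
+  // process response.hits.hits here
+  response = await client.scroll({ scroll_id: response._scroll_id, scroll: '30s' })
+}
+----
+
+For new code that pages through more than 10,000 hits, prefer the `search_after` parameter with a point in time (PIT), as noted above.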
+ +{ref}/search-request-body.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ScrollRequest, options?: TransportRequestOptions) => Promise<ScrollResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ScrollRequest extends <<RequestBase>> { + scroll_id?: <<ScrollId>> + rest_total_hits_as_int?: boolean + scroll?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type ScrollResponse<TDocument = unknown, TAggregations = Record<<<AggregateName>>, <<AggregationsAggregate>>>> = <<SearchResponseBody>><TDocument, TAggregations> + +---- + + diff --git a/docs/reference/search.asciidoc b/docs/reference/search.asciidoc new file mode 100644 index 000000000..53fd74a9c --- /dev/null +++ b/docs/reference/search.asciidoc @@ -0,0 +1,139 @@ +[[reference-search]] +== client.search + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.search]] +== `client.search()` + +Run a search. Get search hits that match the query defined in the request. You can provide search queries using the `q` query string parameter or the request body. If both are specified, only the query parameter is used. 
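+
+For example, a request-body query might look like the following sketch (the index, field, and value are placeholders):
+
+[source,ts]
+----
+import { Client } from '@elastic/elasticsearch'
+
+const client = new Client({ node: 'http://localhost:9200' }) // placeholder node address
+
+const result = await client.search({
+  index: 'my-index',
+  query: { match: { 'user.id': 'kimchy' } }
+})
+console.log(result.hits.hits)
+----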
+ +{ref}/search-search.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SearchRequest, options?: TransportRequestOptions) => Promise<SearchResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SearchRequest extends <<RequestBase>> { + index?: <<Indices>> + allow_no_indices?: boolean + allow_partial_search_results?: boolean + analyzer?: string + analyze_wildcard?: boolean + batched_reduce_size?: <<long>> + ccs_minimize_roundtrips?: boolean + default_operator?: <<QueryDslOperator>> + df?: string + expand_wildcards?: <<ExpandWildcards>> + ignore_throttled?: boolean + ignore_unavailable?: boolean + include_named_queries_score?: boolean + lenient?: boolean + max_concurrent_shard_requests?: <<long>> + preference?: string + pre_filter_shard_size?: <<long>> + request_cache?: boolean + routing?: <<Routing>> + scroll?: <<Duration>> + search_type?: <<SearchType>> + suggest_field?: <<Field>> + suggest_mode?: <<SuggestMode>> + suggest_size?: <<long>> + suggest_text?: string + typed_keys?: boolean + rest_total_hits_as_int?: boolean + _source_excludes?: <<Fields>> + _source_includes?: <<Fields>> + q?: string + force_synthetic_source?: boolean + aggregations?: Record<string, <<AggregationsAggregationContainer>>> + pass:[/**] @alias aggregations */ + aggs?: Record<string, <<AggregationsAggregationContainer>>> + collapse?: <<SearchFieldCollapse>> + explain?: boolean + ext?: Record<string, any> + from?: <<integer>> + highlight?: <<SearchHighlight>> + track_total_hits?: <<SearchTrackHits>> + indices_boost?: Record<<<IndexName>>, <<double>>>[] + docvalue_fields?: (<<QueryDslFieldAndFormat>> | <<Field>>)[] + knn?: <<KnnSearch>> | <<KnnSearch>>[] + rank?: <<RankContainer>> + min_score?: <<double>> + post_filter?: <<QueryDslQueryContainer>> + profile?: boolean + query?: <<QueryDslQueryContainer>> + rescore?: <<SearchRescore>> | <<SearchRescore>>[] + retriever?: <<RetrieverContainer>> + script_fields?: Record<string, <<ScriptField>>> + search_after?: <<SortResults>> + size?: <<integer>> + slice?: <<SlicedScroll>> + sort?: <<Sort>> + _source?: <<SearchSourceConfig>> + fields?: (<<QueryDslFieldAndFormat>> | <<Field>>)[] + suggest?: <<SearchSuggester>> + terminate_after?: <<long>> + timeout?: string + track_scores?: boolean + version?: boolean + seq_no_primary_term?: boolean + stored_fields?: <<Fields>> + pit?: <<SearchPointInTimeReference>> + runtime_mappings?: <<MappingRuntimeFields>> + stats?: string[] +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type SearchResponse<TDocument = unknown, TAggregations = Record<<<AggregateName>>, <<AggregationsAggregate>>>> = <<SearchResponseBody>><TDocument, TAggregations> + +---- + + diff --git a/docs/reference/search_application.asciidoc b/docs/reference/search_application.asciidoc new file mode 100644 index 000000000..a8ba3995e --- /dev/null +++ b/docs/reference/search_application.asciidoc @@ -0,0 +1,373 @@ +[[reference-search_application]] +== client.searchApplication + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| 
This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.searchApplication.delete]] +== `client.searchApplication.delete()` + +Delete a search application. Remove a search application and its associated alias. Indices attached to the search application are not removed. + +{ref}/delete-search-application.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SearchApplicationDeleteRequest, options?: TransportRequestOptions) => Promise<SearchApplicationDeleteResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SearchApplicationDeleteRequest extends <<RequestBase>> { + name: <<Name>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type SearchApplicationDeleteResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.searchApplication.deleteBehavioralAnalytics]] +== `client.searchApplication.deleteBehavioralAnalytics()` + +Delete a behavioral analytics collection. The associated data stream is also deleted. + +{ref}/delete-analytics-collection.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SearchApplicationDeleteBehavioralAnalyticsRequest, options?: TransportRequestOptions) => Promise<SearchApplicationDeleteBehavioralAnalyticsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SearchApplicationDeleteBehavioralAnalyticsRequest extends <<RequestBase>> { + name: <<Name>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type SearchApplicationDeleteBehavioralAnalyticsResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.searchApplication.get]] +== `client.searchApplication.get()` + +Get search application details. + +{ref}/get-search-application.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SearchApplicationGetRequest, options?: TransportRequestOptions) => Promise<SearchApplicationGetResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SearchApplicationGetRequest extends <<RequestBase>> { + name: <<Name>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type SearchApplicationGetResponse = <<SearchApplicationSearchApplication>> + +---- + + +[discrete] +[[client.searchApplication.getBehavioralAnalytics]] +== `client.searchApplication.getBehavioralAnalytics()` + +Get behavioral analytics collections. 
+
+{ref}/list-analytics-collection.html[{es} documentation]
+[discrete]
+=== Function signature
+
+[source,ts]
+----
+(request: SearchApplicationGetBehavioralAnalyticsRequest, options?: TransportRequestOptions) => Promise<SearchApplicationGetBehavioralAnalyticsResponse>
+----
+
+[discrete]
+=== Request
+
+[source,ts,subs=+macros]
+----
+interface SearchApplicationGetBehavioralAnalyticsRequest extends <<RequestBase>> {
+  name?: <<Name>>[]
+}
+
+----
+
+
+[discrete]
+=== Response
+
+[source,ts,subs=+macros]
+----
+type SearchApplicationGetBehavioralAnalyticsResponse = Record<<<Name>>, <<SearchApplicationAnalyticsCollection>>>
+
+----
+
+
+[discrete]
+[[client.searchApplication.list]]
+== `client.searchApplication.list()`
+
+Returns the existing search applications.
+
+{ref}/list-search-applications.html[{es} documentation]
+[discrete]
+=== Function signature
+
+[source,ts]
+----
+(request: SearchApplicationListRequest, options?: TransportRequestOptions) => Promise<SearchApplicationListResponse>
+----
+
+[discrete]
+=== Request
+
+[source,ts,subs=+macros]
+----
+interface SearchApplicationListRequest extends <<RequestBase>> {
+  q?: string
+  from?: <<integer>>
+  size?: <<integer>>
+}
+
+----
+
+
+[discrete]
+=== Response
+
+[source,ts,subs=+macros]
+----
+interface SearchApplicationListResponse {
+  count: <<long>>
+  results: <<SearchApplicationSearchApplication>>[]
+}
+
+----
+
+
+[discrete]
+[[client.searchApplication.postBehavioralAnalyticsEvent]]
+== `client.searchApplication.postBehavioralAnalyticsEvent()`
+
+Creates a behavioral analytics event for an existing collection.
+
+http://todo.com/tbd[{es} documentation]
+[discrete]
+=== Function signature
+
+[source,ts]
+----
+(request: SearchApplicationPostBehavioralAnalyticsEventRequest, options?: TransportRequestOptions) => Promise<SearchApplicationPostBehavioralAnalyticsEventResponse>
+----
+
+[discrete]
+[[client.searchApplication.put]]
+== `client.searchApplication.put()`
+
+Create or update a search application.
+
+{ref}/put-search-application.html[{es} documentation]
+[discrete]
+=== Function signature
+
+[source,ts]
+----
+(request: SearchApplicationPutRequest, options?: TransportRequestOptions) => Promise<SearchApplicationPutResponse>
+----
+
+[discrete]
+=== Request
+
+[source,ts,subs=+macros]
+----
+interface SearchApplicationPutRequest extends <<RequestBase>> {
+  name: <<Name>>
+  create?: boolean
+  search_application?: <<SearchApplicationSearchApplicationParameters>>
+}
+
+----
+
+
+[discrete]
+=== Response
+
+[source,ts,subs=+macros]
+----
+interface SearchApplicationPutResponse {
+  result: <<Result>>
+}
+
+----
+
+
+[discrete]
+[[client.searchApplication.putBehavioralAnalytics]]
+== `client.searchApplication.putBehavioralAnalytics()`
+
+Create a behavioral analytics collection.
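+
+For example (the collection name is a placeholder):
+
+[source,ts]
+----
+// assumes `client` is a configured @elastic/elasticsearch Client instance
+await client.searchApplication.putBehavioralAnalytics({ name: 'my_analytics_collection' })
+----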
+ +{ref}/put-analytics-collection.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SearchApplicationPutBehavioralAnalyticsRequest, options?: TransportRequestOptions) => Promise<SearchApplicationPutBehavioralAnalyticsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SearchApplicationPutBehavioralAnalyticsRequest extends <<RequestBase>> { + name: <<Name>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type SearchApplicationPutBehavioralAnalyticsResponse = SearchApplicationPutBehavioralAnalyticsAnalyticsAcknowledgeResponseBase + +---- + + +[discrete] +[[client.searchApplication.renderQuery]] +== `client.searchApplication.renderQuery()` + +Renders a query for given search application search parameters + +{ref}/search-application-render-query.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SearchApplicationRenderQueryRequest, options?: TransportRequestOptions) => Promise<SearchApplicationRenderQueryResponse> +---- + +[discrete] +[[client.searchApplication.search]] +== `client.searchApplication.search()` + +Run a search application search. Generate and run an Elasticsearch query that uses the specified query parameteter and the search template associated with the search application or default template. Unspecified template parameters are assigned their default values if applicable. + +{ref}/search-application-search.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SearchApplicationSearchRequest, options?: TransportRequestOptions) => Promise<SearchApplicationSearchResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SearchApplicationSearchRequest extends <<RequestBase>> { + name: <<Name>> + typed_keys?: boolean + params?: Record<string, any> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type SearchApplicationSearchResponse<TDocument = unknown, TAggregations = Record<<<AggregateName>>, <<AggregationsAggregate>>>> = <<SearchResponseBody>><TDocument, TAggregations> + +---- + + diff --git a/docs/reference/search_mvt.asciidoc b/docs/reference/search_mvt.asciidoc new file mode 100644 index 000000000..535de71a0 --- /dev/null +++ b/docs/reference/search_mvt.asciidoc @@ -0,0 +1,91 @@ +[[reference-search_mvt]] +== client.searchMvt + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.searchMvt]] +== `client.searchMvt()` + +Search a vector tile. Search a vector tile for geospatial values. + +{ref}/search-vector-tile-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SearchMvtRequest, options?: TransportRequestOptions) => Promise<SearchMvtResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SearchMvtRequest extends <<RequestBase>> { + index: <<Indices>> + field: <<Field>> + zoom: <<SearchMvtZoomLevel>> + x: <<SearchMvtCoordinate>> + y: <<SearchMvtCoordinate>> + aggs?: Record<string, <<AggregationsAggregationContainer>>> + buffer?: <<integer>> + exact_bounds?: boolean + extent?: <<integer>> + fields?: <<Fields>> + grid_agg?: <<SearchMvtGridAggregationType>> + grid_precision?: <<integer>> + grid_type?: <<SearchMvtGridType>> + query?: <<QueryDslQueryContainer>> + runtime_mappings?: <<MappingRuntimeFields>> + size?: <<integer>> + sort?: <<Sort>> + track_total_hits?: <<SearchTrackHits>> + with_labels?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type SearchMvtResponse = <<MapboxVectorTiles>> + +---- + + diff --git a/docs/reference/search_shards.asciidoc b/docs/reference/search_shards.asciidoc new file mode 100644 index 000000000..375b4509a --- /dev/null +++ b/docs/reference/search_shards.asciidoc @@ -0,0 +1,83 @@ +[[reference-search_shards]] +== client.searchShards + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.searchShards]] +== `client.searchShards()` + +Get the search shards. Get the indices and shards that a search request would be run against. This information can be useful for working out issues or planning optimizations with routing and shard preferences. When filtered aliases are used, the filter is returned as part of the indices section. 
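+
+For example, to see where a search against a hypothetical `my-index` with a given routing value would run:
+
+[source,ts]
+----
+// assumes `client` is a configured @elastic/elasticsearch Client instance
+const report = await client.searchShards({ index: 'my-index', routing: 'user-1' })
+console.log(report.shards)
+----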
+ +{ref}/search-shards.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SearchShardsRequest, options?: TransportRequestOptions) => Promise<SearchShardsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SearchShardsRequest extends <<RequestBase>> { + index?: <<Indices>> + allow_no_indices?: boolean + expand_wildcards?: <<ExpandWildcards>> + ignore_unavailable?: boolean + local?: boolean + preference?: string + routing?: <<Routing>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SearchShardsResponse { + nodes: Record<<<NodeId>>, <<SearchShardsSearchShardsNodeAttributes>>> + shards: <<NodeShard>>[][] + indices: Record<<<IndexName>>, <<SearchShardsShardStoreIndex>>> +} + +---- + + diff --git a/docs/reference/search_template.asciidoc b/docs/reference/search_template.asciidoc new file mode 100644 index 000000000..2998d095d --- /dev/null +++ b/docs/reference/search_template.asciidoc @@ -0,0 +1,104 @@ +[[reference-search_template]] +== client.searchTemplate + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.searchTemplate]] +== `client.searchTemplate()` + +Run a search with a search template. 
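+
+As a sketch, assuming a stored search template with the id `my-search-template` that accepts `query_string`, `from`, and `size` parameters:
+
+[source,ts]
+----
+// assumes `client` is a configured @elastic/elasticsearch Client instance
+const response = await client.searchTemplate({
+  index: 'my-index',
+  id: 'my-search-template',
+  params: { query_string: 'hello world', from: 0, size: 10 }
+})
+console.log(response.hits.hits)
+----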
+ +{ref}/search-template.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SearchTemplateRequest, options?: TransportRequestOptions) => Promise<SearchTemplateResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SearchTemplateRequest extends <<RequestBase>> { + index?: <<Indices>> + allow_no_indices?: boolean + ccs_minimize_roundtrips?: boolean + expand_wildcards?: <<ExpandWildcards>> + ignore_throttled?: boolean + ignore_unavailable?: boolean + preference?: string + routing?: <<Routing>> + scroll?: <<Duration>> + search_type?: <<SearchType>> + rest_total_hits_as_int?: boolean + typed_keys?: boolean + explain?: boolean + id?: <<Id>> + params?: Record<string, any> + profile?: boolean + source?: string +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SearchTemplateResponse<TDocument = unknown> { + took: <<long>> + timed_out: boolean + _shards: <<ShardStatistics>> + hits: <<SearchHitsMetadata>><TDocument> + aggregations?: Record<<<AggregateName>>, <<AggregationsAggregate>>> + _clusters?: <<ClusterStatistics>> + fields?: Record<string, any> + max_score?: <<double>> + num_reduce_phases?: <<long>> + profile?: <<SearchProfile>> + pit_id?: <<Id>> + _scroll_id?: <<ScrollId>> + suggest?: Record<<<SuggestionName>>, <<SearchSuggest>><TDocument>[]> + terminated_early?: boolean +} + +---- + + diff --git a/docs/reference/searchable_snapshots.asciidoc b/docs/reference/searchable_snapshots.asciidoc new file mode 100644 index 000000000..773a55334 --- /dev/null +++ b/docs/reference/searchable_snapshots.asciidoc @@ -0,0 +1,206 @@ +[[reference-searchable_snapshots]] +== client.searchableSnapshots + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.searchableSnapshots.cacheStats]] +== `client.searchableSnapshots.cacheStats()` + +Retrieve node-level cache statistics about searchable snapshots. 
+ +{ref}/searchable-snapshots-apis.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptions) => Promise<SearchableSnapshotsCacheStatsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SearchableSnapshotsCacheStatsRequest extends <<RequestBase>> { + node_id?: <<NodeIds>> + master_timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SearchableSnapshotsCacheStatsResponse { + nodes: Record<string, SearchableSnapshotsCacheStatsNode> +} + +---- + + +[discrete] +[[client.searchableSnapshots.clearCache]] +== `client.searchableSnapshots.clearCache()` + +Clear the cache of searchable snapshots. + +{ref}/searchable-snapshots-apis.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptions) => Promise<SearchableSnapshotsClearCacheResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SearchableSnapshotsClearCacheRequest extends <<RequestBase>> { + index?: <<Indices>> + expand_wildcards?: <<ExpandWildcards>> + allow_no_indices?: boolean + ignore_unavailable?: boolean + pretty?: boolean + human?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type SearchableSnapshotsClearCacheResponse = any + +---- + + +[discrete] +[[client.searchableSnapshots.mount]] +== `client.searchableSnapshots.mount()` + +Mount a snapshot as a searchable index. + +{ref}/searchable-snapshots-api-mount-snapshot.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SearchableSnapshotsMountRequest, options?: TransportRequestOptions) => Promise<SearchableSnapshotsMountResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SearchableSnapshotsMountRequest extends <<RequestBase>> { + repository: <<Name>> + snapshot: <<Name>> + master_timeout?: <<Duration>> + wait_for_completion?: boolean + storage?: string + index: <<IndexName>> + renamed_index?: <<IndexName>> + index_settings?: Record<string, any> + ignore_index_settings?: string[] +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SearchableSnapshotsMountResponse { + snapshot: SearchableSnapshotsMountMountedSnapshot +} + +---- + + +[discrete] +[[client.searchableSnapshots.stats]] +== `client.searchableSnapshots.stats()` + +Retrieve shard-level statistics about searchable snapshots. 
+ +{ref}/searchable-snapshots-apis.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SearchableSnapshotsStatsRequest, options?: TransportRequestOptions) => Promise<SearchableSnapshotsStatsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SearchableSnapshotsStatsRequest extends <<RequestBase>> { + index?: <<Indices>> + level?: <<SearchableSnapshotsStatsLevel>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SearchableSnapshotsStatsResponse { + stats: any + total: any +} + +---- + + diff --git a/docs/reference/security.asciidoc b/docs/reference/security.asciidoc new file mode 100644 index 000000000..ce49aaba6 --- /dev/null +++ b/docs/reference/security.asciidoc @@ -0,0 +1,2539 @@ +[[reference-security]] +== client.security + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.security.activateUserProfile]] +== `client.security.activateUserProfile()` + +Activate a user profile. Create or update a user profile on behalf of another user. + +{ref}/security-api-activate-user-profile.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityActivateUserProfileRequest, options?: TransportRequestOptions) => Promise<SecurityActivateUserProfileResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecurityActivateUserProfileRequest extends <<RequestBase>> { + access_token?: string + grant_type: <<SecurityGrantType>> + password?: string + username?: string +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type SecurityActivateUserProfileResponse = <<SecurityUserProfileWithMetadata>> + +---- + + +[discrete] +[[client.security.authenticate]] +== `client.security.authenticate()` + +Authenticate a user. Authenticates a user and returns information about the authenticated user. Include the user information in a [basic auth header](https://en.wikipedia.org/wiki/Basic_access_authentication). A successful call returns a JSON structure that shows user information such as their username, the roles that are assigned to the user, any assigned metadata, and information about the realms that authenticated and authorized the user. If the user cannot be authenticated, this API returns a 401 status code. 
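+
+For example, to check which user or API key the client is currently authenticated as:
+
+[source,ts]
+----
+// assumes `client` is a configured @elastic/elasticsearch Client instance
+const me = await client.security.authenticate()
+console.log(me.username, me.roles)
+----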
+ +{ref}/security-api-authenticate.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityAuthenticateRequest, options?: TransportRequestOptions) => Promise<SecurityAuthenticateResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecurityAuthenticateRequest extends <<RequestBase>> {} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SecurityAuthenticateResponse { + api_key?: SecurityAuthenticateAuthenticateApiKey + authentication_realm: <<SecurityRealmInfo>> + email?: string | null + full_name?: <<Name>> | null + lookup_realm: <<SecurityRealmInfo>> + metadata: <<Metadata>> + roles: string[] + username: <<Username>> + enabled: boolean + authentication_type: string + token?: SecurityAuthenticateToken +} + +---- + + +[discrete] +[[client.security.bulkDeleteRole]] +== `client.security.bulkDeleteRole()` + +Bulk delete roles. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The bulk delete roles API cannot delete roles that are defined in roles files. + +{ref}/security-api-bulk-delete-role.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityBulkDeleteRoleRequest, options?: TransportRequestOptions) => Promise<SecurityBulkDeleteRoleResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecurityBulkDeleteRoleRequest extends <<RequestBase>> { + refresh?: <<Refresh>> + names: string[] +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SecurityBulkDeleteRoleResponse { + deleted?: string[] + not_found?: string[] + errors?: <<SecurityBulkError>> +} + +---- + + +[discrete] +[[client.security.bulkPutRole]] +== `client.security.bulkPutRole()` + +Bulk create or update roles. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The bulk create or update roles API cannot update roles that are defined in roles files. + +{ref}/security-api-bulk-put-role.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityBulkPutRoleRequest, options?: TransportRequestOptions) => Promise<SecurityBulkPutRoleResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecurityBulkPutRoleRequest extends <<RequestBase>> { + refresh?: <<Refresh>> + roles: Record<string, <<SecurityRoleDescriptor>>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SecurityBulkPutRoleResponse { + created?: string[] + updated?: string[] + noop?: string[] + errors?: <<SecurityBulkError>> +} + +---- + + +[discrete] +[[client.security.bulkUpdateApiKeys]] +== `client.security.bulkUpdateApiKeys()` + +Updates the attributes of multiple existing API keys. + +{ref}/security-api-bulk-update-api-keys.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityBulkUpdateApiKeysRequest, options?: TransportRequestOptions) => Promise<SecurityBulkUpdateApiKeysResponse> +---- + +[discrete] +[[client.security.changePassword]] +== `client.security.changePassword()` + +Change passwords. Change the passwords of users in the native realm and built-in users. 
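+
+For example (the username and password are placeholders):
+
+[source,ts]
+----
+// assumes `client` is a configured @elastic/elasticsearch Client instance
+// omitting `username` changes the password of the user the client is authenticated as
+await client.security.changePassword({ username: 'jacknich', password: 'new-test-password' })
+----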
+ +{ref}/security-api-change-password.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityChangePasswordRequest, options?: TransportRequestOptions) => Promise<SecurityChangePasswordResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecurityChangePasswordRequest extends <<RequestBase>> { + username?: <<Username>> + refresh?: <<Refresh>> + password?: <<Password>> + password_hash?: string +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SecurityChangePasswordResponse {} + +---- + + +[discrete] +[[client.security.clearApiKeyCache]] +== `client.security.clearApiKeyCache()` + +Clear the API key cache. Evict a subset of all entries from the API key cache. The cache is also automatically cleared on state changes of the security index. + +{ref}/security-api-clear-api-key-cache.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityClearApiKeyCacheRequest, options?: TransportRequestOptions) => Promise<SecurityClearApiKeyCacheResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecurityClearApiKeyCacheRequest extends <<RequestBase>> { + ids: <<Ids>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SecurityClearApiKeyCacheResponse { + _nodes: <<NodeStatistics>> + cluster_name: <<Name>> + nodes: Record<string, <<SecurityClusterNode>>> +} + +---- + + +[discrete] +[[client.security.clearCachedPrivileges]] +== `client.security.clearCachedPrivileges()` + +Clear the privileges cache. Evict privileges from the native application privilege cache. The cache is also automatically cleared for applications that have their privileges updated. + +{ref}/security-api-clear-privilege-cache.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptions) => Promise<SecurityClearCachedPrivilegesResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecurityClearCachedPrivilegesRequest extends <<RequestBase>> { + application: <<Name>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SecurityClearCachedPrivilegesResponse { + _nodes: <<NodeStatistics>> + cluster_name: <<Name>> + nodes: Record<string, <<SecurityClusterNode>>> +} + +---- + + +[discrete] +[[client.security.clearCachedRealms]] +== `client.security.clearCachedRealms()` + +Clear the user cache. Evict users from the user cache. You can completely clear the cache or evict specific users. + +{ref}/security-api-clear-cache.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityClearCachedRealmsRequest, options?: TransportRequestOptions) => Promise<SecurityClearCachedRealmsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecurityClearCachedRealmsRequest extends <<RequestBase>> { + realms: <<Names>> + usernames?: string[] +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SecurityClearCachedRealmsResponse { + _nodes: <<NodeStatistics>> + cluster_name: <<Name>> + nodes: Record<string, <<SecurityClusterNode>>> +} + +---- + + +[discrete] +[[client.security.clearCachedRoles]] +== `client.security.clearCachedRoles()` + +Clear the roles cache. Evict roles from the native role cache. 
+ +{ref}/security-api-clear-role-cache.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityClearCachedRolesRequest, options?: TransportRequestOptions) => Promise<SecurityClearCachedRolesResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecurityClearCachedRolesRequest extends <<RequestBase>> { + name: <<Names>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SecurityClearCachedRolesResponse { + _nodes: <<NodeStatistics>> + cluster_name: <<Name>> + nodes: Record<string, <<SecurityClusterNode>>> +} + +---- + + +[discrete] +[[client.security.clearCachedServiceTokens]] +== `client.security.clearCachedServiceTokens()` + +Clear service account token caches. Evict a subset of all entries from the service account token caches. + +{ref}/security-api-clear-service-token-caches.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptions) => Promise<SecurityClearCachedServiceTokensResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecurityClearCachedServiceTokensRequest extends <<RequestBase>> { + namespace: <<Namespace>> + service: <<Service>> + name: <<Names>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SecurityClearCachedServiceTokensResponse { + _nodes: <<NodeStatistics>> + cluster_name: <<Name>> + nodes: Record<string, <<SecurityClusterNode>>> +} + +---- + + +[discrete] +[[client.security.createApiKey]] +== `client.security.createApiKey()` + +Create an API key. Create an API key for access without requiring basic authentication. A successful request returns a JSON structure that contains the API key, its unique id, and its name. If applicable, it also returns expiration information for the API key in milliseconds. NOTE: By default, API keys never expire. You can specify expiration information when you create the API keys. + +{ref}/security-api-create-api-key.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityCreateApiKeyRequest, options?: TransportRequestOptions) => Promise<SecurityCreateApiKeyResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecurityCreateApiKeyRequest extends <<RequestBase>> { + refresh?: <<Refresh>> + expiration?: <<Duration>> + name?: <<Name>> + role_descriptors?: Record<string, <<SecurityRoleDescriptor>>> + metadata?: <<Metadata>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SecurityCreateApiKeyResponse { + api_key: string + expiration?: <<long>> + id: <<Id>> + name: <<Name>> + encoded: string +} + +---- + + +[discrete] +[[client.security.createCrossClusterApiKey]] +== `client.security.createCrossClusterApiKey()` + +Create a cross-cluster API key. Create an API key of the `cross_cluster` type for the API key based remote cluster access. A `cross_cluster` API key cannot be used to authenticate through the REST interface. IMPORTANT: To authenticate this request you must use a credential that is not an API key. Even if you use an API key that has the required privilege, the API returns an error. Cross-cluster API keys are created by the Elasticsearch API key service, which is automatically enabled. NOTE: Unlike REST API keys, a cross-cluster API key does not capture permissions of the authenticated user. 
The API key’s effective permission is exactly as specified with the `access` property. A successful request returns a JSON structure that contains the API key, its unique ID, and its name. If applicable, it also returns expiration information for the API key in milliseconds. By default, API keys never expire. You can specify expiration information when you create the API keys. Cross-cluster API keys can only be updated with the update cross-cluster API key API. Attempting to update them with the update REST API key API or the bulk update REST API keys API will result in an error.
+
+{ref}/security-api-create-cross-cluster-api-key.html[{es} documentation]
+[discrete]
+=== Function signature
+
+[source,ts]
+----
+(request: SecurityCreateCrossClusterApiKeyRequest, options?: TransportRequestOptions) => Promise<SecurityCreateCrossClusterApiKeyResponse>
+----
+
+[discrete]
+=== Request
+
+[source,ts,subs=+macros]
+----
+interface SecurityCreateCrossClusterApiKeyRequest extends <<RequestBase>> {
+  access: <<SecurityAccess>>
+  expiration?: <<Duration>>
+  metadata?: <<Metadata>>
+  name: <<Name>>
+}
+
+----
+
+
+[discrete]
+=== Response
+
+[source,ts,subs=+macros]
+----
+interface SecurityCreateCrossClusterApiKeyResponse {
+  api_key: string
+  expiration?: <<DurationValue>><<<UnitMillis>>>
+  id: <<Id>>
+  name: <<Name>>
+  encoded: string
+}
+
+----
+
+
+[discrete]
+[[client.security.createServiceToken]]
+== `client.security.createServiceToken()`
+
+Create a service account token. Create a service account token for access without requiring basic authentication.
+
+{ref}/security-api-create-service-token.html[{es} documentation]
+[discrete]
+=== Function signature
+
+[source,ts]
+----
+(request: SecurityCreateServiceTokenRequest, options?: TransportRequestOptions) => Promise<SecurityCreateServiceTokenResponse>
+----
+
+[discrete]
+=== Request
+
+[source,ts,subs=+macros]
+----
+interface SecurityCreateServiceTokenRequest extends <<RequestBase>> {
+  namespace: <<Namespace>>
+  service: <<Service>>
+  name?: <<Name>>
+  refresh?: <<Refresh>>
+}
+
+----
+
+
+[discrete]
+=== Response
+
+[source,ts,subs=+macros]
+----
+interface SecurityCreateServiceTokenResponse {
+  created: boolean
+  token: SecurityCreateServiceTokenToken
+}
+
+----
+
+
+[discrete]
+[[client.security.deletePrivileges]]
+== `client.security.deletePrivileges()`
+
+Delete application privileges.
+
+{ref}/security-api-delete-privilege.html[{es} documentation]
+[discrete]
+=== Function signature
+
+[source,ts]
+----
+(request: SecurityDeletePrivilegesRequest, options?: TransportRequestOptions) => Promise<SecurityDeletePrivilegesResponse>
+----
+
+[discrete]
+=== Request
+
+[source,ts,subs=+macros]
+----
+interface SecurityDeletePrivilegesRequest extends <<RequestBase>> {
+  application: <<Name>>
+  name: <<Names>>
+  refresh?: <<Refresh>>
+}
+
+----
+
+
+[discrete]
+=== Response
+
+[source,ts,subs=+macros]
+----
+type SecurityDeletePrivilegesResponse = Record<string, Record<string, SecurityDeletePrivilegesFoundStatus>>
+
+----
+
+
+[discrete]
+[[client.security.deleteRole]]
+== `client.security.deleteRole()`
+
+Delete roles. Delete roles in the native realm.
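+
+For example (the role name is a placeholder):
+
+[source,ts]
+----
+// assumes `client` is a configured @elastic/elasticsearch Client instance
+const { found } = await client.security.deleteRole({ name: 'my_admin_role' })
+console.log(found) // true if the role existed and was deleted
+----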
+ +{ref}/security-api-delete-role.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityDeleteRoleRequest, options?: TransportRequestOptions) => Promise<SecurityDeleteRoleResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecurityDeleteRoleRequest extends <<RequestBase>> { + name: <<Name>> + refresh?: <<Refresh>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SecurityDeleteRoleResponse { + found: boolean +} + +---- + + +[discrete] +[[client.security.deleteRoleMapping]] +== `client.security.deleteRoleMapping()` + +Delete role mappings. + +{ref}/security-api-delete-role-mapping.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityDeleteRoleMappingRequest, options?: TransportRequestOptions) => Promise<SecurityDeleteRoleMappingResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecurityDeleteRoleMappingRequest extends <<RequestBase>> { + name: <<Name>> + refresh?: <<Refresh>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SecurityDeleteRoleMappingResponse { + found: boolean +} + +---- + + +[discrete] +[[client.security.deleteServiceToken]] +== `client.security.deleteServiceToken()` + +Delete service account tokens. Delete service account tokens for a service in a specified namespace. + +{ref}/security-api-delete-service-token.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityDeleteServiceTokenRequest, options?: TransportRequestOptions) => Promise<SecurityDeleteServiceTokenResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecurityDeleteServiceTokenRequest extends <<RequestBase>> { + namespace: <<Namespace>> + service: <<Service>> + name: <<Name>> + refresh?: <<Refresh>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SecurityDeleteServiceTokenResponse { + found: boolean +} + +---- + + +[discrete] +[[client.security.deleteUser]] +== `client.security.deleteUser()` + +Delete users. Delete users from the native realm. + +{ref}/security-api-delete-user.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityDeleteUserRequest, options?: TransportRequestOptions) => Promise<SecurityDeleteUserResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecurityDeleteUserRequest extends <<RequestBase>> { + username: <<Username>> + refresh?: <<Refresh>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SecurityDeleteUserResponse { + found: boolean +} + +---- + + +[discrete] +[[client.security.disableUser]] +== `client.security.disableUser()` + +Disable users. Disable users in the native realm. 
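+
+For example (the username is a placeholder):
+
+[source,ts]
+----
+// assumes `client` is a configured @elastic/elasticsearch Client instance
+await client.security.disableUser({ username: 'jacknich' })
+----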
+ +{ref}/security-api-disable-user.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityDisableUserRequest, options?: TransportRequestOptions) => Promise<SecurityDisableUserResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecurityDisableUserRequest extends <<RequestBase>> { + username: <<Username>> + refresh?: <<Refresh>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SecurityDisableUserResponse {} + +---- + + +[discrete] +[[client.security.disableUserProfile]] +== `client.security.disableUserProfile()` + +Disable a user profile. Disable user profiles so that they are not visible in user profile searches. + +{ref}/security-api-disable-user-profile.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityDisableUserProfileRequest, options?: TransportRequestOptions) => Promise<SecurityDisableUserProfileResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecurityDisableUserProfileRequest extends <<RequestBase>> { + uid: <<SecurityUserProfileId>> + refresh?: <<Refresh>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type SecurityDisableUserProfileResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.security.enableUser]] +== `client.security.enableUser()` + +Enable users. Enable users in the native realm. + +{ref}/security-api-enable-user.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityEnableUserRequest, options?: TransportRequestOptions) => Promise<SecurityEnableUserResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecurityEnableUserRequest extends <<RequestBase>> { + username: <<Username>> + refresh?: <<Refresh>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SecurityEnableUserResponse {} + +---- + + +[discrete] +[[client.security.enableUserProfile]] +== `client.security.enableUserProfile()` + +Enable a user profile. Enable user profiles to make them visible in user profile searches. + +{ref}/security-api-enable-user-profile.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityEnableUserProfileRequest, options?: TransportRequestOptions) => Promise<SecurityEnableUserProfileResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecurityEnableUserProfileRequest extends <<RequestBase>> { + uid: <<SecurityUserProfileId>> + refresh?: <<Refresh>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type SecurityEnableUserProfileResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.security.enrollKibana]] +== `client.security.enrollKibana()` + +Enroll Kibana. Enable a Kibana instance to configure itself for communication with a secured Elasticsearch cluster. 
+ +{ref}/security-api-kibana-enrollment.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityEnrollKibanaRequest, options?: TransportRequestOptions) => Promise<SecurityEnrollKibanaResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecurityEnrollKibanaRequest extends <<RequestBase>> {} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SecurityEnrollKibanaResponse { + token: SecurityEnrollKibanaToken + http_ca: string +} + +---- + + +[discrete] +[[client.security.enrollNode]] +== `client.security.enrollNode()` + +Enroll a node. Enroll a new node to allow it to join an existing cluster with security features enabled. + +{ref}/security-api-node-enrollment.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityEnrollNodeRequest, options?: TransportRequestOptions) => Promise<SecurityEnrollNodeResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecurityEnrollNodeRequest extends <<RequestBase>> {} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SecurityEnrollNodeResponse { + http_ca_key: string + http_ca_cert: string + transport_ca_cert: string + transport_key: string + transport_cert: string + nodes_addresses: string[] +} + +---- + + +[discrete] +[[client.security.getApiKey]] +== `client.security.getApiKey()` + +Get API key information. Retrieves information for one or more API keys. NOTE: If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own. If you have `read_security`, `manage_api_key` or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership. + +{ref}/security-api-get-api-key.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityGetApiKeyRequest, options?: TransportRequestOptions) => Promise<SecurityGetApiKeyResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecurityGetApiKeyRequest extends <<RequestBase>> { + id?: <<Id>> + name?: <<Name>> + owner?: boolean + realm_name?: <<Name>> + username?: <<Username>> + with_limited_by?: boolean + active_only?: boolean + with_profile_uid?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SecurityGetApiKeyResponse { + api_keys: <<SecurityApiKey>>[] +} + +---- + + +[discrete] +[[client.security.getBuiltinPrivileges]] +== `client.security.getBuiltinPrivileges()` + +Get builtin privileges. Get the list of cluster privileges and index privileges that are available in this version of Elasticsearch. + +{ref}/security-api-get-builtin-privileges.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptions) => Promise<SecurityGetBuiltinPrivilegesResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecurityGetBuiltinPrivilegesRequest extends <<RequestBase>> {} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SecurityGetBuiltinPrivilegesResponse { + cluster: <<SecurityClusterPrivilege>>[] + index: <<IndexName>>[] + remote_cluster: <<SecurityRemoteClusterPrivilege>>[] +} + +---- + + +[discrete] +[[client.security.getPrivileges]] +== `client.security.getPrivileges()` + +Get application privileges. 
+ +{ref}/security-api-get-privileges.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityGetPrivilegesRequest, options?: TransportRequestOptions) => Promise<SecurityGetPrivilegesResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecurityGetPrivilegesRequest extends <<RequestBase>> { + application?: <<Name>> + name?: <<Names>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type SecurityGetPrivilegesResponse = Record<string, Record<string, SecurityPutPrivilegesActions>> + +---- + + +[discrete] +[[client.security.getRole]] +== `client.security.getRole()` + +Get roles. Get roles in the native realm. + +{ref}/security-api-get-role.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityGetRoleRequest, options?: TransportRequestOptions) => Promise<SecurityGetRoleResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecurityGetRoleRequest extends <<RequestBase>> { + name?: <<Names>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type SecurityGetRoleResponse = Record<string, SecurityGetRoleRole> + +---- + + +[discrete] +[[client.security.getRoleMapping]] +== `client.security.getRoleMapping()` + +Get role mappings. Role mappings define which roles are assigned to each user. The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The get role mappings API cannot retrieve role mappings that are defined in role mapping files. + +{ref}/security-api-get-role-mapping.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityGetRoleMappingRequest, options?: TransportRequestOptions) => Promise<SecurityGetRoleMappingResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecurityGetRoleMappingRequest extends <<RequestBase>> { + name?: <<Names>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type SecurityGetRoleMappingResponse = Record<string, <<SecurityRoleMapping>>> + +---- + + +[discrete] +[[client.security.getServiceAccounts]] +== `client.security.getServiceAccounts()` + +Get service accounts. Get a list of service accounts that match the provided path parameters. + +{ref}/security-api-get-service-accounts.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityGetServiceAccountsRequest, options?: TransportRequestOptions) => Promise<SecurityGetServiceAccountsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecurityGetServiceAccountsRequest extends <<RequestBase>> { + namespace?: <<Namespace>> + service?: <<Service>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type SecurityGetServiceAccountsResponse = Record<string, SecurityGetServiceAccountsRoleDescriptorWrapper> + +---- + + +[discrete] +[[client.security.getServiceCredentials]] +== `client.security.getServiceCredentials()` + +Get service account credentials. 
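+
+A short usage sketch (not part of the generated reference), assuming a configured `client` instance; the built-in `elastic/fleet-server` service account is used only as an example:
+
+[source,ts]
+----
+// List all credentials issued for a service account.
+const creds = await client.security.getServiceCredentials({
+  namespace: 'elastic',
+  service: 'fleet-server'
+})
+console.log(creds.count, Object.keys(creds.tokens))
+----
+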
+ +{ref}/security-api-get-service-credentials.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityGetServiceCredentialsRequest, options?: TransportRequestOptions) => Promise<SecurityGetServiceCredentialsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecurityGetServiceCredentialsRequest extends <<RequestBase>> { + namespace: <<Namespace>> + service: <<Name>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SecurityGetServiceCredentialsResponse { + service_account: string + count: <<integer>> + tokens: Record<string, <<Metadata>>> + nodes_credentials: SecurityGetServiceCredentialsNodesCredentials +} + +---- + + +[discrete] +[[client.security.getSettings]] +== `client.security.getSettings()` + +Retrieve settings for the security system indices + +{ref}/security-api-get-settings.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityGetSettingsRequest, options?: TransportRequestOptions) => Promise<SecurityGetSettingsResponse> +---- + +[discrete] +[[client.security.getToken]] +== `client.security.getToken()` + +Get a token. Create a bearer token for access without requiring basic authentication. + +{ref}/security-api-get-token.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityGetTokenRequest, options?: TransportRequestOptions) => Promise<SecurityGetTokenResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecurityGetTokenRequest extends <<RequestBase>> { + grant_type?: SecurityGetTokenAccessTokenGrantType + scope?: string + password?: <<Password>> + kerberos_ticket?: string + refresh_token?: string + username?: <<Username>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SecurityGetTokenResponse { + access_token: string + expires_in: <<long>> + scope?: string + type: string + refresh_token?: string + kerberos_authentication_response_token?: string + authentication: SecurityGetTokenAuthenticatedUser +} + +---- + + +[discrete] +[[client.security.getUser]] +== `client.security.getUser()` + +Get users. Get information about users in the native realm and built-in users. + +{ref}/security-api-get-user.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityGetUserRequest, options?: TransportRequestOptions) => Promise<SecurityGetUserResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecurityGetUserRequest extends <<RequestBase>> { + username?: <<Username>> | <<Username>>[] + with_profile_uid?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type SecurityGetUserResponse = Record<string, <<SecurityUser>>> + +---- + + +[discrete] +[[client.security.getUserPrivileges]] +== `client.security.getUserPrivileges()` + +Get user privileges. 
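+
+A minimal usage sketch (not part of the generated reference), assuming a configured `client` instance; with no arguments the call reports the privileges of the authenticated user:
+
+[source,ts]
+----
+// Inspect the cluster and index privileges of the current user.
+const resp = await client.security.getUserPrivileges()
+console.log(resp.cluster, resp.indices)
+----
+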
+ +{ref}/security-api-get-user-privileges.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityGetUserPrivilegesRequest, options?: TransportRequestOptions) => Promise<SecurityGetUserPrivilegesResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecurityGetUserPrivilegesRequest extends <<RequestBase>> { + application?: <<Name>> + priviledge?: <<Name>> + username?: <<Name>> | null +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SecurityGetUserPrivilegesResponse { + applications: <<SecurityApplicationPrivileges>>[] + cluster: string[] + global: <<SecurityGlobalPrivilege>>[] + indices: <<SecurityUserIndicesPrivileges>>[] + run_as: string[] +} + +---- + + +[discrete] +[[client.security.getUserProfile]] +== `client.security.getUserProfile()` + +Get a user profile. Get a user's profile using the unique profile ID. + +{ref}/security-api-get-user-profile.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityGetUserProfileRequest, options?: TransportRequestOptions) => Promise<SecurityGetUserProfileResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecurityGetUserProfileRequest extends <<RequestBase>> { + uid: <<SecurityUserProfileId>> | <<SecurityUserProfileId>>[] + data?: string | string[] +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SecurityGetUserProfileResponse { + profiles: <<SecurityUserProfileWithMetadata>>[] + errors?: SecurityGetUserProfileGetUserProfileErrors +} + +---- + + +[discrete] +[[client.security.grantApiKey]] +== `client.security.grantApiKey()` + +Grant an API key. Create an API key on behalf of another user. This API is similar to the create API keys API, however it creates the API key for a user that is different than the user that runs the API. The caller must have authentication credentials (either an access token, or a username and password) for the user on whose behalf the API key will be created. It is not possible to use this API to create an API key without that user’s credentials. The user, for whom the authentication credentials is provided, can optionally "run as" (impersonate) another user. In this case, the API key will be created on behalf of the impersonated user. This API is intended be used by applications that need to create and manage API keys for end users, but cannot guarantee that those users have permission to create API keys on their own behalf. A successful grant API key API call returns a JSON structure that contains the API key, its unique id, and its name. If applicable, it also returns expiration information for the API key in milliseconds. By default, API keys never expire. You can specify expiration information when you create the API keys. 
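+
+A hedged usage sketch (not part of the generated reference), assuming a configured `client` instance; the username, password, and key name are placeholders:
+
+[source,ts]
+----
+// Create an API key on behalf of another user, using that user's password credentials.
+const granted = await client.security.grantApiKey({
+  grant_type: 'password',
+  username: 'end-user',
+  password: 'end-user-password',
+  api_key: { name: 'end-user-key' }
+})
+console.log(granted.id, granted.encoded)
+----
+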
+ +{ref}/security-api-grant-api-key.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityGrantApiKeyRequest, options?: TransportRequestOptions) => Promise<SecurityGrantApiKeyResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecurityGrantApiKeyRequest extends <<RequestBase>> { + api_key: SecurityGrantApiKeyGrantApiKey + grant_type: SecurityGrantApiKeyApiKeyGrantType + access_token?: string + username?: <<Username>> + password?: <<Password>> + run_as?: <<Username>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SecurityGrantApiKeyResponse { + api_key: string + id: <<Id>> + name: <<Name>> + expiration?: <<EpochTime>><<<UnitMillis>>> + encoded: string +} + +---- + + +[discrete] +[[client.security.hasPrivileges]] +== `client.security.hasPrivileges()` + +Check user privileges. Determine whether the specified user has a specified list of privileges. + +{ref}/security-api-has-privileges.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityHasPrivilegesRequest, options?: TransportRequestOptions) => Promise<SecurityHasPrivilegesResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecurityHasPrivilegesRequest extends <<RequestBase>> { + user?: <<Name>> + application?: SecurityHasPrivilegesApplicationPrivilegesCheck[] + cluster?: <<SecurityClusterPrivilege>>[] + index?: SecurityHasPrivilegesIndexPrivilegesCheck[] +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SecurityHasPrivilegesResponse { + application: SecurityHasPrivilegesApplicationsPrivileges + cluster: Record<string, boolean> + has_all_requested: boolean + index: Record<<<IndexName>>, SecurityHasPrivilegesPrivileges> + username: <<Username>> +} + +---- + + +[discrete] +[[client.security.hasPrivilegesUserProfile]] +== `client.security.hasPrivilegesUserProfile()` + +Check user profile privileges. Determine whether the users associated with the specified user profile IDs have all the requested privileges. + +{ref}/security-api-has-privileges-user-profile.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptions) => Promise<SecurityHasPrivilegesUserProfileResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecurityHasPrivilegesUserProfileRequest extends <<RequestBase>> { + uids: <<SecurityUserProfileId>>[] + privileges: SecurityHasPrivilegesUserProfilePrivilegesCheck +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SecurityHasPrivilegesUserProfileResponse { + has_privilege_uids: <<SecurityUserProfileId>>[] + errors?: SecurityHasPrivilegesUserProfileHasPrivilegesUserProfileErrors +} + +---- + + +[discrete] +[[client.security.invalidateApiKey]] +== `client.security.invalidateApiKey()` + +Invalidate API keys. This API invalidates API keys created by the create API key or grant API key APIs. Invalidated API keys fail authentication, but they can still be viewed using the get API key information and query API key information APIs, for at least the configured retention period, until they are automatically deleted. The `manage_api_key` privilege allows deleting any API keys. The `manage_own_api_key` only allows deleting API keys that are owned by the user. 
In addition, with the `manage_own_api_key` privilege, an invalidation request must be issued in one of the three formats: - Set the parameter `owner=true`. - Or, set both `username` and `realm_name` to match the user’s identity. - Or, if the request is issued by an API key, that is to say an API key invalidates itself, specify its ID in the `ids` field. + +{ref}/security-api-invalidate-api-key.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityInvalidateApiKeyRequest, options?: TransportRequestOptions) => Promise<SecurityInvalidateApiKeyResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecurityInvalidateApiKeyRequest extends <<RequestBase>> { + id?: <<Id>> + ids?: <<Id>>[] + name?: <<Name>> + owner?: boolean + realm_name?: string + username?: <<Username>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SecurityInvalidateApiKeyResponse { + error_count: <<integer>> + error_details?: <<ErrorCause>>[] + invalidated_api_keys: string[] + previously_invalidated_api_keys: string[] +} + +---- + + +[discrete] +[[client.security.invalidateToken]] +== `client.security.invalidateToken()` + +Invalidate a token. The access tokens returned by the get token API have a finite period of time for which they are valid. After that time period, they can no longer be used. The time period is defined by the `xpack.security.authc.token.timeout` setting. The refresh tokens returned by the get token API are only valid for 24 hours. They can also be used exactly once. If you want to invalidate one or more access or refresh tokens immediately, use this invalidate token API. + +{ref}/security-api-invalidate-token.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityInvalidateTokenRequest, options?: TransportRequestOptions) => Promise<SecurityInvalidateTokenResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecurityInvalidateTokenRequest extends <<RequestBase>> { + token?: string + refresh_token?: string + realm_name?: <<Name>> + username?: <<Username>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SecurityInvalidateTokenResponse { + error_count: <<long>> + error_details?: <<ErrorCause>>[] + invalidated_tokens: <<long>> + previously_invalidated_tokens: <<long>> +} + +---- + + +[discrete] +[[client.security.oidcAuthenticate]] +== `client.security.oidcAuthenticate()` + +Exchanges an OpenID Connection authentication response message for an Elasticsearch access token and refresh token pair + +{ref}/security-api-oidc-authenticate.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityOidcAuthenticateRequest, options?: TransportRequestOptions) => Promise<SecurityOidcAuthenticateResponse> +---- + +[discrete] +[[client.security.oidcLogout]] +== `client.security.oidcLogout()` + +Invalidates a refresh token and access token that was generated from the OpenID Connect Authenticate API + +{ref}/security-api-oidc-logout.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityOidcLogoutRequest, options?: TransportRequestOptions) => Promise<SecurityOidcLogoutResponse> +---- + +[discrete] +[[client.security.oidcPrepareAuthentication]] +== `client.security.oidcPrepareAuthentication()` + +Creates an OAuth 2.0 authentication request as a URL string + 
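+
+A usage sketch under the assumption that an OIDC realm named `oidc1` is configured; the realm name and the `realm` body field are illustrative, so refer to the {es} documentation linked below for the full request body:
+
+[source,ts]
+----
+// Build the OpenID Connect authentication request URL for a configured OIDC realm.
+const prepared = await client.security.oidcPrepareAuthentication({ realm: 'oidc1' })
+console.log(prepared)
+----
+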
+{ref}/security-api-oidc-prepare-authentication.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityOidcPrepareAuthenticationRequest, options?: TransportRequestOptions) => Promise<SecurityOidcPrepareAuthenticationResponse> +---- + +[discrete] +[[client.security.putPrivileges]] +== `client.security.putPrivileges()` + +Create or update application privileges. + +{ref}/security-api-put-privileges.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityPutPrivilegesRequest, options?: TransportRequestOptions) => Promise<SecurityPutPrivilegesResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecurityPutPrivilegesRequest extends <<RequestBase>> { + refresh?: <<Refresh>> + privileges?: Record<string, Record<string, SecurityPutPrivilegesActions>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type SecurityPutPrivilegesResponse = Record<string, Record<string, <<SecurityCreatedStatus>>>> + +---- + + +[discrete] +[[client.security.putRole]] +== `client.security.putRole()` + +Create or update roles. The role management APIs are generally the preferred way to manage roles in the native realm, rather than using file-based role management. The create or update roles API cannot update roles that are defined in roles files. File-based role management is not available in Elastic Serverless. + +{ref}/security-api-put-role.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityPutRoleRequest, options?: TransportRequestOptions) => Promise<SecurityPutRoleResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecurityPutRoleRequest extends <<RequestBase>> { + name: <<Name>> + refresh?: <<Refresh>> + applications?: <<SecurityApplicationPrivileges>>[] + cluster?: <<SecurityClusterPrivilege>>[] + global?: Record<string, any> + indices?: <<SecurityIndicesPrivileges>>[] + remote_indices?: <<SecurityRemoteIndicesPrivileges>>[] + remote_cluster?: <<SecurityRemoteClusterPrivileges>>[] + metadata?: <<Metadata>> + run_as?: string[] + description?: string + transient_metadata?: Record<string, any> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SecurityPutRoleResponse { + role: <<SecurityCreatedStatus>> +} + +---- + + +[discrete] +[[client.security.putRoleMapping]] +== `client.security.putRoleMapping()` + +Create or update role mappings. Role mappings define which roles are assigned to each user. Each mapping has rules that identify users and a list of roles that are granted to those users. The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The create or update role mappings API cannot update role mappings that are defined in role mapping files. This API does not create roles. Rather, it maps users to existing roles. Roles can be created by using the create or update roles API or roles files. 
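+
+A brief usage sketch (not part of the generated reference), assuming a configured `client` instance; the mapping name, role, and group DN are placeholders:
+
+[source,ts]
+----
+// Grant an existing role to every user in a given LDAP group.
+await client.security.putRoleMapping({
+  name: 'ldap-admins',
+  enabled: true,
+  roles: ['superuser'],
+  rules: { field: { groups: 'cn=admins,dc=example,dc=com' } }
+})
+----
+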
+ +{ref}/security-api-put-role-mapping.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityPutRoleMappingRequest, options?: TransportRequestOptions) => Promise<SecurityPutRoleMappingResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecurityPutRoleMappingRequest extends <<RequestBase>> { + name: <<Name>> + refresh?: <<Refresh>> + enabled?: boolean + metadata?: <<Metadata>> + roles?: string[] + role_templates?: <<SecurityRoleTemplate>>[] + rules?: <<SecurityRoleMappingRule>> + run_as?: string[] +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SecurityPutRoleMappingResponse { + created?: boolean + role_mapping: <<SecurityCreatedStatus>> +} + +---- + + +[discrete] +[[client.security.putUser]] +== `client.security.putUser()` + +Create or update users. A password is required for adding a new user but is optional when updating an existing user. To change a user’s password without updating any other fields, use the change password API. + +{ref}/security-api-put-user.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityPutUserRequest, options?: TransportRequestOptions) => Promise<SecurityPutUserResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecurityPutUserRequest extends <<RequestBase>> { + username: <<Username>> + refresh?: <<Refresh>> + email?: string | null + full_name?: string | null + metadata?: <<Metadata>> + password?: <<Password>> + password_hash?: string + roles?: string[] + enabled?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SecurityPutUserResponse { + created: boolean +} + +---- + + +[discrete] +[[client.security.queryApiKeys]] +== `client.security.queryApiKeys()` + +Find API keys with a query. Get a paginated list of API keys and their information. You can optionally filter the results with a query. + +{ref}/security-api-query-api-key.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityQueryApiKeysRequest, options?: TransportRequestOptions) => Promise<SecurityQueryApiKeysResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecurityQueryApiKeysRequest extends <<RequestBase>> { + with_limited_by?: boolean + with_profile_uid?: boolean + typed_keys?: boolean + aggregations?: Record<string, SecurityQueryApiKeysApiKeyAggregationContainer> + pass:[/**] @alias aggregations */ + aggs?: Record<string, SecurityQueryApiKeysApiKeyAggregationContainer> + query?: SecurityQueryApiKeysApiKeyQueryContainer + from?: <<integer>> + sort?: <<Sort>> + size?: <<integer>> + search_after?: <<SortResults>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SecurityQueryApiKeysResponse { + total: <<integer>> + count: <<integer>> + api_keys: <<SecurityApiKey>>[] + aggregations?: Record<<<AggregateName>>, SecurityQueryApiKeysApiKeyAggregate> +} + +---- + + +[discrete] +[[client.security.queryRole]] +== `client.security.queryRole()` + +Find roles with a query. Get roles in a paginated manner. You can optionally filter the results with a query. 
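+
+A short usage sketch (not part of the generated reference), assuming a configured `client` instance; the role-name pattern is a placeholder:
+
+[source,ts]
+----
+// Return the first ten roles whose name starts with "app-", sorted by name.
+const resp = await client.security.queryRole({
+  query: { wildcard: { name: 'app-*' } },
+  sort: ['name'],
+  size: 10
+})
+console.log(resp.total, resp.roles)
+----
+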
+ +{ref}/security-api-query-role.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityQueryRoleRequest, options?: TransportRequestOptions) => Promise<SecurityQueryRoleResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecurityQueryRoleRequest extends <<RequestBase>> { + query?: SecurityQueryRoleRoleQueryContainer + from?: <<integer>> + sort?: <<Sort>> + size?: <<integer>> + search_after?: <<SortResults>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SecurityQueryRoleResponse { + total: <<integer>> + count: <<integer>> + roles: SecurityQueryRoleQueryRole[] +} + +---- + + +[discrete] +[[client.security.queryUser]] +== `client.security.queryUser()` + +Find users with a query. Get information for users in a paginated manner. You can optionally filter the results with a query. + +{ref}/security-api-query-user.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityQueryUserRequest, options?: TransportRequestOptions) => Promise<SecurityQueryUserResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecurityQueryUserRequest extends <<RequestBase>> { + with_profile_uid?: boolean + query?: SecurityQueryUserUserQueryContainer + from?: <<integer>> + sort?: <<Sort>> + size?: <<integer>> + search_after?: <<SortResults>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SecurityQueryUserResponse { + total: <<integer>> + count: <<integer>> + users: SecurityQueryUserQueryUser[] +} + +---- + + +[discrete] +[[client.security.samlAuthenticate]] +== `client.security.samlAuthenticate()` + +Authenticate SAML. Submits a SAML response message to Elasticsearch for consumption. + +{ref}/security-api-saml-authenticate.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecuritySamlAuthenticateRequest, options?: TransportRequestOptions) => Promise<SecuritySamlAuthenticateResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecuritySamlAuthenticateRequest extends <<RequestBase>> { + content: string + ids: <<Ids>> + realm?: string +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SecuritySamlAuthenticateResponse { + access_token: string + username: string + expires_in: <<integer>> + refresh_token: string + realm: string +} + +---- + + +[discrete] +[[client.security.samlCompleteLogout]] +== `client.security.samlCompleteLogout()` + +Logout of SAML completely. Verifies the logout response sent from the SAML IdP. + +{ref}/security-api-saml-complete-logout.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptions) => Promise<SecuritySamlCompleteLogoutResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecuritySamlCompleteLogoutRequest extends <<RequestBase>> { + realm: string + ids: <<Ids>> + query_string?: string + content?: string +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type SecuritySamlCompleteLogoutResponse = boolean + +---- + + +[discrete] +[[client.security.samlInvalidate]] +== `client.security.samlInvalidate()` + +Invalidate SAML. Submits a SAML LogoutRequest message to Elasticsearch for consumption. 
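+
+A hedged usage sketch (not part of the generated reference), assuming a configured `client` instance; the realm name is a placeholder and the `query_string` value is an abbreviated stand-in for the query portion of the IdP's LogoutRequest URL:
+
+[source,ts]
+----
+// Invalidate the session referenced by an IdP-initiated SAML LogoutRequest.
+const resp = await client.security.samlInvalidate({
+  realm: 'saml1',
+  query_string: 'SAMLRequest=...&SigAlg=...&Signature=...'
+})
+console.log(resp.invalidated, resp.redirect)
+----
+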
+ +{ref}/security-api-saml-invalidate.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecuritySamlInvalidateRequest, options?: TransportRequestOptions) => Promise<SecuritySamlInvalidateResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecuritySamlInvalidateRequest extends <<RequestBase>> { + acs?: string + query_string: string + realm?: string +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SecuritySamlInvalidateResponse { + invalidated: <<integer>> + realm: string + redirect: string +} + +---- + + +[discrete] +[[client.security.samlLogout]] +== `client.security.samlLogout()` + +Logout of SAML. Submits a request to invalidate an access token and refresh token. + +{ref}/security-api-saml-logout.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecuritySamlLogoutRequest, options?: TransportRequestOptions) => Promise<SecuritySamlLogoutResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecuritySamlLogoutRequest extends <<RequestBase>> { + token: string + refresh_token?: string +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SecuritySamlLogoutResponse { + redirect: string +} + +---- + + +[discrete] +[[client.security.samlPrepareAuthentication]] +== `client.security.samlPrepareAuthentication()` + +Prepare SAML authentication. Creates a SAML authentication request (`<AuthnRequest>`) as a URL string, based on the configuration of the respective SAML realm in Elasticsearch. + +{ref}/security-api-saml-prepare-authentication.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptions) => Promise<SecuritySamlPrepareAuthenticationResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecuritySamlPrepareAuthenticationRequest extends <<RequestBase>> { + acs?: string + realm?: string + relay_state?: string +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SecuritySamlPrepareAuthenticationResponse { + id: <<Id>> + realm: string + redirect: string +} + +---- + + +[discrete] +[[client.security.samlServiceProviderMetadata]] +== `client.security.samlServiceProviderMetadata()` + +Create SAML service provider metadata. Generate SAML metadata for a SAML 2.0 Service Provider. + +{ref}/security-api-saml-sp-metadata.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptions) => Promise<SecuritySamlServiceProviderMetadataResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecuritySamlServiceProviderMetadataRequest extends <<RequestBase>> { + realm_name: <<Name>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SecuritySamlServiceProviderMetadataResponse { + metadata: string +} + +---- + + +[discrete] +[[client.security.suggestUserProfiles]] +== `client.security.suggestUserProfiles()` + +Suggest a user profile. Get suggestions for user profiles that match specified search criteria. 
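+
+A minimal usage sketch (not part of the generated reference), assuming a configured `client` instance; the search term is a placeholder:
+
+[source,ts]
+----
+// Suggest up to five user profiles matching a partial name or email.
+const resp = await client.security.suggestUserProfiles({ name: 'jack', size: 5 })
+console.log(resp.profiles)
+----
+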
+ +{ref}/security-api-suggest-user-profile.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecuritySuggestUserProfilesRequest, options?: TransportRequestOptions) => Promise<SecuritySuggestUserProfilesResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecuritySuggestUserProfilesRequest extends <<RequestBase>> { + name?: string + size?: <<long>> + data?: string | string[] + hint?: SecuritySuggestUserProfilesHint +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SecuritySuggestUserProfilesResponse { + total: SecuritySuggestUserProfilesTotalUserProfiles + took: <<long>> + profiles: <<SecurityUserProfile>>[] +} + +---- + + +[discrete] +[[client.security.updateApiKey]] +== `client.security.updateApiKey()` + +Update an API key. Updates attributes of an existing API key. Users can only update API keys that they created or that were granted to them. Use this API to update API keys created by the create API Key or grant API Key APIs. If you need to apply the same update to many API keys, you can use bulk update API Keys to reduce overhead. It’s not possible to update expired API keys, or API keys that have been invalidated by invalidate API Key. This API supports updates to an API key’s access scope and metadata. The access scope of an API key is derived from the `role_descriptors` you specify in the request, and a snapshot of the owner user’s permissions at the time of the request. The snapshot of the owner’s permissions is updated automatically on every call. If you don’t specify `role_descriptors` in the request, a call to this API might still change the API key’s access scope. This change can occur if the owner user’s permissions have changed since the API key was created or last modified. To update another user’s API key, use the `run_as` feature to submit a request on behalf of another user. IMPORTANT: It’s not possible to use an API key as the authentication credential for this API. To update an API key, the owner user’s credentials are required. + +{ref}/security-api-update-api-key.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityUpdateApiKeyRequest, options?: TransportRequestOptions) => Promise<SecurityUpdateApiKeyResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecurityUpdateApiKeyRequest extends <<RequestBase>> { + id: <<Id>> + role_descriptors?: Record<string, <<SecurityRoleDescriptor>>> + metadata?: <<Metadata>> + expiration?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SecurityUpdateApiKeyResponse { + updated: boolean +} + +---- + + +[discrete] +[[client.security.updateCrossClusterApiKey]] +== `client.security.updateCrossClusterApiKey()` + +Update a cross-cluster API key. Update the attributes of an existing cross-cluster API key, which is used for API key based remote cluster access. 
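+
+A hedged usage sketch (not part of the generated reference), assuming a configured `client` instance; the key ID and index pattern are placeholders:
+
+[source,ts]
+----
+// Restrict an existing cross-cluster API key to searching a single index pattern.
+await client.security.updateCrossClusterApiKey({
+  id: 'VuaCfGcBCdbkQm-e5aOx',
+  access: {
+    search: [{ names: ['logs-*'] }]
+  }
+})
+----
+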
+ +{ref}/security-api-update-cross-cluster-api-key.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityUpdateCrossClusterApiKeyRequest, options?: TransportRequestOptions) => Promise<SecurityUpdateCrossClusterApiKeyResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecurityUpdateCrossClusterApiKeyRequest extends <<RequestBase>> { + id: <<Id>> + access: <<SecurityAccess>> + expiration?: <<Duration>> + metadata?: <<Metadata>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SecurityUpdateCrossClusterApiKeyResponse { + updated: boolean +} + +---- + + +[discrete] +[[client.security.updateSettings]] +== `client.security.updateSettings()` + +Update settings for the security system index + +{ref}/security-api-update-settings.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityUpdateSettingsRequest, options?: TransportRequestOptions) => Promise<SecurityUpdateSettingsResponse> +---- + +[discrete] +[[client.security.updateUserProfileData]] +== `client.security.updateUserProfileData()` + +Update user profile data. Update specific data for the user profile that is associated with a unique ID. + +{ref}/security-api-update-user-profile-data.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptions) => Promise<SecurityUpdateUserProfileDataResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SecurityUpdateUserProfileDataRequest extends <<RequestBase>> { + uid: <<SecurityUserProfileId>> + if_seq_no?: <<SequenceNumber>> + if_primary_term?: <<long>> + refresh?: <<Refresh>> + labels?: Record<string, any> + data?: Record<string, any> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type SecurityUpdateUserProfileDataResponse = <<AcknowledgedResponseBase>> + +---- + + diff --git a/docs/reference/shared-types/async-search-types.asciidoc b/docs/reference/shared-types/async-search-types.asciidoc new file mode 100644 index 000000000..f9695b24d --- /dev/null +++ b/docs/reference/shared-types/async-search-types.asciidoc @@ -0,0 +1,101 @@ +[[reference-shared-types-async-search-types]] + +=== `AsyncSearch` types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[AsyncSearchAsyncSearch]] +=== AsyncSearchAsyncSearch + +[source,ts,subs=+macros] +---- +interface AsyncSearchAsyncSearch<TDocument = unknown, TAggregations = Record<<<AggregateName>>, <<AggregationsAggregate>>>> { + pass:[/**] @property aggregations Partial aggregations results, coming from the shards that have already completed the execution of the query. */ + aggregations?: TAggregations + _clusters?: <<ClusterStatistics>> + fields?: Record<string, any> + hits: <<SearchHitsMetadata>><TDocument> + max_score?: <<double>> + pass:[/**] @property num_reduce_phases Indicates how many reductions of the results have been performed. If this number increases compared to the last retrieved results for a get asynch search request, you can expect additional results included in the search response. */ + num_reduce_phases?: <<long>> + profile?: <<SearchProfile>> + pit_id?: <<Id>> + _scroll_id?: <<ScrollId>> + pass:[/**] @property _shards Indicates how many shards have run the query. Note that in order for shard results to be included in the search response, they need to be reduced first. */ + _shards: <<ShardStatistics>> + suggest?: Record<<<SuggestionName>>, <<SearchSuggest>><TDocument>[]> + terminated_early?: boolean + timed_out: boolean + took: <<long>> +} +---- + + +[discrete] +[[AsyncSearchAsyncSearchDocumentResponseBase]] +=== AsyncSearchAsyncSearchDocumentResponseBase + +[source,ts,subs=+macros] +---- +interface AsyncSearchAsyncSearchDocumentResponseBase<TDocument = unknown, TAggregations = Record<<<AggregateName>>, <<AggregationsAggregate>>>> extends <<AsyncSearchAsyncSearchResponseBase>> { + response: <<AsyncSearchAsyncSearch>><TDocument, TAggregations> +} +---- + + +[discrete] +[[AsyncSearchAsyncSearchResponseBase]] +=== AsyncSearchAsyncSearchResponseBase + +[source,ts,subs=+macros] +---- +interface AsyncSearchAsyncSearchResponseBase { + id?: <<Id>> + pass:[/**] @property is_partial When the query is no longer running, this property indicates whether the search failed or was successfully completed on all shards. While the query is running, `is_partial` is always set to `true`. */ + is_partial: boolean + pass:[/**] @property is_running Indicates whether the search is still running or has completed. NOTE: If the search failed after some shards returned their results or the node that is coordinating the async search dies, results may be partial even though `is_running` is `false`. */ + is_running: boolean + pass:[/**] @property expiration_time Indicates when the async search will expire. */ + expiration_time?: <<DateTime>> + expiration_time_in_millis: <<EpochTime>><<<UnitMillis>>> + start_time?: <<DateTime>> + start_time_in_millis: <<EpochTime>><<<UnitMillis>>> + pass:[/**] @property completion_time Indicates when the async search completed. Only present when the search has completed. 
*/ + completion_time?: <<DateTime>> + completion_time_in_millis?: <<EpochTime>><<<UnitMillis>>> +} +---- + + diff --git a/docs/reference/shared-types/autoscaling-types.asciidoc b/docs/reference/shared-types/autoscaling-types.asciidoc new file mode 100644 index 000000000..e3b798193 --- /dev/null +++ b/docs/reference/shared-types/autoscaling-types.asciidoc @@ -0,0 +1,51 @@ +[[reference-shared-types-autoscaling-types]] + +=== `Autoscaling` types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[AutoscalingAutoscalingPolicy]] +=== AutoscalingAutoscalingPolicy + +[source,ts,subs=+macros] +---- +interface AutoscalingAutoscalingPolicy { + roles: string[] + pass:[/**] @property deciders Decider settings. */ + deciders: Record<string, any> +} +---- + + diff --git a/docs/reference/shared-types/cat-types.asciidoc b/docs/reference/shared-types/cat-types.asciidoc new file mode 100644 index 000000000..fbe9f27e0 --- /dev/null +++ b/docs/reference/shared-types/cat-types.asciidoc @@ -0,0 +1,147 @@ +[[reference-shared-types-cat-types]] + +=== `Cat` types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[CatCatAnomalyDetectorColumn]] +=== CatCatAnomalyDetectorColumn + +[source,ts,subs=+macros] +---- +type CatCatAnomalyDetectorColumn = 'assignment_explanation' | 'ae' | 'buckets.count' | 'bc' | 'bucketsCount' | 'buckets.time.exp_avg' | 'btea' | 'bucketsTimeExpAvg' | 'buckets.time.exp_avg_hour' | 'bteah' | 'bucketsTimeExpAvgHour' | 'buckets.time.max' | 'btmax' | 'bucketsTimeMax' | 'buckets.time.min' | 'btmin' | 'bucketsTimeMin' | 'buckets.time.total' | 'btt' | 'bucketsTimeTotal' | 'data.buckets' | 'db' | 'dataBuckets' | 'data.earliest_record' | 'der' | 'dataEarliestRecord' | 'data.empty_buckets' | 'deb' | 'dataEmptyBuckets' | 'data.input_bytes' | 'dib' | 'dataInputBytes' | 'data.input_fields' | 'dif' | 'dataInputFields' | 'data.input_records' | 'dir' | 'dataInputRecords' | 'data.invalid_dates' | 'did' | 'dataInvalidDates' | 'data.last' | 'dl' | 'dataLast' | 'data.last_empty_bucket' | 'dleb' | 'dataLastEmptyBucket' | 'data.last_sparse_bucket' | 'dlsb' | 'dataLastSparseBucket' | 'data.latest_record' | 'dlr' | 'dataLatestRecord' | 'data.missing_fields' | 'dmf' | 'dataMissingFields' | 'data.out_of_order_timestamps' | 'doot' | 'dataOutOfOrderTimestamps' | 'data.processed_fields' | 'dpf' | 'dataProcessedFields' | 'data.processed_records' | 'dpr' | 'dataProcessedRecords' | 'data.sparse_buckets' | 'dsb' | 'dataSparseBuckets' | 'forecasts.memory.avg' | 'fmavg' | 'forecastsMemoryAvg' | 'forecasts.memory.max' | 'fmmax' | 'forecastsMemoryMax' | 'forecasts.memory.min' | 'fmmin' | 'forecastsMemoryMin' | 'forecasts.memory.total' | 'fmt' | 'forecastsMemoryTotal' | 'forecasts.records.avg' | 'fravg' | 'forecastsRecordsAvg' | 'forecasts.records.max' | 'frmax' | 'forecastsRecordsMax' | 'forecasts.records.min' | 'frmin' | 'forecastsRecordsMin' | 'forecasts.records.total' | 'frt' | 'forecastsRecordsTotal' | 'forecasts.time.avg' | 'ftavg' | 'forecastsTimeAvg' | 'forecasts.time.max' | 'ftmax' | 'forecastsTimeMax' | 'forecasts.time.min' | 'ftmin' | 'forecastsTimeMin' | 'forecasts.time.total' | 'ftt' | 'forecastsTimeTotal' | 'forecasts.total' | 'ft' | 'forecastsTotal' | 'id' | 'model.bucket_allocation_failures' | 'mbaf' | 'modelBucketAllocationFailures' | 'model.by_fields' | 'mbf' | 'modelByFields' | 'model.bytes' | 'mb' | 'modelBytes' | 'model.bytes_exceeded' | 'mbe' | 'modelBytesExceeded' | 'model.categorization_status' | 'mcs' | 'modelCategorizationStatus' | 'model.categorized_doc_count' | 'mcdc' | 'modelCategorizedDocCount' | 'model.dead_category_count' | 'mdcc' | 'modelDeadCategoryCount' | 'model.failed_category_count' | 'mdcc' | 'modelFailedCategoryCount' | 'model.frequent_category_count' | 'mfcc' | 'modelFrequentCategoryCount' | 'model.log_time' | 'mlt' | 'modelLogTime' | 'model.memory_limit' | 'mml' | 'modelMemoryLimit' | 'model.memory_status' | 'mms' | 'modelMemoryStatus' | 'model.over_fields' | 'mof' | 'modelOverFields' | 'model.partition_fields' | 'mpf' | 'modelPartitionFields' | 'model.rare_category_count' | 'mrcc' | 'modelRareCategoryCount' | 'model.timestamp' | 'mt' | 
'modelTimestamp' | 'model.total_category_count' | 'mtcc' | 'modelTotalCategoryCount' | 'node.address' | 'na' | 'nodeAddress' | 'node.ephemeral_id' | 'ne' | 'nodeEphemeralId' | 'node.id' | 'ni' | 'nodeId' | 'node.name' | 'nn' | 'nodeName' | 'opened_time' | 'ot' | 'state' | 's' +---- + + +[discrete] +[[CatCatAnonalyDetectorColumns]] +=== CatCatAnonalyDetectorColumns + +[source,ts,subs=+macros] +---- +type CatCatAnonalyDetectorColumns = <<CatCatAnomalyDetectorColumn>> | <<CatCatAnomalyDetectorColumn>>[] +---- + + +[discrete] +[[CatCatDatafeedColumn]] +=== CatCatDatafeedColumn + +[source,ts,subs=+macros] +---- +type CatCatDatafeedColumn = 'ae' | 'assignment_explanation' | 'bc' | 'buckets.count' | 'bucketsCount' | 'id' | 'na' | 'node.address' | 'nodeAddress' | 'ne' | 'node.ephemeral_id' | 'nodeEphemeralId' | 'ni' | 'node.id' | 'nodeId' | 'nn' | 'node.name' | 'nodeName' | 'sba' | 'search.bucket_avg' | 'searchBucketAvg' | 'sc' | 'search.count' | 'searchCount' | 'seah' | 'search.exp_avg_hour' | 'searchExpAvgHour' | 'st' | 'search.time' | 'searchTime' | 's' | 'state' +---- + + +[discrete] +[[CatCatDatafeedColumns]] +=== CatCatDatafeedColumns + +[source,ts,subs=+macros] +---- +type CatCatDatafeedColumns = <<CatCatDatafeedColumn>> | <<CatCatDatafeedColumn>>[] +---- + + +[discrete] +[[CatCatDfaColumn]] +=== CatCatDfaColumn + +[source,ts,subs=+macros] +---- +type CatCatDfaColumn = 'assignment_explanation' | 'ae' | 'create_time' | 'ct' | 'createTime' | 'description' | 'd' | 'dest_index' | 'di' | 'destIndex' | 'failure_reason' | 'fr' | 'failureReason' | 'id' | 'model_memory_limit' | 'mml' | 'modelMemoryLimit' | 'node.address' | 'na' | 'nodeAddress' | 'node.ephemeral_id' | 'ne' | 'nodeEphemeralId' | 'node.id' | 'ni' | 'nodeId' | 'node.name' | 'nn' | 'nodeName' | 'progress' | 'p' | 'source_index' | 'si' | 'sourceIndex' | 'state' | 's' | 'type' | 't' | 'version' | 'v' +---- + + +[discrete] +[[CatCatDfaColumns]] +=== CatCatDfaColumns + +[source,ts,subs=+macros] +---- +type CatCatDfaColumns = <<CatCatDfaColumn>> | <<CatCatDfaColumn>>[] +---- + + +[discrete] +[[CatCatRequestBase]] +=== CatCatRequestBase + +[source,ts,subs=+macros] +---- +interface CatCatRequestBase extends <<RequestBase>>, <<SpecUtilsCommonCatQueryParameters>> {} +---- + + +[discrete] +[[CatCatTrainedModelsColumn]] +=== CatCatTrainedModelsColumn + +[source,ts,subs=+macros] +---- +type CatCatTrainedModelsColumn = 'create_time' | 'ct' | 'created_by' | 'c' | 'createdBy' | 'data_frame_analytics_id' | 'df' | 'dataFrameAnalytics' | 'dfid' | 'description' | 'd' | 'heap_size' | 'hs' | 'modelHeapSize' | 'id' | 'ingest.count' | 'ic' | 'ingestCount' | 'ingest.current' | 'icurr' | 'ingestCurrent' | 'ingest.failed' | 'if' | 'ingestFailed' | 'ingest.pipelines' | 'ip' | 'ingestPipelines' | 'ingest.time' | 'it' | 'ingestTime' | 'license' | 'l' | 'operations' | 'o' | 'modelOperations' | 'version' | 'v' +---- + + +[discrete] +[[CatCatTrainedModelsColumns]] +=== CatCatTrainedModelsColumns + +[source,ts,subs=+macros] +---- +type CatCatTrainedModelsColumns = <<CatCatTrainedModelsColumn>> | <<CatCatTrainedModelsColumn>>[] +---- + + +[discrete] +[[CatCatTransformColumn]] +=== CatCatTransformColumn + +[source,ts,subs=+macros] +---- +type CatCatTransformColumn = 'changes_last_detection_time' | 'cldt' | 'checkpoint' | 'cp' | 'checkpoint_duration_time_exp_avg' | 'cdtea' | 'checkpointTimeExpAvg' | 'checkpoint_progress' | 'c' | 'checkpointProgress' | 'create_time' | 'ct' | 'createTime' | 'delete_time' | 'dtime' | 'description' | 'd' | 'dest_index' | 'di' | 'destIndex' | 
'documents_deleted' | 'docd' | 'documents_indexed' | 'doci' | 'docs_per_second' | 'dps' | 'documents_processed' | 'docp' | 'frequency' | 'f' | 'id' | 'index_failure' | 'if' | 'index_time' | 'itime' | 'index_total' | 'it' | 'indexed_documents_exp_avg' | 'idea' | 'last_search_time' | 'lst' | 'lastSearchTime' | 'max_page_search_size' | 'mpsz' | 'pages_processed' | 'pp' | 'pipeline' | 'p' | 'processed_documents_exp_avg' | 'pdea' | 'processing_time' | 'pt' | 'reason' | 'r' | 'search_failure' | 'sf' | 'search_time' | 'stime' | 'search_total' | 'st' | 'source_index' | 'si' | 'sourceIndex' | 'state' | 's' | 'transform_type' | 'tt' | 'trigger_count' | 'tc' | 'version' | 'v' +---- + + +[discrete] +[[CatCatTransformColumns]] +=== CatCatTransformColumns + +[source,ts,subs=+macros] +---- +type CatCatTransformColumns = <<CatCatTransformColumn>> | <<CatCatTransformColumn>>[] +---- + + diff --git a/docs/reference/shared-types/ccr-types.asciidoc b/docs/reference/shared-types/ccr-types.asciidoc new file mode 100644 index 000000000..092b52b51 --- /dev/null +++ b/docs/reference/shared-types/ccr-types.asciidoc @@ -0,0 +1,108 @@ +[[reference-shared-types-ccr-types]] + +=== `Ccr` types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[CcrFollowIndexStats]] +=== CcrFollowIndexStats + +[source,ts,subs=+macros] +---- +interface CcrFollowIndexStats { + index: <<IndexName>> + shards: <<CcrShardStats>>[] +} +---- + + +[discrete] +[[CcrReadException]] +=== CcrReadException + +[source,ts,subs=+macros] +---- +interface CcrReadException { + exception: <<ErrorCause>> + from_seq_no: <<SequenceNumber>> + retries: <<integer>> +} +---- + + +[discrete] +[[CcrShardStats]] +=== CcrShardStats + +[source,ts,subs=+macros] +---- +interface CcrShardStats { + bytes_read: <<long>> + failed_read_requests: <<long>> + failed_write_requests: <<long>> + fatal_exception?: <<ErrorCause>> + follower_aliases_version: <<VersionNumber>> + follower_global_checkpoint: <<long>> + follower_index: string + follower_mapping_version: <<VersionNumber>> + follower_max_seq_no: <<SequenceNumber>> + follower_settings_version: <<VersionNumber>> + last_requested_seq_no: <<SequenceNumber>> + leader_global_checkpoint: <<long>> + leader_index: string + leader_max_seq_no: <<SequenceNumber>> + operations_read: <<long>> + operations_written: <<long>> + outstanding_read_requests: <<integer>> + outstanding_write_requests: <<integer>> + read_exceptions: <<CcrReadException>>[] + remote_cluster: string + shard_id: <<integer>> + successful_read_requests: <<long>> + successful_write_requests: <<long>> + time_since_last_read?: <<Duration>> + time_since_last_read_millis: <<DurationValue>><<<UnitMillis>>> + total_read_remote_exec_time?: <<Duration>> + total_read_remote_exec_time_millis: <<DurationValue>><<<UnitMillis>>> + total_read_time?: <<Duration>> + total_read_time_millis: <<DurationValue>><<<UnitMillis>>> + total_write_time?: <<Duration>> + total_write_time_millis: <<DurationValue>><<<UnitMillis>>> + write_buffer_operation_count: <<long>> + write_buffer_size_in_bytes: <<ByteSize>> +} +---- + + diff --git a/docs/reference/shared-types/cluster-types.asciidoc b/docs/reference/shared-types/cluster-types.asciidoc new file mode 100644 index 000000000..b539b1dd6 --- /dev/null +++ b/docs/reference/shared-types/cluster-types.asciidoc @@ -0,0 +1,81 @@ +[[reference-shared-types-cluster-types]] + +=== `Cluster` types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[ClusterComponentTemplate]] +=== ClusterComponentTemplate + +[source,ts,subs=+macros] +---- +interface ClusterComponentTemplate { + name: <<Name>> + component_template: <<ClusterComponentTemplateNode>> +} +---- + + +[discrete] +[[ClusterComponentTemplateNode]] +=== ClusterComponentTemplateNode + +[source,ts,subs=+macros] +---- +interface ClusterComponentTemplateNode { + template: <<ClusterComponentTemplateSummary>> + version?: <<VersionNumber>> + _meta?: <<Metadata>> +} +---- + + +[discrete] +[[ClusterComponentTemplateSummary]] +=== ClusterComponentTemplateSummary + +[source,ts,subs=+macros] +---- +interface ClusterComponentTemplateSummary { + _meta?: <<Metadata>> + version?: <<VersionNumber>> + settings?: Record<<<IndexName>>, <<IndicesIndexSettings>>> + mappings?: <<MappingTypeMapping>> + aliases?: Record<string, <<IndicesAliasDefinition>>> + lifecycle?: <<IndicesDataStreamLifecycleWithRollover>> +} +---- + + diff --git a/docs/reference/shared-types/connector-types.asciidoc b/docs/reference/shared-types/connector-types.asciidoc new file mode 100644 index 000000000..c1c7596c3 --- /dev/null +++ b/docs/reference/shared-types/connector-types.asciidoc @@ -0,0 +1,565 @@ +[[reference-shared-types-connector-types]] + +=== `Connector` types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[ConnectorConnector]] +=== ConnectorConnector + +[source,ts,subs=+macros] +---- +interface ConnectorConnector { + api_key_id?: string + api_key_secret_id?: string + configuration: <<ConnectorConnectorConfiguration>> + custom_scheduling: <<ConnectorConnectorCustomScheduling>> + description?: string + error?: string | null + features?: <<ConnectorConnectorFeatures>> + filtering: <<ConnectorFilteringConfig>>[] + id?: <<Id>> + index_name?: <<IndexName>> | null + is_native: boolean + language?: string + last_access_control_sync_error?: string + last_access_control_sync_scheduled_at?: <<DateTime>> + last_access_control_sync_status?: <<ConnectorSyncStatus>> + last_deleted_document_count?: <<long>> + last_incremental_sync_scheduled_at?: <<DateTime>> + last_indexed_document_count?: <<long>> + last_seen?: <<DateTime>> + last_sync_error?: string + last_sync_scheduled_at?: <<DateTime>> + last_sync_status?: <<ConnectorSyncStatus>> + last_synced?: <<DateTime>> + name?: string + pipeline?: <<ConnectorIngestPipelineParams>> + scheduling: <<ConnectorSchedulingConfiguration>> + service_type?: string + status: <<ConnectorConnectorStatus>> + sync_cursor?: any + sync_now: boolean +} +---- + + +[discrete] +[[ConnectorConnectorConfigProperties]] +=== ConnectorConnectorConfigProperties + +[source,ts,subs=+macros] +---- +interface ConnectorConnectorConfigProperties { + category?: string + default_value: <<ScalarValue>> + depends_on: <<ConnectorDependency>>[] + display: <<ConnectorDisplayType>> + label: string + options: <<ConnectorSelectOption>>[] + order?: <<integer>> + placeholder?: string + required: boolean + sensitive: boolean + tooltip?: string | null + type?: <<ConnectorConnectorFieldType>> + ui_restrictions?: string[] + validations?: <<ConnectorValidation>>[] + value: any +} +---- + + +[discrete] +[[ConnectorConnectorConfiguration]] +=== ConnectorConnectorConfiguration + +[source,ts,subs=+macros] +---- +type ConnectorConnectorConfiguration = Record<string, <<ConnectorConnectorConfigProperties>>> +---- + + +[discrete] +[[ConnectorConnectorCustomScheduling]] +=== ConnectorConnectorCustomScheduling + +[source,ts,subs=+macros] +---- +type ConnectorConnectorCustomScheduling = Record<string, <<ConnectorCustomScheduling>>> +---- + + +[discrete] +[[ConnectorConnectorFeatures]] +=== ConnectorConnectorFeatures + +[source,ts,subs=+macros] +---- +interface ConnectorConnectorFeatures { + document_level_security?: <<ConnectorFeatureEnabled>> + incremental_sync?: <<ConnectorFeatureEnabled>> + native_connector_api_keys?: <<ConnectorFeatureEnabled>> + sync_rules?: <<ConnectorSyncRulesFeature>> +} +---- + + +[discrete] +[[ConnectorConnectorFieldType]] +=== ConnectorConnectorFieldType + +[source,ts,subs=+macros] +---- +type ConnectorConnectorFieldType = 'str' | 'int' | 'list' | 'bool' +---- + + +[discrete] +[[ConnectorConnectorScheduling]] +=== ConnectorConnectorScheduling + +[source,ts,subs=+macros] +---- +interface ConnectorConnectorScheduling { + enabled: boolean + pass:[/**] @property interval 
The interval is expressed using the crontab syntax */ + interval: string +} +---- + + +[discrete] +[[ConnectorConnectorStatus]] +=== ConnectorConnectorStatus + +[source,ts,subs=+macros] +---- +type ConnectorConnectorStatus = 'created' | 'needs_configuration' | 'configured' | 'connected' | 'error' +---- + + +[discrete] +[[ConnectorConnectorSyncJob]] +=== ConnectorConnectorSyncJob + +[source,ts,subs=+macros] +---- +interface ConnectorConnectorSyncJob { + cancelation_requested_at?: <<DateTime>> + canceled_at?: <<DateTime>> + completed_at?: <<DateTime>> + connector: <<ConnectorSyncJobConnectorReference>> + created_at: <<DateTime>> + deleted_document_count: <<long>> + error?: string + id: <<Id>> + indexed_document_count: <<long>> + indexed_document_volume: <<long>> + job_type: <<ConnectorSyncJobType>> + last_seen?: <<DateTime>> + metadata: Record<string, any> + started_at?: <<DateTime>> + status: <<ConnectorSyncStatus>> + total_document_count: <<long>> + trigger_method: <<ConnectorSyncJobTriggerMethod>> + worker_hostname?: string +} +---- + + +[discrete] +[[ConnectorCustomScheduling]] +=== ConnectorCustomScheduling + +[source,ts,subs=+macros] +---- +interface ConnectorCustomScheduling { + configuration_overrides: <<ConnectorCustomSchedulingConfigurationOverrides>> + enabled: boolean + interval: string + last_synced?: <<DateTime>> + name: string +} +---- + + +[discrete] +[[ConnectorCustomSchedulingConfigurationOverrides]] +=== ConnectorCustomSchedulingConfigurationOverrides + +[source,ts,subs=+macros] +---- +interface ConnectorCustomSchedulingConfigurationOverrides { + max_crawl_depth?: <<integer>> + sitemap_discovery_disabled?: boolean + domain_allowlist?: string[] + sitemap_urls?: string[] + seed_urls?: string[] +} +---- + + +[discrete] +[[ConnectorDependency]] +=== ConnectorDependency + +[source,ts,subs=+macros] +---- +interface ConnectorDependency { + field: string + value: <<ScalarValue>> +} +---- + + +[discrete] +[[ConnectorDisplayType]] +=== ConnectorDisplayType + +[source,ts,subs=+macros] +---- +type ConnectorDisplayType = 'textbox' | 'textarea' | 'numeric' | 'toggle' | 'dropdown' +---- + + +[discrete] +[[ConnectorFeatureEnabled]] +=== ConnectorFeatureEnabled + +[source,ts,subs=+macros] +---- +interface ConnectorFeatureEnabled { + enabled: boolean +} +---- + + +[discrete] +[[ConnectorFilteringAdvancedSnippet]] +=== ConnectorFilteringAdvancedSnippet + +[source,ts,subs=+macros] +---- +interface ConnectorFilteringAdvancedSnippet { + created_at?: <<DateTime>> + updated_at?: <<DateTime>> + value: any +} +---- + + +[discrete] +[[ConnectorFilteringConfig]] +=== ConnectorFilteringConfig + +[source,ts,subs=+macros] +---- +interface ConnectorFilteringConfig { + active: <<ConnectorFilteringRules>> + domain?: string + draft: <<ConnectorFilteringRules>> +} +---- + + +[discrete] +[[ConnectorFilteringPolicy]] +=== ConnectorFilteringPolicy + +[source,ts,subs=+macros] +---- +type ConnectorFilteringPolicy = 'exclude' | 'include' +---- + + +[discrete] +[[ConnectorFilteringRule]] +=== ConnectorFilteringRule + +[source,ts,subs=+macros] +---- +interface ConnectorFilteringRule { + created_at?: <<DateTime>> + field: <<Field>> + id: <<Id>> + order: <<integer>> + policy: <<ConnectorFilteringPolicy>> + rule: <<ConnectorFilteringRuleRule>> + updated_at?: <<DateTime>> + value: string +} +---- + + +[discrete] +[[ConnectorFilteringRuleRule]] +=== ConnectorFilteringRuleRule + +[source,ts,subs=+macros] +---- +type ConnectorFilteringRuleRule = 'contains' | 'ends_with' | 'equals' | 'regex' | 'starts_with' | '>' | '<' 
+---- + + +[discrete] +[[ConnectorFilteringRules]] +=== ConnectorFilteringRules + +[source,ts,subs=+macros] +---- +interface ConnectorFilteringRules { + advanced_snippet: <<ConnectorFilteringAdvancedSnippet>> + rules: <<ConnectorFilteringRule>>[] + validation: <<ConnectorFilteringRulesValidation>> +} +---- + + +[discrete] +[[ConnectorFilteringRulesValidation]] +=== ConnectorFilteringRulesValidation + +[source,ts,subs=+macros] +---- +interface ConnectorFilteringRulesValidation { + errors: <<ConnectorFilteringValidation>>[] + state: <<ConnectorFilteringValidationState>> +} +---- + + +[discrete] +[[ConnectorFilteringValidation]] +=== ConnectorFilteringValidation + +[source,ts,subs=+macros] +---- +interface ConnectorFilteringValidation { + ids: <<Id>>[] + messages: string[] +} +---- + + +[discrete] +[[ConnectorFilteringValidationState]] +=== ConnectorFilteringValidationState + +[source,ts,subs=+macros] +---- +type ConnectorFilteringValidationState = 'edited' | 'invalid' | 'valid' +---- + + +[discrete] +[[ConnectorGreaterThanValidation]] +=== ConnectorGreaterThanValidation + +[source,ts,subs=+macros] +---- +interface ConnectorGreaterThanValidation { + type: 'greater_than' + constraint: <<double>> +} +---- + + +[discrete] +[[ConnectorIncludedInValidation]] +=== ConnectorIncludedInValidation + +[source,ts,subs=+macros] +---- +interface ConnectorIncludedInValidation { + type: 'included_in' + constraint: <<ScalarValue>>[] +} +---- + + +[discrete] +[[ConnectorIngestPipelineParams]] +=== ConnectorIngestPipelineParams + +[source,ts,subs=+macros] +---- +interface ConnectorIngestPipelineParams { + extract_binary_content: boolean + name: string + reduce_whitespace: boolean + run_ml_inference: boolean +} +---- + + +[discrete] +[[ConnectorLessThanValidation]] +=== ConnectorLessThanValidation + +[source,ts,subs=+macros] +---- +interface ConnectorLessThanValidation { + type: 'less_than' + constraint: <<double>> +} +---- + + +[discrete] +[[ConnectorListTypeValidation]] +=== ConnectorListTypeValidation + +[source,ts,subs=+macros] +---- +interface ConnectorListTypeValidation { + type: 'list_type' + constraint: string +} +---- + + +[discrete] +[[ConnectorRegexValidation]] +=== ConnectorRegexValidation + +[source,ts,subs=+macros] +---- +interface ConnectorRegexValidation { + type: 'regex' + constraint: string +} +---- + + +[discrete] +[[ConnectorSchedulingConfiguration]] +=== ConnectorSchedulingConfiguration + +[source,ts,subs=+macros] +---- +interface ConnectorSchedulingConfiguration { + access_control?: <<ConnectorConnectorScheduling>> + full?: <<ConnectorConnectorScheduling>> + incremental?: <<ConnectorConnectorScheduling>> +} +---- + + +[discrete] +[[ConnectorSelectOption]] +=== ConnectorSelectOption + +[source,ts,subs=+macros] +---- +interface ConnectorSelectOption { + label: string + value: <<ScalarValue>> +} +---- + + +[discrete] +[[ConnectorSyncJobConnectorReference]] +=== ConnectorSyncJobConnectorReference + +[source,ts,subs=+macros] +---- +interface ConnectorSyncJobConnectorReference { + configuration: <<ConnectorConnectorConfiguration>> + filtering: <<ConnectorFilteringRules>> + id: <<Id>> + index_name: string + language?: string + pipeline?: <<ConnectorIngestPipelineParams>> + service_type: string + sync_cursor?: any +} +---- + + +[discrete] +[[ConnectorSyncJobTriggerMethod]] +=== ConnectorSyncJobTriggerMethod + +[source,ts,subs=+macros] +---- +type ConnectorSyncJobTriggerMethod = 'on_demand' | 'scheduled' +---- + + +[discrete] +[[ConnectorSyncJobType]] +=== ConnectorSyncJobType + 
+[source,ts,subs=+macros] +---- +type ConnectorSyncJobType = 'full' | 'incremental' | 'access_control' +---- + + +[discrete] +[[ConnectorSyncRulesFeature]] +=== ConnectorSyncRulesFeature + +[source,ts,subs=+macros] +---- +interface ConnectorSyncRulesFeature { + advanced?: <<ConnectorFeatureEnabled>> + basic?: <<ConnectorFeatureEnabled>> +} +---- + + +[discrete] +[[ConnectorSyncStatus]] +=== ConnectorSyncStatus + +[source,ts,subs=+macros] +---- +type ConnectorSyncStatus = 'canceling' | 'canceled' | 'completed' | 'error' | 'in_progress' | 'pending' | 'suspended' +---- + + +[discrete] +[[ConnectorValidation]] +=== ConnectorValidation + +[source,ts,subs=+macros] +---- +type ConnectorValidation = <<ConnectorLessThanValidation>> | <<ConnectorGreaterThanValidation>> | <<ConnectorListTypeValidation>> | <<ConnectorIncludedInValidation>> | <<ConnectorRegexValidation>> +---- + + diff --git a/docs/reference/shared-types/enrich-types.asciidoc b/docs/reference/shared-types/enrich-types.asciidoc new file mode 100644 index 000000000..5cd7cfc9f --- /dev/null +++ b/docs/reference/shared-types/enrich-types.asciidoc @@ -0,0 +1,76 @@ +[[reference-shared-types-enrich-types]] + +=== `Enrich` types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[EnrichPolicy]] +=== EnrichPolicy + +[source,ts,subs=+macros] +---- +interface EnrichPolicy { + enrich_fields: <<Fields>> + indices: <<Indices>> + match_field: <<Field>> + query?: <<QueryDslQueryContainer>> + name?: <<Name>> + elasticsearch_version?: string +} +---- + + +[discrete] +[[EnrichPolicyType]] +=== EnrichPolicyType + +[source,ts,subs=+macros] +---- +type EnrichPolicyType = 'geo_match' | 'match' | 'range' +---- + + +[discrete] +[[EnrichSummary]] +=== EnrichSummary + +[source,ts,subs=+macros] +---- +interface EnrichSummary { + config: Partial<Record<<<EnrichPolicyType>>, <<EnrichPolicy>>>> +} +---- + + diff --git a/docs/reference/shared-types/eql-types.asciidoc b/docs/reference/shared-types/eql-types.asciidoc new file mode 100644 index 000000000..0b7ce2d28 --- /dev/null +++ b/docs/reference/shared-types/eql-types.asciidoc @@ -0,0 +1,112 @@ +[[reference-shared-types-eql-types]] + +=== `Eql` types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[EqlEqlHits]] +=== EqlEqlHits + +[source,ts,subs=+macros] +---- +interface EqlEqlHits<TEvent = unknown> { + pass:[/**] @property total <<Metadata>> about the number of matching events or sequences. */ + total?: <<SearchTotalHits>> + pass:[/**] @property events Contains events matching the query. Each object represents a matching event. */ + events?: <<EqlHitsEvent>><TEvent>[] + pass:[/**] @property sequences Contains event sequences matching the query. Each object represents a matching sequence. This parameter is only returned for EQL queries containing a sequence. */ + sequences?: <<EqlHitsSequence>><TEvent>[] +} +---- + + +[discrete] +[[EqlEqlSearchResponseBase]] +=== EqlEqlSearchResponseBase + +[source,ts,subs=+macros] +---- +interface EqlEqlSearchResponseBase<TEvent = unknown> { + pass:[/**] @property id Identifier for the search. */ + id?: <<Id>> + pass:[/**] @property is_partial If true, the response does not contain complete search results. 
*/ + is_partial?: boolean + pass:[/**] @property is_running If true, the search request is still executing. */ + is_running?: boolean + pass:[/**] @property took Milliseconds it took Elasticsearch to execute the request. */ + took?: <<DurationValue>><<<UnitMillis>>> + pass:[/**] @property timed_out If true, the request timed out before completion. */ + timed_out?: boolean + pass:[/**] @property hits Contains matching events and sequences. Also contains related metadata. */ + hits: <<EqlEqlHits>><TEvent> +} +---- + + +[discrete] +[[EqlHitsEvent]] +=== EqlHitsEvent + +[source,ts,subs=+macros] +---- +interface EqlHitsEvent<TEvent = unknown> { + pass:[/**] @property _index <<Name>> of the index containing the event. */ + _index: <<IndexName>> + pass:[/**] @property _id Unique identifier for the event. This ID is only unique within the index. */ + _id: <<Id>> + pass:[/**] @property _source Original JSON body passed for the event at index time. */ + _source: TEvent + pass:[/**] @property missing Set to `true` for events in a timespan-constrained sequence that do not meet a given condition. */ + missing?: boolean + fields?: Record<<<Field>>, any[]> +} +---- + + +[discrete] +[[EqlHitsSequence]] +=== EqlHitsSequence + +[source,ts,subs=+macros] +---- +interface EqlHitsSequence<TEvent = unknown> { + pass:[/**] @property events Contains events matching the query. Each object represents a matching event. */ + events: <<EqlHitsEvent>><TEvent>[] + pass:[/**] @property join_keys Shared field values used to constrain matches in the sequence. These are defined using the by keyword in the EQL query syntax. */ + join_keys?: any[] +} +---- + + diff --git a/docs/reference/shared-types/esql-types.asciidoc b/docs/reference/shared-types/esql-types.asciidoc new file mode 100644 index 000000000..73424dbfc --- /dev/null +++ b/docs/reference/shared-types/esql-types.asciidoc @@ -0,0 +1,92 @@ +[[reference-shared-types-esql-types]] + +=== `Esql` types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[EsqlTableValuesContainer]] +=== EsqlTableValuesContainer + +[source,ts,subs=+macros] +---- +interface EsqlTableValuesContainer { + <<integer>>?: <<EsqlTableValuesIntegerValue>>[] + keyword?: <<EsqlTableValuesKeywordValue>>[] + <<long>>?: <<EsqlTableValuesLongValue>>[] + <<double>>?: <<EsqlTableValuesLongDouble>>[] +} +---- + + +[discrete] +[[EsqlTableValuesIntegerValue]] +=== EsqlTableValuesIntegerValue + +[source,ts,subs=+macros] +---- +type EsqlTableValuesIntegerValue = <<integer>> | <<integer>>[] +---- + + +[discrete] +[[EsqlTableValuesKeywordValue]] +=== EsqlTableValuesKeywordValue + +[source,ts,subs=+macros] +---- +type EsqlTableValuesKeywordValue = string | string[] +---- + + +[discrete] +[[EsqlTableValuesLongDouble]] +=== EsqlTableValuesLongDouble + +[source,ts,subs=+macros] +---- +type EsqlTableValuesLongDouble = <<double>> | <<double>>[] +---- + + +[discrete] +[[EsqlTableValuesLongValue]] +=== EsqlTableValuesLongValue + +[source,ts,subs=+macros] +---- +type EsqlTableValuesLongValue = <<long>> | <<long>>[] +---- + + diff --git a/docs/reference/shared-types/features-types.asciidoc b/docs/reference/shared-types/features-types.asciidoc new file mode 100644 index 000000000..b5467e746 --- /dev/null +++ b/docs/reference/shared-types/features-types.asciidoc @@ -0,0 +1,50 @@ +[[reference-shared-types-features-types]] + +=== `Features` types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[FeaturesFeature]] +=== FeaturesFeature + +[source,ts,subs=+macros] +---- +interface FeaturesFeature { + name: string + description: string +} +---- + + diff --git a/docs/reference/shared-types/fleet-types.asciidoc b/docs/reference/shared-types/fleet-types.asciidoc new file mode 100644 index 000000000..f5dfa0297 --- /dev/null +++ b/docs/reference/shared-types/fleet-types.asciidoc @@ -0,0 +1,47 @@ +[[reference-shared-types-fleet-types]] + +=== `Fleet` types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[FleetCheckpoint]] +=== FleetCheckpoint + +[source,ts,subs=+macros] +---- +type FleetCheckpoint = <<long>> +---- + + diff --git a/docs/reference/shared-types/global-bulk.asciidoc b/docs/reference/shared-types/global-bulk.asciidoc new file mode 100644 index 000000000..af3ae3adc --- /dev/null +++ b/docs/reference/shared-types/global-bulk.asciidoc @@ -0,0 +1,241 @@ +[[reference-shared-types-global-bulk]] + +=== `Bulk` request types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[BulkCreateOperation]] +==== BulkCreateOperation + +[source,ts,subs=+macros] +---- +interface BulkCreateOperation extends <<BulkWriteOperation>> {} +---- + + +[discrete] +[[BulkDeleteOperation]] +==== BulkDeleteOperation + +[source,ts,subs=+macros] +---- +interface BulkDeleteOperation extends <<BulkOperationBase>> {} +---- + + +[discrete] +[[BulkIndexOperation]] +==== BulkIndexOperation + +[source,ts,subs=+macros] +---- +interface BulkIndexOperation extends <<BulkWriteOperation>> {} +---- + + +[discrete] +[[BulkOperationBase]] +==== BulkOperationBase + +[source,ts,subs=+macros] +---- +interface BulkOperationBase { + pass:[/**] @property _id The document ID. */ + _id?: <<Id>> + pass:[/**] @property _index <<Name>> of the index or index alias to perform the action on. */ + _index?: <<IndexName>> + pass:[/**] @property routing Custom value used to route operations to a specific shard. */ + routing?: <<Routing>> + if_primary_term?: <<long>> + if_seq_no?: <<SequenceNumber>> + version?: <<VersionNumber>> + version_type?: <<VersionType>> +} +---- + + +[discrete] +[[BulkOperationContainer]] +==== BulkOperationContainer + +[source,ts,subs=+macros] +---- +interface BulkOperationContainer { + pass:[/**] @property index Indexes the specified document. If the document exists, replaces the document and increments the version. The following line must contain the source data to be indexed. */ + index?: <<BulkIndexOperation>> + pass:[/**] @property create Indexes the specified document if it does not already exist. The following line must contain the source data to be indexed. */ + create?: <<BulkCreateOperation>> + pass:[/**] @property update Performs a partial document update. The following line must contain the partial document and update options. */ + update?: <<BulkUpdateOperation>> + pass:[/**] @property delete Removes the specified document from the index. 
*/ + delete?: <<BulkDeleteOperation>> +} +---- + + +[discrete] +[[BulkOperationType]] +==== BulkOperationType + +[source,ts,subs=+macros] +---- +type BulkOperationType = 'index' | 'create' | 'update' | 'delete' +---- + + +[discrete] +[[BulkRequest]] +==== BulkRequest + +[source,ts,subs=+macros] +---- +interface BulkRequest<TDocument = unknown, TPartialDocument = unknown> extends <<RequestBase>> { + index?: <<IndexName>> + pipeline?: string + refresh?: <<Refresh>> + routing?: <<Routing>> + _source?: <<SearchSourceConfigParam>> + _source_excludes?: <<Fields>> + _source_includes?: <<Fields>> + timeout?: <<Duration>> + wait_for_active_shards?: <<WaitForActiveShards>> + require_alias?: boolean + operations?: (<<BulkOperationContainer>> | <<BulkUpdateAction>><TDocument, TPartialDocument> | TDocument)[] +} +---- + + +[discrete] +[[BulkResponse]] +==== BulkResponse + +[source,ts,subs=+macros] +---- +interface BulkResponse { + errors: boolean + items: Partial<Record<<<BulkOperationType>>, <<BulkResponseItem>>>>[] + took: <<long>> + ingest_took?: <<long>> +} +---- + + +[discrete] +[[BulkResponseItem]] +==== BulkResponseItem + +[source,ts,subs=+macros] +---- +interface BulkResponseItem { + pass:[/**] @property _id The document ID associated with the operation. */ + _id?: string | null + pass:[/**] @property _index <<Name>> of the index associated with the operation. If the operation targeted a data stream, this is the backing index into which the document was written. */ + _index: string + pass:[/**] @property status HTTP status code returned for the operation. */ + status: <<integer>> + pass:[/**] @property error Contains additional information about the failed operation. The parameter is only returned for failed operations. */ + error?: <<ErrorCause>> + pass:[/**] @property _primary_term The primary term assigned to the document for the operation. */ + _primary_term?: <<long>> + pass:[/**] @property result <<Result>> of the operation. Successful values are `created`, `deleted`, and `updated`. */ + result?: string + pass:[/**] @property _seq_no The sequence number assigned to the document for the operation. Sequence numbers are used to ensure an older version of a document doesn’t overwrite a newer version. */ + _seq_no?: <<SequenceNumber>> + pass:[/**] @property _shards Contains shard information for the operation. */ + _shards?: <<ShardStatistics>> + pass:[/**] @property _version The document version associated with the operation. The document version is incremented each time the document is updated. */ + _version?: <<VersionNumber>> + forced_refresh?: boolean + get?: <<InlineGet>><Record<string, any>> +} +---- + + +[discrete] +[[BulkUpdateAction]] +==== BulkUpdateAction + +[source,ts,subs=+macros] +---- +interface BulkUpdateAction<TDocument = unknown, TPartialDocument = unknown> { + pass:[/**] @property detect_noop Set to false to disable setting 'result' in the response to 'noop' if no change to the document occurred. */ + detect_noop?: boolean + pass:[/**] @property doc A partial update to an existing document. */ + doc?: TPartialDocument + pass:[/**] @property doc_as_upsert Set to true to use the contents of 'doc' as the value of 'upsert' */ + doc_as_upsert?: boolean + pass:[/**] @property script <<Script>> to execute to update the document. */ + script?: <<Script>> | string + pass:[/**] @property scripted_upsert Set to true to execute the script whether or not the document exists. */ + scripted_upsert?: boolean + pass:[/**] @property _source Set to false to disable source retrieval. 
You can also specify a comma-separated list of the fields you want to retrieve. */ + _source?: <<SearchSourceConfig>> + pass:[/**] @property upsert If the document does not already exist, the contents of 'upsert' are inserted as a new document. If the document exists, the 'script' is executed. */ + upsert?: TDocument +} +---- + + +[discrete] +[[BulkUpdateOperation]] +==== BulkUpdateOperation + +[source,ts,subs=+macros] +---- +interface BulkUpdateOperation extends <<BulkOperationBase>> { + pass:[/**] @property require_alias If `true`, the request’s actions must target an index alias. */ + require_alias?: boolean + retry_on_conflict?: <<integer>> +} +---- + + +[discrete] +[[BulkWriteOperation]] +==== BulkWriteOperation + +[source,ts,subs=+macros] +---- +interface BulkWriteOperation extends <<BulkOperationBase>> { + pass:[/**] @property dynamic_templates A map from the full name of fields to the name of dynamic templates. Defaults to an empty map. If a name matches a dynamic template, then that template will be applied regardless of other match predicates defined in the template. If a field is already defined in the mapping, then this parameter won’t be used. */ + dynamic_templates?: Record<string, string> + pass:[/**] @property pipeline ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter. */ + pipeline?: string + pass:[/**] @property require_alias If `true`, the request’s actions must target an index alias. */ + require_alias?: boolean +} +---- + + diff --git a/docs/reference/shared-types/global-clear-scroll.asciidoc b/docs/reference/shared-types/global-clear-scroll.asciidoc new file mode 100644 index 000000000..f5bd2b881 --- /dev/null +++ b/docs/reference/shared-types/global-clear-scroll.asciidoc @@ -0,0 +1,62 @@ +[[reference-shared-types-global-clear-scroll]] + +=== `ClearScroll` request types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[ClearScrollRequest]] +==== ClearScrollRequest + +[source,ts,subs=+macros] +---- +interface ClearScrollRequest extends <<RequestBase>> { + scroll_id?: <<ScrollIds>> +} +---- + + +[discrete] +[[ClearScrollResponse]] +==== ClearScrollResponse + +[source,ts,subs=+macros] +---- +interface ClearScrollResponse { + succeeded: boolean + num_freed: <<integer>> +} +---- + + diff --git a/docs/reference/shared-types/global-close-point-in-time.asciidoc b/docs/reference/shared-types/global-close-point-in-time.asciidoc new file mode 100644 index 000000000..7fc58a3bf --- /dev/null +++ b/docs/reference/shared-types/global-close-point-in-time.asciidoc @@ -0,0 +1,62 @@ +[[reference-shared-types-global-close-point-in-time]] + +=== `ClosePointInTime` request types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[ClosePointInTimeRequest]] +==== ClosePointInTimeRequest + +[source,ts,subs=+macros] +---- +interface ClosePointInTimeRequest extends <<RequestBase>> { + id: <<Id>> +} +---- + + +[discrete] +[[ClosePointInTimeResponse]] +==== ClosePointInTimeResponse + +[source,ts,subs=+macros] +---- +interface ClosePointInTimeResponse { + succeeded: boolean + num_freed: <<integer>> +} +---- + + diff --git a/docs/reference/shared-types/global-count.asciidoc b/docs/reference/shared-types/global-count.asciidoc new file mode 100644 index 000000000..9f5aa5bd3 --- /dev/null +++ b/docs/reference/shared-types/global-count.asciidoc @@ -0,0 +1,77 @@ +[[reference-shared-types-global-count]] + +=== `Count` request types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[CountRequest]] +==== CountRequest + +[source,ts,subs=+macros] +---- +interface CountRequest extends <<RequestBase>> { + index?: <<Indices>> + allow_no_indices?: boolean + analyzer?: string + analyze_wildcard?: boolean + default_operator?: <<QueryDslOperator>> + df?: string + expand_wildcards?: <<ExpandWildcards>> + ignore_throttled?: boolean + ignore_unavailable?: boolean + lenient?: boolean + min_score?: <<double>> + preference?: string + routing?: <<Routing>> + terminate_after?: <<long>> + q?: string + query?: <<QueryDslQueryContainer>> +} +---- + + +[discrete] +[[CountResponse]] +==== CountResponse + +[source,ts,subs=+macros] +---- +interface CountResponse { + count: <<long>> + _shards: <<ShardStatistics>> +} +---- + + diff --git a/docs/reference/shared-types/global-create.asciidoc b/docs/reference/shared-types/global-create.asciidoc new file mode 100644 index 000000000..40038c5b9 --- /dev/null +++ b/docs/reference/shared-types/global-create.asciidoc @@ -0,0 +1,68 @@ +[[reference-shared-types-global-create]] + +=== `Create` request types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[CreateRequest]] +==== CreateRequest + +[source,ts,subs=+macros] +---- +interface CreateRequest<TDocument = unknown> extends <<RequestBase>> { + id: <<Id>> + index: <<IndexName>> + pipeline?: string + refresh?: <<Refresh>> + routing?: <<Routing>> + timeout?: <<Duration>> + version?: <<VersionNumber>> + version_type?: <<VersionType>> + wait_for_active_shards?: <<WaitForActiveShards>> + document?: TDocument +} +---- + + +[discrete] +[[CreateResponse]] +==== CreateResponse + +[source,ts,subs=+macros] +---- +type CreateResponse = <<WriteResponseBase>> +---- + + diff --git a/docs/reference/shared-types/global-delete-by-query-rethrottle.asciidoc b/docs/reference/shared-types/global-delete-by-query-rethrottle.asciidoc new file mode 100644 index 000000000..8e44c76bd --- /dev/null +++ b/docs/reference/shared-types/global-delete-by-query-rethrottle.asciidoc @@ -0,0 +1,60 @@ +[[reference-shared-types-global-delete-by-query-rethrottle]] + +=== `DeleteByQueryRethrottle` request types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[DeleteByQueryRethrottleRequest]] +==== DeleteByQueryRethrottleRequest + +[source,ts,subs=+macros] +---- +interface DeleteByQueryRethrottleRequest extends <<RequestBase>> { + task_id: <<TaskId>> + requests_per_second?: <<float>> +} +---- + + +[discrete] +[[DeleteByQueryRethrottleResponse]] +==== DeleteByQueryRethrottleResponse + +[source,ts,subs=+macros] +---- +type DeleteByQueryRethrottleResponse = <<TasksTaskListResponseBase>> +---- + + diff --git a/docs/reference/shared-types/global-delete-by-query.asciidoc b/docs/reference/shared-types/global-delete-by-query.asciidoc new file mode 100644 index 000000000..8108aa4d5 --- /dev/null +++ b/docs/reference/shared-types/global-delete-by-query.asciidoc @@ -0,0 +1,107 @@ +[[reference-shared-types-global-delete-by-query]] + +=== `DeleteByQuery` request types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[DeleteByQueryRequest]] +==== DeleteByQueryRequest + +[source,ts,subs=+macros] +---- +interface DeleteByQueryRequest extends <<RequestBase>> { + index: <<Indices>> + allow_no_indices?: boolean + analyzer?: string + analyze_wildcard?: boolean + conflicts?: <<Conflicts>> + default_operator?: <<QueryDslOperator>> + df?: string + expand_wildcards?: <<ExpandWildcards>> + from?: <<long>> + ignore_unavailable?: boolean + lenient?: boolean + preference?: string + refresh?: boolean + request_cache?: boolean + requests_per_second?: <<float>> + routing?: <<Routing>> + q?: string + scroll?: <<Duration>> + scroll_size?: <<long>> + search_timeout?: <<Duration>> + search_type?: <<SearchType>> + slices?: <<Slices>> + sort?: string[] + stats?: string[] + terminate_after?: <<long>> + timeout?: <<Duration>> + version?: boolean + wait_for_active_shards?: <<WaitForActiveShards>> + wait_for_completion?: boolean + max_docs?: <<long>> + query?: <<QueryDslQueryContainer>> + slice?: <<SlicedScroll>> +} +---- + + +[discrete] +[[DeleteByQueryResponse]] +==== DeleteByQueryResponse + +[source,ts,subs=+macros] +---- +interface DeleteByQueryResponse { + batches?: <<long>> + deleted?: <<long>> + failures?: <<BulkIndexByScrollFailure>>[] + noops?: <<long>> + requests_per_second?: <<float>> + retries?: <<Retries>> + slice_id?: <<integer>> + task?: <<TaskId>> + throttled?: <<Duration>> + throttled_millis?: <<DurationValue>><<<UnitMillis>>> + throttled_until?: <<Duration>> + throttled_until_millis?: <<DurationValue>><<<UnitMillis>>> + timed_out?: boolean + took?: <<DurationValue>><<<UnitMillis>>> + total?: <<long>> + version_conflicts?: <<long>> +} +---- + + diff --git a/docs/reference/shared-types/global-delete-script.asciidoc b/docs/reference/shared-types/global-delete-script.asciidoc new file mode 100644 index 000000000..002a967ee --- /dev/null +++ b/docs/reference/shared-types/global-delete-script.asciidoc @@ -0,0 +1,61 @@ +[[reference-shared-types-global-delete-script]] + +=== `DeleteScript` request types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[DeleteScriptRequest]] +==== DeleteScriptRequest + +[source,ts,subs=+macros] +---- +interface DeleteScriptRequest extends <<RequestBase>> { + id: <<Id>> + master_timeout?: <<Duration>> + timeout?: <<Duration>> +} +---- + + +[discrete] +[[DeleteScriptResponse]] +==== DeleteScriptResponse + +[source,ts,subs=+macros] +---- +type DeleteScriptResponse = <<AcknowledgedResponseBase>> +---- + + diff --git a/docs/reference/shared-types/global-delete.asciidoc b/docs/reference/shared-types/global-delete.asciidoc new file mode 100644 index 000000000..45cab105a --- /dev/null +++ b/docs/reference/shared-types/global-delete.asciidoc @@ -0,0 +1,68 @@ +[[reference-shared-types-global-delete]] + +=== `Delete` request types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[DeleteRequest]] +==== DeleteRequest + +[source,ts,subs=+macros] +---- +interface DeleteRequest extends <<RequestBase>> { + id: <<Id>> + index: <<IndexName>> + if_primary_term?: <<long>> + if_seq_no?: <<SequenceNumber>> + refresh?: <<Refresh>> + routing?: <<Routing>> + timeout?: <<Duration>> + version?: <<VersionNumber>> + version_type?: <<VersionType>> + wait_for_active_shards?: <<WaitForActiveShards>> +} +---- + + +[discrete] +[[DeleteResponse]] +==== DeleteResponse + +[source,ts,subs=+macros] +---- +type DeleteResponse = <<WriteResponseBase>> +---- + + diff --git a/docs/reference/shared-types/global-exists-source.asciidoc b/docs/reference/shared-types/global-exists-source.asciidoc new file mode 100644 index 000000000..273d94bf4 --- /dev/null +++ b/docs/reference/shared-types/global-exists-source.asciidoc @@ -0,0 +1,69 @@ +[[reference-shared-types-global-exists-source]] + +=== `ExistsSource` request types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[ExistsSourceRequest]] +==== ExistsSourceRequest + +[source,ts,subs=+macros] +---- +interface ExistsSourceRequest extends <<RequestBase>> { + id: <<Id>> + index: <<IndexName>> + preference?: string + realtime?: boolean + refresh?: boolean + routing?: <<Routing>> + _source?: <<SearchSourceConfigParam>> + _source_excludes?: <<Fields>> + _source_includes?: <<Fields>> + version?: <<VersionNumber>> + version_type?: <<VersionType>> +} +---- + + +[discrete] +[[ExistsSourceResponse]] +==== ExistsSourceResponse + +[source,ts,subs=+macros] +---- +type ExistsSourceResponse = boolean +---- + + diff --git a/docs/reference/shared-types/global-exists.asciidoc b/docs/reference/shared-types/global-exists.asciidoc new file mode 100644 index 000000000..6b2e727cb --- /dev/null +++ b/docs/reference/shared-types/global-exists.asciidoc @@ -0,0 +1,70 @@ +[[reference-shared-types-global-exists]] + +=== `Exists` request types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[ExistsRequest]] +==== ExistsRequest + +[source,ts,subs=+macros] +---- +interface ExistsRequest extends <<RequestBase>> { + id: <<Id>> + index: <<IndexName>> + preference?: string + realtime?: boolean + refresh?: boolean + routing?: <<Routing>> + _source?: <<SearchSourceConfigParam>> + _source_excludes?: <<Fields>> + _source_includes?: <<Fields>> + stored_fields?: <<Fields>> + version?: <<VersionNumber>> + version_type?: <<VersionType>> +} +---- + + +[discrete] +[[ExistsResponse]] +==== ExistsResponse + +[source,ts,subs=+macros] +---- +type ExistsResponse = boolean +---- + + diff --git a/docs/reference/shared-types/global-explain.asciidoc b/docs/reference/shared-types/global-explain.asciidoc new file mode 100644 index 000000000..db491a81c --- /dev/null +++ b/docs/reference/shared-types/global-explain.asciidoc @@ -0,0 +1,107 @@ +[[reference-shared-types-global-explain]] + +=== `Explain` request types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[ExplainExplanation]] +==== ExplainExplanation + +[source,ts,subs=+macros] +---- +interface ExplainExplanation { + description: string + details: <<ExplainExplanationDetail>>[] + value: <<float>> +} +---- + + +[discrete] +[[ExplainExplanationDetail]] +==== ExplainExplanationDetail + +[source,ts,subs=+macros] +---- +interface ExplainExplanationDetail { + description: string + details?: <<ExplainExplanationDetail>>[] + value: <<float>> +} +---- + + +[discrete] +[[ExplainRequest]] +==== ExplainRequest + +[source,ts,subs=+macros] +---- +interface ExplainRequest extends <<RequestBase>> { + id: <<Id>> + index: <<IndexName>> + analyzer?: string + analyze_wildcard?: boolean + default_operator?: <<QueryDslOperator>> + df?: string + lenient?: boolean + preference?: string + routing?: <<Routing>> + _source?: <<SearchSourceConfigParam>> + _source_excludes?: <<Fields>> + _source_includes?: <<Fields>> + stored_fields?: <<Fields>> + q?: string + query?: <<QueryDslQueryContainer>> +} +---- + + +[discrete] +[[ExplainResponse]] +==== ExplainResponse + +[source,ts,subs=+macros] +---- +interface ExplainResponse<TDocument = unknown> { + _index: <<IndexName>> + _id: <<Id>> + matched: boolean + explanation?: <<ExplainExplanationDetail>> + get?: <<InlineGet>><TDocument> +} +---- + + diff --git a/docs/reference/shared-types/global-field-caps.asciidoc b/docs/reference/shared-types/global-field-caps.asciidoc new file mode 100644 index 000000000..953b36845 --- /dev/null +++ b/docs/reference/shared-types/global-field-caps.asciidoc @@ -0,0 +1,106 @@ +[[reference-shared-types-global-field-caps]] + +=== `FieldCaps` request types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[FieldCapsFieldCapability]] +==== FieldCapsFieldCapability + +[source,ts,subs=+macros] +---- +interface FieldCapsFieldCapability { + pass:[/**] @property aggregatable Whether this field can be aggregated on all indices. 
*/ + aggregatable: boolean + pass:[/**] @property indices The list of indices where this field has the same type family, or null if all indices have the same type family for the field. */ + indices?: <<Indices>> + pass:[/**] @property meta Merged metadata across all indices as a map of string keys to arrays of values. A value length of 1 indicates that all indices had the same value for this key, while a length of 2 or more indicates that not all indices had the same value for this key. */ + meta?: <<Metadata>> + pass:[/**] @property non_aggregatable_indices The list of indices where this field is not aggregatable, or null if all indices have the same definition for the field. */ + non_aggregatable_indices?: <<Indices>> + pass:[/**] @property non_searchable_indices The list of indices where this field is not searchable, or null if all indices have the same definition for the field. */ + non_searchable_indices?: <<Indices>> + pass:[/**] @property searchable Whether this field is indexed for search on all indices. */ + searchable: boolean + type: string + pass:[/**] @property metadata_field Whether this field is registered as a metadata field. */ + metadata_field?: boolean + pass:[/**] @property time_series_dimension Whether this field is used as a time series dimension. */ + time_series_dimension?: boolean + pass:[/**] @property time_series_metric Contains metric type if this fields is used as a time series metrics, absent if the field is not used as metric. */ + time_series_metric?: <<MappingTimeSeriesMetricType>> + pass:[/**] @property non_dimension_indices If this list is present in response then some indices have the field marked as a dimension and other indices, the ones in this list, do not. */ + non_dimension_indices?: <<IndexName>>[] + pass:[/**] @property metric_conflicts_indices The list of indices where this field is present if these indices don’t have the same `time_series_metric` value for this field. 
*/ + metric_conflicts_indices?: <<IndexName>>[] +} +---- + + +[discrete] +[[FieldCapsRequest]] +==== FieldCapsRequest + +[source,ts,subs=+macros] +---- +interface FieldCapsRequest extends <<RequestBase>> { + index?: <<Indices>> + allow_no_indices?: boolean + expand_wildcards?: <<ExpandWildcards>> + ignore_unavailable?: boolean + include_unmapped?: boolean + filters?: string + types?: string[] + include_empty_fields?: boolean + fields?: <<Fields>> + index_filter?: <<QueryDslQueryContainer>> + runtime_mappings?: <<MappingRuntimeFields>> +} +---- + + +[discrete] +[[FieldCapsResponse]] +==== FieldCapsResponse + +[source,ts,subs=+macros] +---- +interface FieldCapsResponse { + indices: <<Indices>> + fields: Record<<<Field>>, Record<string, <<FieldCapsFieldCapability>>>> +} +---- + + diff --git a/docs/reference/shared-types/global-get-script-context.asciidoc b/docs/reference/shared-types/global-get-script-context.asciidoc new file mode 100644 index 000000000..e380229d6 --- /dev/null +++ b/docs/reference/shared-types/global-get-script-context.asciidoc @@ -0,0 +1,99 @@ +[[reference-shared-types-global-get-script-context]] + +=== `GetScriptContext` request types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[GetScriptContextContext]] +==== GetScriptContextContext + +[source,ts,subs=+macros] +---- +interface GetScriptContextContext { + methods: <<GetScriptContextContextMethod>>[] + name: <<Name>> +} +---- + + +[discrete] +[[GetScriptContextContextMethod]] +==== GetScriptContextContextMethod + +[source,ts,subs=+macros] +---- +interface GetScriptContextContextMethod { + name: <<Name>> + return_type: string + params: <<GetScriptContextContextMethodParam>>[] +} +---- + + +[discrete] +[[GetScriptContextContextMethodParam]] +==== GetScriptContextContextMethodParam + +[source,ts,subs=+macros] +---- +interface GetScriptContextContextMethodParam { + name: <<Name>> + type: string +} +---- + + +[discrete] +[[GetScriptContextRequest]] +==== GetScriptContextRequest + +[source,ts,subs=+macros] +---- +interface GetScriptContextRequest extends <<RequestBase>> {} +---- + + +[discrete] +[[GetScriptContextResponse]] +==== GetScriptContextResponse + +[source,ts,subs=+macros] +---- +interface GetScriptContextResponse { + contexts: <<GetScriptContextContext>>[] +} +---- + + diff --git a/docs/reference/shared-types/global-get-script-languages.asciidoc b/docs/reference/shared-types/global-get-script-languages.asciidoc new file mode 100644 index 
000000000..2ad5dedb1 --- /dev/null +++ b/docs/reference/shared-types/global-get-script-languages.asciidoc @@ -0,0 +1,73 @@ +[[reference-shared-types-global-get-script-languages]] + +=== `GetScriptLanguages` request types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[GetScriptLanguagesLanguageContext]] +==== GetScriptLanguagesLanguageContext + +[source,ts,subs=+macros] +---- +interface GetScriptLanguagesLanguageContext { + contexts: string[] + language: <<ScriptLanguage>> +} +---- + + +[discrete] +[[GetScriptLanguagesRequest]] +==== GetScriptLanguagesRequest + +[source,ts,subs=+macros] +---- +interface GetScriptLanguagesRequest extends <<RequestBase>> {} +---- + + +[discrete] +[[GetScriptLanguagesResponse]] +==== GetScriptLanguagesResponse + +[source,ts,subs=+macros] +---- +interface GetScriptLanguagesResponse { + language_contexts: <<GetScriptLanguagesLanguageContext>>[] + types_allowed: string[] +} +---- + + diff --git a/docs/reference/shared-types/global-get-script.asciidoc b/docs/reference/shared-types/global-get-script.asciidoc new file mode 100644 index 000000000..102d2547b --- /dev/null +++ b/docs/reference/shared-types/global-get-script.asciidoc @@ -0,0 +1,64 @@ +[[reference-shared-types-global-get-script]] + +=== `GetScript` request types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[GetScriptRequest]] +==== GetScriptRequest + +[source,ts,subs=+macros] +---- +interface GetScriptRequest extends <<RequestBase>> { + id: <<Id>> + master_timeout?: <<Duration>> +} +---- + + +[discrete] +[[GetScriptResponse]] +==== GetScriptResponse + +[source,ts,subs=+macros] +---- +interface GetScriptResponse { + _id: <<Id>> + found: boolean + script?: <<StoredScript>> +} +---- + + diff --git a/docs/reference/shared-types/global-get-source.asciidoc b/docs/reference/shared-types/global-get-source.asciidoc new file mode 100644 index 000000000..fe71ef3aa --- /dev/null +++ b/docs/reference/shared-types/global-get-source.asciidoc @@ -0,0 +1,70 @@ +[[reference-shared-types-global-get-source]] + +=== `GetSource` request types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[GetSourceRequest]] +==== GetSourceRequest + +[source,ts,subs=+macros] +---- +interface GetSourceRequest extends <<RequestBase>> { + id: <<Id>> + index: <<IndexName>> + preference?: string + realtime?: boolean + refresh?: boolean + routing?: <<Routing>> + _source?: <<SearchSourceConfigParam>> + _source_excludes?: <<Fields>> + _source_includes?: <<Fields>> + stored_fields?: <<Fields>> + version?: <<VersionNumber>> + version_type?: <<VersionType>> +} +---- + + +[discrete] +[[GetSourceResponse]] +==== GetSourceResponse + +[source,ts,subs=+macros] +---- +type GetSourceResponse<TDocument = unknown> = TDocument +---- + + diff --git a/docs/reference/shared-types/global-get.asciidoc b/docs/reference/shared-types/global-get.asciidoc new file mode 100644 index 000000000..7030b3475 --- /dev/null +++ b/docs/reference/shared-types/global-get.asciidoc @@ -0,0 +1,92 @@ +[[reference-shared-types-global-get]] + +=== `Get` request types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[GetGetResult]] +==== GetGetResult + +[source,ts,subs=+macros] +---- +interface GetGetResult<TDocument = unknown> { + _index: <<IndexName>> + fields?: Record<string, any> + _ignored?: string[] + found: boolean + _id: <<Id>> + _primary_term?: <<long>> + _routing?: string + _seq_no?: <<SequenceNumber>> + _source?: TDocument + _version?: <<VersionNumber>> +} +---- + + +[discrete] +[[GetRequest]] +==== GetRequest + +[source,ts,subs=+macros] +---- +interface GetRequest extends <<RequestBase>> { + id: <<Id>> + index: <<IndexName>> + force_synthetic_source?: boolean + preference?: string + realtime?: boolean + refresh?: boolean + routing?: <<Routing>> + _source?: <<SearchSourceConfigParam>> + _source_excludes?: <<Fields>> + _source_includes?: <<Fields>> + stored_fields?: <<Fields>> + version?: <<VersionNumber>> + version_type?: <<VersionType>> +} +---- + + +[discrete] +[[GetResponse]] +==== GetResponse + +[source,ts,subs=+macros] +---- +type GetResponse<TDocument = unknown> = <<GetGetResult>><TDocument> +---- + + diff --git a/docs/reference/shared-types/global-health-report.asciidoc b/docs/reference/shared-types/global-health-report.asciidoc new file mode 100644 index 000000000..fff8f47fd --- /dev/null +++ b/docs/reference/shared-types/global-health-report.asciidoc @@ -0,0 +1,490 @@ +[[reference-shared-types-global-health-report]] + +=== `HealthReport` request types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[HealthReportBaseIndicator]] +==== HealthReportBaseIndicator + +[source,ts,subs=+macros] +---- +interface HealthReportBaseIndicator { + status: <<HealthReportIndicatorHealthStatus>> + symptom: string + impacts?: <<HealthReportImpact>>[] + diagnosis?: <<HealthReportDiagnosis>>[] +} +---- + + +[discrete] +[[HealthReportDataStreamLifecycleDetails]] +==== HealthReportDataStreamLifecycleDetails + +[source,ts,subs=+macros] +---- +interface HealthReportDataStreamLifecycleDetails { + stagnating_backing_indices_count: <<integer>> + total_backing_indices_in_error: <<integer>> + stagnating_backing_indices?: <<HealthReportStagnatingBackingIndices>>[] +} +---- + + +[discrete] +[[HealthReportDataStreamLifecycleIndicator]] +==== HealthReportDataStreamLifecycleIndicator + +[source,ts,subs=+macros] +---- +interface HealthReportDataStreamLifecycleIndicator extends <<HealthReportBaseIndicator>> { + details?: <<HealthReportDataStreamLifecycleDetails>> +} +---- + + +[discrete] +[[HealthReportDiagnosis]] +==== HealthReportDiagnosis + +[source,ts,subs=+macros] +---- +interface HealthReportDiagnosis { + id: string + action: string + affected_resources: <<HealthReportDiagnosisAffectedResources>> + cause: string + help_url: string +} +---- + + +[discrete] +[[HealthReportDiagnosisAffectedResources]] +==== HealthReportDiagnosisAffectedResources + +[source,ts,subs=+macros] +---- +interface HealthReportDiagnosisAffectedResources { + indices?: <<Indices>> + nodes?: <<HealthReportIndicatorNode>>[] + slm_policies?: string[] + feature_states?: string[] + snapshot_repositories?: string[] +} +---- + + +[discrete] +[[HealthReportDiskIndicator]] +==== HealthReportDiskIndicator + +[source,ts,subs=+macros] +---- +interface HealthReportDiskIndicator extends <<HealthReportBaseIndicator>> { + details?: <<HealthReportDiskIndicatorDetails>> +} +---- + + +[discrete] +[[HealthReportDiskIndicatorDetails]] +==== HealthReportDiskIndicatorDetails + +[source,ts,subs=+macros] +---- +interface HealthReportDiskIndicatorDetails { + indices_with_readonly_block: <<long>> + nodes_with_enough_disk_space: <<long>> + nodes_over_high_watermark: <<long>> + nodes_over_flood_stage_watermark: <<long>> + nodes_with_unknown_disk_status: <<long>> +} +---- + + +[discrete] +[[HealthReportFileSettingsIndicator]] +==== HealthReportFileSettingsIndicator + +[source,ts,subs=+macros] +---- +interface HealthReportFileSettingsIndicator extends <<HealthReportBaseIndicator>> { + details?: <<HealthReportFileSettingsIndicatorDetails>> +} +---- + + +[discrete] +[[HealthReportFileSettingsIndicatorDetails]] +==== HealthReportFileSettingsIndicatorDetails + +[source,ts,subs=+macros] +---- +interface HealthReportFileSettingsIndicatorDetails { + failure_streak: <<long>> + most_recent_failure: string +} +---- + + +[discrete] +[[HealthReportIlmIndicator]] +==== HealthReportIlmIndicator + +[source,ts,subs=+macros] +---- +interface HealthReportIlmIndicator extends <<HealthReportBaseIndicator>> { + details?: <<HealthReportIlmIndicatorDetails>> +} +---- + + 
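Editor's note: the `HealthReport*` types in this file describe the request and response of the client's `healthReport` API. The snippet below is a minimal usage sketch and is not part of the generated reference; the node URL and the chosen `feature` value are placeholder assumptions.

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

// Placeholder connection details; adjust for your cluster.
const client = new Client({ node: 'http://localhost:9200' })

async function run () {
  // HealthReportRequest: limit the report to a single indicator and keep it terse.
  const report = await client.healthReport({
    feature: 'shards_availability',
    verbose: false
  })

  // HealthReportResponse: overall status plus per-indicator details.
  console.log(report.cluster_name, report.status)
  console.log(report.indicators.shards_availability?.symptom)
}

run().catch(console.log)
----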
+[discrete] +[[HealthReportIlmIndicatorDetails]] +==== HealthReportIlmIndicatorDetails + +[source,ts,subs=+macros] +---- +interface HealthReportIlmIndicatorDetails { + ilm_status: <<LifecycleOperationMode>> + policies: <<long>> + stagnating_indices: <<integer>> +} +---- + + +[discrete] +[[HealthReportImpact]] +==== HealthReportImpact + +[source,ts,subs=+macros] +---- +interface HealthReportImpact { + description: string + id: string + impact_areas: <<HealthReportImpactArea>>[] + severity: <<integer>> +} +---- + + +[discrete] +[[HealthReportImpactArea]] +==== HealthReportImpactArea + +[source,ts,subs=+macros] +---- +type HealthReportImpactArea = 'search' | 'ingest' | 'backup' | 'deployment_management' +---- + + +[discrete] +[[HealthReportIndicatorHealthStatus]] +==== HealthReportIndicatorHealthStatus + +[source,ts,subs=+macros] +---- +type HealthReportIndicatorHealthStatus = 'green' | 'yellow' | 'red' | 'unknown' +---- + + +[discrete] +[[HealthReportIndicatorNode]] +==== HealthReportIndicatorNode + +[source,ts,subs=+macros] +---- +interface HealthReportIndicatorNode { + name: string | null + node_id: string | null +} +---- + + +[discrete] +[[HealthReportIndicators]] +==== HealthReportIndicators + +[source,ts,subs=+macros] +---- +interface HealthReportIndicators { + master_is_stable?: <<HealthReportMasterIsStableIndicator>> + shards_availability?: <<HealthReportShardsAvailabilityIndicator>> + disk?: <<HealthReportDiskIndicator>> + repository_integrity?: <<HealthReportRepositoryIntegrityIndicator>> + data_stream_lifecycle?: <<HealthReportDataStreamLifecycleIndicator>> + ilm?: <<HealthReportIlmIndicator>> + slm?: <<HealthReportSlmIndicator>> + shards_capacity?: <<HealthReportShardsCapacityIndicator>> + file_settings?: <<HealthReportFileSettingsIndicator>> +} +---- + + +[discrete] +[[HealthReportMasterIsStableIndicator]] +==== HealthReportMasterIsStableIndicator + +[source,ts,subs=+macros] +---- +interface HealthReportMasterIsStableIndicator extends <<HealthReportBaseIndicator>> { + details?: <<HealthReportMasterIsStableIndicatorDetails>> +} +---- + + +[discrete] +[[HealthReportMasterIsStableIndicatorClusterFormationNode]] +==== HealthReportMasterIsStableIndicatorClusterFormationNode + +[source,ts,subs=+macros] +---- +interface HealthReportMasterIsStableIndicatorClusterFormationNode { + name?: string + node_id: string + cluster_formation_message: string +} +---- + + +[discrete] +[[HealthReportMasterIsStableIndicatorDetails]] +==== HealthReportMasterIsStableIndicatorDetails + +[source,ts,subs=+macros] +---- +interface HealthReportMasterIsStableIndicatorDetails { + current_master: <<HealthReportIndicatorNode>> + recent_masters: <<HealthReportIndicatorNode>>[] + exception_fetching_history?: <<HealthReportMasterIsStableIndicatorExceptionFetchingHistory>> + cluster_formation?: <<HealthReportMasterIsStableIndicatorClusterFormationNode>>[] +} +---- + + +[discrete] +[[HealthReportMasterIsStableIndicatorExceptionFetchingHistory]] +==== HealthReportMasterIsStableIndicatorExceptionFetchingHistory + +[source,ts,subs=+macros] +---- +interface HealthReportMasterIsStableIndicatorExceptionFetchingHistory { + message: string + stack_trace: string +} +---- + + +[discrete] +[[HealthReportRepositoryIntegrityIndicator]] +==== HealthReportRepositoryIntegrityIndicator + +[source,ts,subs=+macros] +---- +interface HealthReportRepositoryIntegrityIndicator extends <<HealthReportBaseIndicator>> { + details?: <<HealthReportRepositoryIntegrityIndicatorDetails>> +} +---- + + +[discrete] 
+[[HealthReportRepositoryIntegrityIndicatorDetails]] +==== HealthReportRepositoryIntegrityIndicatorDetails + +[source,ts,subs=+macros] +---- +interface HealthReportRepositoryIntegrityIndicatorDetails { + total_repositories?: <<long>> + corrupted_repositories?: <<long>> + corrupted?: string[] +} +---- + + +[discrete] +[[HealthReportRequest]] +==== HealthReportRequest + +[source,ts,subs=+macros] +---- +interface HealthReportRequest extends <<RequestBase>> { + feature?: string | string[] + timeout?: <<Duration>> + verbose?: boolean + size?: <<integer>> +} +---- + + +[discrete] +[[HealthReportResponse]] +==== HealthReportResponse + +[source,ts,subs=+macros] +---- +interface HealthReportResponse { + cluster_name: string + indicators: <<HealthReportIndicators>> + status?: <<HealthReportIndicatorHealthStatus>> +} +---- + + +[discrete] +[[HealthReportShardsAvailabilityIndicator]] +==== HealthReportShardsAvailabilityIndicator + +[source,ts,subs=+macros] +---- +interface HealthReportShardsAvailabilityIndicator extends <<HealthReportBaseIndicator>> { + details?: <<HealthReportShardsAvailabilityIndicatorDetails>> +} +---- + + +[discrete] +[[HealthReportShardsAvailabilityIndicatorDetails]] +==== HealthReportShardsAvailabilityIndicatorDetails + +[source,ts,subs=+macros] +---- +interface HealthReportShardsAvailabilityIndicatorDetails { + creating_primaries: <<long>> + creating_replicas: <<long>> + initializing_primaries: <<long>> + initializing_replicas: <<long>> + restarting_primaries: <<long>> + restarting_replicas: <<long>> + started_primaries: <<long>> + started_replicas: <<long>> + unassigned_primaries: <<long>> + unassigned_replicas: <<long>> +} +---- + + +[discrete] +[[HealthReportShardsCapacityIndicator]] +==== HealthReportShardsCapacityIndicator + +[source,ts,subs=+macros] +---- +interface HealthReportShardsCapacityIndicator extends <<HealthReportBaseIndicator>> { + details?: <<HealthReportShardsCapacityIndicatorDetails>> +} +---- + + +[discrete] +[[HealthReportShardsCapacityIndicatorDetails]] +==== HealthReportShardsCapacityIndicatorDetails + +[source,ts,subs=+macros] +---- +interface HealthReportShardsCapacityIndicatorDetails { + data: <<HealthReportShardsCapacityIndicatorTierDetail>> + frozen: <<HealthReportShardsCapacityIndicatorTierDetail>> +} +---- + + +[discrete] +[[HealthReportShardsCapacityIndicatorTierDetail]] +==== HealthReportShardsCapacityIndicatorTierDetail + +[source,ts,subs=+macros] +---- +interface HealthReportShardsCapacityIndicatorTierDetail { + max_shards_in_cluster: <<integer>> + current_used_shards?: <<integer>> +} +---- + + +[discrete] +[[HealthReportSlmIndicator]] +==== HealthReportSlmIndicator + +[source,ts,subs=+macros] +---- +interface HealthReportSlmIndicator extends <<HealthReportBaseIndicator>> { + details?: <<HealthReportSlmIndicatorDetails>> +} +---- + + +[discrete] +[[HealthReportSlmIndicatorDetails]] +==== HealthReportSlmIndicatorDetails + +[source,ts,subs=+macros] +---- +interface HealthReportSlmIndicatorDetails { + slm_status: <<LifecycleOperationMode>> + policies: <<long>> + unhealthy_policies?: <<HealthReportSlmIndicatorUnhealthyPolicies>> +} +---- + + +[discrete] +[[HealthReportSlmIndicatorUnhealthyPolicies]] +==== HealthReportSlmIndicatorUnhealthyPolicies + +[source,ts,subs=+macros] +---- +interface HealthReportSlmIndicatorUnhealthyPolicies { + count: <<long>> + invocations_since_last_success?: Record<string, <<long>>> +} +---- + + +[discrete] +[[HealthReportStagnatingBackingIndices]] +==== HealthReportStagnatingBackingIndices + +[source,ts,subs=+macros] 
+---- +interface HealthReportStagnatingBackingIndices { + index_name: <<IndexName>> + first_occurrence_timestamp: <<long>> + retry_count: <<integer>> +} +---- + + diff --git a/docs/reference/shared-types/global-index.asciidoc b/docs/reference/shared-types/global-index.asciidoc new file mode 100644 index 000000000..cb3bb6315 --- /dev/null +++ b/docs/reference/shared-types/global-index.asciidoc @@ -0,0 +1,72 @@ +[[reference-shared-types-global-index]] + +=== `Index` request types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[IndexRequest]] +==== IndexRequest + +[source,ts,subs=+macros] +---- +interface IndexRequest<TDocument = unknown> extends <<RequestBase>> { + id?: <<Id>> + index: <<IndexName>> + if_primary_term?: <<long>> + if_seq_no?: <<SequenceNumber>> + op_type?: <<OpType>> + pipeline?: string + refresh?: <<Refresh>> + routing?: <<Routing>> + timeout?: <<Duration>> + version?: <<VersionNumber>> + version_type?: <<VersionType>> + wait_for_active_shards?: <<WaitForActiveShards>> + require_alias?: boolean + document?: TDocument +} +---- + + +[discrete] +[[IndexResponse]] +==== IndexResponse + +[source,ts,subs=+macros] +---- +type IndexResponse = <<WriteResponseBase>> +---- + + diff --git a/docs/reference/shared-types/global-info.asciidoc b/docs/reference/shared-types/global-info.asciidoc new file mode 100644 index 000000000..2825956e3 --- /dev/null +++ b/docs/reference/shared-types/global-info.asciidoc @@ -0,0 +1,63 @@ +[[reference-shared-types-global-info]] + +=== `Info` request types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[InfoRequest]] +==== InfoRequest + +[source,ts,subs=+macros] +---- +interface InfoRequest extends <<RequestBase>> {} +---- + + +[discrete] +[[InfoResponse]] +==== InfoResponse + +[source,ts,subs=+macros] +---- +interface InfoResponse { + cluster_name: <<Name>> + cluster_uuid: <<Uuid>> + name: <<Name>> + tagline: string + version: <<ElasticsearchVersionInfo>> +} +---- + + diff --git a/docs/reference/shared-types/global-knn-search-types.asciidoc b/docs/reference/shared-types/global-knn-search-types.asciidoc new file mode 100644 index 000000000..9ee10cb8b --- /dev/null +++ b/docs/reference/shared-types/global-knn-search-types.asciidoc @@ -0,0 +1,56 @@ +[[reference-shared-types-global-knn-search-types]] + +=== `KnnSearch` types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[KnnSearchQuery]] +=== KnnSearchQuery + +[source,ts,subs=+macros] +---- +interface KnnSearchQuery { + pass:[/**] @property field The name of the vector field to search against */ + field: <<Field>> + pass:[/**] @property query_vector The query vector */ + query_vector: <<QueryVector>> + pass:[/**] @property k The final number of nearest neighbors to return as top hits */ + k: <<integer>> + pass:[/**] @property num_candidates The number of nearest neighbor candidates to consider per shard */ + num_candidates: <<integer>> +} +---- + + diff --git a/docs/reference/shared-types/global-knn-search.asciidoc b/docs/reference/shared-types/global-knn-search.asciidoc new file mode 100644 index 000000000..03a5af4a6 --- /dev/null +++ b/docs/reference/shared-types/global-knn-search.asciidoc @@ -0,0 +1,73 @@ +[[reference-shared-types-global-knn-search]] + +=== `KnnSearch` request types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[KnnSearchRequest]] +==== KnnSearchRequest + +[source,ts,subs=+macros] +---- +interface KnnSearchRequest extends <<RequestBase>> { + index: <<Indices>> + routing?: <<Routing>> + _source?: <<SearchSourceConfig>> + docvalue_fields?: (<<QueryDslFieldAndFormat>> | <<Field>>)[] + stored_fields?: <<Fields>> + fields?: <<Fields>> + filter?: <<QueryDslQueryContainer>> | <<QueryDslQueryContainer>>[] + knn: <<KnnSearchQuery>> +} +---- + + +[discrete] +[[KnnSearchResponse]] +==== KnnSearchResponse + +[source,ts,subs=+macros] +---- +interface KnnSearchResponse<TDocument = unknown> { + took: <<long>> + timed_out: boolean + _shards: <<ShardStatistics>> + hits: <<SearchHitsMetadata>><TDocument> + fields?: Record<string, any> + max_score?: <<double>> +} +---- + + diff --git a/docs/reference/shared-types/global-mget.asciidoc b/docs/reference/shared-types/global-mget.asciidoc new file mode 100644 index 000000000..d73270303 --- /dev/null +++ b/docs/reference/shared-types/global-mget.asciidoc @@ -0,0 +1,119 @@ +[[reference-shared-types-global-mget]] + +=== `Mget` request types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[MgetMultiGetError]] +==== MgetMultiGetError + +[source,ts,subs=+macros] +---- +interface MgetMultiGetError { + error: <<ErrorCause>> + _id: <<Id>> + _index: <<IndexName>> +} +---- + + +[discrete] +[[MgetOperation]] +==== MgetOperation + +[source,ts,subs=+macros] +---- +interface MgetOperation { + pass:[/**] @property _id The unique document ID. */ + _id: <<Id>> + pass:[/**] @property _index The index that contains the document. */ + _index?: <<IndexName>> + pass:[/**] @property routing The key for the primary shard the document resides on. Required if routing is used during indexing. */ + routing?: <<Routing>> + pass:[/**] @property _source If `false`, excludes all _source fields. */ + _source?: <<SearchSourceConfig>> + pass:[/**] @property stored_fields The stored fields you want to retrieve. 
*/ + stored_fields?: <<Fields>> + version?: <<VersionNumber>> + version_type?: <<VersionType>> +} +---- + + +[discrete] +[[MgetRequest]] +==== MgetRequest + +[source,ts,subs=+macros] +---- +interface MgetRequest extends <<RequestBase>> { + index?: <<IndexName>> + force_synthetic_source?: boolean + preference?: string + realtime?: boolean + refresh?: boolean + routing?: <<Routing>> + _source?: <<SearchSourceConfigParam>> + _source_excludes?: <<Fields>> + _source_includes?: <<Fields>> + stored_fields?: <<Fields>> + docs?: <<MgetOperation>>[] + ids?: <<Ids>> +} +---- + + +[discrete] +[[MgetResponse]] +==== MgetResponse + +[source,ts,subs=+macros] +---- +interface MgetResponse<TDocument = unknown> { + docs: <<MgetResponseItem>><TDocument>[] +} +---- + + +[discrete] +[[MgetResponseItem]] +==== MgetResponseItem + +[source,ts,subs=+macros] +---- +type MgetResponseItem<TDocument = unknown> = <<GetGetResult>><TDocument> | <<MgetMultiGetError>> +---- + + diff --git a/docs/reference/shared-types/global-msearch-template.asciidoc b/docs/reference/shared-types/global-msearch-template.asciidoc new file mode 100644 index 000000000..54546a944 --- /dev/null +++ b/docs/reference/shared-types/global-msearch-template.asciidoc @@ -0,0 +1,96 @@ +[[reference-shared-types-global-msearch-template]] + +=== `MsearchTemplate` request types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[MsearchTemplateRequest]] +==== MsearchTemplateRequest + +[source,ts,subs=+macros] +---- +interface MsearchTemplateRequest extends <<RequestBase>> { + index?: <<Indices>> + ccs_minimize_roundtrips?: boolean + max_concurrent_searches?: <<long>> + search_type?: <<SearchType>> + rest_total_hits_as_int?: boolean + typed_keys?: boolean + search_templates?: <<MsearchTemplateRequestItem>>[] +} +---- + + +[discrete] +[[MsearchTemplateRequestItem]] +==== MsearchTemplateRequestItem + +[source,ts,subs=+macros] +---- +type MsearchTemplateRequestItem = <<MsearchMultisearchHeader>> | <<MsearchTemplateTemplateConfig>> +---- + + +[discrete] +[[MsearchTemplateResponse]] +==== MsearchTemplateResponse + +[source,ts,subs=+macros] +---- +type MsearchTemplateResponse<TDocument = unknown, TAggregations = Record<<<AggregateName>>, <<AggregationsAggregate>>>> = <<MsearchMultiSearchResult>><TDocument, TAggregations> +---- + + +[discrete] +[[MsearchTemplateTemplateConfig]] +==== MsearchTemplateTemplateConfig + +[source,ts,subs=+macros] +---- +interface MsearchTemplateTemplateConfig { + pass:[/**] @property explain If `true`, returns detailed information about score calculation as part of each hit. */ + explain?: boolean + pass:[/**] @property id ID of the search template to use. If no source is specified, this parameter is required. */ + id?: <<Id>> + pass:[/**] @property params Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value. */ + params?: Record<string, any> + pass:[/**] @property profile If `true`, the query execution is profiled. */ + profile?: boolean + pass:[/**] @property source An inline search template. Supports the same parameters as the search API's request body. Also supports Mustache variables. If no id is specified, this parameter is required. */ + source?: string +} +---- + + diff --git a/docs/reference/shared-types/global-msearch.asciidoc b/docs/reference/shared-types/global-msearch.asciidoc new file mode 100644 index 000000000..50ab4bcda --- /dev/null +++ b/docs/reference/shared-types/global-msearch.asciidoc @@ -0,0 +1,205 @@ +[[reference-shared-types-global-msearch]] + +=== `Msearch` request types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[MsearchMultiSearchItem]] +==== MsearchMultiSearchItem + +[source,ts,subs=+macros] +---- +interface MsearchMultiSearchItem<TDocument = unknown> extends <<SearchResponseBody>><TDocument> { + status?: <<integer>> +} +---- + + +[discrete] +[[MsearchMultiSearchResult]] +==== MsearchMultiSearchResult + +[source,ts,subs=+macros] +---- +interface MsearchMultiSearchResult<TDocument = unknown, TAggregations = Record<<<AggregateName>>, <<AggregationsAggregate>>>> { + took: <<long>> + responses: <<MsearchResponseItem>><TDocument>[] +} +---- + + +[discrete] +[[MsearchMultisearchBody]] +==== MsearchMultisearchBody + +[source,ts,subs=+macros] +---- +interface MsearchMultisearchBody { + aggregations?: Record<string, <<AggregationsAggregationContainer>>> + aggs?: Record<string, <<AggregationsAggregationContainer>>> + collapse?: <<SearchFieldCollapse>> + pass:[/**] @property query Defines the search definition using the Query DSL. */ + query?: <<QueryDslQueryContainer>> + pass:[/**] @property explain If true, returns detailed information about score computation as part of a hit. */ + explain?: boolean + pass:[/**] @property ext Configuration of search extensions defined by Elasticsearch plugins. */ + ext?: Record<string, any> + pass:[/**] @property stored_fields List of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the _source parameter defaults to false. You can pass _source: true to return both source fields and stored fields in the search response. */ + stored_fields?: <<Fields>> + pass:[/**] @property docvalue_fields Array of wildcard (*) patterns. The request returns doc values for field names matching these patterns in the hits.fields property of the response. */ + docvalue_fields?: (<<QueryDslFieldAndFormat>> | <<Field>>)[] + pass:[/**] @property knn Defines the approximate kNN search to run. */ + knn?: <<KnnSearch>> | <<KnnSearch>>[] + pass:[/**] @property from Starting document offset. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more hits, use the search_after parameter. */ + from?: <<integer>> + highlight?: <<SearchHighlight>> + pass:[/**] @property indices_boost Boosts the _score of documents from specified indices. */ + indices_boost?: Record<<<IndexName>>, <<double>>>[] + pass:[/**] @property min_score Minimum _score for matching documents. Documents with a lower _score are not included in the search results. */ + min_score?: <<double>> + post_filter?: <<QueryDslQueryContainer>> + profile?: boolean + rescore?: <<SearchRescore>> | <<SearchRescore>>[] + pass:[/**] @property script_fields Retrieve a script evaluation (based on different fields) for each hit. */ + script_fields?: Record<string, <<ScriptField>>> + search_after?: <<SortResults>> + pass:[/**] @property size The number of hits to return. By default, you cannot page through more than 10,000 hits using the from and size parameters. 
To page through more hits, use the search_after parameter. */ + size?: <<integer>> + sort?: <<Sort>> + pass:[/**] @property _source Indicates which source fields are returned for matching documents. These fields are returned in the hits._source property of the search response. */ + _source?: <<SearchSourceConfig>> + pass:[/**] @property fields Array of wildcard (*) patterns. The request returns values for field names matching these patterns in the hits.fields property of the response. */ + fields?: (<<QueryDslFieldAndFormat>> | <<Field>>)[] + pass:[/**] @property terminate_after Maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. Defaults to 0, which does not terminate query execution early. */ + terminate_after?: <<long>> + pass:[/**] @property stats Stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using the indices stats API. */ + stats?: string[] + pass:[/**] @property timeout Specifies the period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. Defaults to no timeout. */ + timeout?: string + pass:[/**] @property track_scores If true, calculate and return document scores, even if the scores are not used for sorting. */ + track_scores?: boolean + pass:[/**] @property track_total_hits Number of hits matching the query to count accurately. If true, the exact number of hits is returned at the cost of some performance. If false, the response does not include the total number of hits matching the query. Defaults to 10,000 hits. */ + track_total_hits?: <<SearchTrackHits>> + pass:[/**] @property version If true, returns document version as part of a hit. */ + version?: boolean + pass:[/**] @property runtime_mappings Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. */ + runtime_mappings?: <<MappingRuntimeFields>> + pass:[/**] @property seq_no_primary_term If true, returns sequence number and primary term of the last modification of each hit. See Optimistic concurrency control. */ + seq_no_primary_term?: boolean + pass:[/**] @property pit Limits the search to a point in time (PIT). If you provide a PIT, you cannot specify an <index> in the request path. 
*/ + pit?: <<SearchPointInTimeReference>> + suggest?: <<SearchSuggester>> +} +---- + + +[discrete] +[[MsearchMultisearchHeader]] +==== MsearchMultisearchHeader + +[source,ts,subs=+macros] +---- +interface MsearchMultisearchHeader { + allow_no_indices?: boolean + expand_wildcards?: <<ExpandWildcards>> + ignore_unavailable?: boolean + index?: <<Indices>> + preference?: string + request_cache?: boolean + routing?: <<Routing>> + search_type?: <<SearchType>> + ccs_minimize_roundtrips?: boolean + allow_partial_search_results?: boolean + ignore_throttled?: boolean +} +---- + + +[discrete] +[[MsearchRequest]] +==== MsearchRequest + +[source,ts,subs=+macros] +---- +interface MsearchRequest extends <<RequestBase>> { + index?: <<Indices>> + allow_no_indices?: boolean + ccs_minimize_roundtrips?: boolean + expand_wildcards?: <<ExpandWildcards>> + ignore_throttled?: boolean + ignore_unavailable?: boolean + include_named_queries_score?: boolean + max_concurrent_searches?: <<long>> + max_concurrent_shard_requests?: <<long>> + pre_filter_shard_size?: <<long>> + rest_total_hits_as_int?: boolean + routing?: <<Routing>> + search_type?: <<SearchType>> + typed_keys?: boolean + searches?: <<MsearchRequestItem>>[] +} +---- + + +[discrete] +[[MsearchRequestItem]] +==== MsearchRequestItem + +[source,ts,subs=+macros] +---- +type MsearchRequestItem = <<MsearchMultisearchHeader>> | <<MsearchMultisearchBody>> +---- + + +[discrete] +[[MsearchResponse]] +==== MsearchResponse + +[source,ts,subs=+macros] +---- +type MsearchResponse<TDocument = unknown, TAggregations = Record<<<AggregateName>>, <<AggregationsAggregate>>>> = <<MsearchMultiSearchResult>><TDocument, TAggregations> +---- + + +[discrete] +[[MsearchResponseItem]] +==== MsearchResponseItem + +[source,ts,subs=+macros] +---- +type MsearchResponseItem<TDocument = unknown> = <<MsearchMultiSearchItem>><TDocument> | <<ErrorResponseBase>> +---- + + diff --git a/docs/reference/shared-types/global-mtermvectors.asciidoc b/docs/reference/shared-types/global-mtermvectors.asciidoc new file mode 100644 index 000000000..5604d9556 --- /dev/null +++ b/docs/reference/shared-types/global-mtermvectors.asciidoc @@ -0,0 +1,129 @@ +[[reference-shared-types-global-mtermvectors]] + +=== `Mtermvectors` request types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[MtermvectorsOperation]] +==== MtermvectorsOperation + +[source,ts,subs=+macros] +---- +interface MtermvectorsOperation { + pass:[/**] @property _id The ID of the document. */ + _id?: <<Id>> + pass:[/**] @property _index The index of the document. */ + _index?: <<IndexName>> + pass:[/**] @property doc An artificial document (a document not present in the index) for which you want to retrieve term vectors. */ + doc?: any + pass:[/**] @property fields Comma-separated list or wildcard expressions of fields to include in the statistics. Used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. */ + fields?: <<Fields>> + pass:[/**] @property field_statistics If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies. */ + field_statistics?: boolean + pass:[/**] @property filter Filter terms based on their tf-idf scores. */ + filter?: <<TermvectorsFilter>> + pass:[/**] @property offsets If `true`, the response includes term offsets. */ + offsets?: boolean + pass:[/**] @property payloads If `true`, the response includes term payloads. */ + payloads?: boolean + pass:[/**] @property positions If `true`, the response includes term positions. */ + positions?: boolean + pass:[/**] @property routing Custom value used to route operations to a specific shard. */ + routing?: <<Routing>> + pass:[/**] @property term_statistics If true, the response includes term frequency and document frequency. */ + term_statistics?: boolean + pass:[/**] @property version If `true`, returns the document version as part of a hit. */ + version?: <<VersionNumber>> + pass:[/**] @property version_type Specific version type. 
*/ + version_type?: <<VersionType>> +} +---- + + +[discrete] +[[MtermvectorsRequest]] +==== MtermvectorsRequest + +[source,ts,subs=+macros] +---- +interface MtermvectorsRequest extends <<RequestBase>> { + index?: <<IndexName>> + fields?: <<Fields>> + field_statistics?: boolean + offsets?: boolean + payloads?: boolean + positions?: boolean + preference?: string + realtime?: boolean + routing?: <<Routing>> + term_statistics?: boolean + version?: <<VersionNumber>> + version_type?: <<VersionType>> + docs?: <<MtermvectorsOperation>>[] + ids?: <<Id>>[] +} +---- + + +[discrete] +[[MtermvectorsResponse]] +==== MtermvectorsResponse + +[source,ts,subs=+macros] +---- +interface MtermvectorsResponse { + docs: <<MtermvectorsTermVectorsResult>>[] +} +---- + + +[discrete] +[[MtermvectorsTermVectorsResult]] +==== MtermvectorsTermVectorsResult + +[source,ts,subs=+macros] +---- +interface MtermvectorsTermVectorsResult { + _id?: <<Id>> + _index: <<IndexName>> + _version?: <<VersionNumber>> + took?: <<long>> + found?: boolean + term_vectors?: Record<<<Field>>, <<TermvectorsTermVector>>> + error?: <<ErrorCause>> +} +---- + + diff --git a/docs/reference/shared-types/global-open-point-in-time.asciidoc b/docs/reference/shared-types/global-open-point-in-time.asciidoc new file mode 100644 index 000000000..0e2b003a1 --- /dev/null +++ b/docs/reference/shared-types/global-open-point-in-time.asciidoc @@ -0,0 +1,69 @@ +[[reference-shared-types-global-open-point-in-time]] + +=== `OpenPointInTime` request types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[OpenPointInTimeRequest]] +==== OpenPointInTimeRequest + +[source,ts,subs=+macros] +---- +interface OpenPointInTimeRequest extends <<RequestBase>> { + index: <<Indices>> + keep_alive: <<Duration>> + ignore_unavailable?: boolean + preference?: string + routing?: <<Routing>> + expand_wildcards?: <<ExpandWildcards>> + allow_partial_search_results?: boolean + index_filter?: <<QueryDslQueryContainer>> +} +---- + + +[discrete] +[[OpenPointInTimeResponse]] +==== OpenPointInTimeResponse + +[source,ts,subs=+macros] +---- +interface OpenPointInTimeResponse { + _shards: <<ShardStatistics>> + id: <<Id>> +} +---- + + diff --git a/docs/reference/shared-types/global-ping.asciidoc b/docs/reference/shared-types/global-ping.asciidoc new file mode 100644 index 000000000..9cb5d5ee8 --- /dev/null +++ b/docs/reference/shared-types/global-ping.asciidoc @@ -0,0 +1,57 @@ +[[reference-shared-types-global-ping]] + +=== `Ping` request types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[PingRequest]] +==== PingRequest + +[source,ts,subs=+macros] +---- +interface PingRequest extends <<RequestBase>> {} +---- + + +[discrete] +[[PingResponse]] +==== PingResponse + +[source,ts,subs=+macros] +---- +type PingResponse = boolean +---- + + diff --git a/docs/reference/shared-types/global-put-script.asciidoc b/docs/reference/shared-types/global-put-script.asciidoc new file mode 100644 index 000000000..b48fb2fe9 --- /dev/null +++ b/docs/reference/shared-types/global-put-script.asciidoc @@ -0,0 +1,63 @@ +[[reference-shared-types-global-put-script]] + +=== `PutScript` request types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[PutScriptRequest]] +==== PutScriptRequest + +[source,ts,subs=+macros] +---- +interface PutScriptRequest extends <<RequestBase>> { + id: <<Id>> + context?: <<Name>> + master_timeout?: <<Duration>> + timeout?: <<Duration>> + script: <<StoredScript>> +} +---- + + +[discrete] +[[PutScriptResponse]] +==== PutScriptResponse + +[source,ts,subs=+macros] +---- +type PutScriptResponse = <<AcknowledgedResponseBase>> +---- + + diff --git a/docs/reference/shared-types/global-rank-eval.asciidoc b/docs/reference/shared-types/global-rank-eval.asciidoc new file mode 100644 index 000000000..6b66855b0 --- /dev/null +++ b/docs/reference/shared-types/global-rank-eval.asciidoc @@ -0,0 +1,280 @@ +[[reference-shared-types-global-rank-eval]] + +=== `RankEval` request types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is 
autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[RankEvalDocumentRating]] +==== RankEvalDocumentRating + +[source,ts,subs=+macros] +---- +interface RankEvalDocumentRating { + pass:[/**] @property _id The document ID. */ + _id: <<Id>> + pass:[/**] @property _index The document’s index. For data streams, this should be the document’s backing index. */ + _index: <<IndexName>> + pass:[/**] @property rating The document’s relevance with regard to this search request. */ + rating: <<integer>> +} +---- + + +[discrete] +[[RankEvalRankEvalHit]] +==== RankEvalRankEvalHit + +[source,ts,subs=+macros] +---- +interface RankEvalRankEvalHit { + _id: <<Id>> + _index: <<IndexName>> + _score: <<double>> +} +---- + + +[discrete] +[[RankEvalRankEvalHitItem]] +==== RankEvalRankEvalHitItem + +[source,ts,subs=+macros] +---- +interface RankEvalRankEvalHitItem { + hit: <<RankEvalRankEvalHit>> + rating?: <<double>> | null +} +---- + + +[discrete] +[[RankEvalRankEvalMetric]] +==== RankEvalRankEvalMetric + +[source,ts,subs=+macros] +---- +interface RankEvalRankEvalMetric { + precision?: <<RankEvalRankEvalMetricPrecision>> + recall?: <<RankEvalRankEvalMetricRecall>> + mean_reciprocal_rank?: <<RankEvalRankEvalMetricMeanReciprocalRank>> + dcg?: <<RankEvalRankEvalMetricDiscountedCumulativeGain>> + expected_reciprocal_rank?: <<RankEvalRankEvalMetricExpectedReciprocalRank>> +} +---- + + +[discrete] +[[RankEvalRankEvalMetricBase]] +==== RankEvalRankEvalMetricBase + +[source,ts,subs=+macros] +---- +interface RankEvalRankEvalMetricBase { + pass:[/**] @property k Sets the maximum number of documents retrieved per query. This value will act in place of the usual size parameter in the query. */ + k?: <<integer>> +} +---- + + +[discrete] +[[RankEvalRankEvalMetricDetail]] +==== RankEvalRankEvalMetricDetail + +[source,ts,subs=+macros] +---- +interface RankEvalRankEvalMetricDetail { + pass:[/**] @property metric_score The metric_score in the details section shows the contribution of this query to the global quality metric score */ + metric_score: <<double>> + pass:[/**] @property unrated_docs The unrated_docs section contains an _index and _id entry for each document in the search result for this query that didn’t have a ratings value. This can be used to ask the user to supply ratings for these documents */ + unrated_docs: <<RankEvalUnratedDocument>>[] + pass:[/**] @property hits The hits section shows a grouping of the search results with their supplied ratings */ + hits: <<RankEvalRankEvalHitItem>>[] + pass:[/**] @property metric_details The metric_details give additional information about the calculated quality metric (e.g. how many of the retrieved documents were relevant). 
The content varies for each metric but allows for better interpretation of the results */ + metric_details: Record<string, Record<string, any>> +} +---- + + +[discrete] +[[RankEvalRankEvalMetricDiscountedCumulativeGain]] +==== RankEvalRankEvalMetricDiscountedCumulativeGain + +[source,ts,subs=+macros] +---- +interface RankEvalRankEvalMetricDiscountedCumulativeGain extends <<RankEvalRankEvalMetricBase>> { + pass:[/**] @property normalize If set to true, this metric will calculate the Normalized DCG. */ + normalize?: boolean +} +---- + + +[discrete] +[[RankEvalRankEvalMetricExpectedReciprocalRank]] +==== RankEvalRankEvalMetricExpectedReciprocalRank + +[source,ts,subs=+macros] +---- +interface RankEvalRankEvalMetricExpectedReciprocalRank extends <<RankEvalRankEvalMetricBase>> { + pass:[/**] @property maximum_relevance The highest relevance grade used in the user-supplied relevance judgments. */ + maximum_relevance: <<integer>> +} +---- + + +[discrete] +[[RankEvalRankEvalMetricMeanReciprocalRank]] +==== RankEvalRankEvalMetricMeanReciprocalRank + +[source,ts,subs=+macros] +---- +interface RankEvalRankEvalMetricMeanReciprocalRank extends <<RankEvalRankEvalMetricRatingTreshold>> {} +---- + + +[discrete] +[[RankEvalRankEvalMetricPrecision]] +==== RankEvalRankEvalMetricPrecision + +[source,ts,subs=+macros] +---- +interface RankEvalRankEvalMetricPrecision extends <<RankEvalRankEvalMetricRatingTreshold>> { + pass:[/**] @property ignore_unlabeled Controls how unlabeled documents in the search results are counted. If set to true, unlabeled documents are ignored and neither count as relevant or irrelevant. Set to false (the default), they are treated as irrelevant. */ + ignore_unlabeled?: boolean +} +---- + + +[discrete] +[[RankEvalRankEvalMetricRatingTreshold]] +==== RankEvalRankEvalMetricRatingTreshold + +[source,ts,subs=+macros] +---- +interface RankEvalRankEvalMetricRatingTreshold extends <<RankEvalRankEvalMetricBase>> { + pass:[/**] @property relevant_rating_threshold Sets the rating threshold above which documents are considered to be "relevant". */ + relevant_rating_threshold?: <<integer>> +} +---- + + +[discrete] +[[RankEvalRankEvalMetricRecall]] +==== RankEvalRankEvalMetricRecall + +[source,ts,subs=+macros] +---- +interface RankEvalRankEvalMetricRecall extends <<RankEvalRankEvalMetricRatingTreshold>> {} +---- + + +[discrete] +[[RankEvalRankEvalQuery]] +==== RankEvalRankEvalQuery + +[source,ts,subs=+macros] +---- +interface RankEvalRankEvalQuery { + query: <<QueryDslQueryContainer>> + size?: <<integer>> +} +---- + + +[discrete] +[[RankEvalRankEvalRequestItem]] +==== RankEvalRankEvalRequestItem + +[source,ts,subs=+macros] +---- +interface RankEvalRankEvalRequestItem { + pass:[/**] @property id The search request’s ID, used to group result details later. */ + id: <<Id>> + pass:[/**] @property request The query being evaluated. */ + request?: <<RankEvalRankEvalQuery>> | <<QueryDslQueryContainer>> + pass:[/**] @property ratings List of document ratings */ + ratings: <<RankEvalDocumentRating>>[] + pass:[/**] @property template_id The search template <<Id>> */ + template_id?: <<Id>> + pass:[/**] @property params The search template parameters. 
*/ + params?: Record<string, any> +} +---- + + +[discrete] +[[RankEvalRequest]] +==== RankEvalRequest + +[source,ts,subs=+macros] +---- +interface RankEvalRequest extends <<RequestBase>> { + index?: <<Indices>> + allow_no_indices?: boolean + expand_wildcards?: <<ExpandWildcards>> + ignore_unavailable?: boolean + search_type?: string + requests: <<RankEvalRankEvalRequestItem>>[] + metric?: <<RankEvalRankEvalMetric>> +} +---- + + +[discrete] +[[RankEvalResponse]] +==== RankEvalResponse + +[source,ts,subs=+macros] +---- +interface RankEvalResponse { + metric_score: <<double>> + details: Record<<<Id>>, <<RankEvalRankEvalMetricDetail>>> + failures: Record<string, any> +} +---- + + +[discrete] +[[RankEvalUnratedDocument]] +==== RankEvalUnratedDocument + +[source,ts,subs=+macros] +---- +interface RankEvalUnratedDocument { + _id: <<Id>> + _index: <<IndexName>> +} +---- + + diff --git a/docs/reference/shared-types/global-reindex-rethrottle.asciidoc b/docs/reference/shared-types/global-reindex-rethrottle.asciidoc new file mode 100644 index 000000000..12c161893 --- /dev/null +++ b/docs/reference/shared-types/global-reindex-rethrottle.asciidoc @@ -0,0 +1,130 @@ +[[reference-shared-types-global-reindex-rethrottle]] + +=== `ReindexRethrottle` request types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[ReindexRethrottleReindexNode]] +==== ReindexRethrottleReindexNode + +[source,ts,subs=+macros] +---- +interface ReindexRethrottleReindexNode extends <<SpecUtilsBaseNode>> { + tasks: Record<<<TaskId>>, <<ReindexRethrottleReindexTask>>> +} +---- + + +[discrete] +[[ReindexRethrottleReindexStatus]] +==== ReindexRethrottleReindexStatus + +[source,ts,subs=+macros] +---- +interface ReindexRethrottleReindexStatus { + pass:[/**] @property batches The number of scroll responses pulled back by the reindex. */ + batches: <<long>> + pass:[/**] @property created The number of documents that were successfully created. */ + created: <<long>> + pass:[/**] @property deleted The number of documents that were successfully deleted. */ + deleted: <<long>> + pass:[/**] @property noops The number of documents that were ignored because the script used for the reindex returned a `noop` value for `ctx.op`. */ + noops: <<long>> + pass:[/**] @property requests_per_second The number of requests per second effectively executed during the reindex. */ + requests_per_second: <<float>> + pass:[/**] @property retries The number of retries attempted by reindex. 
`bulk` is the number of bulk actions retried and `search` is the number of search actions retried. */ + retries: <<Retries>> + throttled?: <<Duration>> + pass:[/**] @property throttled_millis Number of milliseconds the request slept to conform to `requests_per_second`. */ + throttled_millis: <<DurationValue>><<<UnitMillis>>> + throttled_until?: <<Duration>> + pass:[/**] @property throttled_until_millis This field should always be equal to zero in a `_reindex` response. It only has meaning when using the Task API, where it indicates the next time (in milliseconds since epoch) a throttled request will be executed again in order to conform to `requests_per_second`. */ + throttled_until_millis: <<DurationValue>><<<UnitMillis>>> + pass:[/**] @property total The number of documents that were successfully processed. */ + total: <<long>> + pass:[/**] @property updated The number of documents that were successfully updated, for example, a document with same ID already existed prior to reindex updating it. */ + updated: <<long>> + pass:[/**] @property version_conflicts The number of version conflicts that reindex hits. */ + version_conflicts: <<long>> +} +---- + + +[discrete] +[[ReindexRethrottleReindexTask]] +==== ReindexRethrottleReindexTask + +[source,ts,subs=+macros] +---- +interface ReindexRethrottleReindexTask { + action: string + cancellable: boolean + description: string + id: <<long>> + node: <<Name>> + running_time_in_nanos: <<DurationValue>><<<UnitNanos>>> + start_time_in_millis: <<EpochTime>><<<UnitMillis>>> + status: <<ReindexRethrottleReindexStatus>> + type: string + headers: <<HttpHeaders>> +} +---- + + +[discrete] +[[ReindexRethrottleRequest]] +==== ReindexRethrottleRequest + +[source,ts,subs=+macros] +---- +interface ReindexRethrottleRequest extends <<RequestBase>> { + task_id: <<Id>> + requests_per_second?: <<float>> +} +---- + + +[discrete] +[[ReindexRethrottleResponse]] +==== ReindexRethrottleResponse + +[source,ts,subs=+macros] +---- +interface ReindexRethrottleResponse { + nodes: Record<string, <<ReindexRethrottleReindexNode>>> +} +---- + + diff --git a/docs/reference/shared-types/global-reindex.asciidoc b/docs/reference/shared-types/global-reindex.asciidoc new file mode 100644 index 000000000..77b20adba --- /dev/null +++ b/docs/reference/shared-types/global-reindex.asciidoc @@ -0,0 +1,158 @@ +[[reference-shared-types-global-reindex]] + +=== `Reindex` request types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[ReindexDestination]] +==== ReindexDestination + +[source,ts,subs=+macros] +---- +interface ReindexDestination { + pass:[/**] @property index The name of the data stream, index, or index alias you are copying to. */ + index: <<IndexName>> + pass:[/**] @property op_type Set to `create` to only index documents that do not already exist. Important: To reindex to a data stream destination, this argument must be `create`. */ + op_type?: <<OpType>> + pass:[/**] @property pipeline The name of the pipeline to use. */ + pipeline?: string + pass:[/**] @property routing By default, a document's routing is preserved unless it’s changed by the script. Set to `discard` to set routing to `null`, or `=value` to route using the specified `value`. */ + routing?: <<Routing>> + pass:[/**] @property version_type The versioning to use for the indexing operation. */ + version_type?: <<VersionType>> +} +---- + + +[discrete] +[[ReindexRemoteSource]] +==== ReindexRemoteSource + +[source,ts,subs=+macros] +---- +interface ReindexRemoteSource { + pass:[/**] @property connect_timeout The remote connection timeout. Defaults to 30 seconds. */ + connect_timeout?: <<Duration>> + pass:[/**] @property headers An object containing the headers of the request. */ + headers?: Record<string, string> + pass:[/**] @property host The URL for the remote instance of Elasticsearch that you want to index from. */ + host: <<Host>> + pass:[/**] @property username The username to use for authentication with the remote host. */ + username?: <<Username>> + pass:[/**] @property password The password to use for authentication with the remote host. */ + password?: <<Password>> + pass:[/**] @property socket_timeout The remote socket read timeout. Defaults to 30 seconds. 
*/ + socket_timeout?: <<Duration>> +} +---- + + +[discrete] +[[ReindexRequest]] +==== ReindexRequest + +[source,ts,subs=+macros] +---- +interface ReindexRequest extends <<RequestBase>> { + refresh?: boolean + requests_per_second?: <<float>> + scroll?: <<Duration>> + slices?: <<Slices>> + timeout?: <<Duration>> + wait_for_active_shards?: <<WaitForActiveShards>> + wait_for_completion?: boolean + require_alias?: boolean + conflicts?: <<Conflicts>> + dest: <<ReindexDestination>> + max_docs?: <<long>> + script?: <<Script>> | string + size?: <<long>> + source: <<ReindexSource>> +} +---- + + +[discrete] +[[ReindexResponse]] +==== ReindexResponse + +[source,ts,subs=+macros] +---- +interface ReindexResponse { + batches?: <<long>> + created?: <<long>> + deleted?: <<long>> + failures?: <<BulkIndexByScrollFailure>>[] + noops?: <<long>> + retries?: <<Retries>> + requests_per_second?: <<float>> + slice_id?: <<integer>> + task?: <<TaskId>> + throttled_millis?: <<EpochTime>><<<UnitMillis>>> + throttled_until_millis?: <<EpochTime>><<<UnitMillis>>> + timed_out?: boolean + took?: <<DurationValue>><<<UnitMillis>>> + total?: <<long>> + updated?: <<long>> + version_conflicts?: <<long>> +} +---- + + +[discrete] +[[ReindexSource]] +==== ReindexSource + +[source,ts,subs=+macros] +---- +interface ReindexSource { + pass:[/**] @property index The name of the data stream, index, or alias you are copying from. Accepts a comma-separated list to reindex from multiple sources. */ + index: <<Indices>> + pass:[/**] @property query Specifies the documents to reindex using the Query DSL. */ + query?: <<QueryDslQueryContainer>> + pass:[/**] @property remote A remote instance of Elasticsearch that you want to index from. */ + remote?: <<ReindexRemoteSource>> + pass:[/**] @property size The number of documents to index per batch. Use when indexing from remote to ensure that the batches fit within the on-heap buffer, which defaults to a maximum size of 100 MB. */ + size?: <<integer>> + pass:[/**] @property slice Slice the reindex request manually using the provided slice ID and total number of slices. */ + slice?: <<SlicedScroll>> + sort?: <<Sort>> + pass:[/**] @property _source If `true` reindexes all source fields. Set to a list to reindex select fields. */ + _source?: <<Fields>> + runtime_mappings?: <<MappingRuntimeFields>> +} +---- + + diff --git a/docs/reference/shared-types/global-render-search-template.asciidoc b/docs/reference/shared-types/global-render-search-template.asciidoc new file mode 100644 index 000000000..a6f75885e --- /dev/null +++ b/docs/reference/shared-types/global-render-search-template.asciidoc @@ -0,0 +1,64 @@ +[[reference-shared-types-global-render-search-template]] + +=== `RenderSearchTemplate` request types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[RenderSearchTemplateRequest]] +==== RenderSearchTemplateRequest + +[source,ts,subs=+macros] +---- +interface RenderSearchTemplateRequest extends <<RequestBase>> { + id?: <<Id>> + file?: string + params?: Record<string, any> + source?: string +} +---- + + +[discrete] +[[RenderSearchTemplateResponse]] +==== RenderSearchTemplateResponse + +[source,ts,subs=+macros] +---- +interface RenderSearchTemplateResponse { + template_output: Record<string, any> +} +---- + + diff --git a/docs/reference/shared-types/global-scripts-painless-execute.asciidoc b/docs/reference/shared-types/global-scripts-painless-execute.asciidoc new file mode 100644 index 000000000..f14564271 --- /dev/null +++ b/docs/reference/shared-types/global-scripts-painless-execute.asciidoc @@ -0,0 +1,80 @@ +[[reference-shared-types-global-scripts-painless-execute]] + +=== `ScriptsPainlessExecute` request types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[ScriptsPainlessExecutePainlessContextSetup]] +==== ScriptsPainlessExecutePainlessContextSetup + +[source,ts,subs=+macros] +---- +interface ScriptsPainlessExecutePainlessContextSetup { + pass:[/**] @property document Document that’s temporarily indexed in-memory and accessible from the script. */ + document: any + pass:[/**] @property index Index containing a mapping that’s compatible with the indexed document. You may specify a remote index by prefixing the index with the remote cluster alias. */ + index: <<IndexName>> + pass:[/**] @property query Use this parameter to specify a query for computing a score. 
*/ + query?: <<QueryDslQueryContainer>> +} +---- + + +[discrete] +[[ScriptsPainlessExecuteRequest]] +==== ScriptsPainlessExecuteRequest + +[source,ts,subs=+macros] +---- +interface ScriptsPainlessExecuteRequest extends <<RequestBase>> { + context?: string + context_setup?: <<ScriptsPainlessExecutePainlessContextSetup>> + script?: <<Script>> | string +} +---- + + +[discrete] +[[ScriptsPainlessExecuteResponse]] +==== ScriptsPainlessExecuteResponse + +[source,ts,subs=+macros] +---- +interface ScriptsPainlessExecuteResponse<TResult = unknown> { + result: TResult +} +---- + + diff --git a/docs/reference/shared-types/global-scroll.asciidoc b/docs/reference/shared-types/global-scroll.asciidoc new file mode 100644 index 000000000..8ed6749fc --- /dev/null +++ b/docs/reference/shared-types/global-scroll.asciidoc @@ -0,0 +1,61 @@ +[[reference-shared-types-global-scroll]] + +=== `Scroll` request types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[ScrollRequest]] +==== ScrollRequest + +[source,ts,subs=+macros] +---- +interface ScrollRequest extends <<RequestBase>> { + scroll_id?: <<ScrollId>> + rest_total_hits_as_int?: boolean + scroll?: <<Duration>> +} +---- + + +[discrete] +[[ScrollResponse]] +==== ScrollResponse + +[source,ts,subs=+macros] +---- +type ScrollResponse<TDocument = unknown, TAggregations = Record<<<AggregateName>>, <<AggregationsAggregate>>>> = <<SearchResponseBody>><TDocument, TAggregations> +---- + + diff --git a/docs/reference/shared-types/global-search-mvt-types.asciidoc b/docs/reference/shared-types/global-search-mvt-types.asciidoc new file mode 100644 index 000000000..df99f455c --- /dev/null +++ b/docs/reference/shared-types/global-search-mvt-types.asciidoc @@ -0,0 +1,77 @@ +[[reference-shared-types-global-search-mvt-types]] + +=== `SearchMvt` types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[SearchMvtCoordinate]] +=== SearchMvtCoordinate + +[source,ts,subs=+macros] +---- +type SearchMvtCoordinate = <<integer>> +---- + + +[discrete] +[[SearchMvtGridAggregationType]] +=== SearchMvtGridAggregationType + +[source,ts,subs=+macros] +---- +type SearchMvtGridAggregationType = 'geotile' | 'geohex' +---- + + +[discrete] +[[SearchMvtGridType]] +=== SearchMvtGridType + +[source,ts,subs=+macros] +---- +type SearchMvtGridType = 'grid' | 'point' | 'centroid' +---- + + +[discrete] +[[SearchMvtZoomLevel]] +=== SearchMvtZoomLevel + +[source,ts,subs=+macros] +---- +type SearchMvtZoomLevel = <<integer>> +---- + + diff --git a/docs/reference/shared-types/global-search-mvt.asciidoc b/docs/reference/shared-types/global-search-mvt.asciidoc new file mode 100644 index 000000000..68a57c73f --- /dev/null +++ b/docs/reference/shared-types/global-search-mvt.asciidoc @@ -0,0 +1,77 @@ +[[reference-shared-types-global-search-mvt]] + +=== `SearchMvt` request types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[SearchMvtRequest]] +==== SearchMvtRequest + +[source,ts,subs=+macros] +---- +interface SearchMvtRequest extends <<RequestBase>> { + index: <<Indices>> + field: <<Field>> + zoom: <<SearchMvtZoomLevel>> + x: <<SearchMvtCoordinate>> + y: <<SearchMvtCoordinate>> + aggs?: Record<string, <<AggregationsAggregationContainer>>> + buffer?: <<integer>> + exact_bounds?: boolean + extent?: <<integer>> + fields?: <<Fields>> + grid_agg?: <<SearchMvtGridAggregationType>> + grid_precision?: <<integer>> + grid_type?: <<SearchMvtGridType>> + query?: <<QueryDslQueryContainer>> + runtime_mappings?: <<MappingRuntimeFields>> + size?: <<integer>> + sort?: <<Sort>> + track_total_hits?: <<SearchTrackHits>> + with_labels?: boolean +} +---- + + +[discrete] +[[SearchMvtResponse]] +==== SearchMvtResponse + +[source,ts,subs=+macros] +---- +type SearchMvtResponse = <<MapboxVectorTiles>> +---- + + diff --git a/docs/reference/shared-types/global-search-shards.asciidoc b/docs/reference/shared-types/global-search-shards.asciidoc new file mode 100644 index 000000000..4ac2a71aa --- /dev/null +++ b/docs/reference/shared-types/global-search-shards.asciidoc @@ -0,0 +1,106 @@ +[[reference-shared-types-global-search-shards]] + +=== `SearchShards` request types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[SearchShardsRequest]] +==== SearchShardsRequest + +[source,ts,subs=+macros] +---- +interface SearchShardsRequest extends <<RequestBase>> { + index?: <<Indices>> + allow_no_indices?: boolean + expand_wildcards?: <<ExpandWildcards>> + ignore_unavailable?: boolean + local?: boolean + preference?: string + routing?: <<Routing>> +} +---- + + +[discrete] +[[SearchShardsResponse]] +==== SearchShardsResponse + +[source,ts,subs=+macros] +---- +interface SearchShardsResponse { + nodes: Record<<<NodeId>>, <<SearchShardsSearchShardsNodeAttributes>>> + shards: <<NodeShard>>[][] + indices: Record<<<IndexName>>, <<SearchShardsShardStoreIndex>>> +} +---- + + +[discrete] +[[SearchShardsSearchShardsNodeAttributes]] +==== SearchShardsSearchShardsNodeAttributes + +[source,ts,subs=+macros] +---- +interface SearchShardsSearchShardsNodeAttributes { + pass:[/**] @property name The human-readable identifier of the node. */ + name: <<NodeName>> + pass:[/**] @property ephemeral_id The ephemeral ID of the node. */ + ephemeral_id: <<Id>> + pass:[/**] @property transport_address The host and port where transport HTTP connections are accepted. */ + transport_address: <<TransportAddress>> + external_id: string + pass:[/**] @property attributes Lists node attributes. */ + attributes: Record<string, string> + roles: <<NodeRoles>> + version: <<VersionString>> + min_index_version: <<integer>> + max_index_version: <<integer>> +} +---- + + +[discrete] +[[SearchShardsShardStoreIndex]] +==== SearchShardsShardStoreIndex + +[source,ts,subs=+macros] +---- +interface SearchShardsShardStoreIndex { + aliases?: <<Name>>[] + filter?: <<QueryDslQueryContainer>> +} +---- + + diff --git a/docs/reference/shared-types/global-search-template.asciidoc b/docs/reference/shared-types/global-search-template.asciidoc new file mode 100644 index 000000000..b93d48489 --- /dev/null +++ b/docs/reference/shared-types/global-search-template.asciidoc @@ -0,0 +1,90 @@ +[[reference-shared-types-global-search-template]] + +=== `SearchTemplate` request types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[SearchTemplateRequest]] +==== SearchTemplateRequest + +[source,ts,subs=+macros] +---- +interface SearchTemplateRequest extends <<RequestBase>> { + index?: <<Indices>> + allow_no_indices?: boolean + ccs_minimize_roundtrips?: boolean + expand_wildcards?: <<ExpandWildcards>> + ignore_throttled?: boolean + ignore_unavailable?: boolean + preference?: string + routing?: <<Routing>> + scroll?: <<Duration>> + search_type?: <<SearchType>> + rest_total_hits_as_int?: boolean + typed_keys?: boolean + explain?: boolean + id?: <<Id>> + params?: Record<string, any> + profile?: boolean + source?: string +} +---- + + +[discrete] +[[SearchTemplateResponse]] +==== SearchTemplateResponse + +[source,ts,subs=+macros] +---- +interface SearchTemplateResponse<TDocument = unknown> { + took: <<long>> + timed_out: boolean + _shards: <<ShardStatistics>> + hits: <<SearchHitsMetadata>><TDocument> + aggregations?: Record<<<AggregateName>>, <<AggregationsAggregate>>> + _clusters?: <<ClusterStatistics>> + fields?: Record<string, any> + max_score?: <<double>> + num_reduce_phases?: <<long>> + profile?: <<SearchProfile>> + pit_id?: <<Id>> + _scroll_id?: <<ScrollId>> + suggest?: Record<<<SuggestionName>>, <<SearchSuggest>><TDocument>[]> + terminated_early?: boolean +} +---- + + diff --git a/docs/reference/shared-types/global-search-types.asciidoc b/docs/reference/shared-types/global-search-types.asciidoc new file mode 100644 index 000000000..742fc1699 --- /dev/null +++ b/docs/reference/shared-types/global-search-types.asciidoc @@ -0,0 +1,1294 @@ +[[reference-shared-types-global-search-types]] + +=== `Search` types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[SearchAggregationBreakdown]] +=== SearchAggregationBreakdown + +[source,ts,subs=+macros] +---- +interface SearchAggregationBreakdown { + build_aggregation: <<long>> + build_aggregation_count: <<long>> + build_leaf_collector: <<long>> + build_leaf_collector_count: <<long>> + collect: <<long>> + collect_count: <<long>> + initialize: <<long>> + initialize_count: <<long>> + post_collection?: <<long>> + post_collection_count?: <<long>> + reduce: <<long>> + reduce_count: <<long>> +} +---- + + +[discrete] +[[SearchAggregationProfile]] +=== SearchAggregationProfile + +[source,ts,subs=+macros] +---- +interface SearchAggregationProfile { + breakdown: <<SearchAggregationBreakdown>> + description: string + time_in_nanos: <<DurationValue>><<<UnitNanos>>> + type: string + debug?: <<SearchAggregationProfileDebug>> + children?: <<SearchAggregationProfile>>[] +} +---- + + +[discrete] +[[SearchAggregationProfileDebug]] +=== SearchAggregationProfileDebug + +[source,ts,subs=+macros] +---- +interface SearchAggregationProfileDebug { + segments_with_multi_valued_ords?: <<integer>> + collection_strategy?: string + segments_with_single_valued_ords?: <<integer>> + total_buckets?: <<integer>> + built_buckets?: <<integer>> + result_strategy?: string + has_filter?: boolean + delegate?: string + delegate_debug?: <<SearchAggregationProfileDebug>> + chars_fetched?: <<integer>> + extract_count?: <<integer>> + extract_ns?: <<integer>> + values_fetched?: <<integer>> + collect_analyzed_ns?: <<integer>> + collect_analyzed_count?: <<integer>> + surviving_buckets?: <<integer>> + ordinals_collectors_used?: <<integer>> + ordinals_collectors_overhead_too_high?: <<integer>> + string_hashing_collectors_used?: <<integer>> + numeric_collectors_used?: <<integer>> + empty_collectors_used?: <<integer>> + deferred_aggregators?: string[] + segments_with_doc_count_field?: <<integer>> + segments_with_deleted_docs?: <<integer>> + filters?: <<SearchAggregationProfileDelegateDebugFilter>>[] + segments_counted?: <<integer>> + segments_collected?: <<integer>> + map_reducer?: string + brute_force_used?: <<integer>> + dynamic_pruning_attempted?: <<integer>> + dynamic_pruning_used?: <<integer>> + skipped_due_to_no_data?: <<integer>> +} +---- + + +[discrete] +[[SearchAggregationProfileDelegateDebugFilter]] +=== SearchAggregationProfileDelegateDebugFilter + +[source,ts,subs=+macros] +---- +interface SearchAggregationProfileDelegateDebugFilter { + results_from_metadata?: <<integer>> + query?: string + specialized_for?: string + segments_counted_in_constant_time?: <<integer>> +} +---- + + +[discrete] +[[SearchBoundaryScanner]] +=== SearchBoundaryScanner + +[source,ts,subs=+macros] +---- +type SearchBoundaryScanner = 'chars' | 'sentence' | 'word' +---- + + +[discrete] +[[SearchCollector]] +=== SearchCollector + +[source,ts,subs=+macros] +---- +interface SearchCollector { + name: string + reason: string + time_in_nanos: <<DurationValue>><<<UnitNanos>>> + children?: <<SearchCollector>>[] +} +---- + + +[discrete] 
+[[SearchCompletionContext]] +=== SearchCompletionContext + +[source,ts,subs=+macros] +---- +interface SearchCompletionContext { + pass:[/**] @property boost The factor by which the score of the suggestion should be boosted. The score is computed by multiplying the boost with the suggestion weight. */ + boost?: <<double>> + pass:[/**] @property context The value of the category to filter/boost on. */ + context: <<SearchContext>> + pass:[/**] @property neighbours An array of precision values at which neighboring geohashes should be taken into account. Precision value can be a distance value (`5m`, `10km`, etc.) or a raw geohash precision (`1`..`12`). Defaults to generating neighbors for index time precision level. */ + neighbours?: <<GeoHashPrecision>>[] + pass:[/**] @property precision The precision of the geohash to encode the query geo point. Can be specified as a distance value (`5m`, `10km`, etc.), or as a raw geohash precision (`1`..`12`). Defaults to index time precision level. */ + precision?: <<GeoHashPrecision>> + pass:[/**] @property prefix Whether the category value should be treated as a prefix or not. */ + prefix?: boolean +} +---- + + +[discrete] +[[SearchCompletionSuggest]] +=== SearchCompletionSuggest + +[source,ts,subs=+macros] +---- +interface SearchCompletionSuggest<TDocument = unknown> extends <<SearchSuggestBase>> { + options: <<SearchCompletionSuggestOption>><TDocument> | <<SearchCompletionSuggestOption>><TDocument>[] +} +---- + + +[discrete] +[[SearchCompletionSuggestOption]] +=== SearchCompletionSuggestOption + +[source,ts,subs=+macros] +---- +interface SearchCompletionSuggestOption<TDocument = unknown> { + collate_match?: boolean + contexts?: Record<string, <<SearchContext>>[]> + fields?: Record<string, any> + _id?: string + _index?: <<IndexName>> + _routing?: <<Routing>> + _score?: <<double>> + _source?: TDocument + text: string + score?: <<double>> +} +---- + + +[discrete] +[[SearchCompletionSuggester]] +=== SearchCompletionSuggester + +[source,ts,subs=+macros] +---- +interface SearchCompletionSuggester extends <<SearchSuggesterBase>> { + pass:[/**] @property contexts A value, geo point object, or a geo hash string to filter or boost the suggestion on. */ + contexts?: Record<<<Field>>, <<SearchCompletionContext>> | <<SearchContext>> | (<<SearchCompletionContext>> | <<SearchContext>>)[]> + pass:[/**] @property fuzzy Enables fuzziness, meaning you can have a typo in your search and still get results back. */ + fuzzy?: <<SearchSuggestFuzziness>> + pass:[/**] @property regex A regex query that expresses a prefix as a regular expression. */ + regex?: <<SearchRegexOptions>> + pass:[/**] @property skip_duplicates Whether duplicate suggestions should be filtered out. 
*/ + skip_duplicates?: boolean +} +---- + + +[discrete] +[[SearchContext]] +=== SearchContext + +[source,ts,subs=+macros] +---- +type SearchContext = string | <<GeoLocation>> +---- + + +[discrete] +[[SearchDfsKnnProfile]] +=== SearchDfsKnnProfile + +[source,ts,subs=+macros] +---- +interface SearchDfsKnnProfile { + vector_operations_count?: <<long>> + query: <<SearchKnnQueryProfileResult>>[] + rewrite_time: <<long>> + collector: <<SearchKnnCollectorResult>>[] +} +---- + + +[discrete] +[[SearchDfsProfile]] +=== SearchDfsProfile + +[source,ts,subs=+macros] +---- +interface SearchDfsProfile { + statistics?: <<SearchDfsStatisticsProfile>> + knn?: <<SearchDfsKnnProfile>>[] +} +---- + + +[discrete] +[[SearchDfsStatisticsBreakdown]] +=== SearchDfsStatisticsBreakdown + +[source,ts,subs=+macros] +---- +interface SearchDfsStatisticsBreakdown { + collection_statistics: <<long>> + collection_statistics_count: <<long>> + create_weight: <<long>> + create_weight_count: <<long>> + rewrite: <<long>> + rewrite_count: <<long>> + term_statistics: <<long>> + term_statistics_count: <<long>> +} +---- + + +[discrete] +[[SearchDfsStatisticsProfile]] +=== SearchDfsStatisticsProfile + +[source,ts,subs=+macros] +---- +interface SearchDfsStatisticsProfile { + type: string + description: string + time?: <<Duration>> + time_in_nanos: <<DurationValue>><<<UnitNanos>>> + breakdown: <<SearchDfsStatisticsBreakdown>> + debug?: Record<string, any> + children?: <<SearchDfsStatisticsProfile>>[] +} +---- + + +[discrete] +[[SearchDirectGenerator]] +=== SearchDirectGenerator + +[source,ts,subs=+macros] +---- +interface SearchDirectGenerator { + pass:[/**] @property field The field to fetch the candidate suggestions from. Needs to be set globally or per suggestion. */ + field: <<Field>> + pass:[/**] @property max_edits The maximum edit distance candidate suggestions can have in order to be considered as a suggestion. Can only be `1` or `2`. */ + max_edits?: <<integer>> + pass:[/**] @property max_inspections A factor that is used to multiply with the shard_size in order to inspect more candidate spelling corrections on the shard level. Can improve accuracy at the cost of performance. */ + max_inspections?: <<float>> + pass:[/**] @property max_term_freq The maximum threshold in number of documents in which a suggest text token can exist in order to be included. This can be used to exclude high frequency terms — which are usually spelled correctly — from being spellchecked. Can be a relative percentage number (for example `0.4`) or an absolute number to represent document frequencies. If a value higher than 1 is specified, then fractional can not be specified. */ + max_term_freq?: <<float>> + pass:[/**] @property min_doc_freq The minimal threshold in number of documents a suggestion should appear in. This can improve quality by only suggesting high frequency terms. Can be specified as an absolute number or as a relative percentage of number of documents. If a value higher than 1 is specified, the number cannot be fractional. */ + min_doc_freq?: <<float>> + pass:[/**] @property min_word_length The minimum length a suggest text term must have in order to be included. */ + min_word_length?: <<integer>> + pass:[/**] @property post_filter A filter (analyzer) that is applied to each of the generated tokens before they are passed to the actual phrase scorer. */ + post_filter?: string + pass:[/**] @property pre_filter A filter (analyzer) that is applied to each of the tokens passed to this candidate generator. 
This filter is applied to the original token before candidates are generated. */ + pre_filter?: string + pass:[/**] @property prefix_length The number of minimal prefix characters that must match in order be a candidate suggestions. Increasing this number improves spellcheck performance. */ + prefix_length?: <<integer>> + pass:[/**] @property size The maximum corrections to be returned per suggest text token. */ + size?: <<integer>> + pass:[/**] @property suggest_mode Controls what suggestions are included on the suggestions generated on each shard. */ + suggest_mode?: <<SuggestMode>> +} +---- + + +[discrete] +[[SearchFetchProfile]] +=== SearchFetchProfile + +[source,ts,subs=+macros] +---- +interface SearchFetchProfile { + type: string + description: string + time_in_nanos: <<DurationValue>><<<UnitNanos>>> + breakdown: <<SearchFetchProfileBreakdown>> + debug?: <<SearchFetchProfileDebug>> + children?: <<SearchFetchProfile>>[] +} +---- + + +[discrete] +[[SearchFetchProfileBreakdown]] +=== SearchFetchProfileBreakdown + +[source,ts,subs=+macros] +---- +interface SearchFetchProfileBreakdown { + load_source?: <<integer>> + load_source_count?: <<integer>> + load_stored_fields?: <<integer>> + load_stored_fields_count?: <<integer>> + next_reader?: <<integer>> + next_reader_count?: <<integer>> + process_count?: <<integer>> + process?: <<integer>> +} +---- + + +[discrete] +[[SearchFetchProfileDebug]] +=== SearchFetchProfileDebug + +[source,ts,subs=+macros] +---- +interface SearchFetchProfileDebug { + stored_fields?: string[] + fast_path?: <<integer>> +} +---- + + +[discrete] +[[SearchFieldCollapse]] +=== SearchFieldCollapse + +[source,ts,subs=+macros] +---- +interface SearchFieldCollapse { + pass:[/**] @property field The field to collapse the result set on */ + field: <<Field>> + pass:[/**] @property inner_hits The number of inner hits and their sort order */ + inner_hits?: <<SearchInnerHits>> | <<SearchInnerHits>>[] + pass:[/**] @property max_concurrent_group_searches The number of concurrent requests allowed to retrieve the inner_hits per group */ + max_concurrent_group_searches?: <<integer>> + collapse?: <<SearchFieldCollapse>> +} +---- + + +[discrete] +[[SearchFieldSuggester]] +=== SearchFieldSuggester + +[source,ts,subs=+macros] +---- +interface SearchFieldSuggester { + pass:[/**] @property completion Provides auto-complete/search-as-you-type functionality. */ + completion?: <<SearchCompletionSuggester>> + pass:[/**] @property phrase Provides access to word alternatives on a per token basis within a certain string distance. */ + phrase?: <<SearchPhraseSuggester>> + pass:[/**] @property term Suggests terms based on edit distance. */ + term?: <<SearchTermSuggester>> + pass:[/**] @property prefix Prefix used to search for suggestions. */ + prefix?: string + pass:[/**] @property regex A prefix expressed as a regular expression. */ + regex?: string + pass:[/**] @property text The text to use as input for the suggester. Needs to be set globally or per suggestion. */ + text?: string +} +---- + + +[discrete] +[[SearchHighlight]] +=== SearchHighlight + +[source,ts,subs=+macros] +---- +interface SearchHighlight extends <<SearchHighlightBase>> { + encoder?: <<SearchHighlighterEncoder>> + fields: Record<<<Field>>, <<SearchHighlightField>>> +} +---- + + +[discrete] +[[SearchHighlightBase]] +=== SearchHighlightBase + +[source,ts,subs=+macros] +---- +interface SearchHighlightBase { + type?: <<SearchHighlighterType>> + pass:[/**] @property boundary_chars A string that contains each boundary character. 
*/ + boundary_chars?: string + pass:[/**] @property boundary_max_scan How far to scan for boundary characters. */ + boundary_max_scan?: <<integer>> + pass:[/**] @property boundary_scanner Specifies how to break the highlighted fragments: chars, sentence, or word. Only valid for the unified and fvh highlighters. Defaults to `sentence` for the `unified` highlighter. Defaults to `chars` for the `fvh` highlighter. */ + boundary_scanner?: <<SearchBoundaryScanner>> + pass:[/**] @property boundary_scanner_locale Controls which locale is used to search for sentence and word boundaries. This parameter takes a form of a language tag, for example: `"en-US"`, `"fr-FR"`, `"ja-JP"`. */ + boundary_scanner_locale?: string + force_source?: boolean + pass:[/**] @property fragmenter Specifies how text should be broken up in highlight snippets: `simple` or `span`. Only valid for the `plain` highlighter. */ + fragmenter?: <<SearchHighlighterFragmenter>> + pass:[/**] @property fragment_size The size of the highlighted fragment in characters. */ + fragment_size?: <<integer>> + highlight_filter?: boolean + pass:[/**] @property highlight_query Highlight matches for a query other than the search query. This is especially useful if you use a rescore query because those are not taken into account by highlighting by default. */ + highlight_query?: <<QueryDslQueryContainer>> + max_fragment_length?: <<integer>> + pass:[/**] @property max_analyzed_offset If set to a non-negative value, highlighting stops at this defined maximum limit. The rest of the text is not processed, thus not highlighted and no error is returned The `max_analyzed_offset` query setting does not override the `index.highlight.max_analyzed_offset` setting, which prevails when it’s set to lower value than the query setting. */ + max_analyzed_offset?: <<integer>> + pass:[/**] @property no_match_size The amount of text you want to return from the beginning of the field if there are no matching fragments to highlight. */ + no_match_size?: <<integer>> + pass:[/**] @property number_of_fragments The maximum number of fragments to return. If the number of fragments is set to `0`, no fragments are returned. Instead, the entire field contents are highlighted and returned. This can be handy when you need to highlight <<short>> texts such as a title or address, but fragmentation is not required. If `number_of_fragments` is `0`, `fragment_size` is ignored. */ + number_of_fragments?: <<integer>> + options?: Record<string, any> + pass:[/**] @property order Sorts highlighted fragments by score when set to `score`. By default, fragments will be output in the order they appear in the field (order: `none`). Setting this option to `score` will output the most relevant fragments first. Each highlighter applies its own logic to compute relevancy scores. */ + order?: <<SearchHighlighterOrder>> + pass:[/**] @property phrase_limit Controls the number of matching phrases in a document that are considered. Prevents the `fvh` highlighter from analyzing too many phrases and consuming too much memory. When using `matched_fields`, `phrase_limit` phrases per matched field are considered. Raising the limit increases query time and consumes more memory. Only supported by the `fvh` highlighter. */ + phrase_limit?: <<integer>> + pass:[/**] @property post_tags Use in conjunction with `pre_tags` to define the HTML tags to use for the highlighted text. By default, highlighted text is wrapped in `<em>` and `</em>` tags. 
*/ + post_tags?: string[] + pass:[/**] @property pre_tags Use in conjunction with `post_tags` to define the HTML tags to use for the highlighted text. By default, highlighted text is wrapped in `<em>` and `</em>` tags. */ + pre_tags?: string[] + pass:[/**] @property require_field_match By default, only fields that contains a query match are highlighted. Set to `false` to highlight all fields. */ + require_field_match?: boolean + pass:[/**] @property tags_schema Set to `styled` to use the built-in tag schema. */ + tags_schema?: <<SearchHighlighterTagsSchema>> +} +---- + + +[discrete] +[[SearchHighlightField]] +=== SearchHighlightField + +[source,ts,subs=+macros] +---- +interface SearchHighlightField extends <<SearchHighlightBase>> { + fragment_offset?: <<integer>> + matched_fields?: <<Fields>> +} +---- + + +[discrete] +[[SearchHighlighterEncoder]] +=== SearchHighlighterEncoder + +[source,ts,subs=+macros] +---- +type SearchHighlighterEncoder = 'default' | 'html' +---- + + +[discrete] +[[SearchHighlighterFragmenter]] +=== SearchHighlighterFragmenter + +[source,ts,subs=+macros] +---- +type SearchHighlighterFragmenter = 'simple' | 'span' +---- + + +[discrete] +[[SearchHighlighterOrder]] +=== SearchHighlighterOrder + +[source,ts,subs=+macros] +---- +type SearchHighlighterOrder = 'score' +---- + + +[discrete] +[[SearchHighlighterTagsSchema]] +=== SearchHighlighterTagsSchema + +[source,ts,subs=+macros] +---- +type SearchHighlighterTagsSchema = 'styled' +---- + + +[discrete] +[[SearchHighlighterType]] +=== SearchHighlighterType + +[source,ts,subs=+macros] +---- +type SearchHighlighterType = 'plain' | 'fvh' | 'unified' | string +---- + + +[discrete] +[[SearchHit]] +=== SearchHit + +[source,ts,subs=+macros] +---- +interface SearchHit<TDocument = unknown> { + _index: <<IndexName>> + _id?: <<Id>> + _score?: <<double>> | null + _explanation?: <<ExplainExplanation>> + fields?: Record<string, any> + highlight?: Record<string, string[]> + inner_hits?: Record<string, <<SearchInnerHitsResult>>> + matched_queries?: string[] | Record<string, <<double>>> + _nested?: <<SearchNestedIdentity>> + _ignored?: string[] + ignored_field_values?: Record<string, <<FieldValue>>[]> + _shard?: string + _node?: string + _routing?: string + _source?: TDocument + _rank?: <<integer>> + _seq_no?: <<SequenceNumber>> + _primary_term?: <<long>> + _version?: <<VersionNumber>> + sort?: <<SortResults>> +} +---- + + +[discrete] +[[SearchHitsMetadata]] +=== SearchHitsMetadata + +[source,ts,subs=+macros] +---- +interface SearchHitsMetadata<T = unknown> { + pass:[/**] @property total Total hit count information, present only if `track_total_hits` wasn't `false` in the search request. */ + total?: <<SearchTotalHits>> | <<long>> + hits: <<SearchHit>><T>[] + max_score?: <<double>> | null +} +---- + + +[discrete] +[[SearchInnerHits]] +=== SearchInnerHits + +[source,ts,subs=+macros] +---- +interface SearchInnerHits { + pass:[/**] @property name The name for the particular inner hit definition in the response. Useful when a search request contains multiple inner hits. */ + name?: <<Name>> + pass:[/**] @property size The maximum number of hits to return per `inner_hits`. */ + size?: <<integer>> + pass:[/**] @property from Inner hit starting document offset. 
*/ + from?: <<integer>> + collapse?: <<SearchFieldCollapse>> + docvalue_fields?: (<<QueryDslFieldAndFormat>> | <<Field>>)[] + explain?: boolean + highlight?: <<SearchHighlight>> + ignore_unmapped?: boolean + script_fields?: Record<<<Field>>, <<ScriptField>>> + seq_no_primary_term?: boolean + fields?: <<Fields>> + pass:[/**] @property sort How the inner hits should be sorted per `inner_hits`. By default, inner hits are sorted by score. */ + sort?: <<Sort>> + _source?: <<SearchSourceConfig>> + stored_fields?: <<Fields>> + track_scores?: boolean + version?: boolean +} +---- + + +[discrete] +[[SearchInnerHitsResult]] +=== SearchInnerHitsResult + +[source,ts,subs=+macros] +---- +interface SearchInnerHitsResult { + hits: <<SearchHitsMetadata>><any> +} +---- + + +[discrete] +[[SearchKnnCollectorResult]] +=== SearchKnnCollectorResult + +[source,ts,subs=+macros] +---- +interface SearchKnnCollectorResult { + name: string + reason: string + time?: <<Duration>> + time_in_nanos: <<DurationValue>><<<UnitNanos>>> + children?: <<SearchKnnCollectorResult>>[] +} +---- + + +[discrete] +[[SearchKnnQueryProfileBreakdown]] +=== SearchKnnQueryProfileBreakdown + +[source,ts,subs=+macros] +---- +interface SearchKnnQueryProfileBreakdown { + advance: <<long>> + advance_count: <<long>> + build_scorer: <<long>> + build_scorer_count: <<long>> + compute_max_score: <<long>> + compute_max_score_count: <<long>> + count_weight: <<long>> + count_weight_count: <<long>> + create_weight: <<long>> + create_weight_count: <<long>> + match: <<long>> + match_count: <<long>> + next_doc: <<long>> + next_doc_count: <<long>> + score: <<long>> + score_count: <<long>> + set_min_competitive_score: <<long>> + set_min_competitive_score_count: <<long>> + shallow_advance: <<long>> + shallow_advance_count: <<long>> +} +---- + + +[discrete] +[[SearchKnnQueryProfileResult]] +=== SearchKnnQueryProfileResult + +[source,ts,subs=+macros] +---- +interface SearchKnnQueryProfileResult { + type: string + description: string + time?: <<Duration>> + time_in_nanos: <<DurationValue>><<<UnitNanos>>> + breakdown: <<SearchKnnQueryProfileBreakdown>> + debug?: Record<string, any> + children?: <<SearchKnnQueryProfileResult>>[] +} +---- + + +[discrete] +[[SearchLaplaceSmoothingModel]] +=== SearchLaplaceSmoothingModel + +[source,ts,subs=+macros] +---- +interface SearchLaplaceSmoothingModel { + pass:[/**] @property alpha A constant that is added to all counts to balance weights. 
*/ + alpha: <<double>> +} +---- + + +[discrete] +[[SearchLearningToRank]] +=== SearchLearningToRank + +[source,ts,subs=+macros] +---- +interface SearchLearningToRank { + pass:[/**] @property model_id The unique identifier of the trained model uploaded to Elasticsearch */ + model_id: string + pass:[/**] @property params Named parameters to be passed to the query templates used for feature */ + params?: Record<string, any> +} +---- + + +[discrete] +[[SearchLinearInterpolationSmoothingModel]] +=== SearchLinearInterpolationSmoothingModel + +[source,ts,subs=+macros] +---- +interface SearchLinearInterpolationSmoothingModel { + bigram_lambda: <<double>> + trigram_lambda: <<double>> + unigram_lambda: <<double>> +} +---- + + +[discrete] +[[SearchNestedIdentity]] +=== SearchNestedIdentity + +[source,ts,subs=+macros] +---- +interface SearchNestedIdentity { + field: <<Field>> + offset: <<integer>> + _nested?: <<SearchNestedIdentity>> +} +---- + + +[discrete] +[[SearchPhraseSuggest]] +=== SearchPhraseSuggest + +[source,ts,subs=+macros] +---- +interface SearchPhraseSuggest extends <<SearchSuggestBase>> { + options: <<SearchPhraseSuggestOption>> | <<SearchPhraseSuggestOption>>[] +} +---- + + +[discrete] +[[SearchPhraseSuggestCollate]] +=== SearchPhraseSuggestCollate + +[source,ts,subs=+macros] +---- +interface SearchPhraseSuggestCollate { + pass:[/**] @property params Parameters to use if the query is templated. */ + params?: Record<string, any> + pass:[/**] @property prune Returns all suggestions with an extra `collate_match` option indicating whether the generated phrase matched any document. */ + prune?: boolean + pass:[/**] @property query A collate query that is run once for every suggestion. */ + query: <<SearchPhraseSuggestCollateQuery>> +} +---- + + +[discrete] +[[SearchPhraseSuggestCollateQuery]] +=== SearchPhraseSuggestCollateQuery + +[source,ts,subs=+macros] +---- +interface SearchPhraseSuggestCollateQuery { + pass:[/**] @property id The search template ID. */ + id?: <<Id>> + pass:[/**] @property source The query source. */ + source?: string +} +---- + + +[discrete] +[[SearchPhraseSuggestHighlight]] +=== SearchPhraseSuggestHighlight + +[source,ts,subs=+macros] +---- +interface SearchPhraseSuggestHighlight { + pass:[/**] @property post_tag Use in conjunction with `pre_tag` to define the HTML tags to use for the highlighted text. */ + post_tag: string + pass:[/**] @property pre_tag Use in conjunction with `post_tag` to define the HTML tags to use for the highlighted text. */ + pre_tag: string +} +---- + + +[discrete] +[[SearchPhraseSuggestOption]] +=== SearchPhraseSuggestOption + +[source,ts,subs=+macros] +---- +interface SearchPhraseSuggestOption { + text: string + score: <<double>> + highlighted?: string + collate_match?: boolean +} +---- + + +[discrete] +[[SearchPhraseSuggester]] +=== SearchPhraseSuggester + +[source,ts,subs=+macros] +---- +interface SearchPhraseSuggester extends <<SearchSuggesterBase>> { + pass:[/**] @property collate Checks each suggestion against the specified query to prune suggestions for which no matching docs exist in the index. */ + collate?: <<SearchPhraseSuggestCollate>> + pass:[/**] @property confidence Defines a factor applied to the input phrases score, which is used as a threshold for other suggest candidates. Only candidates that score higher than the threshold will be included in the result. */ + confidence?: <<double>> + pass:[/**] @property direct_generator A list of candidate generators that produce a list of possible terms per term in the given text. 
*/ + direct_generator?: <<SearchDirectGenerator>>[] + force_unigrams?: boolean + pass:[/**] @property gram_size Sets max size of the n-grams (shingles) in the field. If the field doesn’t contain n-grams (shingles), this should be omitted or set to `1`. If the field uses a shingle filter, the `gram_size` is set to the `max_shingle_size` if not explicitly set. */ + gram_size?: <<integer>> + pass:[/**] @property highlight Sets up suggestion highlighting. If not provided, no highlighted field is returned. */ + highlight?: <<SearchPhraseSuggestHighlight>> + pass:[/**] @property max_errors The maximum percentage of the terms considered to be misspellings in order to form a correction. This method accepts a <<float>> value in the range `[0..1)` as a fraction of the actual query terms or a number `>=1` as an absolute number of query terms. */ + max_errors?: <<double>> + pass:[/**] @property real_word_error_likelihood The likelihood of a term being misspelled even if the term exists in the dictionary. */ + real_word_error_likelihood?: <<double>> + pass:[/**] @property separator The separator that is used to separate terms in the bigram field. If not set, the whitespace character is used as a separator. */ + separator?: string + pass:[/**] @property shard_size Sets the maximum number of suggested terms to be retrieved from each individual shard. */ + shard_size?: <<integer>> + pass:[/**] @property smoothing The smoothing model used to balance weight between infrequent grams (grams (shingles) are not existing in the index) and frequent grams (appear at least once in the index). The default model is Stupid Backoff. */ + smoothing?: <<SearchSmoothingModelContainer>> + pass:[/**] @property text The text/query to provide suggestions for. */ + text?: string + token_limit?: <<integer>> +} +---- + + +[discrete] +[[SearchPointInTimeReference]] +=== SearchPointInTimeReference + +[source,ts,subs=+macros] +---- +interface SearchPointInTimeReference { + id: <<Id>> + keep_alive?: <<Duration>> +} +---- + + +[discrete] +[[SearchProfile]] +=== SearchProfile + +[source,ts,subs=+macros] +---- +interface SearchProfile { + shards: <<SearchShardProfile>>[] +} +---- + + +[discrete] +[[SearchQueryBreakdown]] +=== SearchQueryBreakdown + +[source,ts,subs=+macros] +---- +interface SearchQueryBreakdown { + advance: <<long>> + advance_count: <<long>> + build_scorer: <<long>> + build_scorer_count: <<long>> + create_weight: <<long>> + create_weight_count: <<long>> + match: <<long>> + match_count: <<long>> + shallow_advance: <<long>> + shallow_advance_count: <<long>> + next_doc: <<long>> + next_doc_count: <<long>> + score: <<long>> + score_count: <<long>> + compute_max_score: <<long>> + compute_max_score_count: <<long>> + count_weight: <<long>> + count_weight_count: <<long>> + set_min_competitive_score: <<long>> + set_min_competitive_score_count: <<long>> +} +---- + + +[discrete] +[[SearchQueryProfile]] +=== SearchQueryProfile + +[source,ts,subs=+macros] +---- +interface SearchQueryProfile { + breakdown: <<SearchQueryBreakdown>> + description: string + time_in_nanos: <<DurationValue>><<<UnitNanos>>> + type: string + children?: <<SearchQueryProfile>>[] +} +---- + + +[discrete] +[[SearchRegexOptions]] +=== SearchRegexOptions + +[source,ts,subs=+macros] +---- +interface SearchRegexOptions { + pass:[/**] @property flags Optional operators for the regular expression. */ + flags?: <<integer>> | string + pass:[/**] @property max_determinized_states Maximum number of automaton states required for the query. 
*/ + max_determinized_states?: <<integer>> +} +---- + + +[discrete] +[[SearchRescore]] +=== SearchRescore + +[source,ts,subs=+macros] +---- +interface SearchRescore { + window_size?: <<integer>> + query?: <<SearchRescoreQuery>> + learning_to_rank?: <<SearchLearningToRank>> +} +---- + + +[discrete] +[[SearchRescoreQuery]] +=== SearchRescoreQuery + +[source,ts,subs=+macros] +---- +interface SearchRescoreQuery { + pass:[/**] @property rescore_query The query to use for rescoring. This query is only run on the Top-K results returned by the `query` and `post_filter` phases. */ + rescore_query: <<QueryDslQueryContainer>> + pass:[/**] @property query_weight Relative importance of the original query versus the rescore query. */ + query_weight?: <<double>> + pass:[/**] @property rescore_query_weight Relative importance of the rescore query versus the original query. */ + rescore_query_weight?: <<double>> + pass:[/**] @property score_mode Determines how scores are combined. */ + score_mode?: <<SearchScoreMode>> +} +---- + + +[discrete] +[[SearchScoreMode]] +=== SearchScoreMode + +[source,ts,subs=+macros] +---- +type SearchScoreMode = 'avg' | 'max' | 'min' | 'multiply' | 'total' +---- + + +[discrete] +[[SearchSearchProfile]] +=== SearchSearchProfile + +[source,ts,subs=+macros] +---- +interface SearchSearchProfile { + collector: <<SearchCollector>>[] + query: <<SearchQueryProfile>>[] + rewrite_time: <<long>> +} +---- + + +[discrete] +[[SearchShardProfile]] +=== SearchShardProfile + +[source,ts,subs=+macros] +---- +interface SearchShardProfile { + aggregations: <<SearchAggregationProfile>>[] + cluster: string + dfs?: <<SearchDfsProfile>> + fetch?: <<SearchFetchProfile>> + id: string + index: <<IndexName>> + node_id: <<NodeId>> + searches: <<SearchSearchProfile>>[] + shard_id: <<long>> +} +---- + + +[discrete] +[[SearchSmoothingModelContainer]] +=== SearchSmoothingModelContainer + +[source,ts,subs=+macros] +---- +interface SearchSmoothingModelContainer { + pass:[/**] @property laplace A smoothing model that uses an additive smoothing where a constant (typically `1.0` or smaller) is added to all counts to balance weights. */ + laplace?: <<SearchLaplaceSmoothingModel>> + pass:[/**] @property linear_interpolation A smoothing model that takes the weighted mean of the unigrams, bigrams, and trigrams based on user supplied weights (lambdas). */ + linear_interpolation?: <<SearchLinearInterpolationSmoothingModel>> + pass:[/**] @property stupid_backoff A simple backoff model that backs off to lower order n-gram models if the higher order count is `0` and discounts the lower order n-gram model by a constant factor. 
*/ + stupid_backoff?: <<SearchStupidBackoffSmoothingModel>> +} +---- + + +[discrete] +[[SearchSourceConfig]] +=== SearchSourceConfig + +[source,ts,subs=+macros] +---- +type SearchSourceConfig = boolean | <<SearchSourceFilter>> | <<Fields>> +---- + + +[discrete] +[[SearchSourceConfigParam]] +=== SearchSourceConfigParam + +[source,ts,subs=+macros] +---- +type SearchSourceConfigParam = boolean | <<Fields>> +---- + + +[discrete] +[[SearchSourceFilter]] +=== SearchSourceFilter + +[source,ts,subs=+macros] +---- +interface SearchSourceFilter { + excludes?: <<Fields>> + exclude?: <<Fields>> + includes?: <<Fields>> + include?: <<Fields>> +} +---- + + +[discrete] +[[SearchStringDistance]] +=== SearchStringDistance + +[source,ts,subs=+macros] +---- +type SearchStringDistance = 'internal' | 'damerau_levenshtein' | 'levenshtein' | 'jaro_winkler' | 'ngram' +---- + + +[discrete] +[[SearchStupidBackoffSmoothingModel]] +=== SearchStupidBackoffSmoothingModel + +[source,ts,subs=+macros] +---- +interface SearchStupidBackoffSmoothingModel { + pass:[/**] @property discount A constant factor that the lower order n-gram model is discounted by. */ + discount: <<double>> +} +---- + + +[discrete] +[[SearchSuggest]] +=== SearchSuggest + +[source,ts,subs=+macros] +---- +type SearchSuggest<TDocument = unknown> = <<SearchCompletionSuggest>><TDocument> | <<SearchPhraseSuggest>> | <<SearchTermSuggest>> +---- + + +[discrete] +[[SearchSuggestBase]] +=== SearchSuggestBase + +[source,ts,subs=+macros] +---- +interface SearchSuggestBase { + length: <<integer>> + offset: <<integer>> + text: string +} +---- + + +[discrete] +[[SearchSuggestFuzziness]] +=== SearchSuggestFuzziness + +[source,ts,subs=+macros] +---- +interface SearchSuggestFuzziness { + pass:[/**] @property fuzziness The fuzziness factor. */ + fuzziness?: <<Fuzziness>> + pass:[/**] @property min_length Minimum length of the input before fuzzy suggestions are returned. */ + min_length?: <<integer>> + pass:[/**] @property prefix_length Minimum length of the input, which is not checked for fuzzy alternatives. */ + prefix_length?: <<integer>> + pass:[/**] @property transpositions If set to `true`, transpositions are counted as one change instead of two. */ + transpositions?: boolean + pass:[/**] @property unicode_aware If `true`, all measurements (like fuzzy edit distance, transpositions, and lengths) are measured in Unicode code points instead of in bytes. This is slightly slower than raw bytes. */ + unicode_aware?: boolean +} +---- + + +[discrete] +[[SearchSuggestSort]] +=== SearchSuggestSort + +[source,ts,subs=+macros] +---- +type SearchSuggestSort = 'score' | 'frequency' +---- + + +[discrete] +[[SearchSuggester]] +=== SearchSuggester + +[source,ts,subs=+macros] +---- +interface SearchSuggesterKeys { + text?: string +} +type SearchSuggester = SearchSuggesterKeys + & { [property: string]: <<SearchFieldSuggester>> | string } +---- + + +[discrete] +[[SearchSuggesterBase]] +=== SearchSuggesterBase + +[source,ts,subs=+macros] +---- +interface SearchSuggesterBase { + pass:[/**] @property field The field to fetch the candidate suggestions from. Needs to be set globally or per suggestion. */ + field: <<Field>> + pass:[/**] @property analyzer The analyzer to analyze the suggest text with. Defaults to the search analyzer of the suggest field. */ + analyzer?: string + pass:[/**] @property size The maximum corrections to be returned per suggest text token. 
*/ + size?: <<integer>> +} +---- + + +[discrete] +[[SearchTermSuggest]] +=== SearchTermSuggest + +[source,ts,subs=+macros] +---- +interface SearchTermSuggest extends <<SearchSuggestBase>> { + options: <<SearchTermSuggestOption>> | <<SearchTermSuggestOption>>[] +} +---- + + +[discrete] +[[SearchTermSuggestOption]] +=== SearchTermSuggestOption + +[source,ts,subs=+macros] +---- +interface SearchTermSuggestOption { + text: string + score: <<double>> + freq: <<long>> + highlighted?: string + collate_match?: boolean +} +---- + + +[discrete] +[[SearchTermSuggester]] +=== SearchTermSuggester + +[source,ts,subs=+macros] +---- +interface SearchTermSuggester extends <<SearchSuggesterBase>> { + lowercase_terms?: boolean + pass:[/**] @property max_edits The maximum edit distance candidate suggestions can have in order to be considered as a suggestion. Can only be `1` or `2`. */ + max_edits?: <<integer>> + pass:[/**] @property max_inspections A factor that is used to multiply with the shard_size in order to inspect more candidate spelling corrections on the shard level. Can improve accuracy at the cost of performance. */ + max_inspections?: <<integer>> + pass:[/**] @property max_term_freq The maximum threshold in number of documents in which a suggest text token can exist in order to be included. Can be a relative percentage number (for example `0.4`) or an absolute number to represent document frequencies. If a value higher than 1 is specified, then fractional can not be specified. */ + max_term_freq?: <<float>> + pass:[/**] @property min_doc_freq The minimal threshold in number of documents a suggestion should appear in. This can improve quality by only suggesting high frequency terms. Can be specified as an absolute number or as a relative percentage of number of documents. If a value higher than 1 is specified, then the number cannot be fractional. */ + min_doc_freq?: <<float>> + pass:[/**] @property min_word_length The minimum length a suggest text term must have in order to be included. */ + min_word_length?: <<integer>> + pass:[/**] @property prefix_length The number of minimal prefix characters that must match in order be a candidate for suggestions. Increasing this number improves spellcheck performance. */ + prefix_length?: <<integer>> + pass:[/**] @property shard_size Sets the maximum number of suggestions to be retrieved from each individual shard. */ + shard_size?: <<integer>> + pass:[/**] @property sort Defines how suggestions should be sorted per suggest text term. */ + sort?: <<SearchSuggestSort>> + pass:[/**] @property string_distance The string distance implementation to use for comparing how similar suggested terms are. */ + string_distance?: <<SearchStringDistance>> + pass:[/**] @property suggest_mode Controls what suggestions are included or controls for what suggest text terms, suggestions should be suggested. */ + suggest_mode?: <<SuggestMode>> + pass:[/**] @property text The suggest text. Needs to be set globally or per suggestion. 
*/ + text?: string +} +---- + + +[discrete] +[[SearchTotalHits]] +=== SearchTotalHits + +[source,ts,subs=+macros] +---- +interface SearchTotalHits { + relation: <<SearchTotalHitsRelation>> + value: <<long>> +} +---- + + +[discrete] +[[SearchTotalHitsRelation]] +=== SearchTotalHitsRelation + +[source,ts,subs=+macros] +---- +type SearchTotalHitsRelation = 'eq' | 'gte' +---- + + +[discrete] +[[SearchTrackHits]] +=== SearchTrackHits + +[source,ts,subs=+macros] +---- +type SearchTrackHits = boolean | <<integer>> +---- + + diff --git a/docs/reference/shared-types/global-search.asciidoc b/docs/reference/shared-types/global-search.asciidoc new file mode 100644 index 000000000..2d4a8dd76 --- /dev/null +++ b/docs/reference/shared-types/global-search.asciidoc @@ -0,0 +1,150 @@ +[[reference-shared-types-global-search]] + +=== `Search` request types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[SearchRequest]] +==== SearchRequest + +[source,ts,subs=+macros] +---- +interface SearchRequest extends <<RequestBase>> { + index?: <<Indices>> + allow_no_indices?: boolean + allow_partial_search_results?: boolean + analyzer?: string + analyze_wildcard?: boolean + batched_reduce_size?: <<long>> + ccs_minimize_roundtrips?: boolean + default_operator?: <<QueryDslOperator>> + df?: string + expand_wildcards?: <<ExpandWildcards>> + ignore_throttled?: boolean + ignore_unavailable?: boolean + include_named_queries_score?: boolean + lenient?: boolean + max_concurrent_shard_requests?: <<long>> + preference?: string + pre_filter_shard_size?: <<long>> + request_cache?: boolean + routing?: <<Routing>> + scroll?: <<Duration>> + search_type?: <<SearchType>> + suggest_field?: <<Field>> + suggest_mode?: <<SuggestMode>> + suggest_size?: <<long>> + suggest_text?: string + typed_keys?: boolean + rest_total_hits_as_int?: boolean + _source_excludes?: <<Fields>> + _source_includes?: <<Fields>> + q?: string + force_synthetic_source?: boolean + aggregations?: Record<string, <<AggregationsAggregationContainer>>> + pass:[/**] @alias aggregations */ + aggs?: Record<string, <<AggregationsAggregationContainer>>> + collapse?: <<SearchFieldCollapse>> + explain?: boolean + ext?: Record<string, any> + from?: <<integer>> + highlight?: <<SearchHighlight>> + track_total_hits?: <<SearchTrackHits>> + indices_boost?: Record<<<IndexName>>, <<double>>>[] + docvalue_fields?: (<<QueryDslFieldAndFormat>> | <<Field>>)[] + knn?: <<KnnSearch>> | <<KnnSearch>>[] + rank?: <<RankContainer>> + 
min_score?: <<double>> + post_filter?: <<QueryDslQueryContainer>> + profile?: boolean + query?: <<QueryDslQueryContainer>> + rescore?: <<SearchRescore>> | <<SearchRescore>>[] + retriever?: <<RetrieverContainer>> + script_fields?: Record<string, <<ScriptField>>> + search_after?: <<SortResults>> + size?: <<integer>> + slice?: <<SlicedScroll>> + sort?: <<Sort>> + _source?: <<SearchSourceConfig>> + fields?: (<<QueryDslFieldAndFormat>> | <<Field>>)[] + suggest?: <<SearchSuggester>> + terminate_after?: <<long>> + timeout?: string + track_scores?: boolean + version?: boolean + seq_no_primary_term?: boolean + stored_fields?: <<Fields>> + pit?: <<SearchPointInTimeReference>> + runtime_mappings?: <<MappingRuntimeFields>> + stats?: string[] +} +---- + + +[discrete] +[[SearchResponse]] +==== SearchResponse + +[source,ts,subs=+macros] +---- +type SearchResponse<TDocument = unknown, TAggregations = Record<<<AggregateName>>, <<AggregationsAggregate>>>> = <<SearchResponseBody>><TDocument, TAggregations> +---- + + +[discrete] +[[SearchResponseBody]] +==== SearchResponseBody + +[source,ts,subs=+macros] +---- +interface SearchResponseBody<TDocument = unknown, TAggregations = Record<<<AggregateName>>, <<AggregationsAggregate>>>> { + took: <<long>> + timed_out: boolean + _shards: <<ShardStatistics>> + hits: <<SearchHitsMetadata>><TDocument> + aggregations?: TAggregations + _clusters?: <<ClusterStatistics>> + fields?: Record<string, any> + max_score?: <<double>> + num_reduce_phases?: <<long>> + profile?: <<SearchProfile>> + pit_id?: <<Id>> + _scroll_id?: <<ScrollId>> + suggest?: Record<<<SuggestionName>>, <<SearchSuggest>><TDocument>[]> + terminated_early?: boolean +} +---- + + diff --git a/docs/reference/shared-types/global-terms-enum.asciidoc b/docs/reference/shared-types/global-terms-enum.asciidoc new file mode 100644 index 000000000..569f365b3 --- /dev/null +++ b/docs/reference/shared-types/global-terms-enum.asciidoc @@ -0,0 +1,70 @@ +[[reference-shared-types-global-terms-enum]] + +=== `TermsEnum` request types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[TermsEnumRequest]] +==== TermsEnumRequest + +[source,ts,subs=+macros] +---- +interface TermsEnumRequest extends <<RequestBase>> { + index: <<IndexName>> + field: <<Field>> + size?: <<integer>> + timeout?: <<Duration>> + case_insensitive?: boolean + index_filter?: <<QueryDslQueryContainer>> + string?: string + search_after?: string +} +---- + + +[discrete] +[[TermsEnumResponse]] +==== TermsEnumResponse + +[source,ts,subs=+macros] +---- +interface TermsEnumResponse { + _shards: <<ShardStatistics>> + terms: string[] + complete: boolean +} +---- + + diff --git a/docs/reference/shared-types/global-termvectors.asciidoc b/docs/reference/shared-types/global-termvectors.asciidoc new file mode 100644 index 000000000..3edbe6782 --- /dev/null +++ b/docs/reference/shared-types/global-termvectors.asciidoc @@ -0,0 +1,164 @@ +[[reference-shared-types-global-termvectors]] + +=== `Termvectors` request types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[TermvectorsFieldStatistics]] +==== TermvectorsFieldStatistics + +[source,ts,subs=+macros] +---- +interface TermvectorsFieldStatistics { + doc_count: <<integer>> + sum_doc_freq: <<long>> + sum_ttf: <<long>> +} +---- + + +[discrete] +[[TermvectorsFilter]] +==== TermvectorsFilter + +[source,ts,subs=+macros] +---- +interface TermvectorsFilter { + pass:[/**] @property max_doc_freq Ignore words which occur in more than this many docs. Defaults to unbounded. */ + max_doc_freq?: <<integer>> + pass:[/**] @property max_num_terms Maximum number of terms that must be returned per field. */ + max_num_terms?: <<integer>> + pass:[/**] @property max_term_freq Ignore words with more than this frequency in the source doc. Defaults to unbounded. */ + max_term_freq?: <<integer>> + pass:[/**] @property max_word_length The maximum word length above which words will be ignored. Defaults to unbounded. 
*/ + max_word_length?: <<integer>> + pass:[/**] @property min_doc_freq Ignore terms which do not occur in at least this many docs. */ + min_doc_freq?: <<integer>> + pass:[/**] @property min_term_freq Ignore words with less than this frequency in the source doc. */ + min_term_freq?: <<integer>> + pass:[/**] @property min_word_length The minimum word length below which words will be ignored. */ + min_word_length?: <<integer>> +} +---- + + +[discrete] +[[TermvectorsRequest]] +==== TermvectorsRequest + +[source,ts,subs=+macros] +---- +interface TermvectorsRequest<TDocument = unknown> extends <<RequestBase>> { + index: <<IndexName>> + id?: <<Id>> + fields?: <<Fields>> + field_statistics?: boolean + offsets?: boolean + payloads?: boolean + positions?: boolean + preference?: string + realtime?: boolean + routing?: <<Routing>> + term_statistics?: boolean + version?: <<VersionNumber>> + version_type?: <<VersionType>> + doc?: TDocument + filter?: <<TermvectorsFilter>> + per_field_analyzer?: Record<<<Field>>, string> +} +---- + + +[discrete] +[[TermvectorsResponse]] +==== TermvectorsResponse + +[source,ts,subs=+macros] +---- +interface TermvectorsResponse { + found: boolean + _id?: <<Id>> + _index: <<IndexName>> + term_vectors?: Record<<<Field>>, <<TermvectorsTermVector>>> + took: <<long>> + _version: <<VersionNumber>> +} +---- + + +[discrete] +[[TermvectorsTerm]] +==== TermvectorsTerm + +[source,ts,subs=+macros] +---- +interface TermvectorsTerm { + doc_freq?: <<integer>> + score?: <<double>> + term_freq: <<integer>> + tokens?: <<TermvectorsToken>>[] + ttf?: <<integer>> +} +---- + + +[discrete] +[[TermvectorsTermVector]] +==== TermvectorsTermVector + +[source,ts,subs=+macros] +---- +interface TermvectorsTermVector { + field_statistics?: <<TermvectorsFieldStatistics>> + terms: Record<string, <<TermvectorsTerm>>> +} +---- + + +[discrete] +[[TermvectorsToken]] +==== TermvectorsToken + +[source,ts,subs=+macros] +---- +interface TermvectorsToken { + end_offset?: <<integer>> + payload?: string + position: <<integer>> + start_offset?: <<integer>> +} +---- + + diff --git a/docs/reference/shared-types/global-update-by-query-rethrottle.asciidoc b/docs/reference/shared-types/global-update-by-query-rethrottle.asciidoc new file mode 100644 index 000000000..5de108e6b --- /dev/null +++ b/docs/reference/shared-types/global-update-by-query-rethrottle.asciidoc @@ -0,0 +1,74 @@ +[[reference-shared-types-global-update-by-query-rethrottle]] + +=== `UpdateByQueryRethrottle` request types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[UpdateByQueryRethrottleRequest]] +==== UpdateByQueryRethrottleRequest + +[source,ts,subs=+macros] +---- +interface UpdateByQueryRethrottleRequest extends <<RequestBase>> { + task_id: <<Id>> + requests_per_second?: <<float>> +} +---- + + +[discrete] +[[UpdateByQueryRethrottleResponse]] +==== UpdateByQueryRethrottleResponse + +[source,ts,subs=+macros] +---- +interface UpdateByQueryRethrottleResponse { + nodes: Record<string, <<UpdateByQueryRethrottleUpdateByQueryRethrottleNode>>> +} +---- + + +[discrete] +[[UpdateByQueryRethrottleUpdateByQueryRethrottleNode]] +==== UpdateByQueryRethrottleUpdateByQueryRethrottleNode + +[source,ts,subs=+macros] +---- +interface UpdateByQueryRethrottleUpdateByQueryRethrottleNode extends <<SpecUtilsBaseNode>> { + tasks: Record<<<TaskId>>, <<TasksTaskInfo>>> +} +---- + + diff --git a/docs/reference/shared-types/global-update-by-query.asciidoc b/docs/reference/shared-types/global-update-by-query.asciidoc new file mode 100644 index 000000000..2db28d730 --- /dev/null +++ b/docs/reference/shared-types/global-update-by-query.asciidoc @@ -0,0 +1,110 @@ +[[reference-shared-types-global-update-by-query]] + +=== `UpdateByQuery` request types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[UpdateByQueryRequest]] +==== UpdateByQueryRequest + +[source,ts,subs=+macros] +---- +interface UpdateByQueryRequest extends <<RequestBase>> { + index: <<Indices>> + allow_no_indices?: boolean + analyzer?: string + analyze_wildcard?: boolean + default_operator?: <<QueryDslOperator>> + df?: string + expand_wildcards?: <<ExpandWildcards>> + from?: <<long>> + ignore_unavailable?: boolean + lenient?: boolean + pipeline?: string + preference?: string + q?: string + refresh?: boolean + request_cache?: boolean + requests_per_second?: <<float>> + routing?: <<Routing>> + scroll?: <<Duration>> + scroll_size?: <<long>> + search_timeout?: <<Duration>> + search_type?: <<SearchType>> + slices?: <<Slices>> + sort?: string[] + stats?: string[] + terminate_after?: <<long>> + timeout?: <<Duration>> + version?: boolean + version_type?: boolean + wait_for_active_shards?: <<WaitForActiveShards>> + wait_for_completion?: boolean + max_docs?: <<long>> + query?: <<QueryDslQueryContainer>> + script?: <<Script>> | string + slice?: <<SlicedScroll>> + conflicts?: <<Conflicts>> +} +---- + + +[discrete] +[[UpdateByQueryResponse]] +==== UpdateByQueryResponse + +[source,ts,subs=+macros] +---- +interface UpdateByQueryResponse { + batches?: <<long>> + failures?: <<BulkIndexByScrollFailure>>[] + noops?: <<long>> + deleted?: <<long>> + requests_per_second?: <<float>> + retries?: <<Retries>> + task?: <<TaskId>> + timed_out?: boolean + took?: <<DurationValue>><<<UnitMillis>>> + total?: <<long>> + updated?: <<long>> + version_conflicts?: <<long>> + throttled?: <<Duration>> + throttled_millis?: <<DurationValue>><<<UnitMillis>>> + throttled_until?: <<Duration>> + throttled_until_millis?: <<DurationValue>><<<UnitMillis>>> +} +---- + + diff --git a/docs/reference/shared-types/global-update.asciidoc b/docs/reference/shared-types/global-update.asciidoc new file mode 100644 index 000000000..4aeffe775 --- /dev/null +++ b/docs/reference/shared-types/global-update.asciidoc @@ -0,0 +1,90 @@ +[[reference-shared-types-global-update]] + +=== `Update` request types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[UpdateRequest]] +==== UpdateRequest + +[source,ts,subs=+macros] +---- +interface UpdateRequest<TDocument = unknown, TPartialDocument = unknown> extends <<RequestBase>> { + id: <<Id>> + index: <<IndexName>> + if_primary_term?: <<long>> + if_seq_no?: <<SequenceNumber>> + lang?: string + refresh?: <<Refresh>> + require_alias?: boolean + retry_on_conflict?: <<integer>> + routing?: <<Routing>> + timeout?: <<Duration>> + wait_for_active_shards?: <<WaitForActiveShards>> + _source_excludes?: <<Fields>> + _source_includes?: <<Fields>> + detect_noop?: boolean + doc?: TPartialDocument + doc_as_upsert?: boolean + script?: <<Script>> | string + scripted_upsert?: boolean + _source?: <<SearchSourceConfig>> + upsert?: TDocument +} +---- + + +[discrete] +[[UpdateResponse]] +==== UpdateResponse + +[source,ts,subs=+macros] +---- +type UpdateResponse<TDocument = unknown> = <<UpdateUpdateWriteResponseBase>><TDocument> +---- + + +[discrete] +[[UpdateUpdateWriteResponseBase]] +==== UpdateUpdateWriteResponseBase + +[source,ts,subs=+macros] +---- +interface UpdateUpdateWriteResponseBase<TDocument = unknown> extends <<WriteResponseBase>> { + get?: <<InlineGet>><TDocument> +} +---- + + diff --git a/docs/reference/shared-types/graph-types.asciidoc b/docs/reference/shared-types/graph-types.asciidoc new file mode 100644 index 000000000..c8d346d9b --- /dev/null +++ b/docs/reference/shared-types/graph-types.asciidoc @@ -0,0 +1,152 @@ +[[reference-shared-types-graph-types]] + +=== `Graph` types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[GraphConnection]] +=== GraphConnection + +[source,ts,subs=+macros] +---- +interface GraphConnection { + doc_count: <<long>> + source: <<long>> + target: <<long>> + weight: <<double>> +} +---- + + +[discrete] +[[GraphExploreControls]] +=== GraphExploreControls + +[source,ts,subs=+macros] +---- +interface GraphExploreControls { + pass:[/**] @property sample_diversity To avoid the top-matching documents sample being dominated by a single source of results, it is sometimes necessary to request diversity in the sample. You can do this by selecting a single-value field and setting a maximum number of documents per value for that field. */ + sample_diversity?: <<GraphSampleDiversity>> + pass:[/**] @property sample_size Each hop considers a sample of the best-matching documents on each shard. Using samples improves the speed of execution and keeps exploration focused on meaningfully-connected terms. Very small values (less than 50) might not provide sufficient weight-of-evidence to identify significant connections between terms. Very large sample sizes can dilute the quality of the results and increase execution times. */ + sample_size?: <<integer>> + pass:[/**] @property timeout The length of time in milliseconds after which exploration will be halted and the results gathered so far are returned. This timeout is honored on a best-effort basis. Execution might overrun this timeout if, for example, a <<long>> pause is encountered while FieldData is loaded for a field. */ + timeout?: <<Duration>> + pass:[/**] @property use_significance Filters associated terms so only those that are significantly associated with your query are included. */ + use_significance: boolean +} +---- + + +[discrete] +[[GraphHop]] +=== GraphHop + +[source,ts,subs=+macros] +---- +interface GraphHop { + pass:[/**] @property connections Specifies one or more fields from which you want to extract terms that are associated with the specified vertices. */ + connections?: <<GraphHop>> + pass:[/**] @property query An optional guiding query that constrains the Graph API as it explores connected terms. */ + query: <<QueryDslQueryContainer>> + pass:[/**] @property vertices Contains the fields you are interested in. */ + vertices: <<GraphVertexDefinition>>[] +} +---- + + +[discrete] +[[GraphSampleDiversity]] +=== GraphSampleDiversity + +[source,ts,subs=+macros] +---- +interface GraphSampleDiversity { + field: <<Field>> + max_docs_per_value: <<integer>> +} +---- + + +[discrete] +[[GraphVertex]] +=== GraphVertex + +[source,ts,subs=+macros] +---- +interface GraphVertex { + depth: <<long>> + field: <<Field>> + term: string + weight: <<double>> +} +---- + + +[discrete] +[[GraphVertexDefinition]] +=== GraphVertexDefinition + +[source,ts,subs=+macros] +---- +interface GraphVertexDefinition { + pass:[/**] @property exclude Prevents the specified terms from being included in the results. */ + exclude?: string[] + pass:[/**] @property field Identifies a field in the documents of interest. 
*/ + field: <<Field>> + pass:[/**] @property include Identifies the terms of interest that form the starting points from which you want to spider out. */ + include?: <<GraphVertexInclude>>[] + pass:[/**] @property min_doc_count Specifies how many documents must contain a pair of terms before it is considered to be a useful connection. This setting acts as a certainty threshold. */ + min_doc_count?: <<long>> + pass:[/**] @property shard_min_doc_count Controls how many documents on a particular shard have to contain a pair of terms before the connection is returned for global consideration. */ + shard_min_doc_count?: <<long>> + pass:[/**] @property size Specifies the maximum number of vertex terms returned for each field. */ + size?: <<integer>> +} +---- + + +[discrete] +[[GraphVertexInclude]] +=== GraphVertexInclude + +[source,ts,subs=+macros] +---- +interface GraphVertexInclude { + boost: <<double>> + term: string +} +---- + + diff --git a/docs/reference/shared-types/ilm-types.asciidoc b/docs/reference/shared-types/ilm-types.asciidoc new file mode 100644 index 000000000..59f2abf52 --- /dev/null +++ b/docs/reference/shared-types/ilm-types.asciidoc @@ -0,0 +1,254 @@ +[[reference-shared-types-ilm-types]] + +=== `Ilm` types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[IlmActions]] +=== IlmActions + +[source,ts,subs=+macros] +---- +interface IlmActions { + pass:[/**] @property allocate Phases allowed: warm, cold. */ + allocate?: <<IlmAllocateAction>> + pass:[/**] @property delete Phases allowed: delete. */ + delete?: <<IlmDeleteAction>> + pass:[/**] @property downsample Phases allowed: hot, warm, cold. */ + downsample?: <<IlmDownsampleAction>> + pass:[/**] @property freeze The freeze action is a noop in 8.x */ + freeze?: <<EmptyObject>> + pass:[/**] @property forcemerge Phases allowed: hot, warm. */ + forcemerge?: <<IlmForceMergeAction>> + pass:[/**] @property migrate Phases allowed: warm, cold. */ + migrate?: <<IlmMigrateAction>> + pass:[/**] @property readonly Phases allowed: hot, warm, cold. */ + readonly?: <<EmptyObject>> + pass:[/**] @property rollover Phases allowed: hot. */ + rollover?: <<IlmRolloverAction>> + pass:[/**] @property set_priority Phases allowed: hot, warm, cold. */ + set_priority?: <<IlmSetPriorityAction>> + pass:[/**] @property searchable_snapshot Phases allowed: hot, cold, frozen. */ + searchable_snapshot?: <<IlmSearchableSnapshotAction>> + pass:[/**] @property shrink Phases allowed: hot, warm. 
*/ + shrink?: <<IlmShrinkAction>> + pass:[/**] @property unfollow Phases allowed: hot, warm, cold, frozen. */ + unfollow?: <<EmptyObject>> + pass:[/**] @property wait_for_snapshot Phases allowed: delete. */ + wait_for_snapshot?: <<IlmWaitForSnapshotAction>> +} +---- + + +[discrete] +[[IlmAllocateAction]] +=== IlmAllocateAction + +[source,ts,subs=+macros] +---- +interface IlmAllocateAction { + number_of_replicas?: <<integer>> + total_shards_per_node?: <<integer>> + include?: Record<string, string> + exclude?: Record<string, string> + require?: Record<string, string> +} +---- + + +[discrete] +[[IlmDeleteAction]] +=== IlmDeleteAction + +[source,ts,subs=+macros] +---- +interface IlmDeleteAction { + delete_searchable_snapshot?: boolean +} +---- + + +[discrete] +[[IlmDownsampleAction]] +=== IlmDownsampleAction + +[source,ts,subs=+macros] +---- +interface IlmDownsampleAction { + fixed_interval: <<DurationLarge>> + wait_timeout?: <<Duration>> +} +---- + + +[discrete] +[[IlmForceMergeAction]] +=== IlmForceMergeAction + +[source,ts,subs=+macros] +---- +interface IlmForceMergeAction { + max_num_segments: <<integer>> + index_codec?: string +} +---- + + +[discrete] +[[IlmMigrateAction]] +=== IlmMigrateAction + +[source,ts,subs=+macros] +---- +interface IlmMigrateAction { + enabled?: boolean +} +---- + + +[discrete] +[[IlmPhase]] +=== IlmPhase + +[source,ts,subs=+macros] +---- +interface IlmPhase { + actions?: <<IlmActions>> + min_age?: <<Duration>> | <<long>> +} +---- + + +[discrete] +[[IlmPhases]] +=== IlmPhases + +[source,ts,subs=+macros] +---- +interface IlmPhases { + cold?: <<IlmPhase>> + delete?: <<IlmPhase>> + frozen?: <<IlmPhase>> + hot?: <<IlmPhase>> + warm?: <<IlmPhase>> +} +---- + + +[discrete] +[[IlmPolicy]] +=== IlmPolicy + +[source,ts,subs=+macros] +---- +interface IlmPolicy { + phases: <<IlmPhases>> + _meta?: <<Metadata>> +} +---- + + +[discrete] +[[IlmRolloverAction]] +=== IlmRolloverAction + +[source,ts,subs=+macros] +---- +interface IlmRolloverAction { + max_size?: <<ByteSize>> + max_primary_shard_size?: <<ByteSize>> + max_age?: <<Duration>> + max_docs?: <<long>> + max_primary_shard_docs?: <<long>> + min_size?: <<ByteSize>> + min_primary_shard_size?: <<ByteSize>> + min_age?: <<Duration>> + min_docs?: <<long>> + min_primary_shard_docs?: <<long>> +} +---- + + +[discrete] +[[IlmSearchableSnapshotAction]] +=== IlmSearchableSnapshotAction + +[source,ts,subs=+macros] +---- +interface IlmSearchableSnapshotAction { + snapshot_repository: string + force_merge_index?: boolean +} +---- + + +[discrete] +[[IlmSetPriorityAction]] +=== IlmSetPriorityAction + +[source,ts,subs=+macros] +---- +interface IlmSetPriorityAction { + priority?: <<integer>> +} +---- + + +[discrete] +[[IlmShrinkAction]] +=== IlmShrinkAction + +[source,ts,subs=+macros] +---- +interface IlmShrinkAction { + number_of_shards?: <<integer>> + max_primary_shard_size?: <<ByteSize>> + allow_write_after_shrink?: boolean +} +---- + + +[discrete] +[[IlmWaitForSnapshotAction]] +=== IlmWaitForSnapshotAction + +[source,ts,subs=+macros] +---- +interface IlmWaitForSnapshotAction { + policy: string +} +---- + + diff --git a/docs/reference/shared-types/index.asciidoc b/docs/reference/shared-types/index.asciidoc new file mode 100644 index 000000000..5a26157aa --- /dev/null +++ b/docs/reference/shared-types/index.asciidoc @@ -0,0 +1,13044 @@ +[[reference-shared-types]] + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ 
█████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + + +== Shared types + + +[discrete] +[[AcknowledgedResponseBase]] +=== AcknowledgedResponseBase + +[source,ts,subs=+macros] +---- +interface AcknowledgedResponseBase { + pass:[/**] @property acknowledged For a successful response, this value is always true. On failure, an exception is returned instead. */ + acknowledged: boolean +} +---- + + + +[discrete] +[[AggregateName]] +=== AggregateName + +[source,ts,subs=+macros] +---- +type AggregateName = string +---- + + + +[discrete] +[[BulkIndexByScrollFailure]] +=== BulkIndexByScrollFailure + +[source,ts,subs=+macros] +---- +interface BulkIndexByScrollFailure { + cause: <<ErrorCause>> + id: <<Id>> + index: <<IndexName>> + status: <<integer>> + type: string +} +---- + + + +[discrete] +[[BulkStats]] +=== BulkStats + +[source,ts,subs=+macros] +---- +interface BulkStats { + total_operations: <<long>> + total_time?: <<Duration>> + total_time_in_millis: <<DurationValue>><<<UnitMillis>>> + total_size?: <<ByteSize>> + total_size_in_bytes: <<long>> + avg_time?: <<Duration>> + avg_time_in_millis: <<DurationValue>><<<UnitMillis>>> + avg_size?: <<ByteSize>> + avg_size_in_bytes: <<long>> +} +---- + + + +[discrete] +[[ByteSize]] +=== ByteSize + +[source,ts,subs=+macros] +---- +type ByteSize = <<long>> | string +---- + + + +[discrete] +[[Bytes]] +=== Bytes + +[source,ts,subs=+macros] +---- +type Bytes = 'b' | 'kb' | 'mb' | 'gb' | 'tb' | 'pb' +---- + + + +[discrete] +[[CategoryId]] +=== CategoryId + +[source,ts,subs=+macros] +---- +type CategoryId = string +---- + + + +[discrete] +[[ClusterAlias]] +=== ClusterAlias + +[source,ts,subs=+macros] +---- +type ClusterAlias = string +---- + + + +[discrete] +[[ClusterDetails]] +=== ClusterDetails + +[source,ts,subs=+macros] +---- +interface ClusterDetails { + status: <<ClusterSearchStatus>> + indices: string + took?: <<DurationValue>><<<UnitMillis>>> + timed_out: boolean + _shards?: <<ShardStatistics>> + failures?: <<ShardFailure>>[] +} +---- + + + +[discrete] +[[ClusterInfoTarget]] +=== ClusterInfoTarget + +[source,ts,subs=+macros] +---- +type ClusterInfoTarget = '_all' | 'http' | 'ingest' | 'thread_pool' | 'script' +---- + + + +[discrete] +[[ClusterInfoTargets]] +=== ClusterInfoTargets + +[source,ts,subs=+macros] +---- +type ClusterInfoTargets = <<ClusterInfoTarget>> | <<ClusterInfoTarget>>[] +---- + + + +[discrete] +[[ClusterSearchStatus]] +=== ClusterSearchStatus + +[source,ts,subs=+macros] +---- +type ClusterSearchStatus = 'running' | 'successful' | 'partial' | 'skipped' | 'failed' +---- + + + +[discrete] +[[ClusterStatistics]] +=== ClusterStatistics + +[source,ts,subs=+macros] +---- 
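+// Cross-cluster search metadata: this object is returned under `_clusters` on a
+// search response (see SearchResponseBody); `details` is keyed by the remote cluster alias.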
+interface ClusterStatistics { + skipped: <<integer>> + successful: <<integer>> + total: <<integer>> + running: <<integer>> + partial: <<integer>> + failed: <<integer>> + details?: Record<<<ClusterAlias>>, <<ClusterDetails>>> +} +---- + + + +[discrete] +[[CompletionStats]] +=== CompletionStats + +[source,ts,subs=+macros] +---- +interface CompletionStats { + pass:[/**] @property size_in_bytes Total amount, in bytes, of memory used for completion across all shards assigned to selected nodes. */ + size_in_bytes: <<long>> + pass:[/**] @property size Total amount of memory used for completion across all shards assigned to selected nodes. */ + size?: <<ByteSize>> + fields?: Record<<<Field>>, <<FieldSizeUsage>>> +} +---- + + + +[discrete] +[[Conflicts]] +=== Conflicts + +[source,ts,subs=+macros] +---- +type Conflicts = 'abort' | 'proceed' +---- + + + +[discrete] +[[CoordsGeoBounds]] +=== CoordsGeoBounds + +[source,ts,subs=+macros] +---- +interface CoordsGeoBounds { + top: <<double>> + bottom: <<double>> + left: <<double>> + right: <<double>> +} +---- + + + +[discrete] +[[DFIIndependenceMeasure]] +=== DFIIndependenceMeasure + +[source,ts,subs=+macros] +---- +type DFIIndependenceMeasure = 'standardized' | 'saturated' | 'chisquared' +---- + + + +[discrete] +[[DFRAfterEffect]] +=== DFRAfterEffect + +[source,ts,subs=+macros] +---- +type DFRAfterEffect = 'no' | 'b' | 'l' +---- + + + +[discrete] +[[DFRBasicModel]] +=== DFRBasicModel + +[source,ts,subs=+macros] +---- +type DFRBasicModel = 'be' | 'd' | 'g' | 'if' | 'in' | 'ine' | 'p' +---- + + + +[discrete] +[[DataStreamName]] +=== DataStreamName + +[source,ts,subs=+macros] +---- +type DataStreamName = string +---- + + + +[discrete] +[[DataStreamNames]] +=== DataStreamNames + +[source,ts,subs=+macros] +---- +type DataStreamNames = <<DataStreamName>> | <<DataStreamName>>[] +---- + + + +[discrete] +[[DateFormat]] +=== DateFormat + +[source,ts,subs=+macros] +---- +type DateFormat = string +---- + + + +[discrete] +[[DateMath]] +=== DateMath + +[source,ts,subs=+macros] +---- +type DateMath = string | Date +---- + + + +[discrete] +[[DateTime]] +=== DateTime + +[source,ts,subs=+macros] +---- +type DateTime = string | <<EpochTime>><<<UnitMillis>>> | Date +---- + + + +[discrete] +[[Distance]] +=== Distance + +[source,ts,subs=+macros] +---- +type Distance = string +---- + + + +[discrete] +[[DistanceUnit]] +=== DistanceUnit + +[source,ts,subs=+macros] +---- +type DistanceUnit = 'in' | 'ft' | 'yd' | 'mi' | 'nmi' | 'km' | 'm' | 'cm' | 'mm' +---- + + + +[discrete] +[[DocStats]] +=== DocStats + +[source,ts,subs=+macros] +---- +interface DocStats { + pass:[/**] @property count Total number of non-deleted documents across all primary shards assigned to selected nodes. This number is based on documents in Lucene segments and may include documents from nested fields. */ + count: <<long>> + pass:[/**] @property deleted Total number of deleted documents across all primary shards assigned to selected nodes. This number is based on documents in Lucene segments. Elasticsearch reclaims the disk space of deleted Lucene documents when a segment is merged. 
*/ + deleted?: <<long>> +} +---- + + + +[discrete] +[[Duration]] +=== Duration + +[source,ts,subs=+macros] +---- +type Duration = string | -1 | 0 +---- + + + +[discrete] +[[DurationLarge]] +=== DurationLarge + +[source,ts,subs=+macros] +---- +type DurationLarge = string +---- + + + +[discrete] +[[DurationValue]] +=== DurationValue + +[source,ts,subs=+macros] +---- +type DurationValue<Unit = unknown> = Unit +---- + + + +[discrete] +[[ElasticsearchVersionInfo]] +=== ElasticsearchVersionInfo + +[source,ts,subs=+macros] +---- +interface ElasticsearchVersionInfo { + build_date: <<DateTime>> + build_flavor: string + build_hash: string + build_snapshot: boolean + build_type: string + lucene_version: <<VersionString>> + minimum_index_compatibility_version: <<VersionString>> + minimum_wire_compatibility_version: <<VersionString>> + number: string +} +---- + + + +[discrete] +[[ElasticsearchVersionMinInfo]] +=== ElasticsearchVersionMinInfo + +[source,ts,subs=+macros] +---- +interface ElasticsearchVersionMinInfo { + build_flavor: string + minimum_index_compatibility_version: <<VersionString>> + minimum_wire_compatibility_version: <<VersionString>> + number: string +} +---- + + + +[discrete] +[[EmptyObject]] +=== EmptyObject + +[source,ts,subs=+macros] +---- +interface EmptyObject {} +---- + + + +[discrete] +[[EpochTime]] +=== EpochTime + +[source,ts,subs=+macros] +---- +type EpochTime<Unit = unknown> = Unit +---- + + + +[discrete] +[[ErrorCause]] +=== ErrorCause + +[source,ts,subs=+macros] +---- +interface ErrorCauseKeys { + type: string + reason?: string + stack_trace?: string + caused_by?: <<ErrorCause>> + root_cause?: <<ErrorCause>>[] + suppressed?: <<ErrorCause>>[] +} +type ErrorCause = ErrorCauseKeys + & { [property: string]: any } +---- + + + +[discrete] +[[ErrorResponseBase]] +=== ErrorResponseBase + +[source,ts,subs=+macros] +---- +interface ErrorResponseBase { + error: <<ErrorCause>> + status: <<integer>> +} +---- + + + +[discrete] +[[EsqlColumns]] +=== EsqlColumns + +[source,ts,subs=+macros] +---- +type EsqlColumns = ArrayBuffer +---- + + + +[discrete] +[[ExpandWildcard]] +=== ExpandWildcard + +[source,ts,subs=+macros] +---- +type ExpandWildcard = 'all' | 'open' | 'closed' | 'hidden' | 'none' +---- + + + +[discrete] +[[ExpandWildcards]] +=== ExpandWildcards + +[source,ts,subs=+macros] +---- +type ExpandWildcards = <<ExpandWildcard>> | <<ExpandWildcard>>[] +---- + + + +[discrete] +[[Field]] +=== Field + +[source,ts,subs=+macros] +---- +type Field = string +---- + + + +[discrete] +[[FieldMemoryUsage]] +=== FieldMemoryUsage + +[source,ts,subs=+macros] +---- +interface FieldMemoryUsage { + memory_size?: <<ByteSize>> + memory_size_in_bytes: <<long>> +} +---- + + + +[discrete] +[[FieldSizeUsage]] +=== FieldSizeUsage + +[source,ts,subs=+macros] +---- +interface FieldSizeUsage { + size?: <<ByteSize>> + size_in_bytes: <<long>> +} +---- + + + +[discrete] +[[FieldSort]] +=== FieldSort + +[source,ts,subs=+macros] +---- +interface FieldSort { + missing?: <<AggregationsMissing>> + mode?: <<SortMode>> + nested?: <<NestedSortValue>> + order?: <<SortOrder>> + unmapped_type?: <<MappingFieldType>> + numeric_type?: <<FieldSortNumericType>> + format?: string +} +---- + + + +[discrete] +[[FieldSortNumericType]] +=== FieldSortNumericType + +[source,ts,subs=+macros] +---- +type FieldSortNumericType = '<<long>>' | '<<double>>' | 'date' | 'date_nanos' +---- + + + +[discrete] +[[FieldValue]] +=== FieldValue + +[source,ts,subs=+macros] +---- +type FieldValue = <<long>> | <<double>> | string | boolean | null | any +---- 
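+
+The definitions above are the raw building blocks behind the client's request and response generics. The short sketch below is not part of the generated reference: it assumes the types can be imported from `@elastic/elasticsearch/lib/api/types` (where recent 8.x clients publish them) and that an Elasticsearch node is reachable locally, and it simply shows how `SearchRequest`, `SearchResponse`, and the `SearchTotalHits` union behave in a typed search call.
+
+[source,ts]
+----
+import { Client } from '@elastic/elasticsearch'
+import type {
+  SearchRequest,
+  SearchResponse
+} from '@elastic/elasticsearch/lib/api/types'
+
+const client = new Client({ node: 'http://localhost:9200' })
+
+interface Doc {
+  title: string
+  year: number
+}
+
+async function run () {
+  // The request body is typed with the shared definitions on this page:
+  // `query` is a QueryDslQueryContainer, `sort` a Sort, and so on.
+  const request: SearchRequest = {
+    index: 'my-index',
+    query: { match: { title: 'test' } },
+    sort: [{ year: { order: 'desc' } }],
+    size: 10
+  }
+
+  const response: SearchResponse<Doc> = await client.search<Doc>(request)
+
+  // `hits.total` is either a plain number (with `rest_total_hits_as_int`)
+  // or a SearchTotalHits object, so narrow it before reading `value`.
+  const total = typeof response.hits.total === 'number'
+    ? response.hits.total
+    : response.hits.total?.value
+
+  console.log(total, response.hits.hits.map(hit => hit._source?.title))
+}
+
+run().catch(console.log)
+----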
+ + + +[discrete] +[[FielddataStats]] +=== FielddataStats + +[source,ts,subs=+macros] +---- +interface FielddataStats { + evictions?: <<long>> + memory_size?: <<ByteSize>> + memory_size_in_bytes: <<long>> + fields?: Record<<<Field>>, <<FieldMemoryUsage>>> +} +---- + + + +[discrete] +[[Fields]] +=== Fields + +[source,ts,subs=+macros] +---- +type Fields = <<Field>> | <<Field>>[] +---- + + + +[discrete] +[[FlushStats]] +=== FlushStats + +[source,ts,subs=+macros] +---- +interface FlushStats { + periodic: <<long>> + total: <<long>> + total_time?: <<Duration>> + total_time_in_millis: <<DurationValue>><<<UnitMillis>>> +} +---- + + + +[discrete] +[[Fuzziness]] +=== Fuzziness + +[source,ts,subs=+macros] +---- +type Fuzziness = string | <<integer>> +---- + + + +[discrete] +[[GeoBounds]] +=== GeoBounds + +[source,ts,subs=+macros] +---- +type GeoBounds = <<CoordsGeoBounds>> | <<TopLeftBottomRightGeoBounds>> | <<TopRightBottomLeftGeoBounds>> | <<WktGeoBounds>> +---- + + + +[discrete] +[[GeoDistanceSort]] +=== GeoDistanceSort + +[source,ts,subs=+macros] +---- +interface GeoDistanceSortKeys { + mode?: <<SortMode>> + distance_type?: <<GeoDistanceType>> + ignore_unmapped?: boolean + order?: <<SortOrder>> + unit?: <<DistanceUnit>> + nested?: <<NestedSortValue>> +} +type GeoDistanceSort = GeoDistanceSortKeys + & { [property: string]: <<GeoLocation>> | <<GeoLocation>>[] | <<SortMode>> | <<GeoDistanceType>> | boolean | <<SortOrder>> | <<DistanceUnit>> | <<NestedSortValue>> } +---- + + + +[discrete] +[[GeoDistanceType]] +=== GeoDistanceType + +[source,ts,subs=+macros] +---- +type GeoDistanceType = 'arc' | 'plane' +---- + + + +[discrete] +[[GeoHash]] +=== GeoHash + +[source,ts,subs=+macros] +---- +type GeoHash = string +---- + + + +[discrete] +[[GeoHashLocation]] +=== GeoHashLocation + +[source,ts,subs=+macros] +---- +interface GeoHashLocation { + geohash: <<GeoHash>> +} +---- + + + +[discrete] +[[GeoHashPrecision]] +=== GeoHashPrecision + +[source,ts,subs=+macros] +---- +type GeoHashPrecision = number | string +---- + + + +[discrete] +[[GeoHexCell]] +=== GeoHexCell + +[source,ts,subs=+macros] +---- +type GeoHexCell = string +---- + + + +[discrete] +[[GeoLine]] +=== GeoLine + +[source,ts,subs=+macros] +---- +interface GeoLine { + pass:[/**] @property type Always `"LineString"` */ + type: string + pass:[/**] @property coordinates Array of `[lon, lat]` coordinates */ + coordinates: <<double>>[][] +} +---- + + + +[discrete] +[[GeoLocation]] +=== GeoLocation + +[source,ts,subs=+macros] +---- +type GeoLocation = <<LatLonGeoLocation>> | <<GeoHashLocation>> | <<double>>[] | string +---- + + + +[discrete] +[[GeoShape]] +=== GeoShape + +[source,ts,subs=+macros] +---- +type GeoShape = any +---- + + + +[discrete] +[[GeoShapeRelation]] +=== GeoShapeRelation + +[source,ts,subs=+macros] +---- +type GeoShapeRelation = 'intersects' | 'disjoint' | 'within' | 'contains' +---- + + + +[discrete] +[[GeoTile]] +=== GeoTile + +[source,ts,subs=+macros] +---- +type GeoTile = string +---- + + + +[discrete] +[[GeoTilePrecision]] +=== GeoTilePrecision + +[source,ts,subs=+macros] +---- +type GeoTilePrecision = number +---- + + + +[discrete] +[[GetStats]] +=== GetStats + +[source,ts,subs=+macros] +---- +interface GetStats { + current: <<long>> + exists_time?: <<Duration>> + exists_time_in_millis: <<DurationValue>><<<UnitMillis>>> + exists_total: <<long>> + missing_time?: <<Duration>> + missing_time_in_millis: <<DurationValue>><<<UnitMillis>>> + missing_total: <<long>> + time?: <<Duration>> + time_in_millis: <<DurationValue>><<<UnitMillis>>> 
+ total: <<long>> +} +---- + + + +[discrete] +[[GrokPattern]] +=== GrokPattern + +[source,ts,subs=+macros] +---- +type GrokPattern = string +---- + + + +[discrete] +[[HealthStatus]] +=== HealthStatus + +[source,ts,subs=+macros] +---- +type HealthStatus = 'green' | 'GREEN' | 'yellow' | 'YELLOW' | 'red' | 'RED' +---- + + + +[discrete] +[[Host]] +=== Host + +[source,ts,subs=+macros] +---- +type Host = string +---- + + + +[discrete] +[[HttpHeaders]] +=== HttpHeaders + +[source,ts,subs=+macros] +---- +type HttpHeaders = Record<string, string | string[]> +---- + + + +[discrete] +[[IBDistribution]] +=== IBDistribution + +[source,ts,subs=+macros] +---- +type IBDistribution = 'll' | 'spl' +---- + + + +[discrete] +[[IBLambda]] +=== IBLambda + +[source,ts,subs=+macros] +---- +type IBLambda = 'df' | 'ttf' +---- + + + +[discrete] +[[Id]] +=== Id + +[source,ts,subs=+macros] +---- +type Id = string +---- + + + +[discrete] +[[Ids]] +=== Ids + +[source,ts,subs=+macros] +---- +type Ids = <<Id>> | <<Id>>[] +---- + + + +[discrete] +[[IndexAlias]] +=== IndexAlias + +[source,ts,subs=+macros] +---- +type IndexAlias = string +---- + + + +[discrete] +[[IndexName]] +=== IndexName + +[source,ts,subs=+macros] +---- +type IndexName = string +---- + + + +[discrete] +[[IndexPattern]] +=== IndexPattern + +[source,ts,subs=+macros] +---- +type IndexPattern = string +---- + + + +[discrete] +[[IndexPatterns]] +=== IndexPatterns + +[source,ts,subs=+macros] +---- +type IndexPatterns = <<IndexPattern>>[] +---- + + + +[discrete] +[[IndexingStats]] +=== IndexingStats + +[source,ts,subs=+macros] +---- +interface IndexingStats { + index_current: <<long>> + delete_current: <<long>> + delete_time?: <<Duration>> + delete_time_in_millis: <<DurationValue>><<<UnitMillis>>> + delete_total: <<long>> + is_throttled: boolean + noop_update_total: <<long>> + throttle_time?: <<Duration>> + throttle_time_in_millis: <<DurationValue>><<<UnitMillis>>> + index_time?: <<Duration>> + index_time_in_millis: <<DurationValue>><<<UnitMillis>>> + index_total: <<long>> + index_failed: <<long>> + types?: Record<string, <<IndexingStats>>> + write_load?: <<double>> +} +---- + + + +[discrete] +[[Indices]] +=== Indices + +[source,ts,subs=+macros] +---- +type Indices = <<IndexName>> | <<IndexName>>[] +---- + + + +[discrete] +[[IndicesOptions]] +=== IndicesOptions + +[source,ts,subs=+macros] +---- +interface IndicesOptions { + pass:[/**] @property allow_no_indices If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ + allow_no_indices?: boolean + pass:[/**] @property expand_wildcards Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. */ + expand_wildcards?: <<ExpandWildcards>> + pass:[/**] @property ignore_unavailable If true, missing or closed indices are not included in the response. */ + ignore_unavailable?: boolean + pass:[/**] @property ignore_throttled If true, concrete, expanded or aliased indices are ignored when frozen. 
*/ + ignore_throttled?: boolean +} +---- + + + +[discrete] +[[IndicesResponseBase]] +=== IndicesResponseBase + +[source,ts,subs=+macros] +---- +interface IndicesResponseBase extends <<AcknowledgedResponseBase>> { + _shards?: <<ShardStatistics>> +} +---- + + + +[discrete] +[[InlineGet]] +=== InlineGet + +[source,ts,subs=+macros] +---- +interface InlineGetKeys<TDocument = unknown> { + fields?: Record<string, any> + found: boolean + _seq_no?: <<SequenceNumber>> + _primary_term?: <<long>> + _routing?: <<Routing>> + _source?: TDocument +} +type InlineGet<TDocument = unknown> = InlineGetKeys<TDocument> + & { [property: string]: any } +---- + + + +[discrete] +[[Ip]] +=== Ip + +[source,ts,subs=+macros] +---- +type Ip = string +---- + + + +[discrete] +[[KnnQuery]] +=== KnnQuery + +[source,ts,subs=+macros] +---- +interface KnnQuery extends <<QueryDslQueryBase>> { + pass:[/**] @property field The name of the vector field to search against */ + field: <<Field>> + pass:[/**] @property query_vector The query vector */ + query_vector?: <<QueryVector>> + pass:[/**] @property query_vector_builder The query vector builder. You must provide a query_vector_builder or query_vector, but not both. */ + query_vector_builder?: <<QueryVectorBuilder>> + pass:[/**] @property num_candidates The number of nearest neighbor candidates to consider per shard */ + num_candidates?: <<integer>> + pass:[/**] @property k The final number of nearest neighbors to return as top hits */ + k?: <<integer>> + pass:[/**] @property filter Filters for the kNN search query */ + filter?: <<QueryDslQueryContainer>> | <<QueryDslQueryContainer>>[] + pass:[/**] @property similarity The minimum similarity for a vector to be considered a match */ + similarity?: <<float>> +} +---- + + + +[discrete] +[[KnnRetriever]] +=== KnnRetriever + +[source,ts,subs=+macros] +---- +interface KnnRetriever extends <<RetrieverBase>> { + pass:[/**] @property field The name of the vector field to search against. */ + field: string + pass:[/**] @property query_vector Query vector. Must have the same number of dimensions as the vector field you are searching against. You must provide a query_vector_builder or query_vector, but not both. */ + query_vector?: <<QueryVector>> + pass:[/**] @property query_vector_builder Defines a model to build a query vector. */ + query_vector_builder?: <<QueryVectorBuilder>> + pass:[/**] @property k Number of nearest neighbors to return as top hits. */ + k: <<integer>> + pass:[/**] @property num_candidates Number of nearest neighbor candidates to consider per shard. */ + num_candidates: <<integer>> + pass:[/**] @property similarity The minimum similarity required for a document to be considered a match. */ + similarity?: <<float>> +} +---- + + + +[discrete] +[[KnnSearch]] +=== KnnSearch + +[source,ts,subs=+macros] +---- +interface KnnSearch { + pass:[/**] @property field The name of the vector field to search against */ + field: <<Field>> + pass:[/**] @property query_vector The query vector */ + query_vector?: <<QueryVector>> + pass:[/**] @property query_vector_builder The query vector builder. You must provide a query_vector_builder or query_vector, but not both. 
*/ + query_vector_builder?: <<QueryVectorBuilder>> + pass:[/**] @property k The final number of nearest neighbors to return as top hits */ + k?: <<integer>> + pass:[/**] @property num_candidates The number of nearest neighbor candidates to consider per shard */ + num_candidates?: <<integer>> + pass:[/**] @property boost Boost value to apply to kNN scores */ + boost?: <<float>> + pass:[/**] @property filter Filters for the kNN search query */ + filter?: <<QueryDslQueryContainer>> | <<QueryDslQueryContainer>>[] + pass:[/**] @property similarity The minimum similarity for a vector to be considered a match */ + similarity?: <<float>> + pass:[/**] @property inner_hits If defined, each search hit will contain inner hits. */ + inner_hits?: <<SearchInnerHits>> +} +---- + + + +[discrete] +[[LatLonGeoLocation]] +=== LatLonGeoLocation + +[source,ts,subs=+macros] +---- +interface LatLonGeoLocation { + pass:[/**] @property lat Latitude */ + lat: <<double>> + pass:[/**] @property lon Longitude */ + lon: <<double>> +} +---- + + + +[discrete] +[[Level]] +=== Level + +[source,ts,subs=+macros] +---- +type Level = 'cluster' | 'indices' | 'shards' +---- + + + +[discrete] +[[LifecycleOperationMode]] +=== LifecycleOperationMode + +[source,ts,subs=+macros] +---- +type LifecycleOperationMode = 'RUNNING' | 'STOPPING' | 'STOPPED' +---- + + + +[discrete] +[[MapboxVectorTiles]] +=== MapboxVectorTiles + +[source,ts,subs=+macros] +---- +type MapboxVectorTiles = ArrayBuffer +---- + + + +[discrete] +[[MergesStats]] +=== MergesStats + +[source,ts,subs=+macros] +---- +interface MergesStats { + current: <<long>> + current_docs: <<long>> + current_size?: string + current_size_in_bytes: <<long>> + total: <<long>> + total_auto_throttle?: string + total_auto_throttle_in_bytes: <<long>> + total_docs: <<long>> + total_size?: string + total_size_in_bytes: <<long>> + total_stopped_time?: <<Duration>> + total_stopped_time_in_millis: <<DurationValue>><<<UnitMillis>>> + total_throttled_time?: <<Duration>> + total_throttled_time_in_millis: <<DurationValue>><<<UnitMillis>>> + total_time?: <<Duration>> + total_time_in_millis: <<DurationValue>><<<UnitMillis>>> +} +---- + + + +[discrete] +[[Metadata]] +=== Metadata + +[source,ts,subs=+macros] +---- +type Metadata = Record<string, any> +---- + + + +[discrete] +[[Metrics]] +=== Metrics + +[source,ts,subs=+macros] +---- +type Metrics = string | string[] +---- + + + +[discrete] +[[MinimumShouldMatch]] +=== MinimumShouldMatch + +[source,ts,subs=+macros] +---- +type MinimumShouldMatch = <<integer>> | string +---- + + + +[discrete] +[[MultiTermQueryRewrite]] +=== MultiTermQueryRewrite + +[source,ts,subs=+macros] +---- +type MultiTermQueryRewrite = string +---- + + + +[discrete] +[[Name]] +=== Name + +[source,ts,subs=+macros] +---- +type Name = string +---- + + + +[discrete] +[[Names]] +=== Names + +[source,ts,subs=+macros] +---- +type Names = <<Name>> | <<Name>>[] +---- + + + +[discrete] +[[Namespace]] +=== Namespace + +[source,ts,subs=+macros] +---- +type Namespace = string +---- + + + +[discrete] +[[NestedSortValue]] +=== NestedSortValue + +[source,ts,subs=+macros] +---- +interface NestedSortValue { + filter?: <<QueryDslQueryContainer>> + max_children?: <<integer>> + nested?: <<NestedSortValue>> + path: <<Field>> +} +---- + + + +[discrete] +[[NodeAttributes]] +=== NodeAttributes + +[source,ts,subs=+macros] +---- +interface NodeAttributes { + pass:[/**] @property attributes Lists node attributes. 
*/ + attributes: Record<string, string> + pass:[/**] @property ephemeral_id The ephemeral ID of the node. */ + ephemeral_id: <<Id>> + pass:[/**] @property id The unique identifier of the node. */ + id?: <<NodeId>> + pass:[/**] @property name The unique identifier of the node. */ + name: <<NodeName>> + pass:[/**] @property transport_address The host and port where transport HTTP connections are accepted. */ + transport_address: <<TransportAddress>> +} +---- + + + +[discrete] +[[NodeId]] +=== NodeId + +[source,ts,subs=+macros] +---- +type NodeId = string +---- + + + +[discrete] +[[NodeIds]] +=== NodeIds + +[source,ts,subs=+macros] +---- +type NodeIds = <<NodeId>> | <<NodeId>>[] +---- + + + +[discrete] +[[NodeName]] +=== NodeName + +[source,ts,subs=+macros] +---- +type NodeName = string +---- + + + +[discrete] +[[NodeRole]] +=== NodeRole + +[source,ts,subs=+macros] +---- +type NodeRole = 'master' | 'data' | 'data_cold' | 'data_content' | 'data_frozen' | 'data_hot' | 'data_warm' | 'client' | 'ingest' | 'ml' | 'voting_only' | 'transform' | 'remote_cluster_client' | 'coordinating_only' +---- + + + +[discrete] +[[NodeRoles]] +=== NodeRoles + +[source,ts,subs=+macros] +---- +type NodeRoles = <<NodeRole>>[] +---- + + + +[discrete] +[[NodeShard]] +=== NodeShard + +[source,ts,subs=+macros] +---- +interface NodeShard { + state: IndicesStatsShardRoutingState + primary: boolean + node?: <<NodeName>> + shard: <<integer>> + index: <<IndexName>> + allocation_id?: Record<string, <<Id>>> + recovery_source?: Record<string, <<Id>>> + unassigned_info?: ClusterAllocationExplainUnassignedInformation + relocating_node?: <<NodeId>> | null + relocation_failure_info?: <<RelocationFailureInfo>> +} +---- + + + +[discrete] +[[NodeStatistics]] +=== NodeStatistics + +[source,ts,subs=+macros] +---- +interface NodeStatistics { + failures?: <<ErrorCause>>[] + pass:[/**] @property total Total number of nodes selected by the request. */ + total: <<integer>> + pass:[/**] @property successful Number of nodes that responded successfully to the request. */ + successful: <<integer>> + pass:[/**] @property failed Number of nodes that rejected the request or failed to respond. If this value is not 0, a reason for the rejection or failure is included in the response. 
*/ + failed: <<integer>> +} +---- + + + +[discrete] +[[Normalization]] +=== Normalization + +[source,ts,subs=+macros] +---- +type Normalization = 'no' | 'h1' | 'h2' | 'h3' | 'z' +---- + + + +[discrete] +[[OpType]] +=== OpType + +[source,ts,subs=+macros] +---- +type OpType = 'index' | 'create' +---- + + + +[discrete] +[[Password]] +=== Password + +[source,ts,subs=+macros] +---- +type Password = string +---- + + + +[discrete] +[[Percentage]] +=== Percentage + +[source,ts,subs=+macros] +---- +type Percentage = string | <<float>> +---- + + + +[discrete] +[[PipelineName]] +=== PipelineName + +[source,ts,subs=+macros] +---- +type PipelineName = string +---- + + + +[discrete] +[[PluginStats]] +=== PluginStats + +[source,ts,subs=+macros] +---- +interface PluginStats { + classname: string + description: string + elasticsearch_version: <<VersionString>> + extended_plugins: string[] + has_native_controller: boolean + java_version: <<VersionString>> + name: <<Name>> + version: <<VersionString>> + licensed: boolean +} +---- + + + +[discrete] +[[PropertyName]] +=== PropertyName + +[source,ts,subs=+macros] +---- +type PropertyName = string +---- + + + +[discrete] +[[QueryCacheStats]] +=== QueryCacheStats + +[source,ts,subs=+macros] +---- +interface QueryCacheStats { + pass:[/**] @property cache_count Total number of entries added to the query cache across all shards assigned to selected nodes. This number includes current and evicted entries. */ + cache_count: <<long>> + pass:[/**] @property cache_size Total number of entries currently in the query cache across all shards assigned to selected nodes. */ + cache_size: <<long>> + pass:[/**] @property evictions Total number of query cache evictions across all shards assigned to selected nodes. */ + evictions: <<long>> + pass:[/**] @property hit_count Total count of query cache hits across all shards assigned to selected nodes. */ + hit_count: <<long>> + pass:[/**] @property memory_size Total amount of memory used for the query cache across all shards assigned to selected nodes. */ + memory_size?: <<ByteSize>> + pass:[/**] @property memory_size_in_bytes Total amount, in bytes, of memory used for the query cache across all shards assigned to selected nodes. */ + memory_size_in_bytes: <<long>> + pass:[/**] @property miss_count Total count of query cache misses across all shards assigned to selected nodes. */ + miss_count: <<long>> + pass:[/**] @property total_count Total count of hits and misses in the query cache across all shards assigned to selected nodes. */ + total_count: <<long>> +} +---- + + + +[discrete] +[[QueryVector]] +=== QueryVector + +[source,ts,subs=+macros] +---- +type QueryVector = <<float>>[] +---- + + + +[discrete] +[[QueryVectorBuilder]] +=== QueryVectorBuilder + +[source,ts,subs=+macros] +---- +interface QueryVectorBuilder { + text_embedding?: <<TextEmbedding>> +} +---- + + + +[discrete] +[[RRFRetriever]] +=== RRFRetriever + +[source,ts,subs=+macros] +---- +interface RRFRetriever extends <<RetrieverBase>> { + pass:[/**] @property retrievers A list of child retrievers to specify which sets of returned top documents will have the RRF formula applied to them. */ + retrievers: <<RetrieverContainer>>[] + pass:[/**] @property rank_constant This value determines how much influence documents in individual result sets per query have over the final ranked result set. */ + rank_constant?: <<integer>> + pass:[/**] @property rank_window_size This value determines the size of the individual result sets per query. 
*/ + rank_window_size?: <<integer>> +} +---- + + + +[discrete] +[[RankBase]] +=== RankBase + +[source,ts,subs=+macros] +---- +interface RankBase {} +---- + + + +[discrete] +[[RankContainer]] +=== RankContainer + +[source,ts,subs=+macros] +---- +interface RankContainer { + pass:[/**] @property rrf The reciprocal rank fusion parameters */ + rrf?: <<RrfRank>> +} +---- + + + +[discrete] +[[RecoveryStats]] +=== RecoveryStats + +[source,ts,subs=+macros] +---- +interface RecoveryStats { + current_as_source: <<long>> + current_as_target: <<long>> + throttle_time?: <<Duration>> + throttle_time_in_millis: <<DurationValue>><<<UnitMillis>>> +} +---- + + + +[discrete] +[[Refresh]] +=== Refresh + +[source,ts,subs=+macros] +---- +type Refresh = boolean | 'true' | 'false' | 'wait_for' +---- + + + +[discrete] +[[RefreshStats]] +=== RefreshStats + +[source,ts,subs=+macros] +---- +interface RefreshStats { + external_total: <<long>> + external_total_time_in_millis: <<DurationValue>><<<UnitMillis>>> + listeners: <<long>> + total: <<long>> + total_time?: <<Duration>> + total_time_in_millis: <<DurationValue>><<<UnitMillis>>> +} +---- + + + +[discrete] +[[RelationName]] +=== RelationName + +[source,ts,subs=+macros] +---- +type RelationName = string +---- + + + +[discrete] +[[RelocationFailureInfo]] +=== RelocationFailureInfo + +[source,ts,subs=+macros] +---- +interface RelocationFailureInfo { + failed_attempts: <<integer>> +} +---- + + + +[discrete] +[[RequestBase]] +=== RequestBase + +[source,ts,subs=+macros] +---- +interface RequestBase extends <<SpecUtilsCommonQueryParameters>> {} +---- + + + +[discrete] +[[RequestCacheStats]] +=== RequestCacheStats + +[source,ts,subs=+macros] +---- +interface RequestCacheStats { + evictions: <<long>> + hit_count: <<long>> + memory_size?: string + memory_size_in_bytes: <<long>> + miss_count: <<long>> +} +---- + + + +[discrete] +[[Result]] +=== Result + +[source,ts,subs=+macros] +---- +type Result = 'created' | 'updated' | 'deleted' | 'not_found' | 'noop' +---- + + + +[discrete] +[[Retries]] +=== Retries + +[source,ts,subs=+macros] +---- +interface Retries { + bulk: <<long>> + search: <<long>> +} +---- + + + +[discrete] +[[RetrieverBase]] +=== RetrieverBase + +[source,ts,subs=+macros] +---- +interface RetrieverBase { + pass:[/**] @property filter Query to filter the documents that can match. */ + filter?: <<QueryDslQueryContainer>> | <<QueryDslQueryContainer>>[] + pass:[/**] @property min_score Minimum _score for matching documents. Documents with a lower _score are not included in the top documents. */ + min_score?: <<float>> +} +---- + + + +[discrete] +[[RetrieverContainer]] +=== RetrieverContainer + +[source,ts,subs=+macros] +---- +interface RetrieverContainer { + pass:[/**] @property standard A retriever that replaces the functionality of a traditional query. */ + standard?: <<StandardRetriever>> + pass:[/**] @property knn A retriever that replaces the functionality of a knn search. */ + knn?: <<KnnRetriever>> + pass:[/**] @property rrf A retriever that produces top documents from reciprocal rank fusion (RRF). */ + rrf?: <<RRFRetriever>> + pass:[/**] @property text_similarity_reranker A retriever that reranks the top documents based on a reranking model using the InferenceAPI */ + text_similarity_reranker?: <<TextSimilarityReranker>> + pass:[/**] @property rule A retriever that replaces the functionality of a rule query. 
*/ + rule?: <<RuleRetriever>> +} +---- + + + +[discrete] +[[Routing]] +=== Routing + +[source,ts,subs=+macros] +---- +type Routing = string +---- + + + +[discrete] +[[RrfRank]] +=== RrfRank + +[source,ts,subs=+macros] +---- +interface RrfRank { + pass:[/**] @property rank_constant How much influence documents in individual result sets per query have over the final ranked result set */ + rank_constant?: <<long>> + pass:[/**] @property rank_window_size Size of the individual result sets per query */ + rank_window_size?: <<long>> +} +---- + + + +[discrete] +[[RuleRetriever]] +=== RuleRetriever + +[source,ts,subs=+macros] +---- +interface RuleRetriever extends <<RetrieverBase>> { + pass:[/**] @property ruleset_ids The ruleset IDs containing the rules this retriever is evaluating against. */ + ruleset_ids: <<Id>>[] + pass:[/**] @property match_criteria The match criteria that will determine if a rule in the provided rulesets should be applied. */ + match_criteria: any + pass:[/**] @property retriever The retriever whose results rules should be applied to. */ + retriever: <<RetrieverContainer>> + pass:[/**] @property rank_window_size This value determines the size of the individual result set. */ + rank_window_size?: <<integer>> +} +---- + + + +[discrete] +[[ScalarValue]] +=== ScalarValue + +[source,ts,subs=+macros] +---- +type ScalarValue = <<long>> | <<double>> | string | boolean | null +---- + + + +[discrete] +[[ScoreSort]] +=== ScoreSort + +[source,ts,subs=+macros] +---- +interface ScoreSort { + order?: <<SortOrder>> +} +---- + + + +[discrete] +[[Script]] +=== Script + +[source,ts,subs=+macros] +---- +interface Script { + pass:[/**] @property source The script source. */ + source?: string + pass:[/**] @property id The `id` for a stored script. */ + id?: <<Id>> + pass:[/**] @property params Specifies any named parameters that are passed into the script as variables. Use parameters instead of hard-coded values to decrease compile time. */ + params?: Record<string, any> + pass:[/**] @property lang Specifies the language the script is written in. 
*/ + lang?: <<ScriptLanguage>> + options?: Record<string, string> +} +---- + + + +[discrete] +[[ScriptField]] +=== ScriptField + +[source,ts,subs=+macros] +---- +interface ScriptField { + script: <<Script>> | string + ignore_failure?: boolean +} +---- + + + +[discrete] +[[ScriptLanguage]] +=== ScriptLanguage + +[source,ts,subs=+macros] +---- +type ScriptLanguage = 'painless' | 'expression' | 'mustache' | 'java' | string +---- + + + +[discrete] +[[ScriptSort]] +=== ScriptSort + +[source,ts,subs=+macros] +---- +interface ScriptSort { + order?: <<SortOrder>> + script: <<Script>> | string + type?: <<ScriptSortType>> + mode?: <<SortMode>> + nested?: <<NestedSortValue>> +} +---- + + + +[discrete] +[[ScriptSortType]] +=== ScriptSortType + +[source,ts,subs=+macros] +---- +type ScriptSortType = 'string' | 'number' | 'version' +---- + + + +[discrete] +[[ScriptTransform]] +=== ScriptTransform + +[source,ts,subs=+macros] +---- +interface ScriptTransform { + lang?: string + params?: Record<string, any> + source?: string + id?: string +} +---- + + + +[discrete] +[[ScrollId]] +=== ScrollId + +[source,ts,subs=+macros] +---- +type ScrollId = string +---- + + + +[discrete] +[[ScrollIds]] +=== ScrollIds + +[source,ts,subs=+macros] +---- +type ScrollIds = <<ScrollId>> | <<ScrollId>>[] +---- + + + +[discrete] +[[SearchStats]] +=== SearchStats + +[source,ts,subs=+macros] +---- +interface SearchStats { + fetch_current: <<long>> + fetch_time?: <<Duration>> + fetch_time_in_millis: <<DurationValue>><<<UnitMillis>>> + fetch_total: <<long>> + open_contexts?: <<long>> + query_current: <<long>> + query_time?: <<Duration>> + query_time_in_millis: <<DurationValue>><<<UnitMillis>>> + query_total: <<long>> + scroll_current: <<long>> + scroll_time?: <<Duration>> + scroll_time_in_millis: <<DurationValue>><<<UnitMillis>>> + scroll_total: <<long>> + suggest_current: <<long>> + suggest_time?: <<Duration>> + suggest_time_in_millis: <<DurationValue>><<<UnitMillis>>> + suggest_total: <<long>> + groups?: Record<string, <<SearchStats>>> +} +---- + + + +[discrete] +[[SearchTransform]] +=== SearchTransform + +[source,ts,subs=+macros] +---- +interface SearchTransform { + request: <<WatcherSearchInputRequestDefinition>> + timeout: <<Duration>> +} +---- + + + +[discrete] +[[SearchType]] +=== SearchType + +[source,ts,subs=+macros] +---- +type SearchType = 'query_then_fetch' | 'dfs_query_then_fetch' +---- + + + +[discrete] +[[SegmentsStats]] +=== SegmentsStats + +[source,ts,subs=+macros] +---- +interface SegmentsStats { + pass:[/**] @property count Total number of segments across all shards assigned to selected nodes. */ + count: <<integer>> + pass:[/**] @property doc_values_memory Total amount of memory used for doc values across all shards assigned to selected nodes. */ + doc_values_memory?: <<ByteSize>> + pass:[/**] @property doc_values_memory_in_bytes Total amount, in bytes, of memory used for doc values across all shards assigned to selected nodes. */ + doc_values_memory_in_bytes: <<long>> + pass:[/**] @property file_sizes This object is not populated by the cluster stats API. To get information on segment files, use the node stats API. */ + file_sizes: Record<string, IndicesStatsShardFileSizeInfo> + pass:[/**] @property fixed_bit_set Total amount of memory used by fixed bit sets across all shards assigned to selected nodes. Fixed bit sets are used for nested object field types and type filters for join fields. 
*/ + fixed_bit_set?: <<ByteSize>> + pass:[/**] @property fixed_bit_set_memory_in_bytes Total amount of memory, in bytes, used by fixed bit sets across all shards assigned to selected nodes. */ + fixed_bit_set_memory_in_bytes: <<long>> + pass:[/**] @property index_writer_memory Total amount of memory used by all index writers across all shards assigned to selected nodes. */ + index_writer_memory?: <<ByteSize>> + index_writer_max_memory_in_bytes?: <<long>> + pass:[/**] @property index_writer_memory_in_bytes Total amount, in bytes, of memory used by all index writers across all shards assigned to selected nodes. */ + index_writer_memory_in_bytes: <<long>> + pass:[/**] @property max_unsafe_auto_id_timestamp Unix timestamp, in milliseconds, of the most recently retried indexing request. */ + max_unsafe_auto_id_timestamp: <<long>> + pass:[/**] @property memory Total amount of memory used for segments across all shards assigned to selected nodes. */ + memory?: <<ByteSize>> + pass:[/**] @property memory_in_bytes Total amount, in bytes, of memory used for segments across all shards assigned to selected nodes. */ + memory_in_bytes: <<long>> + pass:[/**] @property norms_memory Total amount of memory used for normalization factors across all shards assigned to selected nodes. */ + norms_memory?: <<ByteSize>> + pass:[/**] @property norms_memory_in_bytes Total amount, in bytes, of memory used for normalization factors across all shards assigned to selected nodes. */ + norms_memory_in_bytes: <<long>> + pass:[/**] @property points_memory Total amount of memory used for points across all shards assigned to selected nodes. */ + points_memory?: <<ByteSize>> + pass:[/**] @property points_memory_in_bytes Total amount, in bytes, of memory used for points across all shards assigned to selected nodes. */ + points_memory_in_bytes: <<long>> + stored_memory?: <<ByteSize>> + pass:[/**] @property stored_fields_memory_in_bytes Total amount, in bytes, of memory used for stored fields across all shards assigned to selected nodes. */ + stored_fields_memory_in_bytes: <<long>> + pass:[/**] @property terms_memory_in_bytes Total amount, in bytes, of memory used for terms across all shards assigned to selected nodes. */ + terms_memory_in_bytes: <<long>> + pass:[/**] @property terms_memory Total amount of memory used for terms across all shards assigned to selected nodes. */ + terms_memory?: <<ByteSize>> + pass:[/**] @property term_vectory_memory Total amount of memory used for term vectors across all shards assigned to selected nodes. */ + term_vectory_memory?: <<ByteSize>> + pass:[/**] @property term_vectors_memory_in_bytes Total amount, in bytes, of memory used for term vectors across all shards assigned to selected nodes. */ + term_vectors_memory_in_bytes: <<long>> + pass:[/**] @property version_map_memory Total amount of memory used by all version maps across all shards assigned to selected nodes. */ + version_map_memory?: <<ByteSize>> + pass:[/**] @property version_map_memory_in_bytes Total amount, in bytes, of memory used by all version maps across all shards assigned to selected nodes. 
*/ + version_map_memory_in_bytes: <<long>> +} +---- + + + +[discrete] +[[SequenceNumber]] +=== SequenceNumber + +[source,ts,subs=+macros] +---- +type SequenceNumber = <<long>> +---- + + + +[discrete] +[[Service]] +=== Service + +[source,ts,subs=+macros] +---- +type Service = string +---- + + + +[discrete] +[[ShardFailure]] +=== ShardFailure + +[source,ts,subs=+macros] +---- +interface ShardFailure { + index?: <<IndexName>> + node?: string + reason: <<ErrorCause>> + shard: <<integer>> + status?: string +} +---- + + + +[discrete] +[[ShardStatistics]] +=== ShardStatistics + +[source,ts,subs=+macros] +---- +interface ShardStatistics { + failed: <<uint>> + pass:[/**] @property successful Indicates how many shards have successfully run the search. */ + successful: <<uint>> + pass:[/**] @property total Indicates how many shards the search will run on overall. */ + total: <<uint>> + failures?: <<ShardFailure>>[] + skipped?: <<uint>> +} +---- + + + +[discrete] +[[ShardsOperationResponseBase]] +=== ShardsOperationResponseBase + +[source,ts,subs=+macros] +---- +interface ShardsOperationResponseBase { + _shards?: <<ShardStatistics>> +} +---- + + + +[discrete] +[[SlicedScroll]] +=== SlicedScroll + +[source,ts,subs=+macros] +---- +interface SlicedScroll { + field?: <<Field>> + id: <<Id>> + max: <<integer>> +} +---- + + + +[discrete] +[[Slices]] +=== Slices + +[source,ts,subs=+macros] +---- +type Slices = <<integer>> | <<SlicesCalculation>> +---- + + + +[discrete] +[[SlicesCalculation]] +=== SlicesCalculation + +[source,ts,subs=+macros] +---- +type SlicesCalculation = 'auto' +---- + + + +[discrete] +[[Sort]] +=== Sort + +[source,ts,subs=+macros] +---- +type Sort = <<SortCombinations>> | <<SortCombinations>>[] +---- + + + +[discrete] +[[SortCombinations]] +=== SortCombinations + +[source,ts,subs=+macros] +---- +type SortCombinations = <<Field>> | <<SortOptions>> +---- + + + +[discrete] +[[SortMode]] +=== SortMode + +[source,ts,subs=+macros] +---- +type SortMode = 'min' | 'max' | 'sum' | 'avg' | 'median' +---- + + + +[discrete] +[[SortOptions]] +=== SortOptions + +[source,ts,subs=+macros] +---- +interface SortOptionsKeys { + _score?: <<ScoreSort>> + _doc?: <<ScoreSort>> + _geo_distance?: <<GeoDistanceSort>> + _script?: <<ScriptSort>> +} +type SortOptions = SortOptionsKeys + & { [property: string]: <<FieldSort>> | <<SortOrder>> | <<ScoreSort>> | <<GeoDistanceSort>> | <<ScriptSort>> } +---- + + + +[discrete] +[[SortOrder]] +=== SortOrder + +[source,ts,subs=+macros] +---- +type SortOrder = 'asc' | 'desc' +---- + + + +[discrete] +[[SortResults]] +=== SortResults + +[source,ts,subs=+macros] +---- +type SortResults = <<FieldValue>>[] +---- + + + +[discrete] +[[StandardRetriever]] +=== StandardRetriever + +[source,ts,subs=+macros] +---- +interface StandardRetriever extends <<RetrieverBase>> { + pass:[/**] @property query Defines a query to retrieve a set of top documents. */ + query?: <<QueryDslQueryContainer>> + pass:[/**] @property search_after Defines a search after object parameter used for pagination. */ + search_after?: <<SortResults>> + pass:[/**] @property terminate_after Maximum number of documents to collect for each shard. */ + terminate_after?: <<integer>> + pass:[/**] @property sort A sort object that specifies the order of matching documents. */ + sort?: <<Sort>> + pass:[/**] @property collapse Collapses the top documents by a specified key into a single top document per key.
*/ + collapse?: <<SearchFieldCollapse>> +} +---- + + + +[discrete] +[[StoreStats]] +=== StoreStats + +[source,ts,subs=+macros] +---- +interface StoreStats { + pass:[/**] @property size Total size of all shards assigned to selected nodes. */ + size?: <<ByteSize>> + pass:[/**] @property size_in_bytes Total size, in bytes, of all shards assigned to selected nodes. */ + size_in_bytes: <<long>> + pass:[/**] @property reserved A prediction of how much larger the shard stores will eventually grow due to ongoing peer recoveries, restoring snapshots, and similar activities. */ + reserved?: <<ByteSize>> + pass:[/**] @property reserved_in_bytes A prediction, in bytes, of how much larger the shard stores will eventually grow due to ongoing peer recoveries, restoring snapshots, and similar activities. */ + reserved_in_bytes: <<long>> + pass:[/**] @property total_data_set_size Total data set size of all shards assigned to selected nodes. This includes the size of shards not stored fully on the nodes, such as the cache for partially mounted indices. */ + total_data_set_size?: <<ByteSize>> + pass:[/**] @property total_data_set_size_in_bytes Total data set size, in bytes, of all shards assigned to selected nodes. This includes the size of shards not stored fully on the nodes, such as the cache for partially mounted indices. */ + total_data_set_size_in_bytes?: <<long>> +} +---- + + + +[discrete] +[[StoredScript]] +=== StoredScript + +[source,ts,subs=+macros] +---- +interface StoredScript { + pass:[/**] @property lang Specifies the language the script is written in. */ + lang: <<ScriptLanguage>> + options?: Record<string, string> + pass:[/**] @property source The script source. */ + source: string +} +---- + + + +[discrete] +[[SuggestMode]] +=== SuggestMode + +[source,ts,subs=+macros] +---- +type SuggestMode = 'missing' | 'popular' | 'always' +---- + + + +[discrete] +[[SuggestionName]] +=== SuggestionName + +[source,ts,subs=+macros] +---- +type SuggestionName = string +---- + + + +[discrete] +[[TaskFailure]] +=== TaskFailure + +[source,ts,subs=+macros] +---- +interface TaskFailure { + task_id: <<long>> + node_id: <<NodeId>> + status: string + reason: <<ErrorCause>> +} +---- + + + +[discrete] +[[TaskId]] +=== TaskId + +[source,ts,subs=+macros] +---- +type TaskId = string | <<integer>> +---- + + + +[discrete] +[[TextEmbedding]] +=== TextEmbedding + +[source,ts,subs=+macros] +---- +interface TextEmbedding { + model_id: string + model_text: string +} +---- + + + +[discrete] +[[TextSimilarityReranker]] +=== TextSimilarityReranker + +[source,ts,subs=+macros] +---- +interface TextSimilarityReranker extends <<RetrieverBase>> { + pass:[/**] @property retriever The nested retriever which will produce the first-level results, that will later be used for reranking. */ + retriever: <<RetrieverContainer>> + pass:[/**] @property rank_window_size This value determines how many documents we will consider from the nested retriever. */ + rank_window_size?: <<integer>> + pass:[/**] @property inference_id Unique identifier of the inference endpoint created using the inference API. */ + inference_id?: string + pass:[/**] @property inference_text The text snippet used as the basis for similarity comparison */ + inference_text?: string + pass:[/**] @property field The document field to be used for text similarity comparisons. 
This field should contain the text that will be evaluated against the inference_text */ + field?: string +} +---- + + + +[discrete] +[[ThreadType]] +=== ThreadType + +[source,ts,subs=+macros] +---- +type ThreadType = 'cpu' | 'wait' | 'block' | 'gpu' | 'mem' +---- + + + +[discrete] +[[TimeOfDay]] +=== TimeOfDay + +[source,ts,subs=+macros] +---- +type TimeOfDay = string +---- + + + +[discrete] +[[TimeUnit]] +=== TimeUnit + +[source,ts,subs=+macros] +---- +type TimeUnit = 'nanos' | 'micros' | 'ms' | 's' | 'm' | 'h' | 'd' +---- + + + +[discrete] +[[TimeZone]] +=== TimeZone + +[source,ts,subs=+macros] +---- +type TimeZone = string +---- + + + +[discrete] +[[TopLeftBottomRightGeoBounds]] +=== TopLeftBottomRightGeoBounds + +[source,ts,subs=+macros] +---- +interface TopLeftBottomRightGeoBounds { + top_left: <<GeoLocation>> + bottom_right: <<GeoLocation>> +} +---- + + + +[discrete] +[[TopRightBottomLeftGeoBounds]] +=== TopRightBottomLeftGeoBounds + +[source,ts,subs=+macros] +---- +interface TopRightBottomLeftGeoBounds { + top_right: <<GeoLocation>> + bottom_left: <<GeoLocation>> +} +---- + + + +[discrete] +[[TransformContainer]] +=== TransformContainer + +[source,ts,subs=+macros] +---- +interface TransformContainer { + chain?: <<TransformContainer>>[] + script?: <<ScriptTransform>> + search?: <<SearchTransform>> +} +---- + + + +[discrete] +[[TranslogStats]] +=== TranslogStats + +[source,ts,subs=+macros] +---- +interface TranslogStats { + earliest_last_modified_age: <<long>> + operations: <<long>> + size?: string + size_in_bytes: <<long>> + uncommitted_operations: <<integer>> + uncommitted_size?: string + uncommitted_size_in_bytes: <<long>> +} +---- + + + +[discrete] +[[TransportAddress]] +=== TransportAddress + +[source,ts,subs=+macros] +---- +type TransportAddress = string +---- + + + +[discrete] +[[UnitFloatMillis]] +=== UnitFloatMillis + +[source,ts,subs=+macros] +---- +type UnitFloatMillis = <<double>> +---- + + + +[discrete] +[[UnitMillis]] +=== UnitMillis + +[source,ts,subs=+macros] +---- +type UnitMillis = <<long>> +---- + + + +[discrete] +[[UnitNanos]] +=== UnitNanos + +[source,ts,subs=+macros] +---- +type UnitNanos = <<long>> +---- + + + +[discrete] +[[UnitSeconds]] +=== UnitSeconds + +[source,ts,subs=+macros] +---- +type UnitSeconds = <<long>> +---- + + + +[discrete] +[[Username]] +=== Username + +[source,ts,subs=+macros] +---- +type Username = string +---- + + + +[discrete] +[[Uuid]] +=== Uuid + +[source,ts,subs=+macros] +---- +type Uuid = string +---- + + + +[discrete] +[[VersionNumber]] +=== VersionNumber + +[source,ts,subs=+macros] +---- +type VersionNumber = <<long>> +---- + + + +[discrete] +[[VersionString]] +=== VersionString + +[source,ts,subs=+macros] +---- +type VersionString = string +---- + + + +[discrete] +[[VersionType]] +=== VersionType + +[source,ts,subs=+macros] +---- +type VersionType = 'internal' | 'external' | 'external_gte' | 'force' +---- + + + +[discrete] +[[WaitForActiveShardOptions]] +=== WaitForActiveShardOptions + +[source,ts,subs=+macros] +---- +type WaitForActiveShardOptions = 'all' | 'index-setting' +---- + + + +[discrete] +[[WaitForActiveShards]] +=== WaitForActiveShards + +[source,ts,subs=+macros] +---- +type WaitForActiveShards = <<integer>> | <<WaitForActiveShardOptions>> +---- + + + +[discrete] +[[WaitForEvents]] +=== WaitForEvents + +[source,ts,subs=+macros] +---- +type WaitForEvents = 'immediate' | 'urgent' | 'high' | 'normal' | 'low' | 'languid' +---- + + + +[discrete] +[[WarmerStats]] +=== WarmerStats + +[source,ts,subs=+macros] +---- +interface 
WarmerStats { + current: <<long>> + total: <<long>> + total_time?: <<Duration>> + total_time_in_millis: <<DurationValue>><<<UnitMillis>>> +} +---- + + + +[discrete] +[[WktGeoBounds]] +=== WktGeoBounds + +[source,ts,subs=+macros] +---- +interface WktGeoBounds { + wkt: string +} +---- + + + +[discrete] +[[WriteResponseBase]] +=== WriteResponseBase + +[source,ts,subs=+macros] +---- +interface WriteResponseBase { + _id: <<Id>> + _index: <<IndexName>> + _primary_term?: <<long>> + result: <<Result>> + _seq_no?: <<SequenceNumber>> + _shards: <<ShardStatistics>> + _version: <<VersionNumber>> + forced_refresh?: boolean +} +---- + + + +[discrete] +[[byte]] +=== byte + +[source,ts,subs=+macros] +---- +type byte = number +---- + + + +[discrete] +[[double]] +=== double + +[source,ts,subs=+macros] +---- +type double = number +---- + + + +[discrete] +[[float]] +=== float + +[source,ts,subs=+macros] +---- +type float = number +---- + + + +[discrete] +[[integer]] +=== integer + +[source,ts,subs=+macros] +---- +type integer = number +---- + + + +[discrete] +[[long]] +=== long + +[source,ts,subs=+macros] +---- +type long = number +---- + + + +[discrete] +[[short]] +=== short + +[source,ts,subs=+macros] +---- +type short = number +---- + + + +[discrete] +[[uint]] +=== uint + +[source,ts,subs=+macros] +---- +type uint = number +---- + + + +[discrete] +[[ulong]] +=== ulong + +[source,ts,subs=+macros] +---- +type ulong = number +---- + + + +[discrete] +[[SpecUtilsBaseNode]] +=== SpecUtilsBaseNode + +[source,ts,subs=+macros] +---- +interface SpecUtilsBaseNode { + attributes: Record<string, string> + host: <<Host>> + ip: <<Ip>> + name: <<Name>> + roles?: <<NodeRoles>> + transport_address: <<TransportAddress>> +} +---- + + + +[discrete] +[[SpecUtilsNullValue]] +=== SpecUtilsNullValue + +[source,ts,subs=+macros] +---- +type SpecUtilsNullValue = null +---- + + + +[discrete] +[[SpecUtilsPipeSeparatedFlags]] +=== SpecUtilsPipeSeparatedFlags + +[source,ts,subs=+macros] +---- +type SpecUtilsPipeSeparatedFlags<T = unknown> = T | string +---- + + + +[discrete] +[[SpecUtilsStringified]] +=== SpecUtilsStringified + +[source,ts,subs=+macros] +---- +type SpecUtilsStringified<T = unknown> = T | string +---- + + + +[discrete] +[[SpecUtilsVoid]] +=== SpecUtilsVoid + +[source,ts,subs=+macros] +---- + +---- + + + +[discrete] +[[SpecUtilsWithNullValue]] +=== SpecUtilsWithNullValue + +[source,ts,subs=+macros] +---- +type SpecUtilsWithNullValue<T = unknown> = T | <<SpecUtilsNullValue>> +---- + + + +[discrete] +[[SpecUtilsAdditionalProperties]] +=== SpecUtilsAdditionalProperties + +[source,ts,subs=+macros] +---- +interface SpecUtilsAdditionalProperties<TKey = unknown, TValue = unknown> {} +---- + + + +[discrete] +[[SpecUtilsAdditionalProperty]] +=== SpecUtilsAdditionalProperty + +[source,ts,subs=+macros] +---- +interface SpecUtilsAdditionalProperty<TKey = unknown, TValue = unknown> {} +---- + + + +[discrete] +[[SpecUtilsCommonQueryParameters]] +=== SpecUtilsCommonQueryParameters + +[source,ts,subs=+macros] +---- +interface SpecUtilsCommonQueryParameters { + pass:[/**] @property error_trace When set to `true` Elasticsearch will include the full stack trace of errors when they occur. */ + error_trace?: boolean + pass:[/**] @property filter_path Comma-separated list of filters in dot notation which reduce the response returned by Elasticsearch. */ + filter_path?: string | string[] + pass:[/**] @property human When set to `true` will return statistics in a format suitable for humans. 
For example `"exists_time": "1h"` for humans and `"eixsts_time_in_millis": 3600000` for computers. When disabled the human readable values will be omitted. This makes sense for responses being consumed only by machines. */ + human?: boolean + pass:[/**] @property pretty If set to `true` the returned JSON will be "pretty-formatted". Only use this option for debugging only. */ + pretty?: boolean +} +---- + + + +[discrete] +[[SpecUtilsCommonCatQueryParameters]] +=== SpecUtilsCommonCatQueryParameters + +[source,ts,subs=+macros] +---- +interface SpecUtilsCommonCatQueryParameters { + pass:[/**] @property format Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. */ + format?: string + pass:[/**] @property h List of columns to appear in the response. Supports simple wildcards. */ + h?: <<Names>> + pass:[/**] @property help When set to `true` will output available columns. This option can't be combined with any other query string option. */ + help?: boolean + pass:[/**] @property master_timeout Period to wait for a connection to the master node. */ + master_timeout?: <<Duration>> + pass:[/**] @property s List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + s?: <<Names>> + pass:[/**] @property v When set to `true` will enable verbose output. */ + v?: boolean +} +---- + + + +[discrete] +[[SpecUtilsOverloadOf]] +=== SpecUtilsOverloadOf + +[source,ts,subs=+macros] +---- +interface SpecUtilsOverloadOf<TDefinition = unknown> {} +---- + + + +[discrete] +[[AggregationsAdjacencyMatrixAggregate]] +=== AggregationsAdjacencyMatrixAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsAdjacencyMatrixAggregate extends <<AggregationsMultiBucketAggregateBase>><<<AggregationsAdjacencyMatrixBucket>>> {} +---- + + + +[discrete] +[[AggregationsAdjacencyMatrixAggregation]] +=== AggregationsAdjacencyMatrixAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsAdjacencyMatrixAggregation extends <<AggregationsBucketAggregationBase>> { + pass:[/**] @property filters Filters used to create buckets. At least one filter is required. */ + filters?: Record<string, <<QueryDslQueryContainer>>> + pass:[/**] @property separator Separator used to concatenate filter names. Defaults to &. 
*/ + separator?: string +} +---- + + + +[discrete] +[[AggregationsAdjacencyMatrixBucket]] +=== AggregationsAdjacencyMatrixBucket + +[source,ts,subs=+macros] +---- +interface AggregationsAdjacencyMatrixBucketKeys extends <<AggregationsMultiBucketBase>> { + key: string +} +type AggregationsAdjacencyMatrixBucket = AggregationsAdjacencyMatrixBucketKeys + & { [property: string]: <<AggregationsAggregate>> | string | <<long>> } +---- + + + +[discrete] +[[AggregationsAggregate]] +=== AggregationsAggregate + +[source,ts,subs=+macros] +---- +type AggregationsAggregate = <<AggregationsCardinalityAggregate>> | <<AggregationsHdrPercentilesAggregate>> | <<AggregationsHdrPercentileRanksAggregate>> | <<AggregationsTDigestPercentilesAggregate>> | <<AggregationsTDigestPercentileRanksAggregate>> | <<AggregationsPercentilesBucketAggregate>> | <<AggregationsMedianAbsoluteDeviationAggregate>> | <<AggregationsMinAggregate>> | <<AggregationsMaxAggregate>> | <<AggregationsSumAggregate>> | <<AggregationsAvgAggregate>> | <<AggregationsWeightedAvgAggregate>> | <<AggregationsValueCountAggregate>> | <<AggregationsSimpleValueAggregate>> | <<AggregationsDerivativeAggregate>> | <<AggregationsBucketMetricValueAggregate>> | <<AggregationsStatsAggregate>> | <<AggregationsStatsBucketAggregate>> | <<AggregationsExtendedStatsAggregate>> | <<AggregationsExtendedStatsBucketAggregate>> | <<AggregationsGeoBoundsAggregate>> | <<AggregationsGeoCentroidAggregate>> | <<AggregationsHistogramAggregate>> | <<AggregationsDateHistogramAggregate>> | <<AggregationsAutoDateHistogramAggregate>> | <<AggregationsVariableWidthHistogramAggregate>> | <<AggregationsStringTermsAggregate>> | <<AggregationsLongTermsAggregate>> | <<AggregationsDoubleTermsAggregate>> | <<AggregationsUnmappedTermsAggregate>> | <<AggregationsLongRareTermsAggregate>> | <<AggregationsStringRareTermsAggregate>> | <<AggregationsUnmappedRareTermsAggregate>> | <<AggregationsMultiTermsAggregate>> | <<AggregationsMissingAggregate>> | <<AggregationsNestedAggregate>> | <<AggregationsReverseNestedAggregate>> | <<AggregationsGlobalAggregate>> | <<AggregationsFilterAggregate>> | <<AggregationsChildrenAggregate>> | <<AggregationsParentAggregate>> | <<AggregationsSamplerAggregate>> | <<AggregationsUnmappedSamplerAggregate>> | <<AggregationsGeoHashGridAggregate>> | <<AggregationsGeoTileGridAggregate>> | <<AggregationsGeoHexGridAggregate>> | <<AggregationsRangeAggregate>> | <<AggregationsDateRangeAggregate>> | <<AggregationsGeoDistanceAggregate>> | <<AggregationsIpRangeAggregate>> | <<AggregationsIpPrefixAggregate>> | <<AggregationsFiltersAggregate>> | <<AggregationsAdjacencyMatrixAggregate>> | <<AggregationsSignificantLongTermsAggregate>> | <<AggregationsSignificantStringTermsAggregate>> | <<AggregationsUnmappedSignificantTermsAggregate>> | <<AggregationsCompositeAggregate>> | <<AggregationsFrequentItemSetsAggregate>> | <<AggregationsTimeSeriesAggregate>> | <<AggregationsScriptedMetricAggregate>> | <<AggregationsTopHitsAggregate>> | <<AggregationsInferenceAggregate>> | <<AggregationsStringStatsAggregate>> | <<AggregationsBoxPlotAggregate>> | <<AggregationsTopMetricsAggregate>> | <<AggregationsTTestAggregate>> | <<AggregationsRateAggregate>> | <<AggregationsCumulativeCardinalityAggregate>> | <<AggregationsMatrixStatsAggregate>> | <<AggregationsGeoLineAggregate>> +---- + + + +[discrete] +[[AggregationsAggregateBase]] +=== AggregationsAggregateBase + +[source,ts,subs=+macros] +---- +interface AggregationsAggregateBase { + meta?: <<Metadata>> +} +---- + + + +[discrete] 
+[[AggregationsAggregateOrder]] +=== AggregationsAggregateOrder + +[source,ts,subs=+macros] +---- +type AggregationsAggregateOrder = Partial<Record<<<Field>>, <<SortOrder>>>> | Partial<Record<<<Field>>, <<SortOrder>>>>[] +---- + + + +[discrete] +[[AggregationsAggregation]] +=== AggregationsAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsAggregation {} +---- + + + +[discrete] +[[AggregationsAggregationContainer]] +=== AggregationsAggregationContainer + +[source,ts,subs=+macros] +---- +interface AggregationsAggregationContainer { + pass:[/**] @property aggregations Sub-aggregations for this aggregation. Only applies to bucket aggregations. */ + aggregations?: Record<string, <<AggregationsAggregationContainer>>> + pass:[/**] @property aggs Sub-aggregations for this aggregation. Only applies to bucket aggregations. */ + aggs?: Record<string, <<AggregationsAggregationContainer>>> + meta?: <<Metadata>> + pass:[/**] @property adjacency_matrix A bucket aggregation returning a form of adjacency matrix. The request provides a collection of named filter expressions, similar to the `filters` aggregation. Each bucket in the response represents a non-empty cell in the matrix of intersecting filters. */ + adjacency_matrix?: <<AggregationsAdjacencyMatrixAggregation>> + pass:[/**] @property auto_date_histogram A multi-bucket aggregation similar to the date histogram, except instead of providing an interval to use as the width of each bucket, a target number of buckets is provided. */ + auto_date_histogram?: <<AggregationsAutoDateHistogramAggregation>> + pass:[/**] @property avg A single-value metrics aggregation that computes the average of numeric values that are extracted from the aggregated documents. */ + avg?: <<AggregationsAverageAggregation>> + pass:[/**] @property avg_bucket A sibling pipeline aggregation which calculates the mean value of a specified metric in a sibling aggregation. The specified metric must be numeric and the sibling aggregation must be a multi-bucket aggregation. */ + avg_bucket?: <<AggregationsAverageBucketAggregation>> + pass:[/**] @property boxplot A metrics aggregation that computes a box plot of numeric values extracted from the aggregated documents. */ + boxplot?: <<AggregationsBoxplotAggregation>> + pass:[/**] @property bucket_script A parent pipeline aggregation which runs a script which can perform per bucket computations on metrics in the parent multi-bucket aggregation. */ + bucket_script?: <<AggregationsBucketScriptAggregation>> + pass:[/**] @property bucket_selector A parent pipeline aggregation which runs a script to determine whether the current bucket will be retained in the parent multi-bucket aggregation. */ + bucket_selector?: <<AggregationsBucketSelectorAggregation>> + pass:[/**] @property bucket_sort A parent pipeline aggregation which sorts the buckets of its parent multi-bucket aggregation. */ + bucket_sort?: <<AggregationsBucketSortAggregation>> + pass:[/**] @property bucket_count_ks_test A sibling pipeline aggregation which runs a two sample Kolmogorov–Smirnov test ("K-S test") against a provided distribution and the distribution implied by the documents counts in the configured sibling aggregation. */ + bucket_count_ks_test?: <<AggregationsBucketKsAggregation>> + pass:[/**] @property bucket_correlation A sibling pipeline aggregation which runs a correlation function on the configured sibling multi-bucket aggregation. 
*/ + bucket_correlation?: <<AggregationsBucketCorrelationAggregation>> + pass:[/**] @property cardinality A single-value metrics aggregation that calculates an approximate count of distinct values. */ + cardinality?: <<AggregationsCardinalityAggregation>> + pass:[/**] @property categorize_text A multi-bucket aggregation that groups semi-structured text into buckets. */ + categorize_text?: <<AggregationsCategorizeTextAggregation>> + pass:[/**] @property children A single bucket aggregation that selects child documents that have the specified type, as defined in a `join` field. */ + children?: <<AggregationsChildrenAggregation>> + pass:[/**] @property composite A multi-bucket aggregation that creates composite buckets from different sources. Unlike the other multi-bucket aggregations, you can use the `composite` aggregation to paginate *all* buckets from a multi-level aggregation efficiently. */ + composite?: <<AggregationsCompositeAggregation>> + pass:[/**] @property cumulative_cardinality A parent pipeline aggregation which calculates the cumulative cardinality in a parent `histogram` or `date_histogram` aggregation. */ + cumulative_cardinality?: <<AggregationsCumulativeCardinalityAggregation>> + pass:[/**] @property cumulative_sum A parent pipeline aggregation which calculates the cumulative sum of a specified metric in a parent `histogram` or `date_histogram` aggregation. */ + cumulative_sum?: <<AggregationsCumulativeSumAggregation>> + pass:[/**] @property date_histogram A multi-bucket values source based aggregation that can be applied on date values or date range values extracted from the documents. It dynamically builds fixed size (interval) buckets over the values. */ + date_histogram?: <<AggregationsDateHistogramAggregation>> + pass:[/**] @property date_range A multi-bucket value source based aggregation that enables the user to define a set of date ranges - each representing a bucket. */ + date_range?: <<AggregationsDateRangeAggregation>> + pass:[/**] @property derivative A parent pipeline aggregation which calculates the derivative of a specified metric in a parent `histogram` or `date_histogram` aggregation. */ + derivative?: <<AggregationsDerivativeAggregation>> + pass:[/**] @property diversified_sampler A filtering aggregation used to limit any sub aggregations' processing to a sample of the top-scoring documents. Similar to the `sampler` aggregation, but adds the ability to limit the number of matches that share a common value. */ + diversified_sampler?: <<AggregationsDiversifiedSamplerAggregation>> + pass:[/**] @property extended_stats A multi-value metrics aggregation that computes stats over numeric values extracted from the aggregated documents. */ + extended_stats?: <<AggregationsExtendedStatsAggregation>> + pass:[/**] @property extended_stats_bucket A sibling pipeline aggregation which calculates a variety of stats across all bucket of a specified metric in a sibling aggregation. */ + extended_stats_bucket?: <<AggregationsExtendedStatsBucketAggregation>> + pass:[/**] @property frequent_item_sets A bucket aggregation which finds frequent item sets, a form of association rules mining that identifies items that often occur together. */ + frequent_item_sets?: <<AggregationsFrequentItemSetsAggregation>> + pass:[/**] @property filter A single bucket aggregation that narrows the set of documents to those that match a query. 
*/ + filter?: <<QueryDslQueryContainer>> + pass:[/**] @property filters A multi-bucket aggregation where each bucket contains the documents that match a query. */ + filters?: <<AggregationsFiltersAggregation>> + pass:[/**] @property geo_bounds A metric aggregation that computes the geographic bounding box containing all values for a Geopoint or Geoshape field. */ + geo_bounds?: <<AggregationsGeoBoundsAggregation>> + pass:[/**] @property geo_centroid A metric aggregation that computes the weighted centroid from all coordinate values for geo fields. */ + geo_centroid?: <<AggregationsGeoCentroidAggregation>> + pass:[/**] @property geo_distance A multi-bucket aggregation that works on `geo_point` fields. Evaluates the distance of each document value from an origin point and determines the buckets it belongs to, based on ranges defined in the request. */ + geo_distance?: <<AggregationsGeoDistanceAggregation>> + pass:[/**] @property geohash_grid A multi-bucket aggregation that groups `geo_point` and `geo_shape` values into buckets that represent a grid. Each cell is labeled using a geohash which is of user-definable precision. */ + geohash_grid?: <<AggregationsGeoHashGridAggregation>> + pass:[/**] @property geo_line Aggregates all `geo_point` values within a bucket into a `LineString` ordered by the chosen sort field. */ + geo_line?: <<AggregationsGeoLineAggregation>> + pass:[/**] @property geotile_grid A multi-bucket aggregation that groups `geo_point` and `geo_shape` values into buckets that represent a grid. Each cell corresponds to a map tile as used by many online map sites. */ + geotile_grid?: <<AggregationsGeoTileGridAggregation>> + pass:[/**] @property geohex_grid A multi-bucket aggregation that groups `geo_point` and `geo_shape` values into buckets that represent a grid. Each cell corresponds to a H3 cell index and is labeled using the H3Index representation. */ + geohex_grid?: <<AggregationsGeohexGridAggregation>> + pass:[/**] @property global Defines a single bucket of all the documents within the search execution context. This context is defined by the indices and the document types you’re searching on, but is not influenced by the search query itself. */ + global?: <<AggregationsGlobalAggregation>> + pass:[/**] @property histogram A multi-bucket values source based aggregation that can be applied on numeric values or numeric range values extracted from the documents. It dynamically builds fixed size (interval) buckets over the values. */ + histogram?: <<AggregationsHistogramAggregation>> + pass:[/**] @property ip_range A multi-bucket value source based aggregation that enables the user to define a set of IP ranges - each representing a bucket. */ + ip_range?: <<AggregationsIpRangeAggregation>> + pass:[/**] @property ip_prefix A bucket aggregation that groups documents based on the network or sub-network of an IP address. */ + ip_prefix?: <<AggregationsIpPrefixAggregation>> + pass:[/**] @property inference A parent pipeline aggregation which loads a pre-trained model and performs inference on the collated result fields from the parent bucket aggregation. */ + inference?: <<AggregationsInferenceAggregation>> + line?: <<AggregationsGeoLineAggregation>> + pass:[/**] @property matrix_stats A numeric aggregation that computes the following statistics over a set of document fields: `count`, `mean`, `variance`, `skewness`, `kurtosis`, `covariance`, and `correlation`.
*/ + matrix_stats?: <<AggregationsMatrixStatsAggregation>> + pass:[/**] @property max A single-value metrics aggregation that returns the maximum value among the numeric values extracted from the aggregated documents. */ + max?: <<AggregationsMaxAggregation>> + pass:[/**] @property max_bucket A sibling pipeline aggregation which identifies the bucket(s) with the maximum value of a specified metric in a sibling aggregation and outputs both the value and the key(s) of the bucket(s). */ + max_bucket?: <<AggregationsMaxBucketAggregation>> + pass:[/**] @property median_absolute_deviation A single-value aggregation that approximates the median absolute deviation of its search results. */ + median_absolute_deviation?: <<AggregationsMedianAbsoluteDeviationAggregation>> + pass:[/**] @property min A single-value metrics aggregation that returns the minimum value among numeric values extracted from the aggregated documents. */ + min?: <<AggregationsMinAggregation>> + pass:[/**] @property min_bucket A sibling pipeline aggregation which identifies the bucket(s) with the minimum value of a specified metric in a sibling aggregation and outputs both the value and the key(s) of the bucket(s). */ + min_bucket?: <<AggregationsMinBucketAggregation>> + pass:[/**] @property missing A field data based single bucket aggregation, that creates a bucket of all documents in the current document set context that are missing a field value (effectively, missing a field or having the configured NULL value set). */ + missing?: <<AggregationsMissingAggregation>> + moving_avg?: <<AggregationsMovingAverageAggregation>> + pass:[/**] @property moving_percentiles Given an ordered series of percentiles, "slides" a window across those percentiles and computes cumulative percentiles. */ + moving_percentiles?: <<AggregationsMovingPercentilesAggregation>> + pass:[/**] @property moving_fn Given an ordered series of data, "slides" a window across the data and runs a custom script on each window of data. For convenience, a number of common functions are predefined such as `min`, `max`, and moving averages. */ + moving_fn?: <<AggregationsMovingFunctionAggregation>> + pass:[/**] @property multi_terms A multi-bucket value source based aggregation where buckets are dynamically built - one per unique set of values. */ + multi_terms?: <<AggregationsMultiTermsAggregation>> + pass:[/**] @property nested A special single bucket aggregation that enables aggregating nested documents. */ + nested?: <<AggregationsNestedAggregation>> + pass:[/**] @property normalize A parent pipeline aggregation which calculates the specific normalized/rescaled value for a specific bucket value. */ + normalize?: <<AggregationsNormalizeAggregation>> + pass:[/**] @property parent A special single bucket aggregation that selects parent documents that have the specified type, as defined in a `join` field. */ + parent?: <<AggregationsParentAggregation>> + pass:[/**] @property percentile_ranks A multi-value metrics aggregation that calculates one or more percentile ranks over numeric values extracted from the aggregated documents. */ + percentile_ranks?: <<AggregationsPercentileRanksAggregation>> + pass:[/**] @property percentiles A multi-value metrics aggregation that calculates one or more percentiles over numeric values extracted from the aggregated documents. 
*/ + percentiles?: <<AggregationsPercentilesAggregation>> + pass:[/**] @property percentiles_bucket A sibling pipeline aggregation which calculates percentiles across all buckets of a specified metric in a sibling aggregation. */ + percentiles_bucket?: <<AggregationsPercentilesBucketAggregation>> + pass:[/**] @property range A multi-bucket value source based aggregation that enables the user to define a set of ranges - each representing a bucket. */ + range?: <<AggregationsRangeAggregation>> + pass:[/**] @property rare_terms A multi-bucket value source based aggregation which finds "rare" terms — terms that are at the long-tail of the distribution and are not frequent. */ + rare_terms?: <<AggregationsRareTermsAggregation>> + pass:[/**] @property rate Calculates a rate of documents or a field in each bucket. Can only be used inside a `date_histogram` or `composite` aggregation. */ + rate?: <<AggregationsRateAggregation>> + pass:[/**] @property reverse_nested A special single bucket aggregation that enables aggregating on parent documents from nested documents. Should only be defined inside a `nested` aggregation. */ + reverse_nested?: <<AggregationsReverseNestedAggregation>> + pass:[/**] @property random_sampler A single bucket aggregation that randomly includes documents in the aggregated results. Sampling provides significant speed improvement at the cost of accuracy. */ + random_sampler?: <<AggregationsRandomSamplerAggregation>> + pass:[/**] @property sampler A filtering aggregation used to limit any sub aggregations' processing to a sample of the top-scoring documents. */ + sampler?: <<AggregationsSamplerAggregation>> + pass:[/**] @property scripted_metric A metric aggregation that uses scripts to provide a metric output. */ + scripted_metric?: <<AggregationsScriptedMetricAggregation>> + pass:[/**] @property serial_diff An aggregation that subtracts values in a time series from themselves at different time lags or periods. */ + serial_diff?: <<AggregationsSerialDifferencingAggregation>> + pass:[/**] @property significant_terms Returns interesting or unusual occurrences of terms in a set. */ + significant_terms?: <<AggregationsSignificantTermsAggregation>> + pass:[/**] @property significant_text Returns interesting or unusual occurrences of free-text terms in a set. */ + significant_text?: <<AggregationsSignificantTextAggregation>> + pass:[/**] @property stats A multi-value metrics aggregation that computes stats over numeric values extracted from the aggregated documents. */ + stats?: <<AggregationsStatsAggregation>> + pass:[/**] @property stats_bucket A sibling pipeline aggregation which calculates a variety of stats across all buckets of a specified metric in a sibling aggregation. */ + stats_bucket?: <<AggregationsStatsBucketAggregation>> + pass:[/**] @property string_stats A multi-value metrics aggregation that computes statistics over string values extracted from the aggregated documents. */ + string_stats?: <<AggregationsStringStatsAggregation>> + pass:[/**] @property sum A single-value metrics aggregation that sums numeric values that are extracted from the aggregated documents. */ + sum?: <<AggregationsSumAggregation>> + pass:[/**] @property sum_bucket A sibling pipeline aggregation which calculates the sum of a specified metric across all buckets in a sibling aggregation. */ + sum_bucket?: <<AggregationsSumBucketAggregation>> + pass:[/**] @property terms A multi-bucket value source based aggregation where buckets are dynamically built - one per unique value.
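For example, a search request body might include `aggregations: { genres: { terms: { field: 'genre' } } }` to build one bucket per unique `genre` value (illustrative field and aggregation names).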
*/ + terms?: <<AggregationsTermsAggregation>> + pass:[/**] @property time_series The time series aggregation queries data created using a time series index. This is typically data such as metrics or other data streams with a time component, and requires creating an index using the time series mode. */ + time_series?: <<AggregationsTimeSeriesAggregation>> + pass:[/**] @property top_hits A metric aggregation that returns the top matching documents per bucket. */ + top_hits?: <<AggregationsTopHitsAggregation>> + pass:[/**] @property t_test A metrics aggregation that performs a statistical hypothesis test in which the test statistic follows a Student’s t-distribution under the null hypothesis on numeric values extracted from the aggregated documents. */ + t_test?: <<AggregationsTTestAggregation>> + pass:[/**] @property top_metrics A metric aggregation that selects metrics from the document with the largest or smallest sort value. */ + top_metrics?: <<AggregationsTopMetricsAggregation>> + pass:[/**] @property value_count A single-value metrics aggregation that counts the number of values that are extracted from the aggregated documents. */ + value_count?: <<AggregationsValueCountAggregation>> + pass:[/**] @property weighted_avg A single-value metrics aggregation that computes the weighted average of numeric values that are extracted from the aggregated documents. */ + weighted_avg?: <<AggregationsWeightedAverageAggregation>> + pass:[/**] @property variable_width_histogram A multi-bucket aggregation similar to the histogram, except instead of providing an interval to use as the width of each bucket, a target number of buckets is provided. */ + variable_width_histogram?: <<AggregationsVariableWidthHistogramAggregation>> +} +---- + + + +[discrete] +[[AggregationsAggregationRange]] +=== AggregationsAggregationRange + +[source,ts,subs=+macros] +---- +interface AggregationsAggregationRange { + pass:[/**] @property from Start of the range (inclusive). */ + from?: <<double>> | null + pass:[/**] @property key Custom key to return the range with. */ + key?: string + pass:[/**] @property to End of the range (exclusive). */ + to?: <<double>> | null +} +---- + + + +[discrete] +[[AggregationsArrayPercentilesItem]] +=== AggregationsArrayPercentilesItem + +[source,ts,subs=+macros] +---- +interface AggregationsArrayPercentilesItem { + key: string + value: <<double>> | null + value_as_string?: string +} +---- + + + +[discrete] +[[AggregationsAutoDateHistogramAggregate]] +=== AggregationsAutoDateHistogramAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsAutoDateHistogramAggregate extends <<AggregationsMultiBucketAggregateBase>><<<AggregationsDateHistogramBucket>>> { + interval: <<DurationLarge>> +} +---- + + + +[discrete] +[[AggregationsAutoDateHistogramAggregation]] +=== AggregationsAutoDateHistogramAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsAutoDateHistogramAggregation extends <<AggregationsBucketAggregationBase>> { + pass:[/**] @property buckets The target number of buckets. */ + buckets?: <<integer>> + pass:[/**] @property field The field on which to run the aggregation. */ + field?: <<Field>> + pass:[/**] @property format The date format used to format `key_as_string` in the response. If no `format` is specified, the first date format specified in the field mapping is used. */ + format?: string + pass:[/**] @property minimum_interval The minimum rounding interval. 
This can make the collection process more efficient, as the aggregation will not attempt to round at any interval lower than `minimum_interval`. */ + minimum_interval?: <<AggregationsMinimumInterval>> + pass:[/**] @property missing The value to apply to documents that do not have a value. By default, documents without a value are ignored. */ + missing?: <<DateTime>> + pass:[/**] @property offset Time zone specified as a ISO 8601 UTC offset. */ + offset?: string + params?: Record<string, any> + script?: <<Script>> | string + pass:[/**] @property time_zone Time zone ID. */ + time_zone?: <<TimeZone>> +} +---- + + + +[discrete] +[[AggregationsAverageAggregation]] +=== AggregationsAverageAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsAverageAggregation extends <<AggregationsFormatMetricAggregationBase>> {} +---- + + + +[discrete] +[[AggregationsAverageBucketAggregation]] +=== AggregationsAverageBucketAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsAverageBucketAggregation extends <<AggregationsPipelineAggregationBase>> {} +---- + + + +[discrete] +[[AggregationsAvgAggregate]] +=== AggregationsAvgAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsAvgAggregate extends <<AggregationsSingleMetricAggregateBase>> {} +---- + + + +[discrete] +[[AggregationsBoxPlotAggregate]] +=== AggregationsBoxPlotAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsBoxPlotAggregate extends <<AggregationsAggregateBase>> { + min: <<double>> + max: <<double>> + q1: <<double>> + q2: <<double>> + q3: <<double>> + lower: <<double>> + upper: <<double>> + min_as_string?: string + max_as_string?: string + q1_as_string?: string + q2_as_string?: string + q3_as_string?: string + lower_as_string?: string + upper_as_string?: string +} +---- + + + +[discrete] +[[AggregationsBoxplotAggregation]] +=== AggregationsBoxplotAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsBoxplotAggregation extends <<AggregationsMetricAggregationBase>> { + pass:[/**] @property compression Limits the maximum number of nodes used by the underlying TDigest algorithm to `20 * compression`, enabling control of memory usage and approximation error. */ + compression?: <<double>> +} +---- + + + +[discrete] +[[AggregationsBucketAggregationBase]] +=== AggregationsBucketAggregationBase + +[source,ts,subs=+macros] +---- +interface AggregationsBucketAggregationBase {} +---- + + + +[discrete] +[[AggregationsBucketCorrelationAggregation]] +=== AggregationsBucketCorrelationAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsBucketCorrelationAggregation extends <<AggregationsBucketPathAggregation>> { + pass:[/**] @property function The correlation function to execute. */ + function: <<AggregationsBucketCorrelationFunction>> +} +---- + + + +[discrete] +[[AggregationsBucketCorrelationFunction]] +=== AggregationsBucketCorrelationFunction + +[source,ts,subs=+macros] +---- +interface AggregationsBucketCorrelationFunction { + pass:[/**] @property count_correlation The configuration to calculate a count correlation. This function is designed for determining the correlation of a term value and a given metric. 
*/ + count_correlation: <<AggregationsBucketCorrelationFunctionCountCorrelation>> +} +---- + + + +[discrete] +[[AggregationsBucketCorrelationFunctionCountCorrelation]] +=== AggregationsBucketCorrelationFunctionCountCorrelation + +[source,ts,subs=+macros] +---- +interface AggregationsBucketCorrelationFunctionCountCorrelation { + pass:[/**] @property indicator The indicator with which to correlate the configured `bucket_path` values. */ + indicator: <<AggregationsBucketCorrelationFunctionCountCorrelationIndicator>> +} +---- + + + +[discrete] +[[AggregationsBucketCorrelationFunctionCountCorrelationIndicator]] +=== AggregationsBucketCorrelationFunctionCountCorrelationIndicator + +[source,ts,subs=+macros] +---- +interface AggregationsBucketCorrelationFunctionCountCorrelationIndicator { + pass:[/**] @property doc_count The total number of documents that initially created the expectations. It’s required to be greater than or equal to the sum of all values in the buckets_path as this is the originating superset of data to which the term values are correlated. */ + doc_count: <<integer>> + pass:[/**] @property expectations An array of numbers with which to correlate the configured `bucket_path` values. The length of this value must always equal the number of buckets returned by the `bucket_path`. */ + expectations: <<double>>[] + pass:[/**] @property fractions An array of fractions to use when averaging and calculating variance. This should be used if the pre-calculated data and the buckets_path have known gaps. The length of fractions, if provided, must equal expectations. */ + fractions?: <<double>>[] +} +---- + + + +[discrete] +[[AggregationsBucketKsAggregation]] +=== AggregationsBucketKsAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsBucketKsAggregation extends <<AggregationsBucketPathAggregation>> { + pass:[/**] @property alternative A list of string values indicating which K-S test alternative to calculate. The valid values are: "greater", "less", "two_sided". This parameter is key for determining the K-S statistic used when calculating the K-S test. Default value is all possible alternative hypotheses. */ + alternative?: string[] + pass:[/**] @property fractions A list of doubles indicating the distribution of the samples with which to compare to the `buckets_path` results. In typical usage this is the overall proportion of documents in each bucket, which is compared with the actual document proportions in each bucket from the sibling aggregation counts. The default is to assume that overall documents are uniformly distributed on these buckets, which they would be if one used equal percentiles of a metric to define the bucket end points. */ + fractions?: <<double>>[] + pass:[/**] @property sampling_method Indicates the sampling methodology when calculating the K-S test. Note, this is sampling of the returned values. This determines the cumulative distribution function (CDF) points used comparing the two samples. Default is `upper_tail`, which emphasizes the upper end of the CDF points. Valid options are: `upper_tail`, `uniform`, and `lower_tail`. 
*/ + sampling_method?: string +} +---- + + + +[discrete] +[[AggregationsBucketMetricValueAggregate]] +=== AggregationsBucketMetricValueAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsBucketMetricValueAggregate extends <<AggregationsSingleMetricAggregateBase>> { + keys: string[] +} +---- + + + +[discrete] +[[AggregationsBucketPathAggregation]] +=== AggregationsBucketPathAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsBucketPathAggregation { + pass:[/**] @property buckets_path Path to the buckets that contain one set of values to correlate. */ + buckets_path?: <<AggregationsBucketsPath>> +} +---- + + + +[discrete] +[[AggregationsBucketScriptAggregation]] +=== AggregationsBucketScriptAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsBucketScriptAggregation extends <<AggregationsPipelineAggregationBase>> { + pass:[/**] @property script The script to run for this aggregation. */ + script?: <<Script>> | string +} +---- + + + +[discrete] +[[AggregationsBucketSelectorAggregation]] +=== AggregationsBucketSelectorAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsBucketSelectorAggregation extends <<AggregationsPipelineAggregationBase>> { + pass:[/**] @property script The script to run for this aggregation. */ + script?: <<Script>> | string +} +---- + + + +[discrete] +[[AggregationsBucketSortAggregation]] +=== AggregationsBucketSortAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsBucketSortAggregation { + pass:[/**] @property from Buckets in positions prior to `from` will be truncated. */ + from?: <<integer>> + pass:[/**] @property gap_policy The policy to apply when gaps are found in the data. */ + gap_policy?: <<AggregationsGapPolicy>> + pass:[/**] @property size The number of buckets to return. Defaults to all buckets of the parent aggregation. */ + size?: <<integer>> + pass:[/**] @property sort The list of fields to sort on. */ + sort?: <<Sort>> +} +---- + + + +[discrete] +[[AggregationsBuckets]] +=== AggregationsBuckets + +[source,ts,subs=+macros] +---- +type AggregationsBuckets<TBucket = unknown> = Record<string, TBucket> | TBucket[] +---- + + + +[discrete] +[[AggregationsBucketsPath]] +=== AggregationsBucketsPath + +[source,ts,subs=+macros] +---- +type AggregationsBucketsPath = string | string[] | Record<string, string> +---- + + + +[discrete] +[[AggregationsCalendarInterval]] +=== AggregationsCalendarInterval + +[source,ts,subs=+macros] +---- +type AggregationsCalendarInterval = 'second' | '1s' | 'minute' | '1m' | 'hour' | '1h' | 'day' | '1d' | 'week' | '1w' | 'month' | '1M' | 'quarter' | '1q' | 'year' | '1y' +---- + + + +[discrete] +[[AggregationsCardinalityAggregate]] +=== AggregationsCardinalityAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsCardinalityAggregate extends <<AggregationsAggregateBase>> { + value: <<long>> +} +---- + + + +[discrete] +[[AggregationsCardinalityAggregation]] +=== AggregationsCardinalityAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsCardinalityAggregation extends <<AggregationsMetricAggregationBase>> { + pass:[/**] @property precision_threshold A unique count below which counts are expected to be close to accurate. This allows to trade memory for accuracy. */ + precision_threshold?: <<integer>> + rehash?: boolean + pass:[/**] @property execution_hint Mechanism by which cardinality aggregations is run. 
*/ + execution_hint?: <<AggregationsCardinalityExecutionMode>> +} +---- + + + +[discrete] +[[AggregationsCardinalityExecutionMode]] +=== AggregationsCardinalityExecutionMode + +[source,ts,subs=+macros] +---- +type AggregationsCardinalityExecutionMode = 'global_ordinals' | 'segment_ordinals' | 'direct' | 'save_memory_heuristic' | 'save_time_heuristic' +---- + + + +[discrete] +[[AggregationsCategorizeTextAggregation]] +=== AggregationsCategorizeTextAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsCategorizeTextAggregation { + pass:[/**] @property field The semi-structured text field to categorize. */ + field: <<Field>> + pass:[/**] @property max_unique_tokens The maximum number of unique tokens at any position up to max_matched_tokens. Must be larger than 1. Smaller values use less memory and create fewer categories. Larger values will use more memory and create narrower categories. Max allowed value is 100. */ + max_unique_tokens?: <<integer>> + pass:[/**] @property max_matched_tokens The maximum number of token positions to match on before attempting to merge categories. Larger values will use more memory and create narrower categories. Max allowed value is 100. */ + max_matched_tokens?: <<integer>> + pass:[/**] @property similarity_threshold The minimum percentage of tokens that must match for text to be added to the category bucket. Must be between 1 and 100. The larger the value the narrower the categories. Larger values will increase memory usage and create narrower categories. */ + similarity_threshold?: <<integer>> + pass:[/**] @property categorization_filters This property expects an array of regular expressions. The expressions are used to filter out matching sequences from the categorization field values. You can use this functionality to fine tune the categorization by excluding sequences from consideration when categories are defined. For example, you can exclude SQL statements that appear in your log files. This property cannot be used at the same time as categorization_analyzer. If you only want to define simple regular expression filters that are applied prior to tokenization, setting this property is the easiest method. If you also want to customize the tokenizer or post-tokenization filtering, use the categorization_analyzer property instead and include the filters as pattern_replace character filters. */ + categorization_filters?: string[] + pass:[/**] @property categorization_analyzer The categorization analyzer specifies how the text is analyzed and tokenized before being categorized. The syntax is very similar to that used to define the analyzer in the [Analyze endpoint](https://www.elastic.co/guide/en/elasticsearch/reference/8.0/indices-analyze.html). This property cannot be used at the same time as categorization_filters. */ + categorization_analyzer?: <<AggregationsCategorizeTextAnalyzer>> + pass:[/**] @property shard_size The number of categorization buckets to return from each shard before merging all the results. */ + shard_size?: <<integer>> + pass:[/**] @property size The number of buckets to return. */ + size?: <<integer>> + pass:[/**] @property min_doc_count The minimum number of documents in a bucket to be returned to the results. */ + min_doc_count?: <<integer>> + pass:[/**] @property shard_min_doc_count The minimum number of documents in a bucket to be returned from the shard before merging. 
*/ + shard_min_doc_count?: <<integer>> +} +---- + + + +[discrete] +[[AggregationsCategorizeTextAnalyzer]] +=== AggregationsCategorizeTextAnalyzer + +[source,ts,subs=+macros] +---- +type AggregationsCategorizeTextAnalyzer = string | <<AggregationsCustomCategorizeTextAnalyzer>> +---- + + + +[discrete] +[[AggregationsChiSquareHeuristic]] +=== AggregationsChiSquareHeuristic + +[source,ts,subs=+macros] +---- +interface AggregationsChiSquareHeuristic { + pass:[/**] @property background_is_superset Set to `false` if you defined a custom background filter that represents a different set of documents that you want to compare to. */ + background_is_superset: boolean + pass:[/**] @property include_negatives Set to `false` to filter out the terms that appear less often in the subset than in documents outside the subset. */ + include_negatives: boolean +} +---- + + + +[discrete] +[[AggregationsChildrenAggregate]] +=== AggregationsChildrenAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsChildrenAggregateKeys extends <<AggregationsSingleBucketAggregateBase>> {} +type AggregationsChildrenAggregate = AggregationsChildrenAggregateKeys + & { [property: string]: <<AggregationsAggregate>> | <<long>> | <<Metadata>> } +---- + + + +[discrete] +[[AggregationsChildrenAggregation]] +=== AggregationsChildrenAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsChildrenAggregation extends <<AggregationsBucketAggregationBase>> { + pass:[/**] @property type The child type that should be selected. */ + type?: <<RelationName>> +} +---- + + + +[discrete] +[[AggregationsCompositeAggregate]] +=== AggregationsCompositeAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsCompositeAggregate extends <<AggregationsMultiBucketAggregateBase>><<<AggregationsCompositeBucket>>> { + after_key?: <<AggregationsCompositeAggregateKey>> +} +---- + + + +[discrete] +[[AggregationsCompositeAggregateKey]] +=== AggregationsCompositeAggregateKey + +[source,ts,subs=+macros] +---- +type AggregationsCompositeAggregateKey = Record<<<Field>>, <<FieldValue>>> +---- + + + +[discrete] +[[AggregationsCompositeAggregation]] +=== AggregationsCompositeAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsCompositeAggregation extends <<AggregationsBucketAggregationBase>> { + pass:[/**] @property after When paginating, use the `after_key` value returned in the previous response to retrieve the next page. */ + after?: <<AggregationsCompositeAggregateKey>> + pass:[/**] @property size The number of composite buckets that should be returned. */ + size?: <<integer>> + pass:[/**] @property sources The value sources used to build composite buckets. Keys are returned in the order of the `sources` definition. 
*/ + sources?: Record<string, <<AggregationsCompositeAggregationSource>>>[] +} +---- + + + +[discrete] +[[AggregationsCompositeAggregationBase]] +=== AggregationsCompositeAggregationBase + +[source,ts,subs=+macros] +---- +interface AggregationsCompositeAggregationBase { + pass:[/**] @property field Either `field` or `script` must be present */ + field?: <<Field>> + missing_bucket?: boolean + missing_order?: <<AggregationsMissingOrder>> + pass:[/**] @property script Either `field` or `script` must be present */ + script?: <<Script>> | string + value_type?: <<AggregationsValueType>> + order?: <<SortOrder>> +} +---- + + + +[discrete] +[[AggregationsCompositeAggregationSource]] +=== AggregationsCompositeAggregationSource + +[source,ts,subs=+macros] +---- +interface AggregationsCompositeAggregationSource { + pass:[/**] @property terms A terms aggregation. */ + terms?: <<AggregationsCompositeTermsAggregation>> + pass:[/**] @property histogram A histogram aggregation. */ + histogram?: <<AggregationsCompositeHistogramAggregation>> + pass:[/**] @property date_histogram A date histogram aggregation. */ + date_histogram?: <<AggregationsCompositeDateHistogramAggregation>> + pass:[/**] @property geotile_grid A geotile grid aggregation. */ + geotile_grid?: <<AggregationsCompositeGeoTileGridAggregation>> +} +---- + + + +[discrete] +[[AggregationsCompositeBucket]] +=== AggregationsCompositeBucket + +[source,ts,subs=+macros] +---- +interface AggregationsCompositeBucketKeys extends <<AggregationsMultiBucketBase>> { + key: <<AggregationsCompositeAggregateKey>> +} +type AggregationsCompositeBucket = AggregationsCompositeBucketKeys + & { [property: string]: <<AggregationsAggregate>> | <<AggregationsCompositeAggregateKey>> | <<long>> } +---- + + + +[discrete] +[[AggregationsCompositeDateHistogramAggregation]] +=== AggregationsCompositeDateHistogramAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsCompositeDateHistogramAggregation extends <<AggregationsCompositeAggregationBase>> { + format?: string + pass:[/**] @property calendar_interval Either `calendar_interval` or `fixed_interval` must be present */ + calendar_interval?: <<DurationLarge>> + pass:[/**] @property fixed_interval Either `calendar_interval` or `fixed_interval` must be present */ + fixed_interval?: <<DurationLarge>> + offset?: <<Duration>> + time_zone?: <<TimeZone>> +} +---- + + + +[discrete] +[[AggregationsCompositeGeoTileGridAggregation]] +=== AggregationsCompositeGeoTileGridAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsCompositeGeoTileGridAggregation extends <<AggregationsCompositeAggregationBase>> { + precision?: <<integer>> + bounds?: <<GeoBounds>> +} +---- + + + +[discrete] +[[AggregationsCompositeHistogramAggregation]] +=== AggregationsCompositeHistogramAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsCompositeHistogramAggregation extends <<AggregationsCompositeAggregationBase>> { + interval: <<double>> +} +---- + + + +[discrete] +[[AggregationsCompositeTermsAggregation]] +=== AggregationsCompositeTermsAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsCompositeTermsAggregation extends <<AggregationsCompositeAggregationBase>> {} +---- + + + +[discrete] +[[AggregationsCumulativeCardinalityAggregate]] +=== AggregationsCumulativeCardinalityAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsCumulativeCardinalityAggregate extends <<AggregationsAggregateBase>> { + value: <<long>> + value_as_string?: string +} +---- + + + +[discrete] 
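+The `composite` types above pair with the `after_key` returned in an <<AggregationsCompositeAggregate>>: feeding it back as `after` pages through every bucket. The snippet below is an illustrative sketch only; the node address, index, field, and aggregation names are placeholders, and the types are assumed to be importable from the package's `lib/api/types` entry point.
+
+[source,ts]
+----
+import { Client } from '@elastic/elasticsearch'
+import type {
+  AggregationsCompositeAggregate,
+  AggregationsCompositeAggregateKey
+} from '@elastic/elasticsearch/lib/api/types'
+
+const client = new Client({ node: 'http://localhost:9200' }) // placeholder node address
+
+// Collect every unique `category.keyword` value by paging a composite aggregation.
+async function allCategories (): Promise<string[]> {
+  const values: string[] = []
+  let after: AggregationsCompositeAggregateKey | undefined
+
+  while (true) {
+    const response = await client.search({
+      index: 'my-index', // placeholder index name
+      size: 0,
+      aggregations: {
+        categories: {
+          composite: {
+            size: 100,
+            sources: [{ category: { terms: { field: 'category.keyword' } } }],
+            after
+          }
+        }
+      }
+    })
+
+    // Aggregation results are typed as a union of all aggregate shapes,
+    // so narrow the one requested above to a composite aggregate.
+    const agg = response.aggregations?.categories as AggregationsCompositeAggregate
+    const buckets = Array.isArray(agg.buckets) ? agg.buckets : Object.values(agg.buckets)
+    for (const bucket of buckets) {
+      values.push(String(bucket.key.category))
+    }
+
+    if (agg.after_key == null) break // last page: no `after_key` is returned
+    after = agg.after_key
+  }
+
+  return values
+}
+
+allCategories().then(console.log, console.error)
+----
+
+[discrete]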
+[[AggregationsCumulativeCardinalityAggregation]] +=== AggregationsCumulativeCardinalityAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsCumulativeCardinalityAggregation extends <<AggregationsPipelineAggregationBase>> {} +---- + + + +[discrete] +[[AggregationsCumulativeSumAggregation]] +=== AggregationsCumulativeSumAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsCumulativeSumAggregation extends <<AggregationsPipelineAggregationBase>> {} +---- + + + +[discrete] +[[AggregationsCustomCategorizeTextAnalyzer]] +=== AggregationsCustomCategorizeTextAnalyzer + +[source,ts,subs=+macros] +---- +interface AggregationsCustomCategorizeTextAnalyzer { + char_filter?: string[] + tokenizer?: string + filter?: string[] +} +---- + + + +[discrete] +[[AggregationsDateHistogramAggregate]] +=== AggregationsDateHistogramAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsDateHistogramAggregate extends <<AggregationsMultiBucketAggregateBase>><<<AggregationsDateHistogramBucket>>> {} +---- + + + +[discrete] +[[AggregationsDateHistogramAggregation]] +=== AggregationsDateHistogramAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsDateHistogramAggregation extends <<AggregationsBucketAggregationBase>> { + pass:[/**] @property calendar_interval Calendar-aware interval. Can be specified using the unit name, such as `month`, or as a single unit quantity, such as `1M`. */ + calendar_interval?: <<AggregationsCalendarInterval>> + pass:[/**] @property extended_bounds Enables extending the bounds of the histogram beyond the data itself. */ + extended_bounds?: <<AggregationsExtendedBounds>><<<AggregationsFieldDateMath>>> + pass:[/**] @property hard_bounds Limits the histogram to specified bounds. */ + hard_bounds?: <<AggregationsExtendedBounds>><<<AggregationsFieldDateMath>>> + pass:[/**] @property field The date field whose values are use to build a histogram. */ + field?: <<Field>> + pass:[/**] @property fixed_interval Fixed intervals: a fixed number of SI units and never deviate, regardless of where they fall on the calendar. */ + fixed_interval?: <<Duration>> + pass:[/**] @property format The date format used to format `key_as_string` in the response. If no `format` is specified, the first date format specified in the field mapping is used. */ + format?: string + interval?: <<Duration>> + pass:[/**] @property min_doc_count Only returns buckets that have `min_doc_count` number of documents. By default, all buckets between the first bucket that matches documents and the last one are returned. */ + min_doc_count?: <<integer>> + pass:[/**] @property missing The value to apply to documents that do not have a value. By default, documents without a value are ignored. */ + missing?: <<DateTime>> + pass:[/**] @property offset Changes the start value of each bucket by the specified positive (`+`) or negative offset (`-`) duration. */ + offset?: <<Duration>> + pass:[/**] @property order The sort order of the returned buckets. */ + order?: <<AggregationsAggregateOrder>> + params?: Record<string, any> + script?: <<Script>> | string + pass:[/**] @property time_zone Time zone used for bucketing and rounding. Defaults to Coordinated Universal Time (UTC). */ + time_zone?: <<TimeZone>> + pass:[/**] @property keyed Set to `true` to associate a unique string key with each bucket and return the ranges as a hash rather than an array. 
*/ + keyed?: boolean +} +---- + + + +[discrete] +[[AggregationsDateHistogramBucket]] +=== AggregationsDateHistogramBucket + +[source,ts,subs=+macros] +---- +interface AggregationsDateHistogramBucketKeys extends <<AggregationsMultiBucketBase>> { + key_as_string?: string + key: <<EpochTime>><<<UnitMillis>>> +} +type AggregationsDateHistogramBucket = AggregationsDateHistogramBucketKeys + & { [property: string]: <<AggregationsAggregate>> | string | <<EpochTime>><<<UnitMillis>>> | <<long>> } +---- + + + +[discrete] +[[AggregationsDateRangeAggregate]] +=== AggregationsDateRangeAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsDateRangeAggregate extends <<AggregationsRangeAggregate>> {} +---- + + + +[discrete] +[[AggregationsDateRangeAggregation]] +=== AggregationsDateRangeAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsDateRangeAggregation extends <<AggregationsBucketAggregationBase>> { + pass:[/**] @property field The date field whose values are use to build ranges. */ + field?: <<Field>> + pass:[/**] @property format The date format used to format `from` and `to` in the response. */ + format?: string + pass:[/**] @property missing The value to apply to documents that do not have a value. By default, documents without a value are ignored. */ + missing?: <<AggregationsMissing>> + pass:[/**] @property ranges Array of date ranges. */ + ranges?: <<AggregationsDateRangeExpression>>[] + pass:[/**] @property time_zone Time zone used to convert dates from another time zone to UTC. */ + time_zone?: <<TimeZone>> + pass:[/**] @property keyed Set to `true` to associate a unique string key with each bucket and returns the ranges as a hash rather than an array. */ + keyed?: boolean +} +---- + + + +[discrete] +[[AggregationsDateRangeExpression]] +=== AggregationsDateRangeExpression + +[source,ts,subs=+macros] +---- +interface AggregationsDateRangeExpression { + pass:[/**] @property from Start of the range (inclusive). */ + from?: <<AggregationsFieldDateMath>> + pass:[/**] @property key Custom key to return the range with. */ + key?: string + pass:[/**] @property to End of the range (exclusive). */ + to?: <<AggregationsFieldDateMath>> +} +---- + + + +[discrete] +[[AggregationsDerivativeAggregate]] +=== AggregationsDerivativeAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsDerivativeAggregate extends <<AggregationsSingleMetricAggregateBase>> { + normalized_value?: <<double>> + normalized_value_as_string?: string +} +---- + + + +[discrete] +[[AggregationsDerivativeAggregation]] +=== AggregationsDerivativeAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsDerivativeAggregation extends <<AggregationsPipelineAggregationBase>> {} +---- + + + +[discrete] +[[AggregationsDiversifiedSamplerAggregation]] +=== AggregationsDiversifiedSamplerAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsDiversifiedSamplerAggregation extends <<AggregationsBucketAggregationBase>> { + pass:[/**] @property execution_hint The type of value used for de-duplication. */ + execution_hint?: <<AggregationsSamplerAggregationExecutionHint>> + pass:[/**] @property max_docs_per_value Limits how many documents are permitted per choice of de-duplicating value. */ + max_docs_per_value?: <<integer>> + script?: <<Script>> | string + pass:[/**] @property shard_size Limits how many top-scoring documents are collected in the sample processed on each shard. 
*/ + shard_size?: <<integer>> + pass:[/**] @property field The field used to provide values used for de-duplication. */ + field?: <<Field>> +} +---- + + + +[discrete] +[[AggregationsDoubleTermsAggregate]] +=== AggregationsDoubleTermsAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsDoubleTermsAggregate extends <<AggregationsTermsAggregateBase>><<<AggregationsDoubleTermsBucket>>> {} +---- + + + +[discrete] +[[AggregationsDoubleTermsBucket]] +=== AggregationsDoubleTermsBucket + +[source,ts,subs=+macros] +---- +interface AggregationsDoubleTermsBucketKeys extends <<AggregationsTermsBucketBase>> { + key: <<double>> + key_as_string?: string +} +type AggregationsDoubleTermsBucket = AggregationsDoubleTermsBucketKeys + & { [property: string]: <<AggregationsAggregate>> | <<double>> | string | <<long>> } +---- + + + +[discrete] +[[AggregationsEwmaModelSettings]] +=== AggregationsEwmaModelSettings + +[source,ts,subs=+macros] +---- +interface AggregationsEwmaModelSettings { + alpha?: <<float>> +} +---- + + + +[discrete] +[[AggregationsEwmaMovingAverageAggregation]] +=== AggregationsEwmaMovingAverageAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsEwmaMovingAverageAggregation extends <<AggregationsMovingAverageAggregationBase>> { + model: 'ewma' + settings: <<AggregationsEwmaModelSettings>> +} +---- + + + +[discrete] +[[AggregationsExtendedBounds]] +=== AggregationsExtendedBounds + +[source,ts,subs=+macros] +---- +interface AggregationsExtendedBounds<T = unknown> { + pass:[/**] @property max Maximum value for the bound. */ + max?: T + pass:[/**] @property min Minimum value for the bound. */ + min?: T +} +---- + + + +[discrete] +[[AggregationsExtendedStatsAggregate]] +=== AggregationsExtendedStatsAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsExtendedStatsAggregate extends <<AggregationsStatsAggregate>> { + sum_of_squares: <<double>> | null + variance: <<double>> | null + variance_population: <<double>> | null + variance_sampling: <<double>> | null + std_deviation: <<double>> | null + std_deviation_population: <<double>> | null + std_deviation_sampling: <<double>> | null + std_deviation_bounds?: <<AggregationsStandardDeviationBounds>> + sum_of_squares_as_string?: string + variance_as_string?: string + variance_population_as_string?: string + variance_sampling_as_string?: string + std_deviation_as_string?: string + std_deviation_bounds_as_string?: <<AggregationsStandardDeviationBoundsAsString>> +} +---- + + + +[discrete] +[[AggregationsExtendedStatsAggregation]] +=== AggregationsExtendedStatsAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsExtendedStatsAggregation extends <<AggregationsFormatMetricAggregationBase>> { + pass:[/**] @property sigma The number of standard deviations above/below the mean to display. */ + sigma?: <<double>> +} +---- + + + +[discrete] +[[AggregationsExtendedStatsBucketAggregate]] +=== AggregationsExtendedStatsBucketAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsExtendedStatsBucketAggregate extends <<AggregationsExtendedStatsAggregate>> {} +---- + + + +[discrete] +[[AggregationsExtendedStatsBucketAggregation]] +=== AggregationsExtendedStatsBucketAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsExtendedStatsBucketAggregation extends <<AggregationsPipelineAggregationBase>> { + pass:[/**] @property sigma The number of standard deviations above/below the mean to display. 
*/ + sigma?: <<double>> +} +---- + + + +[discrete] +[[AggregationsFieldDateMath]] +=== AggregationsFieldDateMath + +[source,ts,subs=+macros] +---- +type AggregationsFieldDateMath = <<DateMath>> | <<double>> +---- + + + +[discrete] +[[AggregationsFilterAggregate]] +=== AggregationsFilterAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsFilterAggregateKeys extends <<AggregationsSingleBucketAggregateBase>> {} +type AggregationsFilterAggregate = AggregationsFilterAggregateKeys + & { [property: string]: <<AggregationsAggregate>> | <<long>> | <<Metadata>> } +---- + + + +[discrete] +[[AggregationsFiltersAggregate]] +=== AggregationsFiltersAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsFiltersAggregate extends <<AggregationsMultiBucketAggregateBase>><<<AggregationsFiltersBucket>>> {} +---- + + + +[discrete] +[[AggregationsFiltersAggregation]] +=== AggregationsFiltersAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsFiltersAggregation extends <<AggregationsBucketAggregationBase>> { + pass:[/**] @property filters Collection of queries from which to build buckets. */ + filters?: <<AggregationsBuckets>><<<QueryDslQueryContainer>>> + pass:[/**] @property other_bucket Set to `true` to add a bucket to the response which will contain all documents that do not match any of the given filters. */ + other_bucket?: boolean + pass:[/**] @property other_bucket_key The key with which the other bucket is returned. */ + other_bucket_key?: string + pass:[/**] @property keyed By default, the named filters aggregation returns the buckets as an object. Set to `false` to return the buckets as an array of objects. */ + keyed?: boolean +} +---- + + + +[discrete] +[[AggregationsFiltersBucket]] +=== AggregationsFiltersBucket + +[source,ts,subs=+macros] +---- +interface AggregationsFiltersBucketKeys extends <<AggregationsMultiBucketBase>> {} +type AggregationsFiltersBucket = AggregationsFiltersBucketKeys + & { [property: string]: <<AggregationsAggregate>> | <<long>> } +---- + + + +[discrete] +[[AggregationsFormatMetricAggregationBase]] +=== AggregationsFormatMetricAggregationBase + +[source,ts,subs=+macros] +---- +interface AggregationsFormatMetricAggregationBase extends <<AggregationsMetricAggregationBase>> { + format?: string +} +---- + + + +[discrete] +[[AggregationsFormattableMetricAggregation]] +=== AggregationsFormattableMetricAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsFormattableMetricAggregation extends <<AggregationsMetricAggregationBase>> { + format?: string +} +---- + + + +[discrete] +[[AggregationsFrequentItemSetsAggregate]] +=== AggregationsFrequentItemSetsAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsFrequentItemSetsAggregate extends <<AggregationsMultiBucketAggregateBase>><<<AggregationsFrequentItemSetsBucket>>> {} +---- + + + +[discrete] +[[AggregationsFrequentItemSetsAggregation]] +=== AggregationsFrequentItemSetsAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsFrequentItemSetsAggregation { + pass:[/**] @property fields <<Fields>> to analyze. */ + fields: <<AggregationsFrequentItemSetsField>>[] + pass:[/**] @property minimum_set_size The minimum size of one item set. */ + minimum_set_size?: <<integer>> + pass:[/**] @property minimum_support The minimum support of one item set. */ + minimum_support?: <<double>> + pass:[/**] @property size The number of top item sets to return. */ + size?: <<integer>> + pass:[/**] @property filter Query that filters documents from analysis. 
*/ + filter?: <<QueryDslQueryContainer>> +} +---- + + + +[discrete] +[[AggregationsFrequentItemSetsBucket]] +=== AggregationsFrequentItemSetsBucket + +[source,ts,subs=+macros] +---- +interface AggregationsFrequentItemSetsBucketKeys extends <<AggregationsMultiBucketBase>> { + key: Record<<<Field>>, string[]> + support: <<double>> +} +type AggregationsFrequentItemSetsBucket = AggregationsFrequentItemSetsBucketKeys + & { [property: string]: <<AggregationsAggregate>> | Record<<<Field>>, string[]> | <<double>> | <<long>> } +---- + + + +[discrete] +[[AggregationsFrequentItemSetsField]] +=== AggregationsFrequentItemSetsField + +[source,ts,subs=+macros] +---- +interface AggregationsFrequentItemSetsField { + field: <<Field>> + pass:[/**] @property exclude Values to exclude. Can be regular expression strings or arrays of strings of exact terms. */ + exclude?: <<AggregationsTermsExclude>> + pass:[/**] @property include Values to include. Can be regular expression strings or arrays of strings of exact terms. */ + include?: <<AggregationsTermsInclude>> +} +---- + + + +[discrete] +[[AggregationsGapPolicy]] +=== AggregationsGapPolicy + +[source,ts,subs=+macros] +---- +type AggregationsGapPolicy = 'skip' | 'insert_zeros' | 'keep_values' +---- + + + +[discrete] +[[AggregationsGeoBoundsAggregate]] +=== AggregationsGeoBoundsAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsGeoBoundsAggregate extends <<AggregationsAggregateBase>> { + bounds?: <<GeoBounds>> +} +---- + + + +[discrete] +[[AggregationsGeoBoundsAggregation]] +=== AggregationsGeoBoundsAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsGeoBoundsAggregation extends <<AggregationsMetricAggregationBase>> { + pass:[/**] @property wrap_longitude Specifies whether the bounding box should be allowed to overlap the international date line. */ + wrap_longitude?: boolean +} +---- + + + +[discrete] +[[AggregationsGeoCentroidAggregate]] +=== AggregationsGeoCentroidAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsGeoCentroidAggregate extends <<AggregationsAggregateBase>> { + count: <<long>> + location?: <<GeoLocation>> +} +---- + + + +[discrete] +[[AggregationsGeoCentroidAggregation]] +=== AggregationsGeoCentroidAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsGeoCentroidAggregation extends <<AggregationsMetricAggregationBase>> { + count?: <<long>> + location?: <<GeoLocation>> +} +---- + + + +[discrete] +[[AggregationsGeoDistanceAggregate]] +=== AggregationsGeoDistanceAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsGeoDistanceAggregate extends <<AggregationsRangeAggregate>> {} +---- + + + +[discrete] +[[AggregationsGeoDistanceAggregation]] +=== AggregationsGeoDistanceAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsGeoDistanceAggregation extends <<AggregationsBucketAggregationBase>> { + pass:[/**] @property distance_type The distance calculation type. */ + distance_type?: <<GeoDistanceType>> + pass:[/**] @property field A field of type `geo_point` used to evaluate the distance. */ + field?: <<Field>> + pass:[/**] @property origin The origin used to evaluate the distance. */ + origin?: <<GeoLocation>> + pass:[/**] @property ranges An array of ranges used to bucket documents. */ + ranges?: <<AggregationsAggregationRange>>[] + pass:[/**] @property unit The distance unit. 
*/ + unit?: <<DistanceUnit>> +} +---- + + + +[discrete] +[[AggregationsGeoHashGridAggregate]] +=== AggregationsGeoHashGridAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsGeoHashGridAggregate extends <<AggregationsMultiBucketAggregateBase>><<<AggregationsGeoHashGridBucket>>> {} +---- + + + +[discrete] +[[AggregationsGeoHashGridAggregation]] +=== AggregationsGeoHashGridAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsGeoHashGridAggregation extends <<AggregationsBucketAggregationBase>> { + pass:[/**] @property bounds The bounding box to filter the points in each bucket. */ + bounds?: <<GeoBounds>> + pass:[/**] @property field <<Field>> containing indexed `geo_point` or `geo_shape` values. If the field contains an array, `geohash_grid` aggregates all array values. */ + field?: <<Field>> + pass:[/**] @property precision The string length of the geohashes used to define cells/buckets in the results. */ + precision?: <<GeoHashPrecision>> + pass:[/**] @property shard_size Allows for more accurate counting of the top cells returned in the final result the aggregation. Defaults to returning `max(10,(size x number-of-shards))` buckets from each shard. */ + shard_size?: <<integer>> + pass:[/**] @property size The maximum number of geohash buckets to return. */ + size?: <<integer>> +} +---- + + + +[discrete] +[[AggregationsGeoHashGridBucket]] +=== AggregationsGeoHashGridBucket + +[source,ts,subs=+macros] +---- +interface AggregationsGeoHashGridBucketKeys extends <<AggregationsMultiBucketBase>> { + key: <<GeoHash>> +} +type AggregationsGeoHashGridBucket = AggregationsGeoHashGridBucketKeys + & { [property: string]: <<AggregationsAggregate>> | <<GeoHash>> | <<long>> } +---- + + + +[discrete] +[[AggregationsGeoHexGridAggregate]] +=== AggregationsGeoHexGridAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsGeoHexGridAggregate extends <<AggregationsMultiBucketAggregateBase>><<<AggregationsGeoHexGridBucket>>> {} +---- + + + +[discrete] +[[AggregationsGeoHexGridBucket]] +=== AggregationsGeoHexGridBucket + +[source,ts,subs=+macros] +---- +interface AggregationsGeoHexGridBucketKeys extends <<AggregationsMultiBucketBase>> { + key: <<GeoHexCell>> +} +type AggregationsGeoHexGridBucket = AggregationsGeoHexGridBucketKeys + & { [property: string]: <<AggregationsAggregate>> | <<GeoHexCell>> | <<long>> } +---- + + + +[discrete] +[[AggregationsGeoLineAggregate]] +=== AggregationsGeoLineAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsGeoLineAggregate extends <<AggregationsAggregateBase>> { + type: string + geometry: <<GeoLine>> + properties: any +} +---- + + + +[discrete] +[[AggregationsGeoLineAggregation]] +=== AggregationsGeoLineAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsGeoLineAggregation { + pass:[/**] @property point The name of the geo_point field. */ + point: <<AggregationsGeoLinePoint>> + pass:[/**] @property sort The name of the numeric field to use as the sort key for ordering the points. When the `geo_line` aggregation is nested inside a `time_series` aggregation, this field defaults to `@timestamp`, and any other value will result in error. */ + sort: <<AggregationsGeoLineSort>> + pass:[/**] @property include_sort When `true`, returns an additional array of the sort values in the feature properties. */ + include_sort?: boolean + pass:[/**] @property sort_order The order in which the line is sorted (ascending or descending). 
*/ + sort_order?: <<SortOrder>> + pass:[/**] @property size The maximum length of the line represented in the aggregation. Valid sizes are between 1 and 10000. */ + size?: <<integer>> +} +---- + + + +[discrete] +[[AggregationsGeoLinePoint]] +=== AggregationsGeoLinePoint + +[source,ts,subs=+macros] +---- +interface AggregationsGeoLinePoint { + pass:[/**] @property field The name of the geo_point field. */ + field: <<Field>> +} +---- + + + +[discrete] +[[AggregationsGeoLineSort]] +=== AggregationsGeoLineSort + +[source,ts,subs=+macros] +---- +interface AggregationsGeoLineSort { + pass:[/**] @property field The name of the numeric field to use as the sort key for ordering the points. */ + field: <<Field>> +} +---- + + + +[discrete] +[[AggregationsGeoTileGridAggregate]] +=== AggregationsGeoTileGridAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsGeoTileGridAggregate extends <<AggregationsMultiBucketAggregateBase>><<<AggregationsGeoTileGridBucket>>> {} +---- + + + +[discrete] +[[AggregationsGeoTileGridAggregation]] +=== AggregationsGeoTileGridAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsGeoTileGridAggregation extends <<AggregationsBucketAggregationBase>> { + pass:[/**] @property field <<Field>> containing indexed `geo_point` or `geo_shape` values. If the field contains an array, `geotile_grid` aggregates all array values. */ + field?: <<Field>> + pass:[/**] @property precision Integer zoom of the key used to define cells/buckets in the results. Values outside of the range [0,29] will be rejected. */ + precision?: <<GeoTilePrecision>> + pass:[/**] @property shard_size Allows for more accurate counting of the top cells returned in the final result the aggregation. Defaults to returning `max(10,(size x number-of-shards))` buckets from each shard. */ + shard_size?: <<integer>> + pass:[/**] @property size The maximum number of buckets to return. */ + size?: <<integer>> + pass:[/**] @property bounds A bounding box to filter the geo-points or geo-shapes in each bucket. */ + bounds?: <<GeoBounds>> +} +---- + + + +[discrete] +[[AggregationsGeoTileGridBucket]] +=== AggregationsGeoTileGridBucket + +[source,ts,subs=+macros] +---- +interface AggregationsGeoTileGridBucketKeys extends <<AggregationsMultiBucketBase>> { + key: <<GeoTile>> +} +type AggregationsGeoTileGridBucket = AggregationsGeoTileGridBucketKeys + & { [property: string]: <<AggregationsAggregate>> | <<GeoTile>> | <<long>> } +---- + + + +[discrete] +[[AggregationsGeohexGridAggregation]] +=== AggregationsGeohexGridAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsGeohexGridAggregation extends <<AggregationsBucketAggregationBase>> { + pass:[/**] @property field <<Field>> containing indexed `geo_point` or `geo_shape` values. If the field contains an array, `geohex_grid` aggregates all array values. */ + field: <<Field>> + pass:[/**] @property precision Integer zoom of the key used to defined cells or buckets in the results. Value should be between 0-15. */ + precision?: <<integer>> + pass:[/**] @property bounds Bounding box used to filter the geo-points in each bucket. */ + bounds?: <<GeoBounds>> + pass:[/**] @property size Maximum number of buckets to return. */ + size?: <<integer>> + pass:[/**] @property shard_size Number of buckets returned from each shard. 
*/ + shard_size?: <<integer>> +} +---- + + + +[discrete] +[[AggregationsGlobalAggregate]] +=== AggregationsGlobalAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsGlobalAggregateKeys extends <<AggregationsSingleBucketAggregateBase>> {} +type AggregationsGlobalAggregate = AggregationsGlobalAggregateKeys + & { [property: string]: <<AggregationsAggregate>> | <<long>> | <<Metadata>> } +---- + + + +[discrete] +[[AggregationsGlobalAggregation]] +=== AggregationsGlobalAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsGlobalAggregation extends <<AggregationsBucketAggregationBase>> {} +---- + + + +[discrete] +[[AggregationsGoogleNormalizedDistanceHeuristic]] +=== AggregationsGoogleNormalizedDistanceHeuristic + +[source,ts,subs=+macros] +---- +interface AggregationsGoogleNormalizedDistanceHeuristic { + pass:[/**] @property background_is_superset Set to `false` if you defined a custom background filter that represents a different set of documents that you want to compare to. */ + background_is_superset?: boolean +} +---- + + + +[discrete] +[[AggregationsHdrMethod]] +=== AggregationsHdrMethod + +[source,ts,subs=+macros] +---- +interface AggregationsHdrMethod { + pass:[/**] @property number_of_significant_value_digits Specifies the resolution of values for the histogram in number of significant digits. */ + number_of_significant_value_digits?: <<integer>> +} +---- + + + +[discrete] +[[AggregationsHdrPercentileRanksAggregate]] +=== AggregationsHdrPercentileRanksAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsHdrPercentileRanksAggregate extends <<AggregationsPercentilesAggregateBase>> {} +---- + + + +[discrete] +[[AggregationsHdrPercentilesAggregate]] +=== AggregationsHdrPercentilesAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsHdrPercentilesAggregate extends <<AggregationsPercentilesAggregateBase>> {} +---- + + + +[discrete] +[[AggregationsHistogramAggregate]] +=== AggregationsHistogramAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsHistogramAggregate extends <<AggregationsMultiBucketAggregateBase>><<<AggregationsHistogramBucket>>> {} +---- + + + +[discrete] +[[AggregationsHistogramAggregation]] +=== AggregationsHistogramAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsHistogramAggregation extends <<AggregationsBucketAggregationBase>> { + pass:[/**] @property extended_bounds Enables extending the bounds of the histogram beyond the data itself. */ + extended_bounds?: <<AggregationsExtendedBounds>><<<double>>> + pass:[/**] @property hard_bounds Limits the range of buckets in the histogram. It is particularly useful in the case of open data ranges that can result in a very large number of buckets. */ + hard_bounds?: <<AggregationsExtendedBounds>><<<double>>> + pass:[/**] @property field The name of the field to aggregate on. */ + field?: <<Field>> + pass:[/**] @property interval The interval for the buckets. Must be a positive decimal. */ + interval?: <<double>> + pass:[/**] @property min_doc_count Only returns buckets that have `min_doc_count` number of documents. By default, the response will fill gaps in the histogram with empty buckets. */ + min_doc_count?: <<integer>> + pass:[/**] @property missing The value to apply to documents that do not have a value. By default, documents without a value are ignored. */ + missing?: <<double>> + pass:[/**] @property offset By default, the bucket keys start with 0 and then continue in even spaced steps of `interval`. 
The bucket boundaries can be shifted by using the `offset` option. */ + offset?: <<double>> + pass:[/**] @property order The sort order of the returned buckets. By default, the returned buckets are sorted by their key ascending. */ + order?: <<AggregationsAggregateOrder>> + script?: <<Script>> | string + format?: string + pass:[/**] @property keyed If `true`, returns buckets as a hash instead of an array, keyed by the bucket keys. */ + keyed?: boolean +} +---- + + + +[discrete] +[[AggregationsHistogramBucket]] +=== AggregationsHistogramBucket + +[source,ts,subs=+macros] +---- +interface AggregationsHistogramBucketKeys extends <<AggregationsMultiBucketBase>> { + key_as_string?: string + key: <<double>> +} +type AggregationsHistogramBucket = AggregationsHistogramBucketKeys + & { [property: string]: <<AggregationsAggregate>> | string | <<double>> | <<long>> } +---- + + + +[discrete] +[[AggregationsHoltLinearModelSettings]] +=== AggregationsHoltLinearModelSettings + +[source,ts,subs=+macros] +---- +interface AggregationsHoltLinearModelSettings { + alpha?: <<float>> + beta?: <<float>> +} +---- + + + +[discrete] +[[AggregationsHoltMovingAverageAggregation]] +=== AggregationsHoltMovingAverageAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsHoltMovingAverageAggregation extends <<AggregationsMovingAverageAggregationBase>> { + model: 'holt' + settings: <<AggregationsHoltLinearModelSettings>> +} +---- + + + +[discrete] +[[AggregationsHoltWintersModelSettings]] +=== AggregationsHoltWintersModelSettings + +[source,ts,subs=+macros] +---- +interface AggregationsHoltWintersModelSettings { + alpha?: <<float>> + beta?: <<float>> + gamma?: <<float>> + pad?: boolean + period?: <<integer>> + type?: <<AggregationsHoltWintersType>> +} +---- + + + +[discrete] +[[AggregationsHoltWintersMovingAverageAggregation]] +=== AggregationsHoltWintersMovingAverageAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsHoltWintersMovingAverageAggregation extends <<AggregationsMovingAverageAggregationBase>> { + model: 'holt_winters' + settings: <<AggregationsHoltWintersModelSettings>> +} +---- + + + +[discrete] +[[AggregationsHoltWintersType]] +=== AggregationsHoltWintersType + +[source,ts,subs=+macros] +---- +type AggregationsHoltWintersType = 'add' | 'mult' +---- + + + +[discrete] +[[AggregationsInferenceAggregate]] +=== AggregationsInferenceAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsInferenceAggregateKeys extends <<AggregationsAggregateBase>> { + value?: <<FieldValue>> + feature_importance?: <<AggregationsInferenceFeatureImportance>>[] + top_classes?: <<AggregationsInferenceTopClassEntry>>[] + warning?: string +} +type AggregationsInferenceAggregate = AggregationsInferenceAggregateKeys + & { [property: string]: any } +---- + + + +[discrete] +[[AggregationsInferenceAggregation]] +=== AggregationsInferenceAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsInferenceAggregation extends <<AggregationsPipelineAggregationBase>> { + pass:[/**] @property model_id The ID or alias for the trained model. */ + model_id: <<Name>> + pass:[/**] @property inference_config Contains the inference type and its options. 
*/ + inference_config?: <<AggregationsInferenceConfigContainer>> +} +---- + + + +[discrete] +[[AggregationsInferenceClassImportance]] +=== AggregationsInferenceClassImportance + +[source,ts,subs=+macros] +---- +interface AggregationsInferenceClassImportance { + class_name: string + importance: <<double>> +} +---- + + + +[discrete] +[[AggregationsInferenceConfigContainer]] +=== AggregationsInferenceConfigContainer + +[source,ts,subs=+macros] +---- +interface AggregationsInferenceConfigContainer { + pass:[/**] @property regression Regression configuration for inference. */ + regression?: <<MlRegressionInferenceOptions>> + pass:[/**] @property classification Classification configuration for inference. */ + classification?: <<MlClassificationInferenceOptions>> +} +---- + + + +[discrete] +[[AggregationsInferenceFeatureImportance]] +=== AggregationsInferenceFeatureImportance + +[source,ts,subs=+macros] +---- +interface AggregationsInferenceFeatureImportance { + feature_name: string + importance?: <<double>> + classes?: <<AggregationsInferenceClassImportance>>[] +} +---- + + + +[discrete] +[[AggregationsInferenceTopClassEntry]] +=== AggregationsInferenceTopClassEntry + +[source,ts,subs=+macros] +---- +interface AggregationsInferenceTopClassEntry { + class_name: <<FieldValue>> + class_probability: <<double>> + class_score: <<double>> +} +---- + + + +[discrete] +[[AggregationsIpPrefixAggregate]] +=== AggregationsIpPrefixAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsIpPrefixAggregate extends <<AggregationsMultiBucketAggregateBase>><<<AggregationsIpPrefixBucket>>> {} +---- + + + +[discrete] +[[AggregationsIpPrefixAggregation]] +=== AggregationsIpPrefixAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsIpPrefixAggregation extends <<AggregationsBucketAggregationBase>> { + pass:[/**] @property field The IP address field to aggregate on. The field mapping type must be `ip`. */ + field: <<Field>> + pass:[/**] @property prefix_length Length of the network prefix. For IPv4 addresses the accepted range is [0, 32]. For IPv6 addresses the accepted range is [0, 128]. */ + prefix_length: <<integer>> + pass:[/**] @property is_ipv6 Defines whether the prefix applies to IPv6 addresses. */ + is_ipv6?: boolean + pass:[/**] @property append_prefix_length Defines whether the prefix length is appended to IP address keys in the response. */ + append_prefix_length?: boolean + pass:[/**] @property keyed Defines whether buckets are returned as a hash rather than an array in the response. */ + keyed?: boolean + pass:[/**] @property min_doc_count Minimum number of documents in a bucket for it to be included in the response.
*/ + min_doc_count?: <<long>> +} +---- + + + +[discrete] +[[AggregationsIpPrefixBucket]] +=== AggregationsIpPrefixBucket + +[source,ts,subs=+macros] +---- +interface AggregationsIpPrefixBucketKeys extends <<AggregationsMultiBucketBase>> { + is_ipv6: boolean + key: string + prefix_length: <<integer>> + netmask?: string +} +type AggregationsIpPrefixBucket = AggregationsIpPrefixBucketKeys + & { [property: string]: <<AggregationsAggregate>> | boolean | string | <<integer>> | <<long>> } +---- + + + +[discrete] +[[AggregationsIpRangeAggregate]] +=== AggregationsIpRangeAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsIpRangeAggregate extends <<AggregationsMultiBucketAggregateBase>><<<AggregationsIpRangeBucket>>> {} +---- + + + +[discrete] +[[AggregationsIpRangeAggregation]] +=== AggregationsIpRangeAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsIpRangeAggregation extends <<AggregationsBucketAggregationBase>> { + pass:[/**] @property field The IP field whose values are used to build ranges. */ + field?: <<Field>> + pass:[/**] @property ranges Array of IP ranges. */ + ranges?: <<AggregationsIpRangeAggregationRange>>[] +} +---- + + + +[discrete] +[[AggregationsIpRangeAggregationRange]] +=== AggregationsIpRangeAggregationRange + +[source,ts,subs=+macros] +---- +interface AggregationsIpRangeAggregationRange { + pass:[/**] @property from Start of the range. */ + from?: string | null + pass:[/**] @property mask IP range defined as a CIDR mask. */ + mask?: string + pass:[/**] @property to End of the range. */ + to?: string | null +} +---- + + + +[discrete] +[[AggregationsIpRangeBucket]] +=== AggregationsIpRangeBucket + +[source,ts,subs=+macros] +---- +interface AggregationsIpRangeBucketKeys extends <<AggregationsMultiBucketBase>> { + key?: string + from?: string + to?: string +} +type AggregationsIpRangeBucket = AggregationsIpRangeBucketKeys + & { [property: string]: <<AggregationsAggregate>> | string | <<long>> } +---- + + + +[discrete] +[[AggregationsKeyedPercentiles]] +=== AggregationsKeyedPercentiles + +[source,ts,subs=+macros] +---- +type AggregationsKeyedPercentiles = Record<string, string | <<long>> | null> +---- + + + +[discrete] +[[AggregationsLinearMovingAverageAggregation]] +=== AggregationsLinearMovingAverageAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsLinearMovingAverageAggregation extends <<AggregationsMovingAverageAggregationBase>> { + model: 'linear' + settings: <<EmptyObject>> +} +---- + + + +[discrete] +[[AggregationsLongRareTermsAggregate]] +=== AggregationsLongRareTermsAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsLongRareTermsAggregate extends <<AggregationsMultiBucketAggregateBase>><<<AggregationsLongRareTermsBucket>>> {} +---- + + + +[discrete] +[[AggregationsLongRareTermsBucket]] +=== AggregationsLongRareTermsBucket + +[source,ts,subs=+macros] +---- +interface AggregationsLongRareTermsBucketKeys extends <<AggregationsMultiBucketBase>> { + key: <<long>> + key_as_string?: string +} +type AggregationsLongRareTermsBucket = AggregationsLongRareTermsBucketKeys + & { [property: string]: <<AggregationsAggregate>> | <<long>> | string } +---- + + + +[discrete] +[[AggregationsLongTermsAggregate]] +=== AggregationsLongTermsAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsLongTermsAggregate extends <<AggregationsTermsAggregateBase>><<<AggregationsLongTermsBucket>>> {} +---- + + + +[discrete] +[[AggregationsLongTermsBucket]] +=== AggregationsLongTermsBucket + +[source,ts,subs=+macros] +----
+interface AggregationsLongTermsBucketKeys extends <<AggregationsTermsBucketBase>> { + key: <<long>> + key_as_string?: string +} +type AggregationsLongTermsBucket = AggregationsLongTermsBucketKeys + & { [property: string]: <<AggregationsAggregate>> | <<long>> | string } +---- + + + +[discrete] +[[AggregationsMatrixAggregation]] +=== AggregationsMatrixAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsMatrixAggregation { + pass:[/**] @property fields An array of fields for computing the statistics. */ + fields?: <<Fields>> + pass:[/**] @property missing The value to apply to documents that do not have a value. By default, documents without a value are ignored. */ + missing?: Record<<<Field>>, <<double>>> +} +---- + + + +[discrete] +[[AggregationsMatrixStatsAggregate]] +=== AggregationsMatrixStatsAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsMatrixStatsAggregate extends <<AggregationsAggregateBase>> { + doc_count: <<long>> + fields?: <<AggregationsMatrixStatsFields>>[] +} +---- + + + +[discrete] +[[AggregationsMatrixStatsAggregation]] +=== AggregationsMatrixStatsAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsMatrixStatsAggregation extends <<AggregationsMatrixAggregation>> { + pass:[/**] @property mode Array value the aggregation will use for array or multi-valued fields. */ + mode?: <<SortMode>> +} +---- + + + +[discrete] +[[AggregationsMatrixStatsFields]] +=== AggregationsMatrixStatsFields + +[source,ts,subs=+macros] +---- +interface AggregationsMatrixStatsFields { + name: <<Field>> + count: <<long>> + mean: <<double>> + variance: <<double>> + skewness: <<double>> + kurtosis: <<double>> + covariance: Record<<<Field>>, <<double>>> + correlation: Record<<<Field>>, <<double>>> +} +---- + + + +[discrete] +[[AggregationsMaxAggregate]] +=== AggregationsMaxAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsMaxAggregate extends <<AggregationsSingleMetricAggregateBase>> {} +---- + + + +[discrete] +[[AggregationsMaxAggregation]] +=== AggregationsMaxAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsMaxAggregation extends <<AggregationsFormatMetricAggregationBase>> {} +---- + + + +[discrete] +[[AggregationsMaxBucketAggregation]] +=== AggregationsMaxBucketAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsMaxBucketAggregation extends <<AggregationsPipelineAggregationBase>> {} +---- + + + +[discrete] +[[AggregationsMedianAbsoluteDeviationAggregate]] +=== AggregationsMedianAbsoluteDeviationAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsMedianAbsoluteDeviationAggregate extends <<AggregationsSingleMetricAggregateBase>> {} +---- + + + +[discrete] +[[AggregationsMedianAbsoluteDeviationAggregation]] +=== AggregationsMedianAbsoluteDeviationAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsMedianAbsoluteDeviationAggregation extends <<AggregationsFormatMetricAggregationBase>> { + pass:[/**] @property compression Limits the maximum number of nodes used by the underlying TDigest algorithm to `20 * compression`, enabling control of memory usage and approximation error. */ + compression?: <<double>> +} +---- + + + +[discrete] +[[AggregationsMetricAggregationBase]] +=== AggregationsMetricAggregationBase + +[source,ts,subs=+macros] +---- +interface AggregationsMetricAggregationBase { + pass:[/**] @property field The field on which to run the aggregation. */ + field?: <<Field>> + pass:[/**] @property missing The value to apply to documents that do not have a value. 
By default, documents without a value are ignored. */ + missing?: <<AggregationsMissing>> + script?: <<Script>> | string +} +---- + + + +[discrete] +[[AggregationsMinAggregate]] +=== AggregationsMinAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsMinAggregate extends <<AggregationsSingleMetricAggregateBase>> {} +---- + + + +[discrete] +[[AggregationsMinAggregation]] +=== AggregationsMinAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsMinAggregation extends <<AggregationsFormatMetricAggregationBase>> {} +---- + + + +[discrete] +[[AggregationsMinBucketAggregation]] +=== AggregationsMinBucketAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsMinBucketAggregation extends <<AggregationsPipelineAggregationBase>> {} +---- + + + +[discrete] +[[AggregationsMinimumInterval]] +=== AggregationsMinimumInterval + +[source,ts,subs=+macros] +---- +type AggregationsMinimumInterval = 'second' | 'minute' | 'hour' | 'day' | 'month' | 'year' +---- + + + +[discrete] +[[AggregationsMissing]] +=== AggregationsMissing + +[source,ts,subs=+macros] +---- +type AggregationsMissing = string | <<integer>> | <<double>> | boolean +---- + + + +[discrete] +[[AggregationsMissingAggregate]] +=== AggregationsMissingAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsMissingAggregateKeys extends <<AggregationsSingleBucketAggregateBase>> {} +type AggregationsMissingAggregate = AggregationsMissingAggregateKeys + & { [property: string]: <<AggregationsAggregate>> | <<long>> | <<Metadata>> } +---- + + + +[discrete] +[[AggregationsMissingAggregation]] +=== AggregationsMissingAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsMissingAggregation extends <<AggregationsBucketAggregationBase>> { + pass:[/**] @property field The name of the field. */ + field?: <<Field>> + missing?: <<AggregationsMissing>> +} +---- + + + +[discrete] +[[AggregationsMissingOrder]] +=== AggregationsMissingOrder + +[source,ts,subs=+macros] +---- +type AggregationsMissingOrder = 'first' | 'last' | 'default' +---- + + + +[discrete] +[[AggregationsMovingAverageAggregation]] +=== AggregationsMovingAverageAggregation + +[source,ts,subs=+macros] +---- +type AggregationsMovingAverageAggregation = <<AggregationsLinearMovingAverageAggregation>> | <<AggregationsSimpleMovingAverageAggregation>> | <<AggregationsEwmaMovingAverageAggregation>> | <<AggregationsHoltMovingAverageAggregation>> | <<AggregationsHoltWintersMovingAverageAggregation>> +---- + + + +[discrete] +[[AggregationsMovingAverageAggregationBase]] +=== AggregationsMovingAverageAggregationBase + +[source,ts,subs=+macros] +---- +interface AggregationsMovingAverageAggregationBase extends <<AggregationsPipelineAggregationBase>> { + minimize?: boolean + predict?: <<integer>> + window?: <<integer>> +} +---- + + + +[discrete] +[[AggregationsMovingFunctionAggregation]] +=== AggregationsMovingFunctionAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsMovingFunctionAggregation extends <<AggregationsPipelineAggregationBase>> { + pass:[/**] @property script The script that should be executed on each window of data. */ + script?: string + pass:[/**] @property shift By default, the window consists of the last n values excluding the current bucket. Increasing `shift` by 1, moves the starting window position by 1 to the right. */ + shift?: <<integer>> + pass:[/**] @property window The size of window to "slide" across the histogram. 
*/ + window?: <<integer>> +} +---- + + + +[discrete] +[[AggregationsMovingPercentilesAggregation]] +=== AggregationsMovingPercentilesAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsMovingPercentilesAggregation extends <<AggregationsPipelineAggregationBase>> { + pass:[/**] @property window The size of window to "slide" across the histogram. */ + window?: <<integer>> + pass:[/**] @property shift By default, the window consists of the last n values excluding the current bucket. Increasing `shift` by 1, moves the starting window position by 1 to the right. */ + shift?: <<integer>> + keyed?: boolean +} +---- + + + +[discrete] +[[AggregationsMultiBucketAggregateBase]] +=== AggregationsMultiBucketAggregateBase + +[source,ts,subs=+macros] +---- +interface AggregationsMultiBucketAggregateBase<TBucket = unknown> extends <<AggregationsAggregateBase>> { + buckets: <<AggregationsBuckets>><TBucket> +} +---- + + + +[discrete] +[[AggregationsMultiBucketBase]] +=== AggregationsMultiBucketBase + +[source,ts,subs=+macros] +---- +interface AggregationsMultiBucketBase { + doc_count: <<long>> +} +---- + + + +[discrete] +[[AggregationsMultiTermLookup]] +=== AggregationsMultiTermLookup + +[source,ts,subs=+macros] +---- +interface AggregationsMultiTermLookup { + pass:[/**] @property field A field from which to retrieve terms. */ + field: <<Field>> + pass:[/**] @property missing The value to apply to documents that do not have a value. By default, documents without a value are ignored. */ + missing?: <<AggregationsMissing>> +} +---- + + + +[discrete] +[[AggregationsMultiTermsAggregate]] +=== AggregationsMultiTermsAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsMultiTermsAggregate extends <<AggregationsTermsAggregateBase>><<<AggregationsMultiTermsBucket>>> {} +---- + + + +[discrete] +[[AggregationsMultiTermsAggregation]] +=== AggregationsMultiTermsAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsMultiTermsAggregation extends <<AggregationsBucketAggregationBase>> { + pass:[/**] @property collect_mode Specifies the strategy for data collection. */ + collect_mode?: <<AggregationsTermsAggregationCollectMode>> + pass:[/**] @property order Specifies the sort order of the buckets. Defaults to sorting by descending document count. */ + order?: <<AggregationsAggregateOrder>> + pass:[/**] @property min_doc_count The minimum number of documents in a bucket for it to be returned. */ + min_doc_count?: <<long>> + pass:[/**] @property shard_min_doc_count The minimum number of documents in a bucket on each shard for it to be returned. */ + shard_min_doc_count?: <<long>> + pass:[/**] @property shard_size The number of candidate terms produced by each shard. By default, `shard_size` will be automatically estimated based on the number of shards and the `size` parameter. */ + shard_size?: <<integer>> + pass:[/**] @property show_term_doc_count_error Calculates the doc count error on a per term basis. */ + show_term_doc_count_error?: boolean + pass:[/**] @property size The number of term buckets that should be returned out of the overall terms list. */ + size?: <<integer>> + pass:[/**] @property terms The field from which to generate sets of terms.
*/ + terms: <<AggregationsMultiTermLookup>>[] +} +---- + + + +[discrete] +[[AggregationsMultiTermsBucket]] +=== AggregationsMultiTermsBucket + +[source,ts,subs=+macros] +---- +interface AggregationsMultiTermsBucketKeys extends <<AggregationsMultiBucketBase>> { + key: <<FieldValue>>[] + key_as_string?: string + doc_count_error_upper_bound?: <<long>> +} +type AggregationsMultiTermsBucket = AggregationsMultiTermsBucketKeys + & { [property: string]: <<AggregationsAggregate>> | <<FieldValue>>[] | string | <<long>> } +---- + + + +[discrete] +[[AggregationsMutualInformationHeuristic]] +=== AggregationsMutualInformationHeuristic + +[source,ts,subs=+macros] +---- +interface AggregationsMutualInformationHeuristic { + pass:[/**] @property background_is_superset Set to `false` if you defined a custom background filter that represents a different set of documents that you want to compare to. */ + background_is_superset?: boolean + pass:[/**] @property include_negatives Set to `false` to filter out the terms that appear less often in the subset than in documents outside the subset. */ + include_negatives?: boolean +} +---- + + + +[discrete] +[[AggregationsNestedAggregate]] +=== AggregationsNestedAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsNestedAggregateKeys extends <<AggregationsSingleBucketAggregateBase>> {} +type AggregationsNestedAggregate = AggregationsNestedAggregateKeys + & { [property: string]: <<AggregationsAggregate>> | <<long>> | <<Metadata>> } +---- + + + +[discrete] +[[AggregationsNestedAggregation]] +=== AggregationsNestedAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsNestedAggregation extends <<AggregationsBucketAggregationBase>> { + pass:[/**] @property path The path to the field of type `nested`. */ + path?: <<Field>> +} +---- + + + +[discrete] +[[AggregationsNormalizeAggregation]] +=== AggregationsNormalizeAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsNormalizeAggregation extends <<AggregationsPipelineAggregationBase>> { + pass:[/**] @property method The specific method to apply. */ + method?: <<AggregationsNormalizeMethod>> +} +---- + + + +[discrete] +[[AggregationsNormalizeMethod]] +=== AggregationsNormalizeMethod + +[source,ts,subs=+macros] +---- +type AggregationsNormalizeMethod = 'rescale_0_1' | 'rescale_0_100' | 'percent_of_sum' | 'mean' | 'z-score' | 'softmax' +---- + + + +[discrete] +[[AggregationsParentAggregate]] +=== AggregationsParentAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsParentAggregateKeys extends <<AggregationsSingleBucketAggregateBase>> {} +type AggregationsParentAggregate = AggregationsParentAggregateKeys + & { [property: string]: <<AggregationsAggregate>> | <<long>> | <<Metadata>> } +---- + + + +[discrete] +[[AggregationsParentAggregation]] +=== AggregationsParentAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsParentAggregation extends <<AggregationsBucketAggregationBase>> { + pass:[/**] @property type The child type that should be selected. 
*/ + type?: <<RelationName>> +} +---- + + + +[discrete] +[[AggregationsPercentageScoreHeuristic]] +=== AggregationsPercentageScoreHeuristic + +[source,ts,subs=+macros] +---- +interface AggregationsPercentageScoreHeuristic {} +---- + + + +[discrete] +[[AggregationsPercentileRanksAggregation]] +=== AggregationsPercentileRanksAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsPercentileRanksAggregation extends <<AggregationsFormatMetricAggregationBase>> { + pass:[/**] @property keyed By default, the aggregation associates a unique string key with each bucket and returns the ranges as a hash rather than an array. Set to `false` to disable this behavior. */ + keyed?: boolean + pass:[/**] @property values An array of values for which to calculate the percentile ranks. */ + values?: <<double>>[] | null + pass:[/**] @property hdr Uses the alternative High Dynamic Range Histogram algorithm to calculate percentile ranks. */ + hdr?: <<AggregationsHdrMethod>> + pass:[/**] @property tdigest Sets parameters for the default TDigest algorithm used to calculate percentile ranks. */ + tdigest?: <<AggregationsTDigest>> +} +---- + + + +[discrete] +[[AggregationsPercentiles]] +=== AggregationsPercentiles + +[source,ts,subs=+macros] +---- +type AggregationsPercentiles = <<AggregationsKeyedPercentiles>> | <<AggregationsArrayPercentilesItem>>[] +---- + + + +[discrete] +[[AggregationsPercentilesAggregateBase]] +=== AggregationsPercentilesAggregateBase + +[source,ts,subs=+macros] +---- +interface AggregationsPercentilesAggregateBase extends <<AggregationsAggregateBase>> { + values: <<AggregationsPercentiles>> +} +---- + + + +[discrete] +[[AggregationsPercentilesAggregation]] +=== AggregationsPercentilesAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsPercentilesAggregation extends <<AggregationsFormatMetricAggregationBase>> { + pass:[/**] @property keyed By default, the aggregation associates a unique string key with each bucket and returns the ranges as a hash rather than an array. Set to `false` to disable this behavior. */ + keyed?: boolean + pass:[/**] @property percents The percentiles to calculate. */ + percents?: <<double>>[] + pass:[/**] @property hdr Uses the alternative High Dynamic Range Histogram algorithm to calculate percentiles. */ + hdr?: <<AggregationsHdrMethod>> + pass:[/**] @property tdigest Sets parameters for the default TDigest algorithm used to calculate percentiles. */ + tdigest?: <<AggregationsTDigest>> +} +---- + + + +[discrete] +[[AggregationsPercentilesBucketAggregate]] +=== AggregationsPercentilesBucketAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsPercentilesBucketAggregate extends <<AggregationsPercentilesAggregateBase>> {} +---- + + + +[discrete] +[[AggregationsPercentilesBucketAggregation]] +=== AggregationsPercentilesBucketAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsPercentilesBucketAggregation extends <<AggregationsPipelineAggregationBase>> { + pass:[/**] @property percents The list of percentiles to calculate. */ + percents?: <<double>>[] +} +---- + + + +[discrete] +[[AggregationsPipelineAggregationBase]] +=== AggregationsPipelineAggregationBase + +[source,ts,subs=+macros] +---- +interface AggregationsPipelineAggregationBase extends <<AggregationsBucketPathAggregation>> { + pass:[/**] @property format `DecimalFormat` pattern for the output value. If specified, the formatted value is returned in the aggregation’s `value_as_string` property. 
*/ + format?: string + pass:[/**] @property gap_policy Policy to apply when gaps are found in the data. */ + gap_policy?: <<AggregationsGapPolicy>> +} +---- + + + +[discrete] +[[AggregationsRandomSamplerAggregation]] +=== AggregationsRandomSamplerAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsRandomSamplerAggregation extends <<AggregationsBucketAggregationBase>> { + pass:[/**] @property probability The probability that a document will be included in the aggregated data. Must be greater than 0, less than 0.5, or exactly 1. The lower the probability, the fewer documents are matched. */ + probability: <<double>> + pass:[/**] @property seed The seed to generate the random sampling of documents. When a seed is provided, the random subset of documents is the same between calls. */ + seed?: <<integer>> + pass:[/**] @property shard_seed When combined with seed, setting shard_seed ensures 100% consistent sampling over shards where data is exactly the same. */ + shard_seed?: <<integer>> +} +---- + + + +[discrete] +[[AggregationsRangeAggregate]] +=== AggregationsRangeAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsRangeAggregate extends <<AggregationsMultiBucketAggregateBase>><<<AggregationsRangeBucket>>> {} +---- + + + +[discrete] +[[AggregationsRangeAggregation]] +=== AggregationsRangeAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsRangeAggregation extends <<AggregationsBucketAggregationBase>> { + pass:[/**] @property field The field whose values are used to build ranges. */ + field?: <<Field>> + pass:[/**] @property missing The value to apply to documents that do not have a value. By default, documents without a value are ignored. */ + missing?: <<integer>> + pass:[/**] @property ranges An array of ranges used to bucket documents. */ + ranges?: <<AggregationsAggregationRange>>[] + script?: <<Script>> | string + pass:[/**] @property keyed Set to `true` to associate a unique string key with each bucket and return the ranges as a hash rather than an array. */ + keyed?: boolean + format?: string +} +---- + + + +[discrete] +[[AggregationsRangeBucket]] +=== AggregationsRangeBucket + +[source,ts,subs=+macros] +---- +interface AggregationsRangeBucketKeys extends <<AggregationsMultiBucketBase>> { + from?: <<double>> + to?: <<double>> + from_as_string?: string + to_as_string?: string + key?: string +} +type AggregationsRangeBucket = AggregationsRangeBucketKeys + & { [property: string]: <<AggregationsAggregate>> | <<double>> | string | <<long>> } +---- + + + +[discrete] +[[AggregationsRareTermsAggregation]] +=== AggregationsRareTermsAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsRareTermsAggregation extends <<AggregationsBucketAggregationBase>> { + pass:[/**] @property exclude Terms that should be excluded from the aggregation. */ + exclude?: <<AggregationsTermsExclude>> + pass:[/**] @property field The field from which to return rare terms. */ + field?: <<Field>> + pass:[/**] @property include Terms that should be included in the aggregation. */ + include?: <<AggregationsTermsInclude>> + pass:[/**] @property max_doc_count The maximum number of documents a term should appear in. */ + max_doc_count?: <<long>> + pass:[/**] @property missing The value to apply to documents that do not have a value. By default, documents without a value are ignored. */ + missing?: <<AggregationsMissing>> + pass:[/**] @property precision The precision of the internal CuckooFilters.
Smaller precision leads to better approximation, but higher memory usage. */ + precision?: <<double>> + value_type?: string +} +---- + + + +[discrete] +[[AggregationsRateAggregate]] +=== AggregationsRateAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsRateAggregate extends <<AggregationsAggregateBase>> { + value: <<double>> + value_as_string?: string +} +---- + + + +[discrete] +[[AggregationsRateAggregation]] +=== AggregationsRateAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsRateAggregation extends <<AggregationsFormatMetricAggregationBase>> { + pass:[/**] @property unit The interval used to calculate the rate. By default, the interval of the `date_histogram` is used. */ + unit?: <<AggregationsCalendarInterval>> + pass:[/**] @property mode How the rate is calculated. */ + mode?: <<AggregationsRateMode>> +} +---- + + + +[discrete] +[[AggregationsRateMode]] +=== AggregationsRateMode + +[source,ts,subs=+macros] +---- +type AggregationsRateMode = 'sum' | 'value_count' +---- + + + +[discrete] +[[AggregationsReverseNestedAggregate]] +=== AggregationsReverseNestedAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsReverseNestedAggregateKeys extends <<AggregationsSingleBucketAggregateBase>> {} +type AggregationsReverseNestedAggregate = AggregationsReverseNestedAggregateKeys + & { [property: string]: <<AggregationsAggregate>> | <<long>> | <<Metadata>> } +---- + + + +[discrete] +[[AggregationsReverseNestedAggregation]] +=== AggregationsReverseNestedAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsReverseNestedAggregation extends <<AggregationsBucketAggregationBase>> { + pass:[/**] @property path Defines the nested object field that should be joined back to. The default is empty, which means that it joins back to the root/main document level. */ + path?: <<Field>> +} +---- + + + +[discrete] +[[AggregationsSamplerAggregate]] +=== AggregationsSamplerAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsSamplerAggregateKeys extends <<AggregationsSingleBucketAggregateBase>> {} +type AggregationsSamplerAggregate = AggregationsSamplerAggregateKeys + & { [property: string]: <<AggregationsAggregate>> | <<long>> | <<Metadata>> } +---- + + + +[discrete] +[[AggregationsSamplerAggregation]] +=== AggregationsSamplerAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsSamplerAggregation extends <<AggregationsBucketAggregationBase>> { + pass:[/**] @property shard_size Limits how many top-scoring documents are collected in the sample processed on each shard. 
*/ + shard_size?: <<integer>> +} +---- + + + +[discrete] +[[AggregationsSamplerAggregationExecutionHint]] +=== AggregationsSamplerAggregationExecutionHint + +[source,ts,subs=+macros] +---- +type AggregationsSamplerAggregationExecutionHint = 'map' | 'global_ordinals' | 'bytes_hash' +---- + + + +[discrete] +[[AggregationsScriptedHeuristic]] +=== AggregationsScriptedHeuristic + +[source,ts,subs=+macros] +---- +interface AggregationsScriptedHeuristic { + script: <<Script>> | string +} +---- + + + +[discrete] +[[AggregationsScriptedMetricAggregate]] +=== AggregationsScriptedMetricAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsScriptedMetricAggregate extends <<AggregationsAggregateBase>> { + value: any +} +---- + + + +[discrete] +[[AggregationsScriptedMetricAggregation]] +=== AggregationsScriptedMetricAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsScriptedMetricAggregation extends <<AggregationsMetricAggregationBase>> { + pass:[/**] @property combine_script Runs once on each shard after document collection is complete. Allows the aggregation to consolidate the state returned from each shard. */ + combine_script?: <<Script>> | string + pass:[/**] @property init_script Runs prior to any collection of documents. Allows the aggregation to set up any initial state. */ + init_script?: <<Script>> | string + pass:[/**] @property map_script Run once per document collected. If no `combine_script` is specified, the resulting state needs to be stored in the `state` object. */ + map_script?: <<Script>> | string + pass:[/**] @property params A global object with script parameters for `init`, `map` and `combine` scripts. It is shared between the scripts. */ + params?: Record<string, any> + pass:[/**] @property reduce_script Runs once on the coordinating node after all shards have returned their results. The script is provided with access to a variable `states`, which is an array of the result of the `combine_script` on each shard. */ + reduce_script?: <<Script>> | string +} +---- + + + +[discrete] +[[AggregationsSerialDifferencingAggregation]] +=== AggregationsSerialDifferencingAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsSerialDifferencingAggregation extends <<AggregationsPipelineAggregationBase>> { + pass:[/**] @property lag The historical bucket to subtract from the current value. Must be a positive, non-zero <<integer>>. 
*/ + lag?: <<integer>> +} +---- + + + +[discrete] +[[AggregationsSignificantLongTermsAggregate]] +=== AggregationsSignificantLongTermsAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsSignificantLongTermsAggregate extends <<AggregationsSignificantTermsAggregateBase>><<<AggregationsSignificantLongTermsBucket>>> {} +---- + + + +[discrete] +[[AggregationsSignificantLongTermsBucket]] +=== AggregationsSignificantLongTermsBucket + +[source,ts,subs=+macros] +---- +interface AggregationsSignificantLongTermsBucketKeys extends <<AggregationsSignificantTermsBucketBase>> { + key: <<long>> + key_as_string?: string +} +type AggregationsSignificantLongTermsBucket = AggregationsSignificantLongTermsBucketKeys + & { [property: string]: <<AggregationsAggregate>> | <<long>> | string | <<double>> } +---- + + + +[discrete] +[[AggregationsSignificantStringTermsAggregate]] +=== AggregationsSignificantStringTermsAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsSignificantStringTermsAggregate extends <<AggregationsSignificantTermsAggregateBase>><<<AggregationsSignificantStringTermsBucket>>> {} +---- + + + +[discrete] +[[AggregationsSignificantStringTermsBucket]] +=== AggregationsSignificantStringTermsBucket + +[source,ts,subs=+macros] +---- +interface AggregationsSignificantStringTermsBucketKeys extends <<AggregationsSignificantTermsBucketBase>> { + key: string +} +type AggregationsSignificantStringTermsBucket = AggregationsSignificantStringTermsBucketKeys + & { [property: string]: <<AggregationsAggregate>> | string | <<double>> | <<long>> } +---- + + + +[discrete] +[[AggregationsSignificantTermsAggregateBase]] +=== AggregationsSignificantTermsAggregateBase + +[source,ts,subs=+macros] +---- +interface AggregationsSignificantTermsAggregateBase<T = unknown> extends <<AggregationsMultiBucketAggregateBase>><T> { + bg_count?: <<long>> + doc_count?: <<long>> +} +---- + + + +[discrete] +[[AggregationsSignificantTermsAggregation]] +=== AggregationsSignificantTermsAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsSignificantTermsAggregation extends <<AggregationsBucketAggregationBase>> { + pass:[/**] @property background_filter A background filter that can be used to focus in on significant terms within a narrower context, instead of the entire index. */ + background_filter?: <<QueryDslQueryContainer>> + pass:[/**] @property chi_square Use Chi square, as described in "Information Retrieval", Manning et al., Chapter 13.5.2, as the significance score. */ + chi_square?: <<AggregationsChiSquareHeuristic>> + pass:[/**] @property exclude Terms to exclude. */ + exclude?: <<AggregationsTermsExclude>> + pass:[/**] @property execution_hint Mechanism by which the aggregation should be executed: using field values directly or using global ordinals. */ + execution_hint?: <<AggregationsTermsAggregationExecutionHint>> + pass:[/**] @property field The field from which to return significant terms. */ + field?: <<Field>> + pass:[/**] @property gnd Use Google normalized distance as described in "The Google Similarity <<Distance>>", Cilibrasi and Vitanyi, 2007, as the significance score. */ + gnd?: <<AggregationsGoogleNormalizedDistanceHeuristic>> + pass:[/**] @property include Terms to include. */ + include?: <<AggregationsTermsInclude>> + pass:[/**] @property jlh Use JLH score as the significance score. */ + jlh?: <<EmptyObject>> + pass:[/**] @property min_doc_count Only return terms that are found in more than `min_doc_count` hits. 
*/ + min_doc_count?: <<long>> + pass:[/**] @property mutual_information Use mutual information as described in "Information Retrieval", Manning et al., Chapter 13.5.1, as the significance score. */ + mutual_information?: <<AggregationsMutualInformationHeuristic>> + pass:[/**] @property percentage A simple calculation of the number of documents in the foreground sample with a term divided by the number of documents in the background with the term. */ + percentage?: <<AggregationsPercentageScoreHeuristic>> + pass:[/**] @property script_heuristic Customized score, implemented via a script. */ + script_heuristic?: <<AggregationsScriptedHeuristic>> + pass:[/**] @property shard_min_doc_count Regulates the certainty a shard has if the term should actually be added to the candidate list or not with respect to the `min_doc_count`. Terms will only be considered if their local shard frequency within the set is higher than the `shard_min_doc_count`. */ + shard_min_doc_count?: <<long>> + pass:[/**] @property shard_size Can be used to control the volumes of candidate terms produced by each shard. By default, `shard_size` will be automatically estimated based on the number of shards and the `size` parameter. */ + shard_size?: <<integer>> + pass:[/**] @property size The number of buckets returned out of the overall terms list. */ + size?: <<integer>> +} +---- + + + +[discrete] +[[AggregationsSignificantTermsBucketBase]] +=== AggregationsSignificantTermsBucketBase + +[source,ts,subs=+macros] +---- +interface AggregationsSignificantTermsBucketBase extends <<AggregationsMultiBucketBase>> { + score: <<double>> + bg_count: <<long>> +} +---- + + + +[discrete] +[[AggregationsSignificantTextAggregation]] +=== AggregationsSignificantTextAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsSignificantTextAggregation extends <<AggregationsBucketAggregationBase>> { + pass:[/**] @property background_filter A background filter that can be used to focus in on significant terms within a narrower context, instead of the entire index. */ + background_filter?: <<QueryDslQueryContainer>> + pass:[/**] @property chi_square Use Chi square, as described in "Information Retrieval", Manning et al., Chapter 13.5.2, as the significance score. */ + chi_square?: <<AggregationsChiSquareHeuristic>> + pass:[/**] @property exclude Values to exclude. */ + exclude?: <<AggregationsTermsExclude>> + pass:[/**] @property execution_hint Determines whether the aggregation will use field values directly or global ordinals. */ + execution_hint?: <<AggregationsTermsAggregationExecutionHint>> + pass:[/**] @property field The field from which to return significant text. */ + field?: <<Field>> + pass:[/**] @property filter_duplicate_text Whether to filter out duplicate text to deal with noisy data. */ + filter_duplicate_text?: boolean + pass:[/**] @property gnd Use Google normalized distance as described in "The Google Similarity <<Distance>>", Cilibrasi and Vitanyi, 2007, as the significance score. */ + gnd?: <<AggregationsGoogleNormalizedDistanceHeuristic>> + pass:[/**] @property include Values to include. */ + include?: <<AggregationsTermsInclude>> + pass:[/**] @property jlh Use JLH score as the significance score. */ + jlh?: <<EmptyObject>> + pass:[/**] @property min_doc_count Only return values that are found in more than `min_doc_count` hits. */ + min_doc_count?: <<long>> + pass:[/**] @property mutual_information Use mutual information as described in "Information Retrieval", Manning et al., Chapter 13.5.1, as the significance score.
*/ + mutual_information?: <<AggregationsMutualInformationHeuristic>> + pass:[/**] @property percentage A simple calculation of the number of documents in the foreground sample with a term divided by the number of documents in the background with the term. */ + percentage?: <<AggregationsPercentageScoreHeuristic>> + pass:[/**] @property script_heuristic Customized score, implemented via a script. */ + script_heuristic?: <<AggregationsScriptedHeuristic>> + pass:[/**] @property shard_min_doc_count Regulates the certainty a shard has if the values should actually be added to the candidate list or not with respect to the min_doc_count. Values will only be considered if their local shard frequency within the set is higher than the `shard_min_doc_count`. */ + shard_min_doc_count?: <<long>> + pass:[/**] @property shard_size The number of candidate terms produced by each shard. By default, `shard_size` will be automatically estimated based on the number of shards and the `size` parameter. */ + shard_size?: <<integer>> + pass:[/**] @property size The number of buckets returned out of the overall terms list. */ + size?: <<integer>> + pass:[/**] @property source_fields Overrides the JSON `_source` fields from which text will be analyzed. */ + source_fields?: <<Fields>> +} +---- + + + +[discrete] +[[AggregationsSimpleMovingAverageAggregation]] +=== AggregationsSimpleMovingAverageAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsSimpleMovingAverageAggregation extends <<AggregationsMovingAverageAggregationBase>> { + model: 'simple' + settings: <<EmptyObject>> +} +---- + + + +[discrete] +[[AggregationsSimpleValueAggregate]] +=== AggregationsSimpleValueAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsSimpleValueAggregate extends <<AggregationsSingleMetricAggregateBase>> {} +---- + + + +[discrete] +[[AggregationsSingleBucketAggregateBase]] +=== AggregationsSingleBucketAggregateBase + +[source,ts,subs=+macros] +---- +interface AggregationsSingleBucketAggregateBase extends <<AggregationsAggregateBase>> { + doc_count: <<long>> +} +---- + + + +[discrete] +[[AggregationsSingleMetricAggregateBase]] +=== AggregationsSingleMetricAggregateBase + +[source,ts,subs=+macros] +---- +interface AggregationsSingleMetricAggregateBase extends <<AggregationsAggregateBase>> { + pass:[/**] @property value The metric value. A missing value generally means that there was no data to aggregate, unless specified otherwise. 
*/ + value: <<double>> | null + value_as_string?: string +} +---- + + + +[discrete] +[[AggregationsStandardDeviationBounds]] +=== AggregationsStandardDeviationBounds + +[source,ts,subs=+macros] +---- +interface AggregationsStandardDeviationBounds { + upper: <<double>> | null + lower: <<double>> | null + upper_population: <<double>> | null + lower_population: <<double>> | null + upper_sampling: <<double>> | null + lower_sampling: <<double>> | null +} +---- + + + +[discrete] +[[AggregationsStandardDeviationBoundsAsString]] +=== AggregationsStandardDeviationBoundsAsString + +[source,ts,subs=+macros] +---- +interface AggregationsStandardDeviationBoundsAsString { + upper: string + lower: string + upper_population: string + lower_population: string + upper_sampling: string + lower_sampling: string +} +---- + + + +[discrete] +[[AggregationsStatsAggregate]] +=== AggregationsStatsAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsStatsAggregate extends <<AggregationsAggregateBase>> { + count: <<long>> + min: <<double>> | null + max: <<double>> | null + avg: <<double>> | null + sum: <<double>> + min_as_string?: string + max_as_string?: string + avg_as_string?: string + sum_as_string?: string +} +---- + + + +[discrete] +[[AggregationsStatsAggregation]] +=== AggregationsStatsAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsStatsAggregation extends <<AggregationsFormatMetricAggregationBase>> {} +---- + + + +[discrete] +[[AggregationsStatsBucketAggregate]] +=== AggregationsStatsBucketAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsStatsBucketAggregate extends <<AggregationsStatsAggregate>> {} +---- + + + +[discrete] +[[AggregationsStatsBucketAggregation]] +=== AggregationsStatsBucketAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsStatsBucketAggregation extends <<AggregationsPipelineAggregationBase>> {} +---- + + + +[discrete] +[[AggregationsStringRareTermsAggregate]] +=== AggregationsStringRareTermsAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsStringRareTermsAggregate extends <<AggregationsMultiBucketAggregateBase>><<<AggregationsStringRareTermsBucket>>> {} +---- + + + +[discrete] +[[AggregationsStringRareTermsBucket]] +=== AggregationsStringRareTermsBucket + +[source,ts,subs=+macros] +---- +interface AggregationsStringRareTermsBucketKeys extends <<AggregationsMultiBucketBase>> { + key: string +} +type AggregationsStringRareTermsBucket = AggregationsStringRareTermsBucketKeys + & { [property: string]: <<AggregationsAggregate>> | string | <<long>> } +---- + + + +[discrete] +[[AggregationsStringStatsAggregate]] +=== AggregationsStringStatsAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsStringStatsAggregate extends <<AggregationsAggregateBase>> { + count: <<long>> + min_length: <<integer>> | null + max_length: <<integer>> | null + avg_length: <<double>> | null + entropy: <<double>> | null + distribution?: Record<string, <<double>>> | null + min_length_as_string?: string + max_length_as_string?: string + avg_length_as_string?: string +} +---- + + + +[discrete] +[[AggregationsStringStatsAggregation]] +=== AggregationsStringStatsAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsStringStatsAggregation extends <<AggregationsMetricAggregationBase>> { + pass:[/**] @property show_distribution Shows the probability distribution for all characters. 
*/ + show_distribution?: boolean +} +---- + + + +[discrete] +[[AggregationsStringTermsAggregate]] +=== AggregationsStringTermsAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsStringTermsAggregate extends <<AggregationsTermsAggregateBase>><<<AggregationsStringTermsBucket>>> {} +---- + + + +[discrete] +[[AggregationsStringTermsBucket]] +=== AggregationsStringTermsBucket + +[source,ts,subs=+macros] +---- +interface AggregationsStringTermsBucketKeys extends <<AggregationsTermsBucketBase>> { + key: <<FieldValue>> +} +type AggregationsStringTermsBucket = AggregationsStringTermsBucketKeys + & { [property: string]: <<AggregationsAggregate>> | <<FieldValue>> | <<long>> } +---- + + + +[discrete] +[[AggregationsSumAggregate]] +=== AggregationsSumAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsSumAggregate extends <<AggregationsSingleMetricAggregateBase>> {} +---- + + + +[discrete] +[[AggregationsSumAggregation]] +=== AggregationsSumAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsSumAggregation extends <<AggregationsFormatMetricAggregationBase>> {} +---- + + + +[discrete] +[[AggregationsSumBucketAggregation]] +=== AggregationsSumBucketAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsSumBucketAggregation extends <<AggregationsPipelineAggregationBase>> {} +---- + + + +[discrete] +[[AggregationsTDigest]] +=== AggregationsTDigest + +[source,ts,subs=+macros] +---- +interface AggregationsTDigest { + pass:[/**] @property compression Limits the maximum number of nodes used by the underlying TDigest algorithm to `20 * compression`, enabling control of memory usage and approximation error. */ + compression?: <<integer>> +} +---- + + + +[discrete] +[[AggregationsTDigestPercentileRanksAggregate]] +=== AggregationsTDigestPercentileRanksAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsTDigestPercentileRanksAggregate extends <<AggregationsPercentilesAggregateBase>> {} +---- + + + +[discrete] +[[AggregationsTDigestPercentilesAggregate]] +=== AggregationsTDigestPercentilesAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsTDigestPercentilesAggregate extends <<AggregationsPercentilesAggregateBase>> {} +---- + + + +[discrete] +[[AggregationsTTestAggregate]] +=== AggregationsTTestAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsTTestAggregate extends <<AggregationsAggregateBase>> { + value: <<double>> | null + value_as_string?: string +} +---- + + + +[discrete] +[[AggregationsTTestAggregation]] +=== AggregationsTTestAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsTTestAggregation { + pass:[/**] @property a Test population A. */ + a?: <<AggregationsTestPopulation>> + pass:[/**] @property b Test population B. */ + b?: <<AggregationsTestPopulation>> + pass:[/**] @property type The type of test. 
*/ + type?: <<AggregationsTTestType>> +} +---- + + + +[discrete] +[[AggregationsTTestType]] +=== AggregationsTTestType + +[source,ts,subs=+macros] +---- +type AggregationsTTestType = 'paired' | 'homoscedastic' | 'heteroscedastic' +---- + + + +[discrete] +[[AggregationsTermsAggregateBase]] +=== AggregationsTermsAggregateBase + +[source,ts,subs=+macros] +---- +interface AggregationsTermsAggregateBase<TBucket = unknown> extends <<AggregationsMultiBucketAggregateBase>><TBucket> { + doc_count_error_upper_bound?: <<long>> + sum_other_doc_count?: <<long>> +} +---- + + + +[discrete] +[[AggregationsTermsAggregation]] +=== AggregationsTermsAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsTermsAggregation extends <<AggregationsBucketAggregationBase>> { + pass:[/**] @property collect_mode Determines how child aggregations should be calculated: breadth-first or depth-first. */ + collect_mode?: <<AggregationsTermsAggregationCollectMode>> + pass:[/**] @property exclude Values to exclude. Accepts regular expressions and partitions. */ + exclude?: <<AggregationsTermsExclude>> + pass:[/**] @property execution_hint Determines whether the aggregation will use field values directly or global ordinals. */ + execution_hint?: <<AggregationsTermsAggregationExecutionHint>> + pass:[/**] @property field The field from which to return terms. */ + field?: <<Field>> + pass:[/**] @property include Values to include. Accepts regular expressions and partitions. */ + include?: <<AggregationsTermsInclude>> + pass:[/**] @property min_doc_count Only return values that are found in more than `min_doc_count` hits. */ + min_doc_count?: <<integer>> + pass:[/**] @property missing The value to apply to documents that do not have a value. By default, documents without a value are ignored. */ + missing?: <<AggregationsMissing>> + missing_order?: <<AggregationsMissingOrder>> + missing_bucket?: boolean + pass:[/**] @property value_type Coerces unmapped fields into the specified type. */ + value_type?: string + pass:[/**] @property order Specifies the sort order of the buckets. Defaults to sorting by descending document count. */ + order?: <<AggregationsAggregateOrder>> + script?: <<Script>> | string + pass:[/**] @property shard_min_doc_count Regulates the certainty a shard has if the term should actually be added to the candidate list or not with respect to the `min_doc_count`. Terms will only be considered if their local shard frequency within the set is higher than the `shard_min_doc_count`. */ + shard_min_doc_count?: <<long>> + pass:[/**] @property shard_size The number of candidate terms produced by each shard. By default, `shard_size` will be automatically estimated based on the number of shards and the `size` parameter. */ + shard_size?: <<integer>> + pass:[/**] @property show_term_doc_count_error Set to `true` to return the `doc_count_error_upper_bound`, which is an upper bound to the error on the `doc_count` returned by each shard. */ + show_term_doc_count_error?: boolean + pass:[/**] @property size The number of buckets returned out of the overall terms list.
*/ + size?: <<integer>> + format?: string +} +---- + + + +[discrete] +[[AggregationsTermsAggregationCollectMode]] +=== AggregationsTermsAggregationCollectMode + +[source,ts,subs=+macros] +---- +type AggregationsTermsAggregationCollectMode = 'depth_first' | 'breadth_first' +---- + + + +[discrete] +[[AggregationsTermsAggregationExecutionHint]] +=== AggregationsTermsAggregationExecutionHint + +[source,ts,subs=+macros] +---- +type AggregationsTermsAggregationExecutionHint = 'map' | 'global_ordinals' | 'global_ordinals_hash' | 'global_ordinals_low_cardinality' +---- + + + +[discrete] +[[AggregationsTermsBucketBase]] +=== AggregationsTermsBucketBase + +[source,ts,subs=+macros] +---- +interface AggregationsTermsBucketBase extends <<AggregationsMultiBucketBase>> { + doc_count_error_upper_bound?: <<long>> +} +---- + + + +[discrete] +[[AggregationsTermsExclude]] +=== AggregationsTermsExclude + +[source,ts,subs=+macros] +---- +type AggregationsTermsExclude = string | string[] +---- + + + +[discrete] +[[AggregationsTermsInclude]] +=== AggregationsTermsInclude + +[source,ts,subs=+macros] +---- +type AggregationsTermsInclude = string | string[] | <<AggregationsTermsPartition>> +---- + + + +[discrete] +[[AggregationsTermsPartition]] +=== AggregationsTermsPartition + +[source,ts,subs=+macros] +---- +interface AggregationsTermsPartition { + pass:[/**] @property num_partitions The number of partitions. */ + num_partitions: <<long>> + pass:[/**] @property partition The partition number for this request. */ + partition: <<long>> +} +---- + + + +[discrete] +[[AggregationsTestPopulation]] +=== AggregationsTestPopulation + +[source,ts,subs=+macros] +---- +interface AggregationsTestPopulation { + pass:[/**] @property field The field to aggregate. */ + field: <<Field>> + script?: <<Script>> | string + pass:[/**] @property filter A filter used to define a set of records to run unpaired t-test on. */ + filter?: <<QueryDslQueryContainer>> +} +---- + + + +[discrete] +[[AggregationsTimeSeriesAggregate]] +=== AggregationsTimeSeriesAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsTimeSeriesAggregate extends <<AggregationsMultiBucketAggregateBase>><<<AggregationsTimeSeriesBucket>>> {} +---- + + + +[discrete] +[[AggregationsTimeSeriesAggregation]] +=== AggregationsTimeSeriesAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsTimeSeriesAggregation extends <<AggregationsBucketAggregationBase>> { + pass:[/**] @property size The maximum number of results to return. */ + size?: <<integer>> + pass:[/**] @property keyed Set to `true` to associate a unique string key with each bucket and return the buckets as a hash rather than an array.
*/ + keyed?: boolean +} +---- + + + +[discrete] +[[AggregationsTimeSeriesBucket]] +=== AggregationsTimeSeriesBucket + +[source,ts,subs=+macros] +---- +interface AggregationsTimeSeriesBucketKeys extends <<AggregationsMultiBucketBase>> { + key: Record<<<Field>>, <<FieldValue>>> +} +type AggregationsTimeSeriesBucket = AggregationsTimeSeriesBucketKeys + & { [property: string]: <<AggregationsAggregate>> | Record<<<Field>>, <<FieldValue>>> | <<long>> } +---- + + + +[discrete] +[[AggregationsTopHitsAggregate]] +=== AggregationsTopHitsAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsTopHitsAggregate extends <<AggregationsAggregateBase>> { + hits: <<SearchHitsMetadata>><any> +} +---- + + + +[discrete] +[[AggregationsTopHitsAggregation]] +=== AggregationsTopHitsAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsTopHitsAggregation extends <<AggregationsMetricAggregationBase>> { + pass:[/**] @property docvalue_fields <<Fields>> for which to return doc values. */ + docvalue_fields?: (<<QueryDslFieldAndFormat>> | <<Field>>)[] + pass:[/**] @property explain If `true`, returns detailed information about score computation as part of a hit. */ + explain?: boolean + pass:[/**] @property fields Array of wildcard (*) patterns. The request returns values for field names matching these patterns in the hits.fields property of the response. */ + fields?: (<<QueryDslFieldAndFormat>> | <<Field>>)[] + pass:[/**] @property from Starting document offset. */ + from?: <<integer>> + pass:[/**] @property highlight Specifies the highlighter to use for retrieving highlighted snippets from one or more fields in the search results. */ + highlight?: <<SearchHighlight>> + pass:[/**] @property script_fields Returns the result of one or more script evaluations for each hit. */ + script_fields?: Record<string, <<ScriptField>>> + pass:[/**] @property size The maximum number of top matching hits to return per bucket. */ + size?: <<integer>> + pass:[/**] @property sort <<Sort>> order of the top matching hits. By default, the hits are sorted by the score of the main query. */ + sort?: <<Sort>> + pass:[/**] @property _source Selects the fields of the source that are returned. */ + _source?: <<SearchSourceConfig>> + pass:[/**] @property stored_fields Returns values for the specified stored fields (fields that use the `store` mapping option). */ + stored_fields?: <<Fields>> + pass:[/**] @property track_scores If `true`, calculates and returns document scores, even if the scores are not used for sorting. */ + track_scores?: boolean + pass:[/**] @property version If `true`, returns document version as part of a hit. */ + version?: boolean + pass:[/**] @property seq_no_primary_term If `true`, returns sequence number and primary term of the last modification of each hit. 
*/ + seq_no_primary_term?: boolean +} +---- + + + +[discrete] +[[AggregationsTopMetrics]] +=== AggregationsTopMetrics + +[source,ts,subs=+macros] +---- +interface AggregationsTopMetrics { + sort: (<<FieldValue>> | null)[] + metrics: Record<string, <<FieldValue>> | null> +} +---- + + + +[discrete] +[[AggregationsTopMetricsAggregate]] +=== AggregationsTopMetricsAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsTopMetricsAggregate extends <<AggregationsAggregateBase>> { + top: <<AggregationsTopMetrics>>[] +} +---- + + + +[discrete] +[[AggregationsTopMetricsAggregation]] +=== AggregationsTopMetricsAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsTopMetricsAggregation extends <<AggregationsMetricAggregationBase>> { + pass:[/**] @property metrics The fields of the top document to return. */ + metrics?: <<AggregationsTopMetricsValue>> | <<AggregationsTopMetricsValue>>[] + pass:[/**] @property size The number of top documents from which to return metrics. */ + size?: <<integer>> + pass:[/**] @property sort The sort order of the documents. */ + sort?: <<Sort>> +} +---- + + + +[discrete] +[[AggregationsTopMetricsValue]] +=== AggregationsTopMetricsValue + +[source,ts,subs=+macros] +---- +interface AggregationsTopMetricsValue { + pass:[/**] @property field A field to return as a metric. */ + field: <<Field>> +} +---- + + + +[discrete] +[[AggregationsUnmappedRareTermsAggregate]] +=== AggregationsUnmappedRareTermsAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsUnmappedRareTermsAggregate extends <<AggregationsMultiBucketAggregateBase>><void> {} +---- + + + +[discrete] +[[AggregationsUnmappedSamplerAggregate]] +=== AggregationsUnmappedSamplerAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsUnmappedSamplerAggregateKeys extends <<AggregationsSingleBucketAggregateBase>> {} +type AggregationsUnmappedSamplerAggregate = AggregationsUnmappedSamplerAggregateKeys + & { [property: string]: <<AggregationsAggregate>> | <<long>> | <<Metadata>> } +---- + + + +[discrete] +[[AggregationsUnmappedSignificantTermsAggregate]] +=== AggregationsUnmappedSignificantTermsAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsUnmappedSignificantTermsAggregate extends <<AggregationsSignificantTermsAggregateBase>><void> {} +---- + + + +[discrete] +[[AggregationsUnmappedTermsAggregate]] +=== AggregationsUnmappedTermsAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsUnmappedTermsAggregate extends <<AggregationsTermsAggregateBase>><void> {} +---- + + + +[discrete] +[[AggregationsValueCountAggregate]] +=== AggregationsValueCountAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsValueCountAggregate extends <<AggregationsSingleMetricAggregateBase>> {} +---- + + + +[discrete] +[[AggregationsValueCountAggregation]] +=== AggregationsValueCountAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsValueCountAggregation extends <<AggregationsFormattableMetricAggregation>> {} +---- + + + +[discrete] +[[AggregationsValueType]] +=== AggregationsValueType + +[source,ts,subs=+macros] +---- +type AggregationsValueType = 'string' | '<<long>>' | '<<double>>' | 'number' | 'date' | 'date_nanos' | 'ip' | 'numeric' | 'geo_point' | 'boolean' +---- + + + +[discrete] +[[AggregationsVariableWidthHistogramAggregate]] +=== AggregationsVariableWidthHistogramAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsVariableWidthHistogramAggregate extends 
<<AggregationsMultiBucketAggregateBase>><<<AggregationsVariableWidthHistogramBucket>>> {} +---- + + + +[discrete] +[[AggregationsVariableWidthHistogramAggregation]] +=== AggregationsVariableWidthHistogramAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsVariableWidthHistogramAggregation { + pass:[/**] @property field The name of the field. */ + field?: <<Field>> + pass:[/**] @property buckets The target number of buckets. */ + buckets?: <<integer>> + pass:[/**] @property shard_size The number of buckets that the coordinating node will request from each shard. Defaults to `buckets * 50`. */ + shard_size?: <<integer>> + pass:[/**] @property initial_buffer Specifies the number of individual documents that will be stored in memory on a shard before the initial bucketing algorithm is run. Defaults to `min(10 * shard_size, 50000)`. */ + initial_buffer?: <<integer>> + script?: <<Script>> | string +} +---- + + + +[discrete] +[[AggregationsVariableWidthHistogramBucket]] +=== AggregationsVariableWidthHistogramBucket + +[source,ts,subs=+macros] +---- +interface AggregationsVariableWidthHistogramBucketKeys extends <<AggregationsMultiBucketBase>> { + min: <<double>> + key: <<double>> + max: <<double>> + min_as_string?: string + key_as_string?: string + max_as_string?: string +} +type AggregationsVariableWidthHistogramBucket = AggregationsVariableWidthHistogramBucketKeys + & { [property: string]: <<AggregationsAggregate>> | <<double>> | string | <<long>> } +---- + + + +[discrete] +[[AggregationsWeightedAverageAggregation]] +=== AggregationsWeightedAverageAggregation + +[source,ts,subs=+macros] +---- +interface AggregationsWeightedAverageAggregation { + pass:[/**] @property format A numeric response formatter. */ + format?: string + pass:[/**] @property value Configuration for the field that provides the values. */ + value?: <<AggregationsWeightedAverageValue>> + value_type?: <<AggregationsValueType>> + pass:[/**] @property weight Configuration for the field or script that provides the weights. */ + weight?: <<AggregationsWeightedAverageValue>> +} +---- + + + +[discrete] +[[AggregationsWeightedAverageValue]] +=== AggregationsWeightedAverageValue + +[source,ts,subs=+macros] +---- +interface AggregationsWeightedAverageValue { + pass:[/**] @property field The field from which to extract the values or weights. */ + field?: <<Field>> + pass:[/**] @property missing A value or weight to use if the field is missing. 
*/ + missing?: <<double>> + script?: <<Script>> | string +} +---- + + + +[discrete] +[[AggregationsWeightedAvgAggregate]] +=== AggregationsWeightedAvgAggregate + +[source,ts,subs=+macros] +---- +interface AggregationsWeightedAvgAggregate extends <<AggregationsSingleMetricAggregateBase>> {} +---- + + + +[discrete] +[[AnalysisAnalyzer]] +=== AnalysisAnalyzer + +[source,ts,subs=+macros] +---- +type AnalysisAnalyzer = <<AnalysisCustomAnalyzer>> | <<AnalysisFingerprintAnalyzer>> | <<AnalysisKeywordAnalyzer>> | <<AnalysisLanguageAnalyzer>> | <<AnalysisNoriAnalyzer>> | <<AnalysisPatternAnalyzer>> | <<AnalysisSimpleAnalyzer>> | <<AnalysisStandardAnalyzer>> | <<AnalysisStopAnalyzer>> | <<AnalysisWhitespaceAnalyzer>> | <<AnalysisIcuAnalyzer>> | <<AnalysisKuromojiAnalyzer>> | <<AnalysisSnowballAnalyzer>> | <<AnalysisArabicAnalyzer>> | <<AnalysisArmenianAnalyzer>> | <<AnalysisBasqueAnalyzer>> | <<AnalysisBengaliAnalyzer>> | <<AnalysisBrazilianAnalyzer>> | <<AnalysisBulgarianAnalyzer>> | <<AnalysisCatalanAnalyzer>> | <<AnalysisChineseAnalyzer>> | <<AnalysisCjkAnalyzer>> | <<AnalysisCzechAnalyzer>> | <<AnalysisDanishAnalyzer>> | <<AnalysisDutchAnalyzer>> | <<AnalysisEnglishAnalyzer>> | <<AnalysisEstonianAnalyzer>> | <<AnalysisFinnishAnalyzer>> | <<AnalysisFrenchAnalyzer>> | <<AnalysisGalicianAnalyzer>> | <<AnalysisGermanAnalyzer>> | <<AnalysisGreekAnalyzer>> | <<AnalysisHindiAnalyzer>> | <<AnalysisHungarianAnalyzer>> | <<AnalysisIndonesianAnalyzer>> | <<AnalysisIrishAnalyzer>> | <<AnalysisItalianAnalyzer>> | <<AnalysisLatvianAnalyzer>> | <<AnalysisLithuanianAnalyzer>> | <<AnalysisNorwegianAnalyzer>> | <<AnalysisPersianAnalyzer>> | <<AnalysisPortugueseAnalyzer>> | <<AnalysisRomanianAnalyzer>> | <<AnalysisRussianAnalyzer>> | <<AnalysisSerbianAnalyzer>> | <<AnalysisSoraniAnalyzer>> | <<AnalysisSpanishAnalyzer>> | <<AnalysisSwedishAnalyzer>> | <<AnalysisTurkishAnalyzer>> | <<AnalysisThaiAnalyzer>> +---- + + + +[discrete] +[[AnalysisArabicAnalyzer]] +=== AnalysisArabicAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisArabicAnalyzer { + type: 'arabic' + stopwords?: <<AnalysisStopWords>> + stopwords_path?: string + stem_exclusion?: string[] +} +---- + + + +[discrete] +[[AnalysisArmenianAnalyzer]] +=== AnalysisArmenianAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisArmenianAnalyzer { + type: 'armenian' + stopwords?: <<AnalysisStopWords>> + stopwords_path?: string + stem_exclusion?: string[] +} +---- + + + +[discrete] +[[AnalysisAsciiFoldingTokenFilter]] +=== AnalysisAsciiFoldingTokenFilter + +[source,ts,subs=+macros] +---- +interface AnalysisAsciiFoldingTokenFilter extends <<AnalysisTokenFilterBase>> { + type: 'asciifolding' + preserve_original?: <<SpecUtilsStringified>><boolean> +} +---- + + + +[discrete] +[[AnalysisBasqueAnalyzer]] +=== AnalysisBasqueAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisBasqueAnalyzer { + type: 'basque' + stopwords?: <<AnalysisStopWords>> + stopwords_path?: string + stem_exclusion?: string[] +} +---- + + + +[discrete] +[[AnalysisBengaliAnalyzer]] +=== AnalysisBengaliAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisBengaliAnalyzer { + type: 'bengali' + stopwords?: <<AnalysisStopWords>> + stopwords_path?: string + stem_exclusion?: string[] +} +---- + + + +[discrete] +[[AnalysisBrazilianAnalyzer]] +=== AnalysisBrazilianAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisBrazilianAnalyzer { + type: 'brazilian' + stopwords?: <<AnalysisStopWords>> + stopwords_path?: string +} +---- + + + +[discrete] 
+[[AnalysisBulgarianAnalyzer]] +=== AnalysisBulgarianAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisBulgarianAnalyzer { + type: 'bulgarian' + stopwords?: <<AnalysisStopWords>> + stopwords_path?: string + stem_exclusion?: string[] +} +---- + + + +[discrete] +[[AnalysisCatalanAnalyzer]] +=== AnalysisCatalanAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisCatalanAnalyzer { + type: 'catalan' + stopwords?: <<AnalysisStopWords>> + stopwords_path?: string + stem_exclusion?: string[] +} +---- + + + +[discrete] +[[AnalysisCharFilter]] +=== AnalysisCharFilter + +[source,ts,subs=+macros] +---- +type AnalysisCharFilter = string | <<AnalysisCharFilterDefinition>> +---- + + + +[discrete] +[[AnalysisCharFilterBase]] +=== AnalysisCharFilterBase + +[source,ts,subs=+macros] +---- +interface AnalysisCharFilterBase { + version?: <<VersionString>> +} +---- + + + +[discrete] +[[AnalysisCharFilterDefinition]] +=== AnalysisCharFilterDefinition + +[source,ts,subs=+macros] +---- +type AnalysisCharFilterDefinition = <<AnalysisHtmlStripCharFilter>> | <<AnalysisMappingCharFilter>> | <<AnalysisPatternReplaceCharFilter>> | <<AnalysisIcuNormalizationCharFilter>> | <<AnalysisKuromojiIterationMarkCharFilter>> +---- + + + +[discrete] +[[AnalysisCharGroupTokenizer]] +=== AnalysisCharGroupTokenizer + +[source,ts,subs=+macros] +---- +interface AnalysisCharGroupTokenizer extends <<AnalysisTokenizerBase>> { + type: 'char_group' + tokenize_on_chars: string[] + max_token_length?: <<integer>> +} +---- + + + +[discrete] +[[AnalysisChineseAnalyzer]] +=== AnalysisChineseAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisChineseAnalyzer { + type: 'chinese' + stopwords?: <<AnalysisStopWords>> + stopwords_path?: string +} +---- + + + +[discrete] +[[AnalysisCjkAnalyzer]] +=== AnalysisCjkAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisCjkAnalyzer { + type: 'cjk' + stopwords?: <<AnalysisStopWords>> + stopwords_path?: string +} +---- + + + +[discrete] +[[AnalysisClassicTokenizer]] +=== AnalysisClassicTokenizer + +[source,ts,subs=+macros] +---- +interface AnalysisClassicTokenizer extends <<AnalysisTokenizerBase>> { + type: 'classic' + max_token_length?: <<integer>> +} +---- + + + +[discrete] +[[AnalysisCommonGramsTokenFilter]] +=== AnalysisCommonGramsTokenFilter + +[source,ts,subs=+macros] +---- +interface AnalysisCommonGramsTokenFilter extends <<AnalysisTokenFilterBase>> { + type: 'common_grams' + common_words?: string[] + common_words_path?: string + ignore_case?: boolean + query_mode?: boolean +} +---- + + + +[discrete] +[[AnalysisCompoundWordTokenFilterBase]] +=== AnalysisCompoundWordTokenFilterBase + +[source,ts,subs=+macros] +---- +interface AnalysisCompoundWordTokenFilterBase extends <<AnalysisTokenFilterBase>> { + hyphenation_patterns_path?: string + max_subword_size?: <<integer>> + min_subword_size?: <<integer>> + min_word_size?: <<integer>> + only_longest_match?: boolean + word_list?: string[] + word_list_path?: string +} +---- + + + +[discrete] +[[AnalysisConditionTokenFilter]] +=== AnalysisConditionTokenFilter + +[source,ts,subs=+macros] +---- +interface AnalysisConditionTokenFilter extends <<AnalysisTokenFilterBase>> { + type: 'condition' + filter: string[] + script: <<Script>> | string +} +---- + + + +[discrete] +[[AnalysisCustomAnalyzer]] +=== AnalysisCustomAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisCustomAnalyzer { + type: 'custom' + char_filter?: string | string[] + filter?: string | string[] + position_increment_gap?: <<integer>> + position_offset_gap?: 
<<integer>> + tokenizer: string +} +---- + + + +[discrete] +[[AnalysisCustomNormalizer]] +=== AnalysisCustomNormalizer + +[source,ts,subs=+macros] +---- +interface AnalysisCustomNormalizer { + type: 'custom' + char_filter?: string[] + filter?: string[] +} +---- + + + +[discrete] +[[AnalysisCzechAnalyzer]] +=== AnalysisCzechAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisCzechAnalyzer { + type: 'czech' + stopwords?: <<AnalysisStopWords>> + stopwords_path?: string + stem_exclusion?: string[] +} +---- + + + +[discrete] +[[AnalysisDanishAnalyzer]] +=== AnalysisDanishAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisDanishAnalyzer { + type: 'danish' + stopwords?: <<AnalysisStopWords>> + stopwords_path?: string +} +---- + + + +[discrete] +[[AnalysisDelimitedPayloadEncoding]] +=== AnalysisDelimitedPayloadEncoding + +[source,ts,subs=+macros] +---- +type AnalysisDelimitedPayloadEncoding = 'int' | '<<float>>' | 'identity' +---- + + + +[discrete] +[[AnalysisDelimitedPayloadTokenFilter]] +=== AnalysisDelimitedPayloadTokenFilter + +[source,ts,subs=+macros] +---- +interface AnalysisDelimitedPayloadTokenFilter extends <<AnalysisTokenFilterBase>> { + type: 'delimited_payload' + delimiter?: string + encoding?: <<AnalysisDelimitedPayloadEncoding>> +} +---- + + + +[discrete] +[[AnalysisDictionaryDecompounderTokenFilter]] +=== AnalysisDictionaryDecompounderTokenFilter + +[source,ts,subs=+macros] +---- +interface AnalysisDictionaryDecompounderTokenFilter extends <<AnalysisCompoundWordTokenFilterBase>> { + type: 'dictionary_decompounder' +} +---- + + + +[discrete] +[[AnalysisDutchAnalyzer]] +=== AnalysisDutchAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisDutchAnalyzer { + type: 'dutch' + stopwords?: <<AnalysisStopWords>> + stopwords_path?: string + stem_exclusion?: string[] +} +---- + + + +[discrete] +[[AnalysisEdgeNGramSide]] +=== AnalysisEdgeNGramSide + +[source,ts,subs=+macros] +---- +type AnalysisEdgeNGramSide = 'front' | 'back' +---- + + + +[discrete] +[[AnalysisEdgeNGramTokenFilter]] +=== AnalysisEdgeNGramTokenFilter + +[source,ts,subs=+macros] +---- +interface AnalysisEdgeNGramTokenFilter extends <<AnalysisTokenFilterBase>> { + type: 'edge_ngram' + max_gram?: <<integer>> + min_gram?: <<integer>> + side?: <<AnalysisEdgeNGramSide>> + preserve_original?: <<SpecUtilsStringified>><boolean> +} +---- + + + +[discrete] +[[AnalysisEdgeNGramTokenizer]] +=== AnalysisEdgeNGramTokenizer + +[source,ts,subs=+macros] +---- +interface AnalysisEdgeNGramTokenizer extends <<AnalysisTokenizerBase>> { + type: 'edge_ngram' + custom_token_chars?: string + max_gram?: <<integer>> + min_gram?: <<integer>> + token_chars?: <<AnalysisTokenChar>>[] +} +---- + + + +[discrete] +[[AnalysisElisionTokenFilter]] +=== AnalysisElisionTokenFilter + +[source,ts,subs=+macros] +---- +interface AnalysisElisionTokenFilter extends <<AnalysisTokenFilterBase>> { + type: 'elision' + articles?: string[] + articles_path?: string + articles_case?: <<SpecUtilsStringified>><boolean> +} +---- + + + +[discrete] +[[AnalysisEnglishAnalyzer]] +=== AnalysisEnglishAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisEnglishAnalyzer { + type: 'english' + stopwords?: <<AnalysisStopWords>> + stopwords_path?: string + stem_exclusion?: string[] +} +---- + + + +[discrete] +[[AnalysisEstonianAnalyzer]] +=== AnalysisEstonianAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisEstonianAnalyzer { + type: 'estonian' + stopwords?: <<AnalysisStopWords>> + stopwords_path?: string +} +---- + + + +[discrete] 
+[[AnalysisFingerprintAnalyzer]] +=== AnalysisFingerprintAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisFingerprintAnalyzer { + type: 'fingerprint' + version?: <<VersionString>> + max_output_size: <<integer>> + preserve_original: boolean + separator: string + stopwords?: <<AnalysisStopWords>> + stopwords_path?: string +} +---- + + + +[discrete] +[[AnalysisFingerprintTokenFilter]] +=== AnalysisFingerprintTokenFilter + +[source,ts,subs=+macros] +---- +interface AnalysisFingerprintTokenFilter extends <<AnalysisTokenFilterBase>> { + type: 'fingerprint' + max_output_size?: <<integer>> + separator?: string +} +---- + + + +[discrete] +[[AnalysisFinnishAnalyzer]] +=== AnalysisFinnishAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisFinnishAnalyzer { + type: 'finnish' + stopwords?: <<AnalysisStopWords>> + stopwords_path?: string + stem_exclusion?: string[] +} +---- + + + +[discrete] +[[AnalysisFrenchAnalyzer]] +=== AnalysisFrenchAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisFrenchAnalyzer { + type: 'french' + stopwords?: <<AnalysisStopWords>> + stopwords_path?: string + stem_exclusion?: string[] +} +---- + + + +[discrete] +[[AnalysisGalicianAnalyzer]] +=== AnalysisGalicianAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisGalicianAnalyzer { + type: 'galician' + stopwords?: <<AnalysisStopWords>> + stopwords_path?: string + stem_exclusion?: string[] +} +---- + + + +[discrete] +[[AnalysisGermanAnalyzer]] +=== AnalysisGermanAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisGermanAnalyzer { + type: 'german' + stopwords?: <<AnalysisStopWords>> + stopwords_path?: string + stem_exclusion?: string[] +} +---- + + + +[discrete] +[[AnalysisGreekAnalyzer]] +=== AnalysisGreekAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisGreekAnalyzer { + type: 'greek' + stopwords?: <<AnalysisStopWords>> + stopwords_path?: string +} +---- + + + +[discrete] +[[AnalysisHindiAnalyzer]] +=== AnalysisHindiAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisHindiAnalyzer { + type: 'hindi' + stopwords?: <<AnalysisStopWords>> + stopwords_path?: string + stem_exclusion?: string[] +} +---- + + + +[discrete] +[[AnalysisHtmlStripCharFilter]] +=== AnalysisHtmlStripCharFilter + +[source,ts,subs=+macros] +---- +interface AnalysisHtmlStripCharFilter extends <<AnalysisCharFilterBase>> { + type: 'html_strip' + escaped_tags?: string[] +} +---- + + + +[discrete] +[[AnalysisHungarianAnalyzer]] +=== AnalysisHungarianAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisHungarianAnalyzer { + type: 'hungarian' + stopwords?: <<AnalysisStopWords>> + stopwords_path?: string + stem_exclusion?: string[] +} +---- + + + +[discrete] +[[AnalysisHunspellTokenFilter]] +=== AnalysisHunspellTokenFilter + +[source,ts,subs=+macros] +---- +interface AnalysisHunspellTokenFilter extends <<AnalysisTokenFilterBase>> { + type: 'hunspell' + dedup?: boolean + dictionary?: string + locale: string + longest_only?: boolean +} +---- + + + +[discrete] +[[AnalysisHyphenationDecompounderTokenFilter]] +=== AnalysisHyphenationDecompounderTokenFilter + +[source,ts,subs=+macros] +---- +interface AnalysisHyphenationDecompounderTokenFilter extends <<AnalysisCompoundWordTokenFilterBase>> { + type: 'hyphenation_decompounder' +} +---- + + + +[discrete] +[[AnalysisIcuAnalyzer]] +=== AnalysisIcuAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisIcuAnalyzer { + type: 'icu_analyzer' + method: <<AnalysisIcuNormalizationType>> + mode: <<AnalysisIcuNormalizationMode>> +} +---- + + + 
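Analyzer definitions such as `AnalysisEnglishAnalyzer` and `AnalysisFingerprintAnalyzer` above are normally supplied under `settings.analysis.analyzer` when creating an index. The following is a minimal sketch of that usage, not a prescribed pattern: the node URL, index name, and analyzer names are illustrative only, and `estypes` refers to the type namespace exported by the client package.

[source,ts]
----
import { Client, estypes } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Hypothetical analyzer definitions reusing the shapes documented above.
const englishNoStem: estypes.AnalysisEnglishAnalyzer = {
  type: 'english',
  stem_exclusion: ['organization', 'organizations'] // keep these terms unstemmed
}

const fingerprintDedup: estypes.AnalysisFingerprintAnalyzer = {
  type: 'fingerprint',
  max_output_size: 255,
  preserve_original: false,
  separator: ' '
}

await client.indices.create({
  index: 'my-index',
  settings: {
    analysis: {
      analyzer: {
        english_no_stem: englishNoStem,
        fingerprint_dedup: fingerprintDedup
      }
    }
  }
})
----

Annotating the objects with these interfaces lets the compiler flag a missing required property (for example, `separator` on the fingerprint analyzer) before the request is sent.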
+[discrete] +[[AnalysisIcuCollationAlternate]] +=== AnalysisIcuCollationAlternate + +[source,ts,subs=+macros] +---- +type AnalysisIcuCollationAlternate = 'shifted' | 'non-ignorable' +---- + + + +[discrete] +[[AnalysisIcuCollationCaseFirst]] +=== AnalysisIcuCollationCaseFirst + +[source,ts,subs=+macros] +---- +type AnalysisIcuCollationCaseFirst = 'lower' | 'upper' +---- + + + +[discrete] +[[AnalysisIcuCollationDecomposition]] +=== AnalysisIcuCollationDecomposition + +[source,ts,subs=+macros] +---- +type AnalysisIcuCollationDecomposition = 'no' | 'identical' +---- + + + +[discrete] +[[AnalysisIcuCollationStrength]] +=== AnalysisIcuCollationStrength + +[source,ts,subs=+macros] +---- +type AnalysisIcuCollationStrength = 'primary' | 'secondary' | 'tertiary' | 'quaternary' | 'identical' +---- + + + +[discrete] +[[AnalysisIcuCollationTokenFilter]] +=== AnalysisIcuCollationTokenFilter + +[source,ts,subs=+macros] +---- +interface AnalysisIcuCollationTokenFilter extends <<AnalysisTokenFilterBase>> { + type: 'icu_collation' + alternate?: <<AnalysisIcuCollationAlternate>> + case_first?: <<AnalysisIcuCollationCaseFirst>> + case_level?: boolean + country?: string + decomposition?: <<AnalysisIcuCollationDecomposition>> + hiragana_quaternary_mode?: boolean + language?: string + numeric?: boolean + rules?: string + strength?: <<AnalysisIcuCollationStrength>> + variable_top?: string + variant?: string +} +---- + + + +[discrete] +[[AnalysisIcuFoldingTokenFilter]] +=== AnalysisIcuFoldingTokenFilter + +[source,ts,subs=+macros] +---- +interface AnalysisIcuFoldingTokenFilter extends <<AnalysisTokenFilterBase>> { + type: 'icu_folding' + unicode_set_filter: string +} +---- + + + +[discrete] +[[AnalysisIcuNormalizationCharFilter]] +=== AnalysisIcuNormalizationCharFilter + +[source,ts,subs=+macros] +---- +interface AnalysisIcuNormalizationCharFilter extends <<AnalysisCharFilterBase>> { + type: 'icu_normalizer' + mode?: <<AnalysisIcuNormalizationMode>> + name?: <<AnalysisIcuNormalizationType>> +} +---- + + + +[discrete] +[[AnalysisIcuNormalizationMode]] +=== AnalysisIcuNormalizationMode + +[source,ts,subs=+macros] +---- +type AnalysisIcuNormalizationMode = 'decompose' | 'compose' +---- + + + +[discrete] +[[AnalysisIcuNormalizationTokenFilter]] +=== AnalysisIcuNormalizationTokenFilter + +[source,ts,subs=+macros] +---- +interface AnalysisIcuNormalizationTokenFilter extends <<AnalysisTokenFilterBase>> { + type: 'icu_normalizer' + name: <<AnalysisIcuNormalizationType>> +} +---- + + + +[discrete] +[[AnalysisIcuNormalizationType]] +=== AnalysisIcuNormalizationType + +[source,ts,subs=+macros] +---- +type AnalysisIcuNormalizationType = 'nfc' | 'nfkc' | 'nfkc_cf' +---- + + + +[discrete] +[[AnalysisIcuTokenizer]] +=== AnalysisIcuTokenizer + +[source,ts,subs=+macros] +---- +interface AnalysisIcuTokenizer extends <<AnalysisTokenizerBase>> { + type: 'icu_tokenizer' + rule_files: string +} +---- + + + +[discrete] +[[AnalysisIcuTransformDirection]] +=== AnalysisIcuTransformDirection + +[source,ts,subs=+macros] +---- +type AnalysisIcuTransformDirection = 'forward' | 'reverse' +---- + + + +[discrete] +[[AnalysisIcuTransformTokenFilter]] +=== AnalysisIcuTransformTokenFilter + +[source,ts,subs=+macros] +---- +interface AnalysisIcuTransformTokenFilter extends <<AnalysisTokenFilterBase>> { + type: 'icu_transform' + dir?: <<AnalysisIcuTransformDirection>> + id: string +} +---- + + + +[discrete] +[[AnalysisIndonesianAnalyzer]] +=== AnalysisIndonesianAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisIndonesianAnalyzer { + type: 
'indonesian' + stopwords?: <<AnalysisStopWords>> + stopwords_path?: string + stem_exclusion?: string[] +} +---- + + + +[discrete] +[[AnalysisIrishAnalyzer]] +=== AnalysisIrishAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisIrishAnalyzer { + type: 'irish' + stopwords?: <<AnalysisStopWords>> + stopwords_path?: string + stem_exclusion?: string[] +} +---- + + + +[discrete] +[[AnalysisItalianAnalyzer]] +=== AnalysisItalianAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisItalianAnalyzer { + type: 'italian' + stopwords?: <<AnalysisStopWords>> + stopwords_path?: string + stem_exclusion?: string[] +} +---- + + + +[discrete] +[[AnalysisKStemTokenFilter]] +=== AnalysisKStemTokenFilter + +[source,ts,subs=+macros] +---- +interface AnalysisKStemTokenFilter extends <<AnalysisTokenFilterBase>> { + type: 'kstem' +} +---- + + + +[discrete] +[[AnalysisKeepTypesMode]] +=== AnalysisKeepTypesMode + +[source,ts,subs=+macros] +---- +type AnalysisKeepTypesMode = 'include' | 'exclude' +---- + + + +[discrete] +[[AnalysisKeepTypesTokenFilter]] +=== AnalysisKeepTypesTokenFilter + +[source,ts,subs=+macros] +---- +interface AnalysisKeepTypesTokenFilter extends <<AnalysisTokenFilterBase>> { + type: 'keep_types' + mode?: <<AnalysisKeepTypesMode>> + types?: string[] +} +---- + + + +[discrete] +[[AnalysisKeepWordsTokenFilter]] +=== AnalysisKeepWordsTokenFilter + +[source,ts,subs=+macros] +---- +interface AnalysisKeepWordsTokenFilter extends <<AnalysisTokenFilterBase>> { + type: 'keep' + keep_words?: string[] + keep_words_case?: boolean + keep_words_path?: string +} +---- + + + +[discrete] +[[AnalysisKeywordAnalyzer]] +=== AnalysisKeywordAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisKeywordAnalyzer { + type: 'keyword' + version?: <<VersionString>> +} +---- + + + +[discrete] +[[AnalysisKeywordMarkerTokenFilter]] +=== AnalysisKeywordMarkerTokenFilter + +[source,ts,subs=+macros] +---- +interface AnalysisKeywordMarkerTokenFilter extends <<AnalysisTokenFilterBase>> { + type: 'keyword_marker' + ignore_case?: boolean + keywords?: string | string[] + keywords_path?: string + keywords_pattern?: string +} +---- + + + +[discrete] +[[AnalysisKeywordTokenizer]] +=== AnalysisKeywordTokenizer + +[source,ts,subs=+macros] +---- +interface AnalysisKeywordTokenizer extends <<AnalysisTokenizerBase>> { + type: 'keyword' + buffer_size?: <<integer>> +} +---- + + + +[discrete] +[[AnalysisKuromojiAnalyzer]] +=== AnalysisKuromojiAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisKuromojiAnalyzer { + type: 'kuromoji' + mode: <<AnalysisKuromojiTokenizationMode>> + user_dictionary?: string +} +---- + + + +[discrete] +[[AnalysisKuromojiIterationMarkCharFilter]] +=== AnalysisKuromojiIterationMarkCharFilter + +[source,ts,subs=+macros] +---- +interface AnalysisKuromojiIterationMarkCharFilter extends <<AnalysisCharFilterBase>> { + type: 'kuromoji_iteration_mark' + normalize_kana: boolean + normalize_kanji: boolean +} +---- + + + +[discrete] +[[AnalysisKuromojiPartOfSpeechTokenFilter]] +=== AnalysisKuromojiPartOfSpeechTokenFilter + +[source,ts,subs=+macros] +---- +interface AnalysisKuromojiPartOfSpeechTokenFilter extends <<AnalysisTokenFilterBase>> { + type: 'kuromoji_part_of_speech' + stoptags: string[] +} +---- + + + +[discrete] +[[AnalysisKuromojiReadingFormTokenFilter]] +=== AnalysisKuromojiReadingFormTokenFilter + +[source,ts,subs=+macros] +---- +interface AnalysisKuromojiReadingFormTokenFilter extends <<AnalysisTokenFilterBase>> { + type: 'kuromoji_readingform' + use_romaji: boolean +} +---- + + + 
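Token filter shapes such as `AnalysisKeywordMarkerTokenFilter` and `AnalysisKStemTokenFilter` can be referenced by name from index settings or passed inline to the analyze API for quick experimentation. A hedged sketch of the inline form follows; the node URL, keyword list, and sample text are illustrative only.

[source,ts]
----
import { Client, estypes } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Hypothetical inline filter definition reusing the shape documented above.
const protectBrands: estypes.AnalysisKeywordMarkerTokenFilter = {
  type: 'keyword_marker',
  keywords: ['elasticsearch'] // tokens the stemmer must leave untouched
}

const response = await client.indices.analyze({
  tokenizer: 'standard',
  // AnalysisTokenFilter accepts either a built-in filter name or a full definition.
  filter: ['lowercase', protectBrands, { type: 'kstem' }],
  text: 'Elasticsearch aggregations'
})

console.log(response.tokens?.map(t => t.token))
----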
+[discrete] +[[AnalysisKuromojiStemmerTokenFilter]] +=== AnalysisKuromojiStemmerTokenFilter + +[source,ts,subs=+macros] +---- +interface AnalysisKuromojiStemmerTokenFilter extends <<AnalysisTokenFilterBase>> { + type: 'kuromoji_stemmer' + minimum_length: <<integer>> +} +---- + + + +[discrete] +[[AnalysisKuromojiTokenizationMode]] +=== AnalysisKuromojiTokenizationMode + +[source,ts,subs=+macros] +---- +type AnalysisKuromojiTokenizationMode = 'normal' | 'search' | 'extended' +---- + + + +[discrete] +[[AnalysisKuromojiTokenizer]] +=== AnalysisKuromojiTokenizer + +[source,ts,subs=+macros] +---- +interface AnalysisKuromojiTokenizer extends <<AnalysisTokenizerBase>> { + type: 'kuromoji_tokenizer' + discard_punctuation?: boolean + mode: <<AnalysisKuromojiTokenizationMode>> + nbest_cost?: <<integer>> + nbest_examples?: string + user_dictionary?: string + user_dictionary_rules?: string[] + discard_compound_token?: boolean +} +---- + + + +[discrete] +[[AnalysisLanguage]] +=== AnalysisLanguage + +[source,ts,subs=+macros] +---- +type AnalysisLanguage = 'Arabic' | 'Armenian' | 'Basque' | 'Brazilian' | 'Bulgarian' | 'Catalan' | 'Chinese' | 'Cjk' | 'Czech' | 'Danish' | 'Dutch' | 'English' | 'Estonian' | 'Finnish' | 'French' | 'Galician' | 'German' | 'Greek' | 'Hindi' | 'Hungarian' | 'Indonesian' | 'Irish' | 'Italian' | 'Latvian' | 'Norwegian' | 'Persian' | 'Portuguese' | 'Romanian' | 'Russian' | 'Sorani' | 'Spanish' | 'Swedish' | 'Turkish' | 'Thai' +---- + + + +[discrete] +[[AnalysisLanguageAnalyzer]] +=== AnalysisLanguageAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisLanguageAnalyzer { + type: 'language' + version?: <<VersionString>> + language: <<AnalysisLanguage>> + stem_exclusion: string[] + stopwords?: <<AnalysisStopWords>> + stopwords_path?: string +} +---- + + + +[discrete] +[[AnalysisLatvianAnalyzer]] +=== AnalysisLatvianAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisLatvianAnalyzer { + type: 'latvian' + stopwords?: <<AnalysisStopWords>> + stopwords_path?: string + stem_exclusion?: string[] +} +---- + + + +[discrete] +[[AnalysisLengthTokenFilter]] +=== AnalysisLengthTokenFilter + +[source,ts,subs=+macros] +---- +interface AnalysisLengthTokenFilter extends <<AnalysisTokenFilterBase>> { + type: 'length' + max?: <<integer>> + min?: <<integer>> +} +---- + + + +[discrete] +[[AnalysisLetterTokenizer]] +=== AnalysisLetterTokenizer + +[source,ts,subs=+macros] +---- +interface AnalysisLetterTokenizer extends <<AnalysisTokenizerBase>> { + type: 'letter' +} +---- + + + +[discrete] +[[AnalysisLimitTokenCountTokenFilter]] +=== AnalysisLimitTokenCountTokenFilter + +[source,ts,subs=+macros] +---- +interface AnalysisLimitTokenCountTokenFilter extends <<AnalysisTokenFilterBase>> { + type: 'limit' + consume_all_tokens?: boolean + max_token_count?: <<SpecUtilsStringified>><<<integer>>> +} +---- + + + +[discrete] +[[AnalysisLithuanianAnalyzer]] +=== AnalysisLithuanianAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisLithuanianAnalyzer { + type: 'lithuanian' + stopwords?: <<AnalysisStopWords>> + stopwords_path?: string + stem_exclusion?: string[] +} +---- + + + +[discrete] +[[AnalysisLowercaseNormalizer]] +=== AnalysisLowercaseNormalizer + +[source,ts,subs=+macros] +---- +interface AnalysisLowercaseNormalizer { + type: 'lowercase' +} +---- + + + +[discrete] +[[AnalysisLowercaseTokenFilter]] +=== AnalysisLowercaseTokenFilter + +[source,ts,subs=+macros] +---- +interface AnalysisLowercaseTokenFilter extends <<AnalysisTokenFilterBase>> { + type: 'lowercase' + language?: string +} 
+---- + + + +[discrete] +[[AnalysisLowercaseTokenizer]] +=== AnalysisLowercaseTokenizer + +[source,ts,subs=+macros] +---- +interface AnalysisLowercaseTokenizer extends <<AnalysisTokenizerBase>> { + type: 'lowercase' +} +---- + + + +[discrete] +[[AnalysisMappingCharFilter]] +=== AnalysisMappingCharFilter + +[source,ts,subs=+macros] +---- +interface AnalysisMappingCharFilter extends <<AnalysisCharFilterBase>> { + type: 'mapping' + mappings?: string[] + mappings_path?: string +} +---- + + + +[discrete] +[[AnalysisMultiplexerTokenFilter]] +=== AnalysisMultiplexerTokenFilter + +[source,ts,subs=+macros] +---- +interface AnalysisMultiplexerTokenFilter extends <<AnalysisTokenFilterBase>> { + type: 'multiplexer' + filters: string[] + preserve_original?: <<SpecUtilsStringified>><boolean> +} +---- + + + +[discrete] +[[AnalysisNGramTokenFilter]] +=== AnalysisNGramTokenFilter + +[source,ts,subs=+macros] +---- +interface AnalysisNGramTokenFilter extends <<AnalysisTokenFilterBase>> { + type: 'ngram' + max_gram?: <<integer>> + min_gram?: <<integer>> + preserve_original?: <<SpecUtilsStringified>><boolean> +} +---- + + + +[discrete] +[[AnalysisNGramTokenizer]] +=== AnalysisNGramTokenizer + +[source,ts,subs=+macros] +---- +interface AnalysisNGramTokenizer extends <<AnalysisTokenizerBase>> { + type: 'ngram' + custom_token_chars?: string + max_gram?: <<integer>> + min_gram?: <<integer>> + token_chars?: <<AnalysisTokenChar>>[] +} +---- + + + +[discrete] +[[AnalysisNoriAnalyzer]] +=== AnalysisNoriAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisNoriAnalyzer { + type: 'nori' + version?: <<VersionString>> + decompound_mode?: <<AnalysisNoriDecompoundMode>> + stoptags?: string[] + user_dictionary?: string +} +---- + + + +[discrete] +[[AnalysisNoriDecompoundMode]] +=== AnalysisNoriDecompoundMode + +[source,ts,subs=+macros] +---- +type AnalysisNoriDecompoundMode = 'discard' | 'none' | 'mixed' +---- + + + +[discrete] +[[AnalysisNoriPartOfSpeechTokenFilter]] +=== AnalysisNoriPartOfSpeechTokenFilter + +[source,ts,subs=+macros] +---- +interface AnalysisNoriPartOfSpeechTokenFilter extends <<AnalysisTokenFilterBase>> { + type: 'nori_part_of_speech' + stoptags?: string[] +} +---- + + + +[discrete] +[[AnalysisNoriTokenizer]] +=== AnalysisNoriTokenizer + +[source,ts,subs=+macros] +---- +interface AnalysisNoriTokenizer extends <<AnalysisTokenizerBase>> { + type: 'nori_tokenizer' + decompound_mode?: <<AnalysisNoriDecompoundMode>> + discard_punctuation?: boolean + user_dictionary?: string + user_dictionary_rules?: string[] +} +---- + + + +[discrete] +[[AnalysisNormalizer]] +=== AnalysisNormalizer + +[source,ts,subs=+macros] +---- +type AnalysisNormalizer = <<AnalysisLowercaseNormalizer>> | <<AnalysisCustomNormalizer>> +---- + + + +[discrete] +[[AnalysisNorwegianAnalyzer]] +=== AnalysisNorwegianAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisNorwegianAnalyzer { + type: 'norwegian' + stopwords?: <<AnalysisStopWords>> + stopwords_path?: string + stem_exclusion?: string[] +} +---- + + + +[discrete] +[[AnalysisPathHierarchyTokenizer]] +=== AnalysisPathHierarchyTokenizer + +[source,ts,subs=+macros] +---- +interface AnalysisPathHierarchyTokenizer extends <<AnalysisTokenizerBase>> { + type: 'path_hierarchy' + buffer_size?: <<SpecUtilsStringified>><<<integer>>> + delimiter?: string + replacement?: string + reverse?: <<SpecUtilsStringified>><boolean> + skip?: <<SpecUtilsStringified>><<<integer>>> +} +---- + + + +[discrete] +[[AnalysisPatternAnalyzer]] +=== AnalysisPatternAnalyzer + +[source,ts,subs=+macros] +---- 
+interface AnalysisPatternAnalyzer { + type: 'pattern' + version?: <<VersionString>> + flags?: string + lowercase?: boolean + pattern: string + stopwords?: <<AnalysisStopWords>> +} +---- + + + +[discrete] +[[AnalysisPatternCaptureTokenFilter]] +=== AnalysisPatternCaptureTokenFilter + +[source,ts,subs=+macros] +---- +interface AnalysisPatternCaptureTokenFilter extends <<AnalysisTokenFilterBase>> { + type: 'pattern_capture' + patterns: string[] + preserve_original?: <<SpecUtilsStringified>><boolean> +} +---- + + + +[discrete] +[[AnalysisPatternReplaceCharFilter]] +=== AnalysisPatternReplaceCharFilter + +[source,ts,subs=+macros] +---- +interface AnalysisPatternReplaceCharFilter extends <<AnalysisCharFilterBase>> { + type: 'pattern_replace' + flags?: string + pattern: string + replacement?: string +} +---- + + + +[discrete] +[[AnalysisPatternReplaceTokenFilter]] +=== AnalysisPatternReplaceTokenFilter + +[source,ts,subs=+macros] +---- +interface AnalysisPatternReplaceTokenFilter extends <<AnalysisTokenFilterBase>> { + type: 'pattern_replace' + all?: boolean + flags?: string + pattern: string + replacement?: string +} +---- + + + +[discrete] +[[AnalysisPatternTokenizer]] +=== AnalysisPatternTokenizer + +[source,ts,subs=+macros] +---- +interface AnalysisPatternTokenizer extends <<AnalysisTokenizerBase>> { + type: 'pattern' + flags?: string + group?: <<integer>> + pattern?: string +} +---- + + + +[discrete] +[[AnalysisPersianAnalyzer]] +=== AnalysisPersianAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisPersianAnalyzer { + type: 'persian' + stopwords?: <<AnalysisStopWords>> + stopwords_path?: string +} +---- + + + +[discrete] +[[AnalysisPhoneticEncoder]] +=== AnalysisPhoneticEncoder + +[source,ts,subs=+macros] +---- +type AnalysisPhoneticEncoder = 'metaphone' | 'double_metaphone' | 'soundex' | 'refined_soundex' | 'caverphone1' | 'caverphone2' | 'cologne' | 'nysiis' | 'koelnerphonetik' | 'haasephonetik' | 'beider_morse' | 'daitch_mokotoff' +---- + + + +[discrete] +[[AnalysisPhoneticLanguage]] +=== AnalysisPhoneticLanguage + +[source,ts,subs=+macros] +---- +type AnalysisPhoneticLanguage = 'any' | 'common' | 'cyrillic' | 'english' | 'french' | 'german' | 'hebrew' | 'hungarian' | 'polish' | 'romanian' | 'russian' | 'spanish' +---- + + + +[discrete] +[[AnalysisPhoneticNameType]] +=== AnalysisPhoneticNameType + +[source,ts,subs=+macros] +---- +type AnalysisPhoneticNameType = 'generic' | 'ashkenazi' | 'sephardic' +---- + + + +[discrete] +[[AnalysisPhoneticRuleType]] +=== AnalysisPhoneticRuleType + +[source,ts,subs=+macros] +---- +type AnalysisPhoneticRuleType = 'approx' | 'exact' +---- + + + +[discrete] +[[AnalysisPhoneticTokenFilter]] +=== AnalysisPhoneticTokenFilter + +[source,ts,subs=+macros] +---- +interface AnalysisPhoneticTokenFilter extends <<AnalysisTokenFilterBase>> { + type: 'phonetic' + encoder: <<AnalysisPhoneticEncoder>> + languageset?: <<AnalysisPhoneticLanguage>> | <<AnalysisPhoneticLanguage>>[] + max_code_len?: <<integer>> + name_type?: <<AnalysisPhoneticNameType>> + replace?: boolean + rule_type?: <<AnalysisPhoneticRuleType>> +} +---- + + + +[discrete] +[[AnalysisPorterStemTokenFilter]] +=== AnalysisPorterStemTokenFilter + +[source,ts,subs=+macros] +---- +interface AnalysisPorterStemTokenFilter extends <<AnalysisTokenFilterBase>> { + type: 'porter_stem' +} +---- + + + +[discrete] +[[AnalysisPortugueseAnalyzer]] +=== AnalysisPortugueseAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisPortugueseAnalyzer { + type: 'portuguese' + stopwords?: <<AnalysisStopWords>> + 
stopwords_path?: string + stem_exclusion?: string[] +} +---- + + + +[discrete] +[[AnalysisPredicateTokenFilter]] +=== AnalysisPredicateTokenFilter + +[source,ts,subs=+macros] +---- +interface AnalysisPredicateTokenFilter extends <<AnalysisTokenFilterBase>> { + type: 'predicate_token_filter' + script: <<Script>> | string +} +---- + + + +[discrete] +[[AnalysisRemoveDuplicatesTokenFilter]] +=== AnalysisRemoveDuplicatesTokenFilter + +[source,ts,subs=+macros] +---- +interface AnalysisRemoveDuplicatesTokenFilter extends <<AnalysisTokenFilterBase>> { + type: 'remove_duplicates' +} +---- + + + +[discrete] +[[AnalysisReverseTokenFilter]] +=== AnalysisReverseTokenFilter + +[source,ts,subs=+macros] +---- +interface AnalysisReverseTokenFilter extends <<AnalysisTokenFilterBase>> { + type: 'reverse' +} +---- + + + +[discrete] +[[AnalysisRomanianAnalyzer]] +=== AnalysisRomanianAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisRomanianAnalyzer { + type: 'romanian' + stopwords?: <<AnalysisStopWords>> + stopwords_path?: string + stem_exclusion?: string[] +} +---- + + + +[discrete] +[[AnalysisRussianAnalyzer]] +=== AnalysisRussianAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisRussianAnalyzer { + type: 'russian' + stopwords?: <<AnalysisStopWords>> + stopwords_path?: string + stem_exclusion?: string[] +} +---- + + + +[discrete] +[[AnalysisSerbianAnalyzer]] +=== AnalysisSerbianAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisSerbianAnalyzer { + type: 'serbian' + stopwords?: <<AnalysisStopWords>> + stopwords_path?: string + stem_exclusion?: string[] +} +---- + + + +[discrete] +[[AnalysisShingleTokenFilter]] +=== AnalysisShingleTokenFilter + +[source,ts,subs=+macros] +---- +interface AnalysisShingleTokenFilter extends <<AnalysisTokenFilterBase>> { + type: 'shingle' + filler_token?: string + max_shingle_size?: <<integer>> | string + min_shingle_size?: <<integer>> | string + output_unigrams?: boolean + output_unigrams_if_no_shingles?: boolean + token_separator?: string +} +---- + + + +[discrete] +[[AnalysisSimpleAnalyzer]] +=== AnalysisSimpleAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisSimpleAnalyzer { + type: 'simple' + version?: <<VersionString>> +} +---- + + + +[discrete] +[[AnalysisSimplePatternSplitTokenizer]] +=== AnalysisSimplePatternSplitTokenizer + +[source,ts,subs=+macros] +---- +interface AnalysisSimplePatternSplitTokenizer extends <<AnalysisTokenizerBase>> { + type: 'simple_pattern_split' + pattern?: string +} +---- + + + +[discrete] +[[AnalysisSimplePatternTokenizer]] +=== AnalysisSimplePatternTokenizer + +[source,ts,subs=+macros] +---- +interface AnalysisSimplePatternTokenizer extends <<AnalysisTokenizerBase>> { + type: 'simple_pattern' + pattern?: string +} +---- + + + +[discrete] +[[AnalysisSnowballAnalyzer]] +=== AnalysisSnowballAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisSnowballAnalyzer { + type: 'snowball' + version?: <<VersionString>> + language: <<AnalysisSnowballLanguage>> + stopwords?: <<AnalysisStopWords>> +} +---- + + + +[discrete] +[[AnalysisSnowballLanguage]] +=== AnalysisSnowballLanguage + +[source,ts,subs=+macros] +---- +type AnalysisSnowballLanguage = 'Armenian' | 'Basque' | 'Catalan' | 'Danish' | 'Dutch' | 'English' | 'Finnish' | 'French' | 'German' | 'German2' | 'Hungarian' | 'Italian' | 'Kp' | 'Lovins' | 'Norwegian' | 'Porter' | 'Portuguese' | 'Romanian' | 'Russian' | 'Spanish' | 'Swedish' | 'Turkish' +---- + + + +[discrete] +[[AnalysisSnowballTokenFilter]] +=== AnalysisSnowballTokenFilter + 
+[source,ts,subs=+macros] +---- +interface AnalysisSnowballTokenFilter extends <<AnalysisTokenFilterBase>> { + type: 'snowball' + language?: <<AnalysisSnowballLanguage>> +} +---- + + + +[discrete] +[[AnalysisSoraniAnalyzer]] +=== AnalysisSoraniAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisSoraniAnalyzer { + type: 'sorani' + stopwords?: <<AnalysisStopWords>> + stopwords_path?: string + stem_exclusion?: string[] +} +---- + + + +[discrete] +[[AnalysisSpanishAnalyzer]] +=== AnalysisSpanishAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisSpanishAnalyzer { + type: 'spanish' + stopwords?: <<AnalysisStopWords>> + stopwords_path?: string + stem_exclusion?: string[] +} +---- + + + +[discrete] +[[AnalysisStandardAnalyzer]] +=== AnalysisStandardAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisStandardAnalyzer { + type: 'standard' + max_token_length?: <<integer>> + stopwords?: <<AnalysisStopWords>> +} +---- + + + +[discrete] +[[AnalysisStandardTokenizer]] +=== AnalysisStandardTokenizer + +[source,ts,subs=+macros] +---- +interface AnalysisStandardTokenizer extends <<AnalysisTokenizerBase>> { + type: 'standard' + max_token_length?: <<integer>> +} +---- + + + +[discrete] +[[AnalysisStemmerOverrideTokenFilter]] +=== AnalysisStemmerOverrideTokenFilter + +[source,ts,subs=+macros] +---- +interface AnalysisStemmerOverrideTokenFilter extends <<AnalysisTokenFilterBase>> { + type: 'stemmer_override' + rules?: string[] + rules_path?: string +} +---- + + + +[discrete] +[[AnalysisStemmerTokenFilter]] +=== AnalysisStemmerTokenFilter + +[source,ts,subs=+macros] +---- +interface AnalysisStemmerTokenFilter extends <<AnalysisTokenFilterBase>> { + type: 'stemmer' + language?: string + name?: string +} +---- + + + +[discrete] +[[AnalysisStopAnalyzer]] +=== AnalysisStopAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisStopAnalyzer { + type: 'stop' + version?: <<VersionString>> + stopwords?: <<AnalysisStopWords>> + stopwords_path?: string +} +---- + + + +[discrete] +[[AnalysisStopTokenFilter]] +=== AnalysisStopTokenFilter + +[source,ts,subs=+macros] +---- +interface AnalysisStopTokenFilter extends <<AnalysisTokenFilterBase>> { + type: 'stop' + ignore_case?: boolean + remove_trailing?: boolean + stopwords?: <<AnalysisStopWords>> + stopwords_path?: string +} +---- + + + +[discrete] +[[AnalysisStopWords]] +=== AnalysisStopWords + +[source,ts,subs=+macros] +---- +type AnalysisStopWords = string | string[] +---- + + + +[discrete] +[[AnalysisSwedishAnalyzer]] +=== AnalysisSwedishAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisSwedishAnalyzer { + type: 'swedish' + stopwords?: <<AnalysisStopWords>> + stopwords_path?: string + stem_exclusion?: string[] +} +---- + + + +[discrete] +[[AnalysisSynonymFormat]] +=== AnalysisSynonymFormat + +[source,ts,subs=+macros] +---- +type AnalysisSynonymFormat = 'solr' | 'wordnet' +---- + + + +[discrete] +[[AnalysisSynonymGraphTokenFilter]] +=== AnalysisSynonymGraphTokenFilter + +[source,ts,subs=+macros] +---- +interface AnalysisSynonymGraphTokenFilter extends <<AnalysisTokenFilterBase>> { + type: 'synonym_graph' + expand?: boolean + format?: <<AnalysisSynonymFormat>> + lenient?: boolean + synonyms?: string[] + synonyms_path?: string + synonyms_set?: string + tokenizer?: string + updateable?: boolean +} +---- + + + +[discrete] +[[AnalysisSynonymTokenFilter]] +=== AnalysisSynonymTokenFilter + +[source,ts,subs=+macros] +---- +interface AnalysisSynonymTokenFilter extends <<AnalysisTokenFilterBase>> { + type: 'synonym' + expand?: boolean 
+ format?: <<AnalysisSynonymFormat>> + lenient?: boolean + synonyms?: string[] + synonyms_path?: string + synonyms_set?: string + tokenizer?: string + updateable?: boolean +} +---- + + + +[discrete] +[[AnalysisThaiAnalyzer]] +=== AnalysisThaiAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisThaiAnalyzer { + type: 'thai' + stopwords?: <<AnalysisStopWords>> + stopwords_path?: string +} +---- + + + +[discrete] +[[AnalysisThaiTokenizer]] +=== AnalysisThaiTokenizer + +[source,ts,subs=+macros] +---- +interface AnalysisThaiTokenizer extends <<AnalysisTokenizerBase>> { + type: 'thai' +} +---- + + + +[discrete] +[[AnalysisTokenChar]] +=== AnalysisTokenChar + +[source,ts,subs=+macros] +---- +type AnalysisTokenChar = 'letter' | 'digit' | 'whitespace' | 'punctuation' | 'symbol' | 'custom' +---- + + + +[discrete] +[[AnalysisTokenFilter]] +=== AnalysisTokenFilter + +[source,ts,subs=+macros] +---- +type AnalysisTokenFilter = string | <<AnalysisTokenFilterDefinition>> +---- + + + +[discrete] +[[AnalysisTokenFilterBase]] +=== AnalysisTokenFilterBase + +[source,ts,subs=+macros] +---- +interface AnalysisTokenFilterBase { + version?: <<VersionString>> +} +---- + + + +[discrete] +[[AnalysisTokenFilterDefinition]] +=== AnalysisTokenFilterDefinition + +[source,ts,subs=+macros] +---- +type AnalysisTokenFilterDefinition = <<AnalysisAsciiFoldingTokenFilter>> | <<AnalysisCommonGramsTokenFilter>> | <<AnalysisConditionTokenFilter>> | <<AnalysisDelimitedPayloadTokenFilter>> | <<AnalysisEdgeNGramTokenFilter>> | <<AnalysisElisionTokenFilter>> | <<AnalysisFingerprintTokenFilter>> | <<AnalysisHunspellTokenFilter>> | <<AnalysisHyphenationDecompounderTokenFilter>> | <<AnalysisKeepTypesTokenFilter>> | <<AnalysisKeepWordsTokenFilter>> | <<AnalysisKeywordMarkerTokenFilter>> | <<AnalysisKStemTokenFilter>> | <<AnalysisLengthTokenFilter>> | <<AnalysisLimitTokenCountTokenFilter>> | <<AnalysisLowercaseTokenFilter>> | <<AnalysisMultiplexerTokenFilter>> | <<AnalysisNGramTokenFilter>> | <<AnalysisNoriPartOfSpeechTokenFilter>> | <<AnalysisPatternCaptureTokenFilter>> | <<AnalysisPatternReplaceTokenFilter>> | <<AnalysisPorterStemTokenFilter>> | <<AnalysisPredicateTokenFilter>> | <<AnalysisRemoveDuplicatesTokenFilter>> | <<AnalysisReverseTokenFilter>> | <<AnalysisShingleTokenFilter>> | <<AnalysisSnowballTokenFilter>> | <<AnalysisStemmerOverrideTokenFilter>> | <<AnalysisStemmerTokenFilter>> | <<AnalysisStopTokenFilter>> | <<AnalysisSynonymGraphTokenFilter>> | <<AnalysisSynonymTokenFilter>> | <<AnalysisTrimTokenFilter>> | <<AnalysisTruncateTokenFilter>> | <<AnalysisUniqueTokenFilter>> | <<AnalysisUppercaseTokenFilter>> | <<AnalysisWordDelimiterGraphTokenFilter>> | <<AnalysisWordDelimiterTokenFilter>> | <<AnalysisKuromojiStemmerTokenFilter>> | <<AnalysisKuromojiReadingFormTokenFilter>> | <<AnalysisKuromojiPartOfSpeechTokenFilter>> | <<AnalysisIcuCollationTokenFilter>> | <<AnalysisIcuFoldingTokenFilter>> | <<AnalysisIcuNormalizationTokenFilter>> | <<AnalysisIcuTransformTokenFilter>> | <<AnalysisPhoneticTokenFilter>> | <<AnalysisDictionaryDecompounderTokenFilter>> +---- + + + +[discrete] +[[AnalysisTokenizer]] +=== AnalysisTokenizer + +[source,ts,subs=+macros] +---- +type AnalysisTokenizer = string | <<AnalysisTokenizerDefinition>> +---- + + + +[discrete] +[[AnalysisTokenizerBase]] +=== AnalysisTokenizerBase + +[source,ts,subs=+macros] +---- +interface AnalysisTokenizerBase { + version?: <<VersionString>> +} +---- + + + +[discrete] +[[AnalysisTokenizerDefinition]] +=== AnalysisTokenizerDefinition + +[source,ts,subs=+macros] +---- 
+type AnalysisTokenizerDefinition = <<AnalysisCharGroupTokenizer>> | <<AnalysisClassicTokenizer>> | <<AnalysisEdgeNGramTokenizer>> | <<AnalysisKeywordTokenizer>> | <<AnalysisLetterTokenizer>> | <<AnalysisLowercaseTokenizer>> | <<AnalysisNGramTokenizer>> | <<AnalysisPathHierarchyTokenizer>> | <<AnalysisPatternTokenizer>> | <<AnalysisSimplePatternTokenizer>> | <<AnalysisSimplePatternSplitTokenizer>> | <<AnalysisStandardTokenizer>> | <<AnalysisThaiTokenizer>> | <<AnalysisUaxEmailUrlTokenizer>> | <<AnalysisWhitespaceTokenizer>> | <<AnalysisIcuTokenizer>> | <<AnalysisKuromojiTokenizer>> | <<AnalysisNoriTokenizer>> +---- + + + +[discrete] +[[AnalysisTrimTokenFilter]] +=== AnalysisTrimTokenFilter + +[source,ts,subs=+macros] +---- +interface AnalysisTrimTokenFilter extends <<AnalysisTokenFilterBase>> { + type: 'trim' +} +---- + + + +[discrete] +[[AnalysisTruncateTokenFilter]] +=== AnalysisTruncateTokenFilter + +[source,ts,subs=+macros] +---- +interface AnalysisTruncateTokenFilter extends <<AnalysisTokenFilterBase>> { + type: 'truncate' + length?: <<integer>> +} +---- + + + +[discrete] +[[AnalysisTurkishAnalyzer]] +=== AnalysisTurkishAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisTurkishAnalyzer { + type: 'turkish' + stopwords?: <<AnalysisStopWords>> + stopwords_path?: string + stem_exclusion?: string[] +} +---- + + + +[discrete] +[[AnalysisUaxEmailUrlTokenizer]] +=== AnalysisUaxEmailUrlTokenizer + +[source,ts,subs=+macros] +---- +interface AnalysisUaxEmailUrlTokenizer extends <<AnalysisTokenizerBase>> { + type: 'uax_url_email' + max_token_length?: <<integer>> +} +---- + + + +[discrete] +[[AnalysisUniqueTokenFilter]] +=== AnalysisUniqueTokenFilter + +[source,ts,subs=+macros] +---- +interface AnalysisUniqueTokenFilter extends <<AnalysisTokenFilterBase>> { + type: 'unique' + only_on_same_position?: boolean +} +---- + + + +[discrete] +[[AnalysisUppercaseTokenFilter]] +=== AnalysisUppercaseTokenFilter + +[source,ts,subs=+macros] +---- +interface AnalysisUppercaseTokenFilter extends <<AnalysisTokenFilterBase>> { + type: 'uppercase' +} +---- + + + +[discrete] +[[AnalysisWhitespaceAnalyzer]] +=== AnalysisWhitespaceAnalyzer + +[source,ts,subs=+macros] +---- +interface AnalysisWhitespaceAnalyzer { + type: 'whitespace' + version?: <<VersionString>> +} +---- + + + +[discrete] +[[AnalysisWhitespaceTokenizer]] +=== AnalysisWhitespaceTokenizer + +[source,ts,subs=+macros] +---- +interface AnalysisWhitespaceTokenizer extends <<AnalysisTokenizerBase>> { + type: 'whitespace' + max_token_length?: <<integer>> +} +---- + + + +[discrete] +[[AnalysisWordDelimiterGraphTokenFilter]] +=== AnalysisWordDelimiterGraphTokenFilter + +[source,ts,subs=+macros] +---- +interface AnalysisWordDelimiterGraphTokenFilter extends <<AnalysisTokenFilterBase>> { + type: 'word_delimiter_graph' + adjust_offsets?: boolean + catenate_all?: boolean + catenate_numbers?: boolean + catenate_words?: boolean + generate_number_parts?: boolean + generate_word_parts?: boolean + ignore_keywords?: boolean + preserve_original?: <<SpecUtilsStringified>><boolean> + protected_words?: string[] + protected_words_path?: string + split_on_case_change?: boolean + split_on_numerics?: boolean + stem_english_possessive?: boolean + type_table?: string[] + type_table_path?: string +} +---- + + + +[discrete] +[[AnalysisWordDelimiterTokenFilter]] +=== AnalysisWordDelimiterTokenFilter + +[source,ts,subs=+macros] +---- +interface AnalysisWordDelimiterTokenFilter extends <<AnalysisTokenFilterBase>> { + type: 'word_delimiter' + catenate_all?: boolean + 
catenate_numbers?: boolean + catenate_words?: boolean + generate_number_parts?: boolean + generate_word_parts?: boolean + preserve_original?: <<SpecUtilsStringified>><boolean> + protected_words?: string[] + protected_words_path?: string + split_on_case_change?: boolean + split_on_numerics?: boolean + stem_english_possessive?: boolean + type_table?: string[] + type_table_path?: string +} +---- + + + +[discrete] +[[MappingAggregateMetricDoubleProperty]] +=== MappingAggregateMetricDoubleProperty + +[source,ts,subs=+macros] +---- +interface MappingAggregateMetricDoubleProperty extends <<MappingPropertyBase>> { + type: 'aggregate_metric_double' + default_metric: string + metrics: string[] + time_series_metric?: <<MappingTimeSeriesMetricType>> +} +---- + + + +[discrete] +[[MappingAllField]] +=== MappingAllField + +[source,ts,subs=+macros] +---- +interface MappingAllField { + analyzer: string + enabled: boolean + omit_norms: boolean + search_analyzer: string + similarity: string + store: boolean + store_term_vector_offsets: boolean + store_term_vector_payloads: boolean + store_term_vector_positions: boolean + store_term_vectors: boolean +} +---- + + + +[discrete] +[[MappingBinaryProperty]] +=== MappingBinaryProperty + +[source,ts,subs=+macros] +---- +interface MappingBinaryProperty extends <<MappingDocValuesPropertyBase>> { + type: 'binary' +} +---- + + + +[discrete] +[[MappingBooleanProperty]] +=== MappingBooleanProperty + +[source,ts,subs=+macros] +---- +interface MappingBooleanProperty extends <<MappingDocValuesPropertyBase>> { + boost?: <<double>> + fielddata?: <<IndicesNumericFielddata>> + index?: boolean + null_value?: boolean + type: 'boolean' +} +---- + + + +[discrete] +[[MappingByteNumberProperty]] +=== MappingByteNumberProperty + +[source,ts,subs=+macros] +---- +interface MappingByteNumberProperty extends <<MappingNumberPropertyBase>> { + type: '<<byte>>' + null_value?: <<byte>> +} +---- + + + +[discrete] +[[MappingCompletionProperty]] +=== MappingCompletionProperty + +[source,ts,subs=+macros] +---- +interface MappingCompletionProperty extends <<MappingDocValuesPropertyBase>> { + analyzer?: string + contexts?: <<MappingSuggestContext>>[] + max_input_length?: <<integer>> + preserve_position_increments?: boolean + preserve_separators?: boolean + search_analyzer?: string + type: 'completion' +} +---- + + + +[discrete] +[[MappingCompositeSubField]] +=== MappingCompositeSubField + +[source,ts,subs=+macros] +---- +interface MappingCompositeSubField { + type: <<MappingRuntimeFieldType>> +} +---- + + + +[discrete] +[[MappingConstantKeywordProperty]] +=== MappingConstantKeywordProperty + +[source,ts,subs=+macros] +---- +interface MappingConstantKeywordProperty extends <<MappingPropertyBase>> { + value?: any + type: 'constant_keyword' +} +---- + + + +[discrete] +[[MappingCorePropertyBase]] +=== MappingCorePropertyBase + +[source,ts,subs=+macros] +---- +interface MappingCorePropertyBase extends <<MappingPropertyBase>> { + copy_to?: <<Fields>> + store?: boolean +} +---- + + + +[discrete] +[[MappingDataStreamTimestamp]] +=== MappingDataStreamTimestamp + +[source,ts,subs=+macros] +---- +interface MappingDataStreamTimestamp { + enabled: boolean +} +---- + + + +[discrete] +[[MappingDateNanosProperty]] +=== MappingDateNanosProperty + +[source,ts,subs=+macros] +---- +interface MappingDateNanosProperty extends <<MappingDocValuesPropertyBase>> { + boost?: <<double>> + format?: string + ignore_malformed?: boolean + index?: boolean + null_value?: <<DateTime>> + precision_step?: <<integer>> + type: 
'date_nanos' +} +---- + + + +[discrete] +[[MappingDateProperty]] +=== MappingDateProperty + +[source,ts,subs=+macros] +---- +interface MappingDateProperty extends <<MappingDocValuesPropertyBase>> { + boost?: <<double>> + fielddata?: <<IndicesNumericFielddata>> + format?: string + ignore_malformed?: boolean + index?: boolean + null_value?: <<DateTime>> + precision_step?: <<integer>> + locale?: string + type: 'date' +} +---- + + + +[discrete] +[[MappingDateRangeProperty]] +=== MappingDateRangeProperty + +[source,ts,subs=+macros] +---- +interface MappingDateRangeProperty extends <<MappingRangePropertyBase>> { + format?: string + type: 'date_range' +} +---- + + + +[discrete] +[[MappingDenseVectorElementType]] +=== MappingDenseVectorElementType + +[source,ts,subs=+macros] +---- +type MappingDenseVectorElementType = 'bit' | '<<byte>>' | '<<float>>' +---- + + + +[discrete] +[[MappingDenseVectorIndexOptions]] +=== MappingDenseVectorIndexOptions + +[source,ts,subs=+macros] +---- +interface MappingDenseVectorIndexOptions { + pass:[/**] @property confidence_interval The confidence interval to use when quantizing the vectors. Can be any value between and including `0.90` and `1.0` or exactly `0`. When the value is `0`, this indicates that dynamic quantiles should be calculated for optimized quantization. When between `0.90` and `1.0`, this value restricts the values used when calculating the quantization thresholds. For example, a value of `0.95` will only use the middle `95%` of the values when calculating the quantization thresholds (e.g. the highest and lowest `2.5%` of values will be ignored). Defaults to `1/(dims + 1)` for `int8` quantized vectors and `0` for `int4` for dynamic quantile calculation. Only applicable to `int8_hnsw`, `int4_hnsw`, `int8_flat`, and `int4_flat` index types. */ + confidence_interval?: <<float>> + pass:[/**] @property ef_construction The number of candidates to track while assembling the list of nearest neighbors for each new node. Only applicable to `hnsw`, `int8_hnsw`, and `int4_hnsw` index types. */ + ef_construction?: <<integer>> + pass:[/**] @property m The number of neighbors each node will be connected to in the HNSW graph. Only applicable to `hnsw`, `int8_hnsw`, and `int4_hnsw` index types. */ + m?: <<integer>> + pass:[/**] @property type The type of kNN algorithm to use. */ + type: <<MappingDenseVectorIndexOptionsType>> +} +---- + + + +[discrete] +[[MappingDenseVectorIndexOptionsType]] +=== MappingDenseVectorIndexOptionsType + +[source,ts,subs=+macros] +---- +type MappingDenseVectorIndexOptionsType = 'flat' | 'hnsw' | 'int4_flat' | 'int4_hnsw' | 'int8_flat' | 'int8_hnsw' +---- + + + +[discrete] +[[MappingDenseVectorProperty]] +=== MappingDenseVectorProperty + +[source,ts,subs=+macros] +---- +interface MappingDenseVectorProperty extends <<MappingPropertyBase>> { + type: 'dense_vector' + pass:[/**] @property dims Number of vector dimensions. Can't exceed `4096`. If `dims` is not specified, it will be set to the length of the first vector added to the field. */ + dims?: <<integer>> + pass:[/**] @property element_type The data type used to encode vectors. The supported data types are `<<float>>` (default), `<<byte>>`, and `bit`. */ + element_type?: <<MappingDenseVectorElementType>> + pass:[/**] @property index If `true`, you can search this field using the kNN search API. */ + index?: boolean + pass:[/**] @property index_options An optional section that configures the kNN indexing algorithm. 
The HNSW algorithm has two internal parameters that influence how the data structure is built. These can be adjusted to improve the accuracy of results, at the expense of slower indexing speed. This parameter can only be specified when `index` is `true`. */ + index_options?: <<MappingDenseVectorIndexOptions>> + pass:[/**] @property similarity The vector similarity metric to use in kNN search. Documents are ranked by their vector field's similarity to the query vector. The `_score` of each document will be derived from the similarity, in a way that ensures scores are positive and that a larger score corresponds to a higher ranking. Defaults to `l2_norm` when `element_type` is `bit` otherwise defaults to `cosine`. `bit` vectors only support `l2_norm` as their similarity metric. This parameter can only be specified when `index` is `true`. */ + similarity?: <<MappingDenseVectorSimilarity>> +} +---- + + + +[discrete] +[[MappingDenseVectorSimilarity]] +=== MappingDenseVectorSimilarity + +[source,ts,subs=+macros] +---- +type MappingDenseVectorSimilarity = 'cosine' | 'dot_product' | 'l2_norm' | 'max_inner_product' +---- + + + +[discrete] +[[MappingDocValuesPropertyBase]] +=== MappingDocValuesPropertyBase + +[source,ts,subs=+macros] +---- +interface MappingDocValuesPropertyBase extends <<MappingCorePropertyBase>> { + doc_values?: boolean +} +---- + + + +[discrete] +[[MappingDoubleNumberProperty]] +=== MappingDoubleNumberProperty + +[source,ts,subs=+macros] +---- +interface MappingDoubleNumberProperty extends <<MappingNumberPropertyBase>> { + type: '<<double>>' + null_value?: <<double>> +} +---- + + + +[discrete] +[[MappingDoubleRangeProperty]] +=== MappingDoubleRangeProperty + +[source,ts,subs=+macros] +---- +interface MappingDoubleRangeProperty extends <<MappingRangePropertyBase>> { + type: 'double_range' +} +---- + + + +[discrete] +[[MappingDynamicMapping]] +=== MappingDynamicMapping + +[source,ts,subs=+macros] +---- +type MappingDynamicMapping = boolean | 'strict' | 'runtime' | 'true' | 'false' +---- + + + +[discrete] +[[MappingDynamicProperty]] +=== MappingDynamicProperty + +[source,ts,subs=+macros] +---- +interface MappingDynamicProperty extends <<MappingDocValuesPropertyBase>> { + type: '{dynamic_type}' + enabled?: boolean + null_value?: <<FieldValue>> + boost?: <<double>> + coerce?: boolean + script?: <<Script>> | string + on_script_error?: <<MappingOnScriptError>> + ignore_malformed?: boolean + time_series_metric?: <<MappingTimeSeriesMetricType>> + analyzer?: string + eager_global_ordinals?: boolean + index?: boolean + index_options?: <<MappingIndexOptions>> + index_phrases?: boolean + index_prefixes?: <<MappingTextIndexPrefixes>> | null + norms?: boolean + position_increment_gap?: <<integer>> + search_analyzer?: string + search_quote_analyzer?: string + term_vector?: <<MappingTermVectorOption>> + format?: string + precision_step?: <<integer>> + locale?: string +} +---- + + + +[discrete] +[[MappingDynamicTemplate]] +=== MappingDynamicTemplate + +[source,ts,subs=+macros] +---- +interface MappingDynamicTemplate { + mapping?: <<MappingProperty>> + runtime?: <<MappingProperty>> + match?: string | string[] + path_match?: string | string[] + unmatch?: string | string[] + path_unmatch?: string | string[] + match_mapping_type?: string | string[] + unmatch_mapping_type?: string | string[] + match_pattern?: <<MappingMatchType>> +} +---- + + + +[discrete] +[[MappingFieldAliasProperty]] +=== MappingFieldAliasProperty + +[source,ts,subs=+macros] +---- +interface MappingFieldAliasProperty extends 
<<MappingPropertyBase>> { + path?: <<Field>> + type: 'alias' +} +---- + + + +[discrete] +[[MappingFieldMapping]] +=== MappingFieldMapping + +[source,ts,subs=+macros] +---- +interface MappingFieldMapping { + full_name: string + mapping: Partial<Record<<<Field>>, <<MappingProperty>>>> +} +---- + + + +[discrete] +[[MappingFieldNamesField]] +=== MappingFieldNamesField + +[source,ts,subs=+macros] +---- +interface MappingFieldNamesField { + enabled: boolean +} +---- + + + +[discrete] +[[MappingFieldType]] +=== MappingFieldType + +[source,ts,subs=+macros] +---- +type MappingFieldType = 'none' | 'geo_point' | 'geo_shape' | 'ip' | 'binary' | 'keyword' | 'text' | 'search_as_you_type' | 'date' | 'date_nanos' | 'boolean' | 'completion' | 'nested' | 'object' | 'version' | 'murmur3' | 'token_count' | 'percolator' | '<<integer>>' | '<<long>>' | '<<short>>' | '<<byte>>' | '<<float>>' | 'half_float' | 'scaled_float' | '<<double>>' | 'integer_range' | 'float_range' | 'long_range' | 'double_range' | 'date_range' | 'ip_range' | 'alias' | 'join' | 'rank_feature' | 'rank_features' | 'flattened' | 'shape' | 'histogram' | 'constant_keyword' | 'aggregate_metric_double' | 'dense_vector' | 'semantic_text' | 'sparse_vector' | 'match_only_text' | 'icu_collation_keyword' +---- + + + +[discrete] +[[MappingFlattenedProperty]] +=== MappingFlattenedProperty + +[source,ts,subs=+macros] +---- +interface MappingFlattenedProperty extends <<MappingPropertyBase>> { + boost?: <<double>> + depth_limit?: <<integer>> + doc_values?: boolean + eager_global_ordinals?: boolean + index?: boolean + index_options?: <<MappingIndexOptions>> + null_value?: string + similarity?: string + split_queries_on_whitespace?: boolean + type: 'flattened' +} +---- + + + +[discrete] +[[MappingFloatNumberProperty]] +=== MappingFloatNumberProperty + +[source,ts,subs=+macros] +---- +interface MappingFloatNumberProperty extends <<MappingNumberPropertyBase>> { + type: '<<float>>' + null_value?: <<float>> +} +---- + + + +[discrete] +[[MappingFloatRangeProperty]] +=== MappingFloatRangeProperty + +[source,ts,subs=+macros] +---- +interface MappingFloatRangeProperty extends <<MappingRangePropertyBase>> { + type: 'float_range' +} +---- + + + +[discrete] +[[MappingGeoOrientation]] +=== MappingGeoOrientation + +[source,ts,subs=+macros] +---- +type MappingGeoOrientation = 'right' | 'RIGHT' | 'counterclockwise' | 'ccw' | 'left' | 'LEFT' | 'clockwise' | 'cw' +---- + + + +[discrete] +[[MappingGeoPointProperty]] +=== MappingGeoPointProperty + +[source,ts,subs=+macros] +---- +interface MappingGeoPointProperty extends <<MappingDocValuesPropertyBase>> { + ignore_malformed?: boolean + ignore_z_value?: boolean + null_value?: <<GeoLocation>> + index?: boolean + on_script_error?: <<MappingOnScriptError>> + script?: <<Script>> | string + type: 'geo_point' +} +---- + + + +[discrete] +[[MappingGeoShapeProperty]] +=== MappingGeoShapeProperty + +[source,ts,subs=+macros] +---- +interface MappingGeoShapeProperty extends <<MappingDocValuesPropertyBase>> { + coerce?: boolean + ignore_malformed?: boolean + ignore_z_value?: boolean + orientation?: <<MappingGeoOrientation>> + strategy?: <<MappingGeoStrategy>> + type: 'geo_shape' +} +---- + + + +[discrete] +[[MappingGeoStrategy]] +=== MappingGeoStrategy + +[source,ts,subs=+macros] +---- +type MappingGeoStrategy = 'recursive' | 'term' +---- + + + +[discrete] +[[MappingHalfFloatNumberProperty]] +=== MappingHalfFloatNumberProperty + +[source,ts,subs=+macros] +---- +interface MappingHalfFloatNumberProperty extends <<MappingNumberPropertyBase>> { 
+ type: 'half_float' + null_value?: <<float>> +} +---- + + + +[discrete] +[[MappingHistogramProperty]] +=== MappingHistogramProperty + +[source,ts,subs=+macros] +---- +interface MappingHistogramProperty extends <<MappingPropertyBase>> { + ignore_malformed?: boolean + type: 'histogram' +} +---- + + + +[discrete] +[[MappingIcuCollationProperty]] +=== MappingIcuCollationProperty + +[source,ts,subs=+macros] +---- +interface MappingIcuCollationProperty extends <<MappingDocValuesPropertyBase>> { + type: 'icu_collation_keyword' + norms?: boolean + index_options?: <<MappingIndexOptions>> + pass:[/**] @property index Should the field be searchable? */ + index?: boolean + pass:[/**] @property null_value Accepts a string value which is substituted for any explicit null values. Defaults to null, which means the field is treated as missing. */ + null_value?: string + rules?: string + language?: string + country?: string + variant?: string + strength?: <<AnalysisIcuCollationStrength>> + decomposition?: <<AnalysisIcuCollationDecomposition>> + alternate?: <<AnalysisIcuCollationAlternate>> + case_level?: boolean + case_first?: <<AnalysisIcuCollationCaseFirst>> + numeric?: boolean + variable_top?: string + hiragana_quaternary_mode?: boolean +} +---- + + + +[discrete] +[[MappingIndexField]] +=== MappingIndexField + +[source,ts,subs=+macros] +---- +interface MappingIndexField { + enabled: boolean +} +---- + + + +[discrete] +[[MappingIndexOptions]] +=== MappingIndexOptions + +[source,ts,subs=+macros] +---- +type MappingIndexOptions = 'docs' | 'freqs' | 'positions' | 'offsets' +---- + + + +[discrete] +[[MappingIntegerNumberProperty]] +=== MappingIntegerNumberProperty + +[source,ts,subs=+macros] +---- +interface MappingIntegerNumberProperty extends <<MappingNumberPropertyBase>> { + type: '<<integer>>' + null_value?: <<integer>> +} +---- + + + +[discrete] +[[MappingIntegerRangeProperty]] +=== MappingIntegerRangeProperty + +[source,ts,subs=+macros] +---- +interface MappingIntegerRangeProperty extends <<MappingRangePropertyBase>> { + type: 'integer_range' +} +---- + + + +[discrete] +[[MappingIpProperty]] +=== MappingIpProperty + +[source,ts,subs=+macros] +---- +interface MappingIpProperty extends <<MappingDocValuesPropertyBase>> { + boost?: <<double>> + index?: boolean + ignore_malformed?: boolean + null_value?: string + on_script_error?: <<MappingOnScriptError>> + script?: <<Script>> | string + pass:[/**] @property time_series_dimension For internal use by Elastic only. Marks the field as a time series dimension. Defaults to false. 
*/ + time_series_dimension?: boolean + type: 'ip' +} +---- + + + +[discrete] +[[MappingIpRangeProperty]] +=== MappingIpRangeProperty + +[source,ts,subs=+macros] +---- +interface MappingIpRangeProperty extends <<MappingRangePropertyBase>> { + type: 'ip_range' +} +---- + + + +[discrete] +[[MappingJoinProperty]] +=== MappingJoinProperty + +[source,ts,subs=+macros] +---- +interface MappingJoinProperty extends <<MappingPropertyBase>> { + relations?: Record<<<RelationName>>, <<RelationName>> | <<RelationName>>[]> + eager_global_ordinals?: boolean + type: 'join' +} +---- + + + +[discrete] +[[MappingKeywordProperty]] +=== MappingKeywordProperty + +[source,ts,subs=+macros] +---- +interface MappingKeywordProperty extends <<MappingDocValuesPropertyBase>> { + boost?: <<double>> + eager_global_ordinals?: boolean + index?: boolean + index_options?: <<MappingIndexOptions>> + script?: <<Script>> | string + on_script_error?: <<MappingOnScriptError>> + normalizer?: string + norms?: boolean + null_value?: string + similarity?: string | null + split_queries_on_whitespace?: boolean + pass:[/**] @property time_series_dimension For internal use by Elastic only. Marks the field as a time series dimension. Defaults to false. */ + time_series_dimension?: boolean + type: 'keyword' +} +---- + + + +[discrete] +[[MappingLongNumberProperty]] +=== MappingLongNumberProperty + +[source,ts,subs=+macros] +---- +interface MappingLongNumberProperty extends <<MappingNumberPropertyBase>> { + type: '<<long>>' + null_value?: <<long>> +} +---- + + + +[discrete] +[[MappingLongRangeProperty]] +=== MappingLongRangeProperty + +[source,ts,subs=+macros] +---- +interface MappingLongRangeProperty extends <<MappingRangePropertyBase>> { + type: 'long_range' +} +---- + + + +[discrete] +[[MappingMatchOnlyTextProperty]] +=== MappingMatchOnlyTextProperty + +[source,ts,subs=+macros] +---- +interface MappingMatchOnlyTextProperty { + type: 'match_only_text' + pass:[/**] @property fields Multi-fields allow the same string value to be indexed in multiple ways for different purposes, such as one field for search and a multi-field for sorting and aggregations, or the same string value analyzed by different analyzers. */ + fields?: Record<<<PropertyName>>, <<MappingProperty>>> + pass:[/**] @property meta <<Metadata>> about the field. */ + meta?: Record<string, string> + pass:[/**] @property copy_to Allows you to copy the values of multiple fields into a group field, which can then be queried as a single field. 
*/ + copy_to?: <<Fields>> +} +---- + + + +[discrete] +[[MappingMatchType]] +=== MappingMatchType + +[source,ts,subs=+macros] +---- +type MappingMatchType = 'simple' | 'regex' +---- + + + +[discrete] +[[MappingMurmur3HashProperty]] +=== MappingMurmur3HashProperty + +[source,ts,subs=+macros] +---- +interface MappingMurmur3HashProperty extends <<MappingDocValuesPropertyBase>> { + type: 'murmur3' +} +---- + + + +[discrete] +[[MappingNestedProperty]] +=== MappingNestedProperty + +[source,ts,subs=+macros] +---- +interface MappingNestedProperty extends <<MappingCorePropertyBase>> { + enabled?: boolean + include_in_parent?: boolean + include_in_root?: boolean + type: 'nested' +} +---- + + + +[discrete] +[[MappingNumberPropertyBase]] +=== MappingNumberPropertyBase + +[source,ts,subs=+macros] +---- +interface MappingNumberPropertyBase extends <<MappingDocValuesPropertyBase>> { + boost?: <<double>> + coerce?: boolean + ignore_malformed?: boolean + index?: boolean + on_script_error?: <<MappingOnScriptError>> + script?: <<Script>> | string + pass:[/**] @property time_series_metric For internal use by Elastic only. Marks the field as a time series dimension. Defaults to false. */ + time_series_metric?: <<MappingTimeSeriesMetricType>> + pass:[/**] @property time_series_dimension For internal use by Elastic only. Marks the field as a time series dimension. Defaults to false. */ + time_series_dimension?: boolean +} +---- + + + +[discrete] +[[MappingObjectProperty]] +=== MappingObjectProperty + +[source,ts,subs=+macros] +---- +interface MappingObjectProperty extends <<MappingCorePropertyBase>> { + enabled?: boolean + subobjects?: boolean + type?: 'object' +} +---- + + + +[discrete] +[[MappingOnScriptError]] +=== MappingOnScriptError + +[source,ts,subs=+macros] +---- +type MappingOnScriptError = 'fail' | 'continue' +---- + + + +[discrete] +[[MappingPercolatorProperty]] +=== MappingPercolatorProperty + +[source,ts,subs=+macros] +---- +interface MappingPercolatorProperty extends <<MappingPropertyBase>> { + type: 'percolator' +} +---- + + + +[discrete] +[[MappingPointProperty]] +=== MappingPointProperty + +[source,ts,subs=+macros] +---- +interface MappingPointProperty extends <<MappingDocValuesPropertyBase>> { + ignore_malformed?: boolean + ignore_z_value?: boolean + null_value?: string + type: 'point' +} +---- + + + +[discrete] +[[MappingProperty]] +=== MappingProperty + +[source,ts,subs=+macros] +---- +type MappingProperty = <<MappingBinaryProperty>> | <<MappingBooleanProperty>> | <<MappingDynamicProperty>> | <<MappingJoinProperty>> | <<MappingKeywordProperty>> | <<MappingMatchOnlyTextProperty>> | <<MappingPercolatorProperty>> | <<MappingRankFeatureProperty>> | <<MappingRankFeaturesProperty>> | <<MappingSearchAsYouTypeProperty>> | <<MappingTextProperty>> | <<MappingVersionProperty>> | <<MappingWildcardProperty>> | <<MappingDateNanosProperty>> | <<MappingDateProperty>> | <<MappingAggregateMetricDoubleProperty>> | <<MappingDenseVectorProperty>> | <<MappingFlattenedProperty>> | <<MappingNestedProperty>> | <<MappingObjectProperty>> | <<MappingSemanticTextProperty>> | <<MappingSparseVectorProperty>> | <<MappingCompletionProperty>> | <<MappingConstantKeywordProperty>> | <<MappingFieldAliasProperty>> | <<MappingHistogramProperty>> | <<MappingIpProperty>> | MappingMurmur3HashProperty | <<MappingTokenCountProperty>> | <<MappingGeoPointProperty>> | <<MappingGeoShapeProperty>> | <<MappingPointProperty>> | <<MappingShapeProperty>> | <<MappingByteNumberProperty>> | <<MappingDoubleNumberProperty>> | 
<<MappingFloatNumberProperty>> | <<MappingHalfFloatNumberProperty>> | <<MappingIntegerNumberProperty>> | <<MappingLongNumberProperty>> | <<MappingScaledFloatNumberProperty>> | <<MappingShortNumberProperty>> | <<MappingUnsignedLongNumberProperty>> | <<MappingDateRangeProperty>> | <<MappingDoubleRangeProperty>> | <<MappingFloatRangeProperty>> | <<MappingIntegerRangeProperty>> | <<MappingIpRangeProperty>> | <<MappingLongRangeProperty>> | <<MappingIcuCollationProperty>> +---- + + + +[discrete] +[[MappingPropertyBase]] +=== MappingPropertyBase + +[source,ts,subs=+macros] +---- +interface MappingPropertyBase { + pass:[/**] @property meta <<Metadata>> about the field. */ + meta?: Record<string, string> + properties?: Record<<<PropertyName>>, <<MappingProperty>>> + ignore_above?: <<integer>> + dynamic?: <<MappingDynamicMapping>> + fields?: Record<<<PropertyName>>, <<MappingProperty>>> +} +---- + + + +[discrete] +[[MappingRangePropertyBase]] +=== MappingRangePropertyBase + +[source,ts,subs=+macros] +---- +interface MappingRangePropertyBase extends <<MappingDocValuesPropertyBase>> { + boost?: <<double>> + coerce?: boolean + index?: boolean +} +---- + + + +[discrete] +[[MappingRankFeatureProperty]] +=== MappingRankFeatureProperty + +[source,ts,subs=+macros] +---- +interface MappingRankFeatureProperty extends <<MappingPropertyBase>> { + positive_score_impact?: boolean + type: 'rank_feature' +} +---- + + + +[discrete] +[[MappingRankFeaturesProperty]] +=== MappingRankFeaturesProperty + +[source,ts,subs=+macros] +---- +interface MappingRankFeaturesProperty extends <<MappingPropertyBase>> { + positive_score_impact?: boolean + type: 'rank_features' +} +---- + + + +[discrete] +[[MappingRoutingField]] +=== MappingRoutingField + +[source,ts,subs=+macros] +---- +interface MappingRoutingField { + required: boolean +} +---- + + + +[discrete] +[[MappingRuntimeField]] +=== MappingRuntimeField + +[source,ts,subs=+macros] +---- +interface MappingRuntimeField { + pass:[/**] @property fields For type `composite` */ + fields?: Record<string, <<MappingCompositeSubField>>> + pass:[/**] @property fetch_fields For type `lookup` */ + fetch_fields?: (<<MappingRuntimeFieldFetchFields>> | <<Field>>)[] + pass:[/**] @property format A custom format for `date` type runtime fields. */ + format?: string + pass:[/**] @property input_field For type `lookup` */ + input_field?: <<Field>> + pass:[/**] @property target_field For type `lookup` */ + target_field?: <<Field>> + pass:[/**] @property target_index For type `lookup` */ + target_index?: <<IndexName>> + pass:[/**] @property script Painless script executed at query time. */ + script?: <<Script>> | string + pass:[/**] @property type <<Field>> type, which can be: `boolean`, `composite`, `date`, `<<double>>`, `geo_point`, `ip`,`keyword`, `<<long>>`, or `lookup`. 
*/ + type: <<MappingRuntimeFieldType>> +} +---- + + + +[discrete] +[[MappingRuntimeFieldFetchFields]] +=== MappingRuntimeFieldFetchFields + +[source,ts,subs=+macros] +---- +interface MappingRuntimeFieldFetchFields { + field: <<Field>> + format?: string +} +---- + + + +[discrete] +[[MappingRuntimeFieldType]] +=== MappingRuntimeFieldType + +[source,ts,subs=+macros] +---- +type MappingRuntimeFieldType = 'boolean' | 'composite' | 'date' | '<<double>>' | 'geo_point' | 'ip' | 'keyword' | '<<long>>' | 'lookup' +---- + + + +[discrete] +[[MappingRuntimeFields]] +=== MappingRuntimeFields + +[source,ts,subs=+macros] +---- +type MappingRuntimeFields = Record<<<Field>>, <<MappingRuntimeField>>> +---- + + + +[discrete] +[[MappingScaledFloatNumberProperty]] +=== MappingScaledFloatNumberProperty + +[source,ts,subs=+macros] +---- +interface MappingScaledFloatNumberProperty extends <<MappingNumberPropertyBase>> { + type: 'scaled_float' + null_value?: <<double>> + scaling_factor?: <<double>> +} +---- + + + +[discrete] +[[MappingSearchAsYouTypeProperty]] +=== MappingSearchAsYouTypeProperty + +[source,ts,subs=+macros] +---- +interface MappingSearchAsYouTypeProperty extends <<MappingCorePropertyBase>> { + analyzer?: string + index?: boolean + index_options?: <<MappingIndexOptions>> + max_shingle_size?: <<integer>> + norms?: boolean + search_analyzer?: string + search_quote_analyzer?: string + similarity?: string | null + term_vector?: <<MappingTermVectorOption>> + type: 'search_as_you_type' +} +---- + + + +[discrete] +[[MappingSemanticTextProperty]] +=== MappingSemanticTextProperty + +[source,ts,subs=+macros] +---- +interface MappingSemanticTextProperty { + type: 'semantic_text' + meta?: Record<string, string> + inference_id: <<Id>> +} +---- + + + +[discrete] +[[MappingShapeProperty]] +=== MappingShapeProperty + +[source,ts,subs=+macros] +---- +interface MappingShapeProperty extends <<MappingDocValuesPropertyBase>> { + coerce?: boolean + ignore_malformed?: boolean + ignore_z_value?: boolean + orientation?: <<MappingGeoOrientation>> + type: 'shape' +} +---- + + + +[discrete] +[[MappingShortNumberProperty]] +=== MappingShortNumberProperty + +[source,ts,subs=+macros] +---- +interface MappingShortNumberProperty extends <<MappingNumberPropertyBase>> { + type: '<<short>>' + null_value?: <<short>> +} +---- + + + +[discrete] +[[MappingSizeField]] +=== MappingSizeField + +[source,ts,subs=+macros] +---- +interface MappingSizeField { + enabled: boolean +} +---- + + + +[discrete] +[[MappingSourceField]] +=== MappingSourceField + +[source,ts,subs=+macros] +---- +interface MappingSourceField { + compress?: boolean + compress_threshold?: string + enabled?: boolean + excludes?: string[] + includes?: string[] + mode?: <<MappingSourceFieldMode>> +} +---- + + + +[discrete] +[[MappingSourceFieldMode]] +=== MappingSourceFieldMode + +[source,ts,subs=+macros] +---- +type MappingSourceFieldMode = 'disabled' | 'stored' | 'synthetic' +---- + + + +[discrete] +[[MappingSparseVectorProperty]] +=== MappingSparseVectorProperty + +[source,ts,subs=+macros] +---- +interface MappingSparseVectorProperty extends <<MappingPropertyBase>> { + type: 'sparse_vector' +} +---- + + + +[discrete] +[[MappingSuggestContext]] +=== MappingSuggestContext + +[source,ts,subs=+macros] +---- +interface MappingSuggestContext { + name: <<Name>> + path?: <<Field>> + type: string + precision?: <<integer>> | string +} +---- + + + +[discrete] +[[MappingTermVectorOption]] +=== MappingTermVectorOption + +[source,ts,subs=+macros] +---- +type MappingTermVectorOption = 'no' | 
'yes' | 'with_offsets' | 'with_positions' | 'with_positions_offsets' | 'with_positions_offsets_payloads' | 'with_positions_payloads' +---- + + + +[discrete] +[[MappingTextIndexPrefixes]] +=== MappingTextIndexPrefixes + +[source,ts,subs=+macros] +---- +interface MappingTextIndexPrefixes { + max_chars: <<integer>> + min_chars: <<integer>> +} +---- + + + +[discrete] +[[MappingTextProperty]] +=== MappingTextProperty + +[source,ts,subs=+macros] +---- +interface MappingTextProperty extends <<MappingCorePropertyBase>> { + analyzer?: string + boost?: <<double>> + eager_global_ordinals?: boolean + fielddata?: boolean + fielddata_frequency_filter?: <<IndicesFielddataFrequencyFilter>> + index?: boolean + index_options?: <<MappingIndexOptions>> + index_phrases?: boolean + index_prefixes?: <<MappingTextIndexPrefixes>> | null + norms?: boolean + position_increment_gap?: <<integer>> + search_analyzer?: string + search_quote_analyzer?: string + similarity?: string | null + term_vector?: <<MappingTermVectorOption>> + type: 'text' +} +---- + + + +[discrete] +[[MappingTimeSeriesMetricType]] +=== MappingTimeSeriesMetricType + +[source,ts,subs=+macros] +---- +type MappingTimeSeriesMetricType = 'gauge' | 'counter' | 'summary' | 'histogram' | 'position' +---- + + + +[discrete] +[[MappingTokenCountProperty]] +=== MappingTokenCountProperty + +[source,ts,subs=+macros] +---- +interface MappingTokenCountProperty extends <<MappingDocValuesPropertyBase>> { + analyzer?: string + boost?: <<double>> + index?: boolean + null_value?: <<double>> + enable_position_increments?: boolean + type: 'token_count' +} +---- + + + +[discrete] +[[MappingTypeMapping]] +=== MappingTypeMapping + +[source,ts,subs=+macros] +---- +interface MappingTypeMapping { + all_field?: <<MappingAllField>> + date_detection?: boolean + dynamic?: <<MappingDynamicMapping>> + dynamic_date_formats?: string[] + dynamic_templates?: Record<string, <<MappingDynamicTemplate>>>[] + _field_names?: <<MappingFieldNamesField>> + index_field?: <<MappingIndexField>> + _meta?: <<Metadata>> + numeric_detection?: boolean + properties?: Record<<<PropertyName>>, <<MappingProperty>>> + _routing?: <<MappingRoutingField>> + _size?: <<MappingSizeField>> + _source?: <<MappingSourceField>> + runtime?: Record<string, <<MappingRuntimeField>>> + enabled?: boolean + subobjects?: boolean + _data_stream_timestamp?: <<MappingDataStreamTimestamp>> +} +---- + + + +[discrete] +[[MappingUnsignedLongNumberProperty]] +=== MappingUnsignedLongNumberProperty + +[source,ts,subs=+macros] +---- +interface MappingUnsignedLongNumberProperty extends <<MappingNumberPropertyBase>> { + type: 'unsigned_long' + null_value?: <<ulong>> +} +---- + + + +[discrete] +[[MappingVersionProperty]] +=== MappingVersionProperty + +[source,ts,subs=+macros] +---- +interface MappingVersionProperty extends <<MappingDocValuesPropertyBase>> { + type: 'version' +} +---- + + + +[discrete] +[[MappingWildcardProperty]] +=== MappingWildcardProperty + +[source,ts,subs=+macros] +---- +interface MappingWildcardProperty extends <<MappingDocValuesPropertyBase>> { + type: 'wildcard' + null_value?: string +} +---- + + + +[discrete] +[[QueryDslBoolQuery]] +=== QueryDslBoolQuery + +[source,ts,subs=+macros] +---- +interface QueryDslBoolQuery extends <<QueryDslQueryBase>> { + pass:[/**] @property filter The clause (query) must appear in matching documents. However, unlike `must`, the score of the query will be ignored. 
*/ + filter?: <<QueryDslQueryContainer>> | <<QueryDslQueryContainer>>[] + pass:[/**] @property minimum_should_match Specifies the number or percentage of `should` clauses returned documents must match. */ + minimum_should_match?: <<MinimumShouldMatch>> + pass:[/**] @property must The clause (query) must appear in matching documents and will contribute to the score. */ + must?: <<QueryDslQueryContainer>> | <<QueryDslQueryContainer>>[] + pass:[/**] @property must_not The clause (query) must not appear in the matching documents. Because scoring is ignored, a score of `0` is returned for all documents. */ + must_not?: <<QueryDslQueryContainer>> | <<QueryDslQueryContainer>>[] + pass:[/**] @property should The clause (query) should appear in the matching document. */ + should?: <<QueryDslQueryContainer>> | <<QueryDslQueryContainer>>[] +} +----
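+
+As a quick orientation (not generated from the Elasticsearch specification), a `bool` query that combines these clause types could be sent with the client roughly as follows. The index and field names (`my-index`, `title`, `status`, `summary`) are placeholders.
+
+[source,ts]
+----
+import { Client } from '@elastic/elasticsearch'
+
+const client = new Client({ node: 'http://localhost:9200' })
+
+async function run () {
+  // Hypothetical index and field names, used only to show how the clauses compose.
+  const result = await client.search({
+    index: 'my-index',
+    query: {
+      bool: {
+        must: [{ match: { title: 'winter' } }],
+        filter: [{ term: { status: 'published' } }],
+        should: [{ match: { summary: 'snow' } }],
+        minimum_should_match: 1
+      }
+    }
+  })
+  console.log(result.hits.hits)
+}
+
+run().catch(console.log)
+----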
+ + + +[discrete] +[[QueryDslBoostingQuery]] +=== QueryDslBoostingQuery + +[source,ts,subs=+macros] +---- +interface QueryDslBoostingQuery extends <<QueryDslQueryBase>> { + pass:[/**] @property negative_boost Floating point number between 0 and 1.0 used to decrease the relevance scores of documents matching the `negative` query. */ + negative_boost: <<double>> + pass:[/**] @property negative Query used to decrease the relevance score of matching documents. */ + negative: <<QueryDslQueryContainer>> + pass:[/**] @property positive Any returned documents must match this query. */ + positive: <<QueryDslQueryContainer>> +} +---- + + + +[discrete] +[[QueryDslChildScoreMode]] +=== QueryDslChildScoreMode + +[source,ts,subs=+macros] +---- +type QueryDslChildScoreMode = 'none' | 'avg' | 'sum' | 'max' | 'min' +---- + + + +[discrete] +[[QueryDslCombinedFieldsOperator]] +=== QueryDslCombinedFieldsOperator + +[source,ts,subs=+macros] +---- +type QueryDslCombinedFieldsOperator = 'or' | 'and' +---- + + + +[discrete] +[[QueryDslCombinedFieldsQuery]] +=== QueryDslCombinedFieldsQuery + +[source,ts,subs=+macros] +---- +interface QueryDslCombinedFieldsQuery extends <<QueryDslQueryBase>> { + pass:[/**] @property fields List of fields to search. <<Field>> wildcard patterns are allowed. Only `text` fields are supported, and they must all have the same search `analyzer`. */ + fields: <<Field>>[] + pass:[/**] @property query Text to search for in the provided `fields`. The `combined_fields` query analyzes the provided text before performing a search. */ + query: string + pass:[/**] @property auto_generate_synonyms_phrase_query If true, match phrase queries are automatically created for multi-term synonyms. */ + auto_generate_synonyms_phrase_query?: boolean + pass:[/**] @property operator Boolean logic used to interpret text in the query value. */ + operator?: <<QueryDslCombinedFieldsOperator>> + pass:[/**] @property minimum_should_match Minimum number of clauses that must match for a document to be returned. */ + minimum_should_match?: <<MinimumShouldMatch>> + pass:[/**] @property zero_terms_query Indicates whether no documents are returned if the analyzer removes all tokens, such as when using a `stop` filter. */ + zero_terms_query?: <<QueryDslCombinedFieldsZeroTerms>> +} +---- + + + +[discrete] +[[QueryDslCombinedFieldsZeroTerms]] +=== QueryDslCombinedFieldsZeroTerms + +[source,ts,subs=+macros] +---- +type QueryDslCombinedFieldsZeroTerms = 'none' | 'all' +---- + + + +[discrete] +[[QueryDslCommonTermsQuery]] +=== QueryDslCommonTermsQuery + +[source,ts,subs=+macros] +---- +interface QueryDslCommonTermsQuery extends <<QueryDslQueryBase>> { + analyzer?: string + cutoff_frequency?: <<double>> + high_freq_operator?: <<QueryDslOperator>> + low_freq_operator?: <<QueryDslOperator>> + minimum_should_match?: <<MinimumShouldMatch>> + query: string +} +---- + + + +[discrete] +[[QueryDslConstantScoreQuery]] +=== QueryDslConstantScoreQuery + +[source,ts,subs=+macros] +---- +interface QueryDslConstantScoreQuery extends <<QueryDslQueryBase>> { + pass:[/**] @property filter Filter query you wish to run. Any returned documents must match this query. Filter queries do not calculate relevance scores. To speed up performance, Elasticsearch automatically caches frequently used filter queries. */ + filter: <<QueryDslQueryContainer>> +} +---- + + + +[discrete] +[[QueryDslDateDecayFunction]] +=== QueryDslDateDecayFunction + +[source,ts,subs=+macros] +---- +interface QueryDslDateDecayFunctionKeys extends <<QueryDslDecayFunctionBase>><<<DateMath>>, <<Duration>>> {} +type QueryDslDateDecayFunction = QueryDslDateDecayFunctionKeys + & { [property: string]: <<QueryDslDecayPlacement>> | <<QueryDslMultiValueMode>> } +---- + + + +[discrete] +[[QueryDslDateDistanceFeatureQuery]] +=== QueryDslDateDistanceFeatureQuery + +[source,ts,subs=+macros] +---- +interface QueryDslDateDistanceFeatureQuery extends <<QueryDslDistanceFeatureQueryBase>><<<DateMath>>, <<Duration>>> {} +---- + + + +[discrete] +[[QueryDslDateRangeQuery]] +=== QueryDslDateRangeQuery + +[source,ts,subs=+macros] +---- +interface QueryDslDateRangeQuery extends <<QueryDslRangeQueryBase>><<<DateMath>>> { + pass:[/**] @property format Date format used to convert `date` values in the query. */ + format?: <<DateFormat>> + pass:[/**] @property time_zone Coordinated Universal Time (UTC) offset or IANA time zone used to convert `date` values in the query to UTC. */ + time_zone?: <<TimeZone>> +} +---- + + + +[discrete] +[[QueryDslDecayFunction]] +=== QueryDslDecayFunction + +[source,ts,subs=+macros] +---- +type QueryDslDecayFunction = <<QueryDslUntypedDecayFunction>> | <<QueryDslDateDecayFunction>> | <<QueryDslNumericDecayFunction>> | <<QueryDslGeoDecayFunction>> +---- + + + +[discrete] +[[QueryDslDecayFunctionBase]] +=== QueryDslDecayFunctionBase + +[source,ts,subs=+macros] +---- +interface QueryDslDecayFunctionBase<TOrigin = unknown, TScale = unknown> { + pass:[/**] @property multi_value_mode Determines how the distance is calculated when a field used for computing the decay contains multiple values. */ + multi_value_mode?: <<QueryDslMultiValueMode>> +} +---- + + + +[discrete] +[[QueryDslDecayPlacement]] +=== QueryDslDecayPlacement + +[source,ts,subs=+macros] +---- +interface QueryDslDecayPlacement<TOrigin = unknown, TScale = unknown> { + pass:[/**] @property decay Defines how documents are scored at the distance given at scale. */ + decay?: <<double>> + pass:[/**] @property offset If defined, the decay function will only compute the decay function for documents with a distance greater than the defined `offset`. */ + offset?: TScale + pass:[/**] @property scale Defines the distance from origin + offset at which the computed score will equal `decay` parameter.
*/ + scale?: TScale + pass:[/**] @property origin The point of origin used for calculating distance. Must be given as a number for numeric field, date for date fields and geo point for geo fields. */ + origin?: TOrigin +} +---- + + + +[discrete] +[[QueryDslDisMaxQuery]] +=== QueryDslDisMaxQuery + +[source,ts,subs=+macros] +---- +interface QueryDslDisMaxQuery extends <<QueryDslQueryBase>> { + pass:[/**] @property queries One or more query clauses. Returned documents must match one or more of these queries. If a document matches multiple queries, Elasticsearch uses the highest relevance score. */ + queries: <<QueryDslQueryContainer>>[] + pass:[/**] @property tie_breaker Floating point number between 0 and 1.0 used to increase the relevance scores of documents matching multiple query clauses. */ + tie_breaker?: <<double>> +} +---- + + + +[discrete] +[[QueryDslDistanceFeatureQuery]] +=== QueryDslDistanceFeatureQuery + +[source,ts,subs=+macros] +---- +type QueryDslDistanceFeatureQuery = <<QueryDslUntypedDistanceFeatureQuery>> | <<QueryDslGeoDistanceFeatureQuery>> | <<QueryDslDateDistanceFeatureQuery>> +---- + + + +[discrete] +[[QueryDslDistanceFeatureQueryBase]] +=== QueryDslDistanceFeatureQueryBase + +[source,ts,subs=+macros] +---- +interface QueryDslDistanceFeatureQueryBase<TOrigin = unknown, TDistance = unknown> extends <<QueryDslQueryBase>> { + pass:[/**] @property origin Date or point of origin used to calculate distances. If the `field` value is a `date` or `date_nanos` field, the `origin` value must be a date. Date Math, such as `now-1h`, is supported. If the field value is a `geo_point` field, the `origin` value must be a geopoint. */ + origin: TOrigin + pass:[/**] @property pivot <<Distance>> from the `origin` at which relevance scores receive half of the `boost` value. If the `field` value is a `date` or `date_nanos` field, the `pivot` value must be a time unit, such as `1h` or `10d`. If the `field` value is a `geo_point` field, the `pivot` value must be a distance unit, such as `1km` or `12m`. */ + pivot: TDistance + pass:[/**] @property field <<Name>> of the field used to calculate distances. This field must meet the following criteria: be a `date`, `date_nanos` or `geo_point` field; have an `index` mapping parameter value of `true`, which is the default; have an `doc_values` mapping parameter value of `true`, which is the default. */ + field: <<Field>> +} +---- + + + +[discrete] +[[QueryDslExistsQuery]] +=== QueryDslExistsQuery + +[source,ts,subs=+macros] +---- +interface QueryDslExistsQuery extends <<QueryDslQueryBase>> { + pass:[/**] @property field <<Name>> of the field you wish to search. */ + field: <<Field>> +} +---- + + + +[discrete] +[[QueryDslFieldAndFormat]] +=== QueryDslFieldAndFormat + +[source,ts,subs=+macros] +---- +interface QueryDslFieldAndFormat { + pass:[/**] @property field Wildcard pattern. The request returns values for field names matching this pattern. */ + field: <<Field>> + pass:[/**] @property format Format in which the values are returned. */ + format?: string + include_unmapped?: boolean +} +---- + + + +[discrete] +[[QueryDslFieldLookup]] +=== QueryDslFieldLookup + +[source,ts,subs=+macros] +---- +interface QueryDslFieldLookup { + pass:[/**] @property id `id` of the document. */ + id: <<Id>> + pass:[/**] @property index Index from which to retrieve the document. */ + index?: <<IndexName>> + pass:[/**] @property path <<Name>> of the field. */ + path?: <<Field>> + pass:[/**] @property routing Custom routing value. 
*/ + routing?: <<Routing>> +} +---- + + + +[discrete] +[[QueryDslFieldValueFactorModifier]] +=== QueryDslFieldValueFactorModifier + +[source,ts,subs=+macros] +---- +type QueryDslFieldValueFactorModifier = 'none' | 'log' | 'log1p' | 'log2p' | 'ln' | 'ln1p' | 'ln2p' | 'square' | 'sqrt' | 'reciprocal' +---- + + + +[discrete] +[[QueryDslFieldValueFactorScoreFunction]] +=== QueryDslFieldValueFactorScoreFunction + +[source,ts,subs=+macros] +---- +interface QueryDslFieldValueFactorScoreFunction { + pass:[/**] @property field <<Field>> to be extracted from the document. */ + field: <<Field>> + pass:[/**] @property factor Optional factor to multiply the field value with. */ + factor?: <<double>> + pass:[/**] @property missing Value used if the document doesn’t have that field. The modifier and factor are still applied to it as though it were read from the document. */ + missing?: <<double>> + pass:[/**] @property modifier Modifier to apply to the field value. */ + modifier?: <<QueryDslFieldValueFactorModifier>> +} +---- + + + +[discrete] +[[QueryDslFunctionBoostMode]] +=== QueryDslFunctionBoostMode + +[source,ts,subs=+macros] +---- +type QueryDslFunctionBoostMode = 'multiply' | 'replace' | 'sum' | 'avg' | 'max' | 'min' +---- + + + +[discrete] +[[QueryDslFunctionScoreContainer]] +=== QueryDslFunctionScoreContainer + +[source,ts,subs=+macros] +---- +interface QueryDslFunctionScoreContainer { + pass:[/**] @property exp Function that scores a document with a exponential decay, depending on the distance of a numeric field value of the document from an origin. */ + exp?: <<QueryDslDecayFunction>> + pass:[/**] @property gauss Function that scores a document with a normal decay, depending on the distance of a numeric field value of the document from an origin. */ + gauss?: <<QueryDslDecayFunction>> + pass:[/**] @property linear Function that scores a document with a linear decay, depending on the distance of a numeric field value of the document from an origin. */ + linear?: <<QueryDslDecayFunction>> + pass:[/**] @property field_value_factor Function allows you to use a field from a document to influence the score. It’s similar to using the script_score function, however, it avoids the overhead of scripting. */ + field_value_factor?: <<QueryDslFieldValueFactorScoreFunction>> + pass:[/**] @property random_score Generates scores that are uniformly distributed from 0 up to but not including 1. In case you want scores to be reproducible, it is possible to provide a `seed` and `field`. */ + random_score?: <<QueryDslRandomScoreFunction>> + pass:[/**] @property script_score Enables you to wrap another query and customize the scoring of it optionally with a computation derived from other numeric field values in the doc using a script expression. 
*/ + script_score?: <<QueryDslScriptScoreFunction>> + filter?: <<QueryDslQueryContainer>> + weight?: <<double>> +} +---- + + + +[discrete] +[[QueryDslFunctionScoreMode]] +=== QueryDslFunctionScoreMode + +[source,ts,subs=+macros] +---- +type QueryDslFunctionScoreMode = 'multiply' | 'sum' | 'avg' | 'first' | 'max' | 'min' +---- + + + +[discrete] +[[QueryDslFunctionScoreQuery]] +=== QueryDslFunctionScoreQuery + +[source,ts,subs=+macros] +---- +interface QueryDslFunctionScoreQuery extends <<QueryDslQueryBase>> { + pass:[/**] @property boost_mode Defines how the newly computed score is combined with the score of the query. */ + boost_mode?: <<QueryDslFunctionBoostMode>> + pass:[/**] @property functions One or more functions that compute a new score for each document returned by the query. */ + functions?: <<QueryDslFunctionScoreContainer>>[] + pass:[/**] @property max_boost Restricts the new score to not exceed the provided limit. */ + max_boost?: <<double>> + pass:[/**] @property min_score Excludes documents that do not meet the provided score threshold. */ + min_score?: <<double>> + pass:[/**] @property query A query that determines the documents for which a new score is computed. */ + query?: <<QueryDslQueryContainer>> + pass:[/**] @property score_mode Specifies how the computed scores are combined. */ + score_mode?: <<QueryDslFunctionScoreMode>> +} +----
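+
+For orientation only (not generated from the Elasticsearch specification), a `function_score` search that combines a base query with scoring functions might look like the sketch below. The index and field names (`my-index`, `title`, `likes`) are placeholders.
+
+[source,ts]
+----
+import { Client } from '@elastic/elasticsearch'
+
+const client = new Client({ node: 'http://localhost:9200' })
+
+async function run () {
+  // Hypothetical index and fields, used only to show how the types above fit together.
+  const result = await client.search({
+    index: 'my-index',
+    query: {
+      function_score: {
+        query: { match: { title: 'winter' } },
+        functions: [
+          { field_value_factor: { field: 'likes', factor: 1.2, missing: 0 } },
+          { random_score: {}, weight: 0.5 }
+        ],
+        score_mode: 'sum',
+        boost_mode: 'multiply'
+      }
+    }
+  })
+  console.log(result.hits.hits)
+}
+
+run().catch(console.log)
+----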
+ + + +[discrete] +[[QueryDslFuzzyQuery]] +=== QueryDslFuzzyQuery + +[source,ts,subs=+macros] +---- +interface QueryDslFuzzyQuery extends <<QueryDslQueryBase>> { + pass:[/**] @property max_expansions Maximum number of variations created. */ + max_expansions?: <<integer>> + pass:[/**] @property prefix_length Number of beginning characters left unchanged when creating expansions. */ + prefix_length?: <<integer>> + pass:[/**] @property rewrite Method used to rewrite the query. */ + rewrite?: <<MultiTermQueryRewrite>> + pass:[/**] @property transpositions Indicates whether edits include transpositions of two adjacent characters (for example `ab` to `ba`). */ + transpositions?: boolean + pass:[/**] @property fuzziness Maximum edit distance allowed for matching. */ + fuzziness?: <<Fuzziness>> + pass:[/**] @property value Term you wish to find in the provided field. */ + value: string | <<double>> | boolean +} +---- + + + +[discrete] +[[QueryDslGeoBoundingBoxQuery]] +=== QueryDslGeoBoundingBoxQuery + +[source,ts,subs=+macros] +---- +interface QueryDslGeoBoundingBoxQueryKeys extends <<QueryDslQueryBase>> { + type?: <<QueryDslGeoExecution>> + validation_method?: <<QueryDslGeoValidationMethod>> + ignore_unmapped?: boolean +} +type QueryDslGeoBoundingBoxQuery = QueryDslGeoBoundingBoxQueryKeys + & { [property: string]: <<GeoBounds>> | <<QueryDslGeoExecution>> | <<QueryDslGeoValidationMethod>> | boolean | <<float>> | string } +---- + + + +[discrete] +[[QueryDslGeoDecayFunction]] +=== QueryDslGeoDecayFunction + +[source,ts,subs=+macros] +---- +interface QueryDslGeoDecayFunctionKeys extends <<QueryDslDecayFunctionBase>><<<GeoLocation>>, <<Distance>>> {} +type QueryDslGeoDecayFunction = QueryDslGeoDecayFunctionKeys + & { [property: string]: <<QueryDslDecayPlacement>> | <<QueryDslMultiValueMode>> } +---- + + + +[discrete] +[[QueryDslGeoDistanceFeatureQuery]] +=== QueryDslGeoDistanceFeatureQuery + +[source,ts,subs=+macros] +---- +interface QueryDslGeoDistanceFeatureQuery extends <<QueryDslDistanceFeatureQueryBase>><<<GeoLocation>>, <<Distance>>> {} +---- + + + +[discrete] +[[QueryDslGeoDistanceQuery]] +=== QueryDslGeoDistanceQuery + +[source,ts,subs=+macros] +---- +interface QueryDslGeoDistanceQueryKeys extends <<QueryDslQueryBase>> { + distance: <<Distance>> + distance_type?: <<GeoDistanceType>> + validation_method?: <<QueryDslGeoValidationMethod>> + ignore_unmapped?: boolean +} +type QueryDslGeoDistanceQuery = QueryDslGeoDistanceQueryKeys + & { [property: string]: <<GeoLocation>> | <<Distance>> | <<GeoDistanceType>> | <<QueryDslGeoValidationMethod>> | boolean | <<float>> | string } +---- + + + +[discrete] +[[QueryDslGeoExecution]] +=== QueryDslGeoExecution + +[source,ts,subs=+macros] +---- +type QueryDslGeoExecution = 'memory' | 'indexed' +---- + + + +[discrete] +[[QueryDslGeoPolygonPoints]] +=== QueryDslGeoPolygonPoints + +[source,ts,subs=+macros] +---- +interface QueryDslGeoPolygonPoints { + points: <<GeoLocation>>[] +} +---- + + + +[discrete] +[[QueryDslGeoPolygonQuery]] +=== QueryDslGeoPolygonQuery + +[source,ts,subs=+macros] +---- +interface QueryDslGeoPolygonQueryKeys extends <<QueryDslQueryBase>> { + validation_method?: <<QueryDslGeoValidationMethod>> + ignore_unmapped?: boolean +} +type QueryDslGeoPolygonQuery = QueryDslGeoPolygonQueryKeys + & { [property: string]: <<QueryDslGeoPolygonPoints>> | <<QueryDslGeoValidationMethod>> | boolean | <<float>> | string } +---- + + + +[discrete] +[[QueryDslGeoShapeFieldQuery]] +=== QueryDslGeoShapeFieldQuery + +[source,ts,subs=+macros] +---- +interface QueryDslGeoShapeFieldQuery { + shape?: <<GeoShape>> + pass:[/**] @property indexed_shape Query using an indexed shape retrieved from the specified document and path. */ + indexed_shape?: <<QueryDslFieldLookup>> + pass:[/**] @property relation Spatial relation operator used to search a geo field.
*/ + relation?: <<GeoShapeRelation>> +} +---- + + + +[discrete] +[[QueryDslGeoShapeQuery]] +=== QueryDslGeoShapeQuery + +[source,ts,subs=+macros] +---- +interface QueryDslGeoShapeQueryKeys extends <<QueryDslQueryBase>> { + ignore_unmapped?: boolean +} +type QueryDslGeoShapeQuery = QueryDslGeoShapeQueryKeys + & { [property: string]: <<QueryDslGeoShapeFieldQuery>> | boolean | <<float>> | string } +---- + + + +[discrete] +[[QueryDslGeoValidationMethod]] +=== QueryDslGeoValidationMethod + +[source,ts,subs=+macros] +---- +type QueryDslGeoValidationMethod = 'coerce' | 'ignore_malformed' | 'strict' +---- + + + +[discrete] +[[QueryDslHasChildQuery]] +=== QueryDslHasChildQuery + +[source,ts,subs=+macros] +---- +interface QueryDslHasChildQuery extends <<QueryDslQueryBase>> { + pass:[/**] @property ignore_unmapped Indicates whether to ignore an unmapped `type` and not return any documents instead of an error. */ + ignore_unmapped?: boolean + pass:[/**] @property inner_hits If defined, each search hit will contain inner hits. */ + inner_hits?: <<SearchInnerHits>> + pass:[/**] @property max_children Maximum number of child documents that match the query allowed for a returned parent document. If the parent document exceeds this limit, it is excluded from the search results. */ + max_children?: <<integer>> + pass:[/**] @property min_children Minimum number of child documents that match the query required to match the query for a returned parent document. If the parent document does not meet this limit, it is excluded from the search results. */ + min_children?: <<integer>> + pass:[/**] @property query Query you wish to run on child documents of the `type` field. If a child document matches the search, the query returns the parent document. */ + query: <<QueryDslQueryContainer>> + pass:[/**] @property score_mode Indicates how scores for matching child documents affect the root parent document’s relevance score. */ + score_mode?: <<QueryDslChildScoreMode>> + pass:[/**] @property type <<Name>> of the child relationship mapped for the `join` field. */ + type: <<RelationName>> +} +---- + + + +[discrete] +[[QueryDslHasParentQuery]] +=== QueryDslHasParentQuery + +[source,ts,subs=+macros] +---- +interface QueryDslHasParentQuery extends <<QueryDslQueryBase>> { + pass:[/**] @property ignore_unmapped Indicates whether to ignore an unmapped `parent_type` and not return any documents instead of an error. You can use this parameter to query multiple indices that may not contain the `parent_type`. */ + ignore_unmapped?: boolean + pass:[/**] @property inner_hits If defined, each search hit will contain inner hits. */ + inner_hits?: <<SearchInnerHits>> + pass:[/**] @property parent_type <<Name>> of the parent relationship mapped for the `join` field. */ + parent_type: <<RelationName>> + pass:[/**] @property query Query you wish to run on parent documents of the `parent_type` field. If a parent document matches the search, the query returns its child documents. */ + query: <<QueryDslQueryContainer>> + pass:[/**] @property score Indicates whether the relevance score of a matching parent document is aggregated into its child documents. */ + score?: boolean +} +---- + + + +[discrete] +[[QueryDslIdsQuery]] +=== QueryDslIdsQuery + +[source,ts,subs=+macros] +---- +interface QueryDslIdsQuery extends <<QueryDslQueryBase>> { + pass:[/**] @property values An array of document IDs. 
*/ + values?: <<Ids>> +} +---- + + + +[discrete] +[[QueryDslIntervalsAllOf]] +=== QueryDslIntervalsAllOf + +[source,ts,subs=+macros] +---- +interface QueryDslIntervalsAllOf { + pass:[/**] @property intervals An array of rules to combine. All rules must produce a match in a document for the overall source to match. */ + intervals: <<QueryDslIntervalsContainer>>[] + pass:[/**] @property max_gaps Maximum number of positions between the matching terms. Intervals produced by the rules further apart than this are not considered matches. */ + max_gaps?: <<integer>> + pass:[/**] @property ordered If `true`, intervals produced by the rules should appear in the order in which they are specified. */ + ordered?: boolean + pass:[/**] @property filter Rule used to filter returned intervals. */ + filter?: <<QueryDslIntervalsFilter>> +} +---- + + + +[discrete] +[[QueryDslIntervalsAnyOf]] +=== QueryDslIntervalsAnyOf + +[source,ts,subs=+macros] +---- +interface QueryDslIntervalsAnyOf { + pass:[/**] @property intervals An array of rules to match. */ + intervals: <<QueryDslIntervalsContainer>>[] + pass:[/**] @property filter Rule used to filter returned intervals. */ + filter?: <<QueryDslIntervalsFilter>> +} +---- + + + +[discrete] +[[QueryDslIntervalsContainer]] +=== QueryDslIntervalsContainer + +[source,ts,subs=+macros] +---- +interface QueryDslIntervalsContainer { + pass:[/**] @property all_of Returns matches that span a combination of other rules. */ + all_of?: <<QueryDslIntervalsAllOf>> + pass:[/**] @property any_of Returns intervals produced by any of its sub-rules. */ + any_of?: <<QueryDslIntervalsAnyOf>> + pass:[/**] @property fuzzy Matches analyzed text. */ + fuzzy?: <<QueryDslIntervalsFuzzy>> + pass:[/**] @property match Matches analyzed text. */ + match?: <<QueryDslIntervalsMatch>> + pass:[/**] @property prefix Matches terms that start with a specified set of characters. */ + prefix?: <<QueryDslIntervalsPrefix>> + pass:[/**] @property wildcard Matches terms using a wildcard pattern. */ + wildcard?: <<QueryDslIntervalsWildcard>> +} +---- + + + +[discrete] +[[QueryDslIntervalsFilter]] +=== QueryDslIntervalsFilter + +[source,ts,subs=+macros] +---- +interface QueryDslIntervalsFilter { + pass:[/**] @property after Query used to return intervals that follow an interval from the `filter` rule. */ + after?: <<QueryDslIntervalsContainer>> + pass:[/**] @property before Query used to return intervals that occur before an interval from the `filter` rule. */ + before?: <<QueryDslIntervalsContainer>> + pass:[/**] @property contained_by Query used to return intervals contained by an interval from the `filter` rule. */ + contained_by?: <<QueryDslIntervalsContainer>> + pass:[/**] @property containing Query used to return intervals that contain an interval from the `filter` rule. */ + containing?: <<QueryDslIntervalsContainer>> + pass:[/**] @property not_contained_by Query used to return intervals that are **not** contained by an interval from the `filter` rule. */ + not_contained_by?: <<QueryDslIntervalsContainer>> + pass:[/**] @property not_containing Query used to return intervals that do **not** contain an interval from the `filter` rule. */ + not_containing?: <<QueryDslIntervalsContainer>> + pass:[/**] @property not_overlapping Query used to return intervals that do **not** overlap with an interval from the `filter` rule. */ + not_overlapping?: <<QueryDslIntervalsContainer>> + pass:[/**] @property overlapping Query used to return intervals that overlap with an interval from the `filter` rule. 
*/ + overlapping?: <<QueryDslIntervalsContainer>> + pass:[/**] @property script <<Script>> used to return matching documents. This script must return a boolean value: `true` or `false`. */ + script?: <<Script>> | string +} +---- + + + +[discrete] +[[QueryDslIntervalsFuzzy]] +=== QueryDslIntervalsFuzzy + +[source,ts,subs=+macros] +---- +interface QueryDslIntervalsFuzzy { + pass:[/**] @property analyzer Analyzer used to normalize the term. */ + analyzer?: string + pass:[/**] @property fuzziness Maximum edit distance allowed for matching. */ + fuzziness?: <<Fuzziness>> + pass:[/**] @property prefix_length Number of beginning characters left unchanged when creating expansions. */ + prefix_length?: <<integer>> + pass:[/**] @property term The term to match. */ + term: string + pass:[/**] @property transpositions Indicates whether edits include transpositions of two adjacent characters (for example, `ab` to `ba`). */ + transpositions?: boolean + pass:[/**] @property use_field If specified, match intervals from this field rather than the top-level field. The `term` is normalized using the search analyzer from this field, unless `analyzer` is specified separately. */ + use_field?: <<Field>> +} +---- + + + +[discrete] +[[QueryDslIntervalsMatch]] +=== QueryDslIntervalsMatch + +[source,ts,subs=+macros] +---- +interface QueryDslIntervalsMatch { + pass:[/**] @property analyzer Analyzer used to analyze terms in the query. */ + analyzer?: string + pass:[/**] @property max_gaps Maximum number of positions between the matching terms. Terms further apart than this are not considered matches. */ + max_gaps?: <<integer>> + pass:[/**] @property ordered If `true`, matching terms must appear in their specified order. */ + ordered?: boolean + pass:[/**] @property query Text you wish to find in the provided field. */ + query: string + pass:[/**] @property use_field If specified, match intervals from this field rather than the top-level field. The `term` is normalized using the search analyzer from this field, unless `analyzer` is specified separately. */ + use_field?: <<Field>> + pass:[/**] @property filter An optional interval filter. */ + filter?: <<QueryDslIntervalsFilter>> +} +---- + + + +[discrete] +[[QueryDslIntervalsPrefix]] +=== QueryDslIntervalsPrefix + +[source,ts,subs=+macros] +---- +interface QueryDslIntervalsPrefix { + pass:[/**] @property analyzer Analyzer used to analyze the `prefix`. */ + analyzer?: string + pass:[/**] @property prefix Beginning characters of terms you wish to find in the top-level field. */ + prefix: string + pass:[/**] @property use_field If specified, match intervals from this field rather than the top-level field. The `prefix` is normalized using the search analyzer from this field, unless `analyzer` is specified separately. */ + use_field?: <<Field>> +} +---- + + + +[discrete] +[[QueryDslIntervalsQuery]] +=== QueryDslIntervalsQuery + +[source,ts,subs=+macros] +---- +interface QueryDslIntervalsQuery extends <<QueryDslQueryBase>> { + pass:[/**] @property all_of Returns matches that span a combination of other rules. */ + all_of?: <<QueryDslIntervalsAllOf>> + pass:[/**] @property any_of Returns intervals produced by any of its sub-rules. */ + any_of?: <<QueryDslIntervalsAnyOf>> + pass:[/**] @property fuzzy Matches terms that are similar to the provided term, within an edit distance defined by `fuzziness`. */ + fuzzy?: <<QueryDslIntervalsFuzzy>> + pass:[/**] @property match Matches analyzed text. 
*/ + match?: <<QueryDslIntervalsMatch>> + pass:[/**] @property prefix Matches terms that start with a specified set of characters. */ + prefix?: <<QueryDslIntervalsPrefix>> + pass:[/**] @property wildcard Matches terms using a wildcard pattern. */ + wildcard?: <<QueryDslIntervalsWildcard>> +} +---- + + + +[discrete] +[[QueryDslIntervalsWildcard]] +=== QueryDslIntervalsWildcard + +[source,ts,subs=+macros] +---- +interface QueryDslIntervalsWildcard { + pass:[/**] @property analyzer Analyzer used to analyze the `pattern`. Defaults to the top-level field's analyzer. */ + analyzer?: string + pass:[/**] @property pattern Wildcard pattern used to find matching terms. */ + pattern: string + pass:[/**] @property use_field If specified, match intervals from this field rather than the top-level field. The `pattern` is normalized using the search analyzer from this field, unless `analyzer` is specified separately. */ + use_field?: <<Field>> +} +---- + + + +[discrete] +[[QueryDslLike]] +=== QueryDslLike + +[source,ts,subs=+macros] +---- +type QueryDslLike = string | <<QueryDslLikeDocument>> +---- + + + +[discrete] +[[QueryDslLikeDocument]] +=== QueryDslLikeDocument + +[source,ts,subs=+macros] +---- +interface QueryDslLikeDocument { + pass:[/**] @property doc A document not present in the index. */ + doc?: any + fields?: <<Field>>[] + pass:[/**] @property _id ID of a document. */ + _id?: <<Id>> + pass:[/**] @property _index Index of a document. */ + _index?: <<IndexName>> + pass:[/**] @property per_field_analyzer Overrides the default analyzer. */ + per_field_analyzer?: Record<<<Field>>, string> + routing?: <<Routing>> + version?: <<VersionNumber>> + version_type?: <<VersionType>> +} +---- + + + +[discrete] +[[QueryDslMatchAllQuery]] +=== QueryDslMatchAllQuery + +[source,ts,subs=+macros] +---- +interface QueryDslMatchAllQuery extends <<QueryDslQueryBase>> {} +---- + + + +[discrete] +[[QueryDslMatchBoolPrefixQuery]] +=== QueryDslMatchBoolPrefixQuery + +[source,ts,subs=+macros] +---- +interface QueryDslMatchBoolPrefixQuery extends <<QueryDslQueryBase>> { + pass:[/**] @property analyzer Analyzer used to convert the text in the query value into tokens. */ + analyzer?: string + pass:[/**] @property fuzziness Maximum edit distance allowed for matching. Can be applied to the term subqueries constructed for all terms but the final term. */ + fuzziness?: <<Fuzziness>> + pass:[/**] @property fuzzy_rewrite Method used to rewrite the query. Can be applied to the term subqueries constructed for all terms but the final term. */ + fuzzy_rewrite?: <<MultiTermQueryRewrite>> + pass:[/**] @property fuzzy_transpositions If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`). Can be applied to the term subqueries constructed for all terms but the final term. */ + fuzzy_transpositions?: boolean + pass:[/**] @property max_expansions Maximum number of terms to which the query will expand. Can be applied to the term subqueries constructed for all terms but the final term. */ + max_expansions?: <<integer>> + pass:[/**] @property minimum_should_match Minimum number of clauses that must match for a document to be returned. Applied to the constructed bool query. */ + minimum_should_match?: <<MinimumShouldMatch>> + pass:[/**] @property operator Boolean logic used to interpret text in the query value. Applied to the constructed bool query. 
*/ + operator?: <<QueryDslOperator>> + pass:[/**] @property prefix_length Number of beginning characters left unchanged for fuzzy matching. Can be applied to the term subqueries constructed for all terms but the final term. */ + prefix_length?: <<integer>> + pass:[/**] @property query Terms you wish to find in the provided field. The last term is used in a prefix query. */ + query: string +} +---- + + + +[discrete] +[[QueryDslMatchNoneQuery]] +=== QueryDslMatchNoneQuery + +[source,ts,subs=+macros] +---- +interface QueryDslMatchNoneQuery extends <<QueryDslQueryBase>> {} +---- + + + +[discrete] +[[QueryDslMatchPhrasePrefixQuery]] +=== QueryDslMatchPhrasePrefixQuery + +[source,ts,subs=+macros] +---- +interface QueryDslMatchPhrasePrefixQuery extends <<QueryDslQueryBase>> { + pass:[/**] @property analyzer Analyzer used to convert text in the query value into tokens. */ + analyzer?: string + pass:[/**] @property max_expansions Maximum number of terms to which the last provided term of the query value will expand. */ + max_expansions?: <<integer>> + pass:[/**] @property query Text you wish to find in the provided field. */ + query: string + pass:[/**] @property slop Maximum number of positions allowed between matching tokens. */ + slop?: <<integer>> + pass:[/**] @property zero_terms_query Indicates whether no documents are returned if the analyzer removes all tokens, such as when using a `stop` filter. */ + zero_terms_query?: <<QueryDslZeroTermsQuery>> +} +---- + + + +[discrete] +[[QueryDslMatchPhraseQuery]] +=== QueryDslMatchPhraseQuery + +[source,ts,subs=+macros] +---- +interface QueryDslMatchPhraseQuery extends <<QueryDslQueryBase>> { + pass:[/**] @property analyzer Analyzer used to convert the text in the query value into tokens. */ + analyzer?: string + pass:[/**] @property query Query terms that are analyzed and turned into a phrase query. */ + query: string + pass:[/**] @property slop Maximum number of positions allowed between matching tokens. */ + slop?: <<integer>> + pass:[/**] @property zero_terms_query Indicates whether no documents are returned if the `analyzer` removes all tokens, such as when using a `stop` filter. */ + zero_terms_query?: <<QueryDslZeroTermsQuery>> +} +---- + + + +[discrete] +[[QueryDslMatchQuery]] +=== QueryDslMatchQuery + +[source,ts,subs=+macros] +---- +interface QueryDslMatchQuery extends <<QueryDslQueryBase>> { + pass:[/**] @property analyzer Analyzer used to convert the text in the query value into tokens. */ + analyzer?: string + pass:[/**] @property auto_generate_synonyms_phrase_query If `true`, match phrase queries are automatically created for multi-term synonyms. */ + auto_generate_synonyms_phrase_query?: boolean + cutoff_frequency?: <<double>> + pass:[/**] @property fuzziness Maximum edit distance allowed for matching. */ + fuzziness?: <<Fuzziness>> + pass:[/**] @property fuzzy_rewrite Method used to rewrite the query. */ + fuzzy_rewrite?: <<MultiTermQueryRewrite>> + pass:[/**] @property fuzzy_transpositions If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`). */ + fuzzy_transpositions?: boolean + pass:[/**] @property lenient If `true`, format-based errors, such as providing a text query value for a numeric field, are ignored. */ + lenient?: boolean + pass:[/**] @property max_expansions Maximum number of terms to which the query will expand. */ + max_expansions?: <<integer>> + pass:[/**] @property minimum_should_match Minimum number of clauses that must match for a document to be returned. 
*/ + minimum_should_match?: <<MinimumShouldMatch>> + pass:[/**] @property operator Boolean logic used to interpret text in the query value. */ + operator?: <<QueryDslOperator>> + pass:[/**] @property prefix_length Number of beginning characters left unchanged for fuzzy matching. */ + prefix_length?: <<integer>> + pass:[/**] @property query Text, number, boolean value or date you wish to find in the provided field. */ + query: string | <<float>> | boolean + pass:[/**] @property zero_terms_query Indicates whether no documents are returned if the `analyzer` removes all tokens, such as when using a `stop` filter. */ + zero_terms_query?: <<QueryDslZeroTermsQuery>> +} +---- + + + +[discrete] +[[QueryDslMoreLikeThisQuery]] +=== QueryDslMoreLikeThisQuery + +[source,ts,subs=+macros] +---- +interface QueryDslMoreLikeThisQuery extends <<QueryDslQueryBase>> { + pass:[/**] @property analyzer The analyzer that is used to analyze the free form text. Defaults to the analyzer associated with the first field in fields. */ + analyzer?: string + pass:[/**] @property boost_terms Each term in the formed query could be further boosted by their tf-idf score. This sets the boost factor to use when using this feature. Defaults to deactivated (0). */ + boost_terms?: <<double>> + pass:[/**] @property fail_on_unsupported_field Controls whether the query should fail (throw an exception) if any of the specified fields are not of the supported types (`text` or `keyword`). */ + fail_on_unsupported_field?: boolean + pass:[/**] @property fields A list of fields to fetch and analyze the text from. Defaults to the `index.query.default_field` index setting, which has a default value of `*`. */ + fields?: <<Field>>[] + pass:[/**] @property include Specifies whether the input documents should also be included in the search results returned. */ + include?: boolean + pass:[/**] @property like Specifies free form text and/or a single or multiple documents for which you want to find similar documents. */ + like: <<QueryDslLike>> | <<QueryDslLike>>[] + pass:[/**] @property max_doc_freq The maximum document frequency above which the terms are ignored from the input document. */ + max_doc_freq?: <<integer>> + pass:[/**] @property max_query_terms The maximum number of query terms that can be selected. */ + max_query_terms?: <<integer>> + pass:[/**] @property max_word_length The maximum word length above which the terms are ignored. Defaults to unbounded (`0`). */ + max_word_length?: <<integer>> + pass:[/**] @property min_doc_freq The minimum document frequency below which the terms are ignored from the input document. */ + min_doc_freq?: <<integer>> + pass:[/**] @property minimum_should_match After the disjunctive query has been formed, this parameter controls the number of terms that must match. */ + minimum_should_match?: <<MinimumShouldMatch>> + pass:[/**] @property min_term_freq The minimum term frequency below which the terms are ignored from the input document. */ + min_term_freq?: <<integer>> + pass:[/**] @property min_word_length The minimum word length below which the terms are ignored. */ + min_word_length?: <<integer>> + routing?: <<Routing>> + pass:[/**] @property stop_words An array of stop words. Any word in this set is ignored. */ + stop_words?: <<AnalysisStopWords>> + pass:[/**] @property unlike Used in combination with `like` to exclude documents that match a set of terms. 
*/ + unlike?: <<QueryDslLike>> | <<QueryDslLike>>[] + version?: <<VersionNumber>> + version_type?: <<VersionType>> +} +---- + + + +[discrete] +[[QueryDslMultiMatchQuery]] +=== QueryDslMultiMatchQuery + +[source,ts,subs=+macros] +---- +interface QueryDslMultiMatchQuery extends <<QueryDslQueryBase>> { + pass:[/**] @property analyzer Analyzer used to convert the text in the query value into tokens. */ + analyzer?: string + pass:[/**] @property auto_generate_synonyms_phrase_query If `true`, match phrase queries are automatically created for multi-term synonyms. */ + auto_generate_synonyms_phrase_query?: boolean + cutoff_frequency?: <<double>> + pass:[/**] @property fields The fields to be queried. Defaults to the `index.query.default_field` index settings, which in turn defaults to `*`. */ + fields?: <<Fields>> + pass:[/**] @property fuzziness Maximum edit distance allowed for matching. */ + fuzziness?: <<Fuzziness>> + pass:[/**] @property fuzzy_rewrite Method used to rewrite the query. */ + fuzzy_rewrite?: <<MultiTermQueryRewrite>> + pass:[/**] @property fuzzy_transpositions If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`). Can be applied to the term subqueries constructed for all terms but the final term. */ + fuzzy_transpositions?: boolean + pass:[/**] @property lenient If `true`, format-based errors, such as providing a text query value for a numeric field, are ignored. */ + lenient?: boolean + pass:[/**] @property max_expansions Maximum number of terms to which the query will expand. */ + max_expansions?: <<integer>> + pass:[/**] @property minimum_should_match Minimum number of clauses that must match for a document to be returned. */ + minimum_should_match?: <<MinimumShouldMatch>> + pass:[/**] @property operator Boolean logic used to interpret text in the query value. */ + operator?: <<QueryDslOperator>> + pass:[/**] @property prefix_length Number of beginning characters left unchanged for fuzzy matching. */ + prefix_length?: <<integer>> + pass:[/**] @property query Text, number, boolean value or date you wish to find in the provided field. */ + query: string + pass:[/**] @property slop Maximum number of positions allowed between matching tokens. */ + slop?: <<integer>> + pass:[/**] @property tie_breaker Determines how scores for each per-term blended query and scores across groups are combined. */ + tie_breaker?: <<double>> + pass:[/**] @property type How `the` multi_match query is executed internally. */ + type?: <<QueryDslTextQueryType>> + pass:[/**] @property zero_terms_query Indicates whether no documents are returned if the `analyzer` removes all tokens, such as when using a `stop` filter. */ + zero_terms_query?: <<QueryDslZeroTermsQuery>> +} +---- + + + +[discrete] +[[QueryDslMultiValueMode]] +=== QueryDslMultiValueMode + +[source,ts,subs=+macros] +---- +type QueryDslMultiValueMode = 'min' | 'max' | 'avg' | 'sum' +---- + + + +[discrete] +[[QueryDslNestedQuery]] +=== QueryDslNestedQuery + +[source,ts,subs=+macros] +---- +interface QueryDslNestedQuery extends <<QueryDslQueryBase>> { + pass:[/**] @property ignore_unmapped Indicates whether to ignore an unmapped path and not return any documents instead of an error. */ + ignore_unmapped?: boolean + pass:[/**] @property inner_hits If defined, each search hit will contain inner hits. */ + inner_hits?: <<SearchInnerHits>> + pass:[/**] @property path Path to the nested object you wish to search. 
*/ + path: <<Field>> + pass:[/**] @property query Query you wish to run on nested objects in the path. */ + query: <<QueryDslQueryContainer>> + pass:[/**] @property score_mode How scores for matching child objects affect the root parent document’s relevance score. */ + score_mode?: <<QueryDslChildScoreMode>> +} +---- + + + +[discrete] +[[QueryDslNumberRangeQuery]] +=== QueryDslNumberRangeQuery + +[source,ts,subs=+macros] +---- +interface QueryDslNumberRangeQuery extends <<QueryDslRangeQueryBase>><<<double>>> {} +---- + + + +[discrete] +[[QueryDslNumericDecayFunction]] +=== QueryDslNumericDecayFunction + +[source,ts,subs=+macros] +---- +interface QueryDslNumericDecayFunctionKeys extends <<QueryDslDecayFunctionBase>><<<double>>, <<double>>> {} +type QueryDslNumericDecayFunction = QueryDslNumericDecayFunctionKeys + & { [property: string]: <<QueryDslDecayPlacement>> | <<QueryDslMultiValueMode>> } +---- + + + +[discrete] +[[QueryDslOperator]] +=== QueryDslOperator + +[source,ts,subs=+macros] +---- +type QueryDslOperator = 'and' | 'AND' | 'or' | 'OR' +---- + + + +[discrete] +[[QueryDslParentIdQuery]] +=== QueryDslParentIdQuery + +[source,ts,subs=+macros] +---- +interface QueryDslParentIdQuery extends <<QueryDslQueryBase>> { + pass:[/**] @property id ID of the parent document. */ + id?: <<Id>> + pass:[/**] @property ignore_unmapped Indicates whether to ignore an unmapped `type` and not return any documents instead of an error. */ + ignore_unmapped?: boolean + pass:[/**] @property type <<Name>> of the child relationship mapped for the `join` field. */ + type?: <<RelationName>> +} +---- + + + +[discrete] +[[QueryDslPercolateQuery]] +=== QueryDslPercolateQuery + +[source,ts,subs=+macros] +---- +interface QueryDslPercolateQuery extends <<QueryDslQueryBase>> { + pass:[/**] @property document The source of the document being percolated. */ + document?: any + pass:[/**] @property documents An array of sources of the documents being percolated. */ + documents?: any[] + pass:[/**] @property field <<Field>> that holds the indexed queries. The field must use the `percolator` mapping type. */ + field: <<Field>> + pass:[/**] @property id The ID of a stored document to percolate. */ + id?: <<Id>> + pass:[/**] @property index The index of a stored document to percolate. */ + index?: <<IndexName>> + pass:[/**] @property name The suffix used for the `_percolator_document_slot` field when multiple `percolate` queries are specified. */ + name?: string + pass:[/**] @property preference Preference used to fetch document to percolate. */ + preference?: string + pass:[/**] @property routing <<Routing>> used to fetch document to percolate. */ + routing?: <<Routing>> + pass:[/**] @property version The expected version of a stored document to percolate. */ + version?: <<VersionNumber>> +} +---- + + + +[discrete] +[[QueryDslPinnedDoc]] +=== QueryDslPinnedDoc + +[source,ts,subs=+macros] +---- +interface QueryDslPinnedDoc { + pass:[/**] @property _id The unique document ID. */ + _id: <<Id>> + pass:[/**] @property _index The index that contains the document. */ + _index: <<IndexName>> +} +---- + + + +[discrete] +[[QueryDslPinnedQuery]] +=== QueryDslPinnedQuery + +[source,ts,subs=+macros] +---- +interface QueryDslPinnedQuery extends <<QueryDslQueryBase>> { + pass:[/**] @property organic Any choice of query used to rank documents which will be ranked below the "pinned" documents. */ + organic: <<QueryDslQueryContainer>> + pass:[/**] @property ids Document IDs listed in the order they are to appear in results. 
Required if `docs` is not specified. */ + ids?: <<Id>>[] + pass:[/**] @property docs Documents listed in the order they are to appear in results. Required if `ids` is not specified. */ + docs?: <<QueryDslPinnedDoc>>[] +} +---- + + + +[discrete] +[[QueryDslPrefixQuery]] +=== QueryDslPrefixQuery + +[source,ts,subs=+macros] +---- +interface QueryDslPrefixQuery extends <<QueryDslQueryBase>> { + pass:[/**] @property rewrite Method used to rewrite the query. */ + rewrite?: <<MultiTermQueryRewrite>> + pass:[/**] @property value Beginning characters of terms you wish to find in the provided field. */ + value: string + pass:[/**] @property case_insensitive Allows ASCII case insensitive matching of the value with the indexed field values when set to `true`. Default is `false` which means the case sensitivity of matching depends on the underlying field’s mapping. */ + case_insensitive?: boolean +} +---- + + + +[discrete] +[[QueryDslQueryBase]] +=== QueryDslQueryBase + +[source,ts,subs=+macros] +---- +interface QueryDslQueryBase { + pass:[/**] @property boost Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. */ + boost?: <<float>> + _name?: string +} +---- + + + +[discrete] +[[QueryDslQueryContainer]] +=== QueryDslQueryContainer + +[source,ts,subs=+macros] +---- +interface QueryDslQueryContainer { + pass:[/**] @property bool matches documents matching boolean combinations of other queries. */ + bool?: <<QueryDslBoolQuery>> + pass:[/**] @property boosting Returns documents matching a `positive` query while reducing the relevance score of documents that also match a `negative` query. */ + boosting?: <<QueryDslBoostingQuery>> + common?: Partial<Record<<<Field>>, <<QueryDslCommonTermsQuery>> | string>> + pass:[/**] @property combined_fields The `combined_fields` query supports searching multiple text fields as if their contents had been indexed into one combined field. */ + combined_fields?: <<QueryDslCombinedFieldsQuery>> + pass:[/**] @property constant_score Wraps a filter query and returns every matching document with a relevance score equal to the `boost` parameter value. */ + constant_score?: <<QueryDslConstantScoreQuery>> + pass:[/**] @property dis_max Returns documents matching one or more wrapped queries, called query clauses or clauses. If a returned document matches multiple query clauses, the `dis_max` query assigns the document the highest relevance score from any matching clause, plus a tie breaking increment for any additional matching subqueries. */ + dis_max?: <<QueryDslDisMaxQuery>> + pass:[/**] @property distance_feature Boosts the relevance score of documents closer to a provided origin date or point. For example, you can use this query to give more weight to documents closer to a certain date or location. */ + distance_feature?: <<QueryDslDistanceFeatureQuery>> + pass:[/**] @property exists Returns documents that contain an indexed value for a field. */ + exists?: <<QueryDslExistsQuery>> + pass:[/**] @property function_score The `function_score` enables you to modify the score of documents that are retrieved by a query. */ + function_score?: <<QueryDslFunctionScoreQuery>> | <<QueryDslFunctionScoreContainer>>[] + pass:[/**] @property fuzzy Returns documents that contain terms similar to the search term, as measured by a Levenshtein edit distance. 
*/ + fuzzy?: Partial<Record<<<Field>>, <<QueryDslFuzzyQuery>> | string | <<double>> | boolean>> + pass:[/**] @property geo_bounding_box Matches geo_point and geo_shape values that intersect a bounding box. */ + geo_bounding_box?: <<QueryDslGeoBoundingBoxQuery>> + pass:[/**] @property geo_distance Matches `geo_point` and `geo_shape` values within a given distance of a geopoint. */ + geo_distance?: <<QueryDslGeoDistanceQuery>> + geo_polygon?: <<QueryDslGeoPolygonQuery>> + pass:[/**] @property geo_shape Filter documents indexed using either the `geo_shape` or the `geo_point` type. */ + geo_shape?: <<QueryDslGeoShapeQuery>> + pass:[/**] @property has_child Returns parent documents whose joined child documents match a provided query. */ + has_child?: <<QueryDslHasChildQuery>> + pass:[/**] @property has_parent Returns child documents whose joined parent document matches a provided query. */ + has_parent?: <<QueryDslHasParentQuery>> + pass:[/**] @property ids Returns documents based on their IDs. This query uses document IDs stored in the `_id` field. */ + ids?: <<QueryDslIdsQuery>> + pass:[/**] @property intervals Returns documents based on the order and proximity of matching terms. */ + intervals?: Partial<Record<<<Field>>, <<QueryDslIntervalsQuery>>>> + pass:[/**] @property knn Finds the k nearest vectors to a query vector, as measured by a similarity metric. knn query finds nearest vectors through approximate search on indexed dense_vectors. */ + knn?: <<KnnQuery>> + pass:[/**] @property match Returns documents that match a provided text, number, date or boolean value. The provided text is analyzed before matching. */ + match?: Partial<Record<<<Field>>, <<QueryDslMatchQuery>> | string | <<float>> | boolean>> + pass:[/**] @property match_all Matches all documents, giving them all a `_score` of 1.0. */ + match_all?: <<QueryDslMatchAllQuery>> + pass:[/**] @property match_bool_prefix Analyzes its input and constructs a `bool` query from the terms. Each term except the last is used in a `term` query. The last term is used in a prefix query. */ + match_bool_prefix?: Partial<Record<<<Field>>, <<QueryDslMatchBoolPrefixQuery>> | string>> + pass:[/**] @property match_none Matches no documents. */ + match_none?: <<QueryDslMatchNoneQuery>> + pass:[/**] @property match_phrase Analyzes the text and creates a phrase query out of the analyzed text. */ + match_phrase?: Partial<Record<<<Field>>, <<QueryDslMatchPhraseQuery>> | string>> + pass:[/**] @property match_phrase_prefix Returns documents that contain the words of a provided text, in the same order as provided. The last term of the provided text is treated as a prefix, matching any words that begin with that term. */ + match_phrase_prefix?: Partial<Record<<<Field>>, <<QueryDslMatchPhrasePrefixQuery>> | string>> + pass:[/**] @property more_like_this Returns documents that are "like" a given set of documents. */ + more_like_this?: <<QueryDslMoreLikeThisQuery>> + pass:[/**] @property multi_match Enables you to search for a provided text, number, date or boolean value across multiple fields. The provided text is analyzed before matching. */ + multi_match?: <<QueryDslMultiMatchQuery>> + pass:[/**] @property nested Wraps another query to search nested fields. If an object matches the search, the nested query returns the root parent document. */ + nested?: <<QueryDslNestedQuery>> + pass:[/**] @property parent_id Returns child documents joined to a specific parent document. 
*/ + parent_id?: <<QueryDslParentIdQuery>> + pass:[/**] @property percolate Matches queries stored in an index. */ + percolate?: <<QueryDslPercolateQuery>> + pass:[/**] @property pinned Promotes selected documents to rank higher than those matching a given query. */ + pinned?: <<QueryDslPinnedQuery>> + pass:[/**] @property prefix Returns documents that contain a specific prefix in a provided field. */ + prefix?: Partial<Record<<<Field>>, <<QueryDslPrefixQuery>> | string>> + pass:[/**] @property query_string Returns documents based on a provided query string, using a parser with a strict syntax. */ + query_string?: <<QueryDslQueryStringQuery>> + pass:[/**] @property range Returns documents that contain terms within a provided range. */ + range?: Partial<Record<<<Field>>, <<QueryDslRangeQuery>>>> + pass:[/**] @property rank_feature Boosts the relevance score of documents based on the numeric value of a `rank_feature` or `rank_features` field. */ + rank_feature?: <<QueryDslRankFeatureQuery>> + pass:[/**] @property regexp Returns documents that contain terms matching a regular expression. */ + regexp?: Partial<Record<<<Field>>, <<QueryDslRegexpQuery>> | string>> + rule?: <<QueryDslRuleQuery>> + pass:[/**] @property script Filters documents based on a provided script. The script query is typically used in a filter context. */ + script?: <<QueryDslScriptQuery>> + pass:[/**] @property script_score Uses a script to provide a custom score for returned documents. */ + script_score?: <<QueryDslScriptScoreQuery>> + pass:[/**] @property semantic A semantic query to semantic_text field types */ + semantic?: <<QueryDslSemanticQuery>> + pass:[/**] @property shape Queries documents that contain fields indexed using the `shape` type. */ + shape?: <<QueryDslShapeQuery>> + pass:[/**] @property simple_query_string Returns documents based on a provided query string, using a parser with a limited but fault-tolerant syntax. */ + simple_query_string?: <<QueryDslSimpleQueryStringQuery>> + pass:[/**] @property span_containing Returns matches which enclose another span query. */ + span_containing?: <<QueryDslSpanContainingQuery>> + pass:[/**] @property span_field_masking Wrapper to allow span queries to participate in composite single-field span queries by _lying_ about their search field. */ + span_field_masking?: <<QueryDslSpanFieldMaskingQuery>> + pass:[/**] @property span_first Matches spans near the beginning of a field. */ + span_first?: <<QueryDslSpanFirstQuery>> + pass:[/**] @property span_multi Allows you to wrap a multi term query (one of `wildcard`, `fuzzy`, `prefix`, `range`, or `regexp` query) as a `span` query, so it can be nested. */ + span_multi?: <<QueryDslSpanMultiTermQuery>> + pass:[/**] @property span_near Matches spans which are near one another. You can specify `slop`, the maximum number of intervening unmatched positions, as well as whether matches are required to be in-order. */ + span_near?: <<QueryDslSpanNearQuery>> + pass:[/**] @property span_not Removes matches which overlap with another span query or which are within x tokens before (controlled by the parameter `pre`) or y tokens after (controlled by the parameter `post`) another span query. */ + span_not?: <<QueryDslSpanNotQuery>> + pass:[/**] @property span_or Matches the union of its span clauses. */ + span_or?: <<QueryDslSpanOrQuery>> + pass:[/**] @property span_term Matches spans containing a term. 
*/ + span_term?: Partial<Record<<<Field>>, <<QueryDslSpanTermQuery>> | string>> + pass:[/**] @property span_within Returns matches which are enclosed inside another span query. */ + span_within?: <<QueryDslSpanWithinQuery>> + pass:[/**] @property sparse_vector Using input query vectors or a natural language processing model to convert a query into a list of token-weight pairs, queries against a sparse vector field. */ + sparse_vector?: <<QueryDslSparseVectorQuery>> + pass:[/**] @property term Returns documents that contain an exact term in a provided field. To return a document, the query term must exactly match the queried field's value, including whitespace and capitalization. */ + term?: Partial<Record<<<Field>>, <<QueryDslTermQuery>> | <<FieldValue>>>> + pass:[/**] @property terms Returns documents that contain one or more exact terms in a provided field. To return a document, one or more terms must exactly match a field value, including whitespace and capitalization. */ + terms?: <<QueryDslTermsQuery>> + pass:[/**] @property terms_set Returns documents that contain a minimum number of exact terms in a provided field. To return a document, a required number of terms must exactly match the field values, including whitespace and capitalization. */ + terms_set?: Partial<Record<<<Field>>, <<QueryDslTermsSetQuery>>>> + pass:[/**] @property text_expansion Uses a natural language processing model to convert the query text into a list of token-weight pairs which are then used in a query against a sparse vector or rank features field. */ + text_expansion?: Partial<Record<<<Field>>, <<QueryDslTextExpansionQuery>>>> + pass:[/**] @property weighted_tokens Supports returning text_expansion query results by sending in precomputed tokens with the query. */ + weighted_tokens?: Partial<Record<<<Field>>, <<QueryDslWeightedTokensQuery>>>> + pass:[/**] @property wildcard Returns documents that contain terms matching a wildcard pattern. */ + wildcard?: Partial<Record<<<Field>>, <<QueryDslWildcardQuery>> | string>> + pass:[/**] @property wrapper A query that accepts any other query as base64 encoded string. */ + wrapper?: <<QueryDslWrapperQuery>> + type?: <<QueryDslTypeQuery>> +} +---- + + + +[discrete] +[[QueryDslQueryStringQuery]] +=== QueryDslQueryStringQuery + +[source,ts,subs=+macros] +---- +interface QueryDslQueryStringQuery extends <<QueryDslQueryBase>> { + pass:[/**] @property allow_leading_wildcard If `true`, the wildcard characters `*` and `?` are allowed as the first character of the query string. */ + allow_leading_wildcard?: boolean + pass:[/**] @property analyzer Analyzer used to convert text in the query string into tokens. */ + analyzer?: string + pass:[/**] @property analyze_wildcard If `true`, the query attempts to analyze wildcard terms in the query string. */ + analyze_wildcard?: boolean + pass:[/**] @property auto_generate_synonyms_phrase_query If `true`, match phrase queries are automatically created for multi-term synonyms. */ + auto_generate_synonyms_phrase_query?: boolean + pass:[/**] @property default_field Default field to search if no field is provided in the query string. Supports wildcards (`*`). Defaults to the `index.query.default_field` index setting, which has a default value of `*`. */ + default_field?: <<Field>> + pass:[/**] @property default_operator Default boolean logic used to interpret text in the query string if no operators are specified. 
*/ + default_operator?: <<QueryDslOperator>> + pass:[/**] @property enable_position_increments If `true`, enable position increments in queries constructed from a `query_string` search. */ + enable_position_increments?: boolean + escape?: boolean + pass:[/**] @property fields Array of fields to search. Supports wildcards (`*`). */ + fields?: <<Field>>[] + pass:[/**] @property fuzziness Maximum edit distance allowed for fuzzy matching. */ + fuzziness?: <<Fuzziness>> + pass:[/**] @property fuzzy_max_expansions Maximum number of terms to which the query expands for fuzzy matching. */ + fuzzy_max_expansions?: <<integer>> + pass:[/**] @property fuzzy_prefix_length Number of beginning characters left unchanged for fuzzy matching. */ + fuzzy_prefix_length?: <<integer>> + pass:[/**] @property fuzzy_rewrite Method used to rewrite the query. */ + fuzzy_rewrite?: <<MultiTermQueryRewrite>> + pass:[/**] @property fuzzy_transpositions If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`). */ + fuzzy_transpositions?: boolean + pass:[/**] @property lenient If `true`, format-based errors, such as providing a text value for a numeric field, are ignored. */ + lenient?: boolean + pass:[/**] @property max_determinized_states Maximum number of automaton states required for the query. */ + max_determinized_states?: <<integer>> + pass:[/**] @property minimum_should_match Minimum number of clauses that must match for a document to be returned. */ + minimum_should_match?: <<MinimumShouldMatch>> + pass:[/**] @property phrase_slop Maximum number of positions allowed between matching tokens for phrases. */ + phrase_slop?: <<double>> + pass:[/**] @property query Query string you wish to parse and use for search. */ + query: string + pass:[/**] @property quote_analyzer Analyzer used to convert quoted text in the query string into tokens. For quoted text, this parameter overrides the analyzer specified in the `analyzer` parameter. */ + quote_analyzer?: string + pass:[/**] @property quote_field_suffix Suffix appended to quoted text in the query string. You can use this suffix to use a different analysis method for exact matches. */ + quote_field_suffix?: string + pass:[/**] @property rewrite Method used to rewrite the query. */ + rewrite?: <<MultiTermQueryRewrite>> + pass:[/**] @property tie_breaker How to combine the queries generated from the individual search terms in the resulting `dis_max` query. */ + tie_breaker?: <<double>> + pass:[/**] @property time_zone Coordinated Universal Time (UTC) offset or IANA time zone used to convert date values in the query string to UTC. */ + time_zone?: <<TimeZone>> + pass:[/**] @property type Determines how the query matches and scores documents. 
*/ + type?: <<QueryDslTextQueryType>> +} +---- + + + +[discrete] +[[QueryDslRandomScoreFunction]] +=== QueryDslRandomScoreFunction + +[source,ts,subs=+macros] +---- +interface QueryDslRandomScoreFunction { + field?: <<Field>> + seed?: <<long>> | string +} +---- + + + +[discrete] +[[QueryDslRangeQuery]] +=== QueryDslRangeQuery + +[source,ts,subs=+macros] +---- +type QueryDslRangeQuery = <<QueryDslUntypedRangeQuery>> | <<QueryDslDateRangeQuery>> | <<QueryDslNumberRangeQuery>> | <<QueryDslTermRangeQuery>> +---- + + + +[discrete] +[[QueryDslRangeQueryBase]] +=== QueryDslRangeQueryBase + +[source,ts,subs=+macros] +---- +interface QueryDslRangeQueryBase<T = unknown> extends <<QueryDslQueryBase>> { + pass:[/**] @property relation Indicates how the range query matches values for `range` fields. */ + relation?: <<QueryDslRangeRelation>> + pass:[/**] @property gt Greater than. */ + gt?: T + pass:[/**] @property gte Greater than or equal to. */ + gte?: T + pass:[/**] @property lt Less than. */ + lt?: T + pass:[/**] @property lte Less than or equal to. */ + lte?: T + from?: T | null + to?: T | null +} +---- + + + +[discrete] +[[QueryDslRangeRelation]] +=== QueryDslRangeRelation + +[source,ts,subs=+macros] +---- +type QueryDslRangeRelation = 'within' | 'contains' | 'intersects' +---- + + + +[discrete] +[[QueryDslRankFeatureFunction]] +=== QueryDslRankFeatureFunction + +[source,ts,subs=+macros] +---- +interface QueryDslRankFeatureFunction {} +---- + + + +[discrete] +[[QueryDslRankFeatureFunctionLinear]] +=== QueryDslRankFeatureFunctionLinear + +[source,ts,subs=+macros] +---- +interface QueryDslRankFeatureFunctionLinear {} +---- + + + +[discrete] +[[QueryDslRankFeatureFunctionLogarithm]] +=== QueryDslRankFeatureFunctionLogarithm + +[source,ts,subs=+macros] +---- +interface QueryDslRankFeatureFunctionLogarithm { + pass:[/**] @property scaling_factor Configurable scaling factor. */ + scaling_factor: <<float>> +} +---- + + + +[discrete] +[[QueryDslRankFeatureFunctionSaturation]] +=== QueryDslRankFeatureFunctionSaturation + +[source,ts,subs=+macros] +---- +interface QueryDslRankFeatureFunctionSaturation { + pass:[/**] @property pivot Configurable pivot value so that the result will be less than 0.5. */ + pivot?: <<float>> +} +---- + + + +[discrete] +[[QueryDslRankFeatureFunctionSigmoid]] +=== QueryDslRankFeatureFunctionSigmoid + +[source,ts,subs=+macros] +---- +interface QueryDslRankFeatureFunctionSigmoid { + pass:[/**] @property pivot Configurable pivot value so that the result will be less than 0.5. */ + pivot: <<float>> + pass:[/**] @property exponent Configurable Exponent. */ + exponent: <<float>> +} +---- + + + +[discrete] +[[QueryDslRankFeatureQuery]] +=== QueryDslRankFeatureQuery + +[source,ts,subs=+macros] +---- +interface QueryDslRankFeatureQuery extends <<QueryDslQueryBase>> { + pass:[/**] @property field `rank_feature` or `rank_features` field used to boost relevance scores. */ + field: <<Field>> + pass:[/**] @property saturation Saturation function used to boost relevance scores based on the value of the rank feature `field`. */ + saturation?: <<QueryDslRankFeatureFunctionSaturation>> + pass:[/**] @property log Logarithmic function used to boost relevance scores based on the value of the rank feature `field`. */ + log?: <<QueryDslRankFeatureFunctionLogarithm>> + pass:[/**] @property linear Linear function used to boost relevance scores based on the value of the rank feature `field`. 
*/ + linear?: <<QueryDslRankFeatureFunctionLinear>> + pass:[/**] @property sigmoid Sigmoid function used to boost relevance scores based on the value of the rank feature `field`. */ + sigmoid?: <<QueryDslRankFeatureFunctionSigmoid>> +} +---- + + + +[discrete] +[[QueryDslRegexpQuery]] +=== QueryDslRegexpQuery + +[source,ts,subs=+macros] +---- +interface QueryDslRegexpQuery extends <<QueryDslQueryBase>> { + pass:[/**] @property case_insensitive Allows case insensitive matching of the regular expression value with the indexed field values when set to `true`. When `false`, case sensitivity of matching depends on the underlying field’s mapping. */ + case_insensitive?: boolean + pass:[/**] @property flags Enables optional operators for the regular expression. */ + flags?: string + pass:[/**] @property max_determinized_states Maximum number of automaton states required for the query. */ + max_determinized_states?: <<integer>> + pass:[/**] @property rewrite Method used to rewrite the query. */ + rewrite?: <<MultiTermQueryRewrite>> + pass:[/**] @property value Regular expression for terms you wish to find in the provided field. */ + value: string +} +---- + + + +[discrete] +[[QueryDslRuleQuery]] +=== QueryDslRuleQuery + +[source,ts,subs=+macros] +---- +interface QueryDslRuleQuery extends <<QueryDslQueryBase>> { + organic: <<QueryDslQueryContainer>> + ruleset_ids: <<Id>>[] + match_criteria: any +} +---- + + + +[discrete] +[[QueryDslScriptQuery]] +=== QueryDslScriptQuery + +[source,ts,subs=+macros] +---- +interface QueryDslScriptQuery extends <<QueryDslQueryBase>> { + pass:[/**] @property script Contains a script to run as a query. This script must return a boolean value, `true` or `false`. */ + script: <<Script>> | string +} +---- + + + +[discrete] +[[QueryDslScriptScoreFunction]] +=== QueryDslScriptScoreFunction + +[source,ts,subs=+macros] +---- +interface QueryDslScriptScoreFunction { + pass:[/**] @property script A script that computes a score. */ + script: <<Script>> | string +} +---- + + + +[discrete] +[[QueryDslScriptScoreQuery]] +=== QueryDslScriptScoreQuery + +[source,ts,subs=+macros] +---- +interface QueryDslScriptScoreQuery extends <<QueryDslQueryBase>> { + pass:[/**] @property min_score Documents with a score lower than this floating point number are excluded from the search results. */ + min_score?: <<float>> + pass:[/**] @property query Query used to return documents. */ + query: <<QueryDslQueryContainer>> + pass:[/**] @property script <<Script>> used to compute the score of documents returned by the query. Important: final relevance scores from the `script_score` query cannot be negative. */ + script: <<Script>> | string +} +---- + + + +[discrete] +[[QueryDslSemanticQuery]] +=== QueryDslSemanticQuery + +[source,ts,subs=+macros] +---- +interface QueryDslSemanticQuery extends <<QueryDslQueryBase>> { + pass:[/**] @property field The field to query, which must be a semantic_text field type */ + field: string + pass:[/**] @property query The query text */ + query: string +} +---- + + + +[discrete] +[[QueryDslShapeFieldQuery]] +=== QueryDslShapeFieldQuery + +[source,ts,subs=+macros] +---- +interface QueryDslShapeFieldQuery { + pass:[/**] @property indexed_shape Queries using a pre-indexed shape. */ + indexed_shape?: <<QueryDslFieldLookup>> + pass:[/**] @property relation Spatial relation between the query shape and the document shape. */ + relation?: <<GeoShapeRelation>> + pass:[/**] @property shape Queries using an inline shape definition in GeoJSON or Well Known Text (WKT) format. 
*/ + shape?: <<GeoShape>> +} +---- + + + +[discrete] +[[QueryDslShapeQuery]] +=== QueryDslShapeQuery + +[source,ts,subs=+macros] +---- +interface QueryDslShapeQueryKeys extends <<QueryDslQueryBase>> { + ignore_unmapped?: boolean +} +type QueryDslShapeQuery = QueryDslShapeQueryKeys + & { [property: string]: <<QueryDslShapeFieldQuery>> | boolean | <<float>> | string } +---- + + + +[discrete] +[[QueryDslSimpleQueryStringFlag]] +=== QueryDslSimpleQueryStringFlag + +[source,ts,subs=+macros] +---- +type QueryDslSimpleQueryStringFlag = 'NONE' | 'AND' | 'NOT' | 'OR' | 'PREFIX' | 'PHRASE' | 'PRECEDENCE' | 'ESCAPE' | 'WHITESPACE' | 'FUZZY' | 'NEAR' | 'SLOP' | 'ALL' +---- + + + +[discrete] +[[QueryDslSimpleQueryStringFlags]] +=== QueryDslSimpleQueryStringFlags + +[source,ts,subs=+macros] +---- +type QueryDslSimpleQueryStringFlags = <<SpecUtilsPipeSeparatedFlags>><<<QueryDslSimpleQueryStringFlag>>> +---- + + + +[discrete] +[[QueryDslSimpleQueryStringQuery]] +=== QueryDslSimpleQueryStringQuery + +[source,ts,subs=+macros] +---- +interface QueryDslSimpleQueryStringQuery extends <<QueryDslQueryBase>> { + pass:[/**] @property analyzer Analyzer used to convert text in the query string into tokens. */ + analyzer?: string + pass:[/**] @property analyze_wildcard If `true`, the query attempts to analyze wildcard terms in the query string. */ + analyze_wildcard?: boolean + pass:[/**] @property auto_generate_synonyms_phrase_query If `true`, the parser creates a match_phrase query for each multi-position token. */ + auto_generate_synonyms_phrase_query?: boolean + pass:[/**] @property default_operator Default boolean logic used to interpret text in the query string if no operators are specified. */ + default_operator?: <<QueryDslOperator>> + pass:[/**] @property fields Array of fields you wish to search. Accepts wildcard expressions. You also can boost relevance scores for matches to particular fields using a caret (`^`) notation. Defaults to the `index.query.default_field` index setting, which has a default value of `*`. */ + fields?: <<Field>>[] + pass:[/**] @property flags List of enabled operators for the simple query string syntax. */ + flags?: <<QueryDslSimpleQueryStringFlags>> + pass:[/**] @property fuzzy_max_expansions Maximum number of terms to which the query expands for fuzzy matching. */ + fuzzy_max_expansions?: <<integer>> + pass:[/**] @property fuzzy_prefix_length Number of beginning characters left unchanged for fuzzy matching. */ + fuzzy_prefix_length?: <<integer>> + pass:[/**] @property fuzzy_transpositions If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`). */ + fuzzy_transpositions?: boolean + pass:[/**] @property lenient If `true`, format-based errors, such as providing a text value for a numeric field, are ignored. */ + lenient?: boolean + pass:[/**] @property minimum_should_match Minimum number of clauses that must match for a document to be returned. */ + minimum_should_match?: <<MinimumShouldMatch>> + pass:[/**] @property query Query string in the simple query string syntax you wish to parse and use for search. */ + query: string + pass:[/**] @property quote_field_suffix Suffix appended to quoted text in the query string. */ + quote_field_suffix?: string +} +---- + + + +[discrete] +[[QueryDslSpanContainingQuery]] +=== QueryDslSpanContainingQuery + +[source,ts,subs=+macros] +---- +interface QueryDslSpanContainingQuery extends <<QueryDslQueryBase>> { + pass:[/**] @property big Can be any span query.
Matching spans from `big` that contain matches from `little` are returned. */ + big: <<QueryDslSpanQuery>> + pass:[/**] @property little Can be any span query. Matching spans from `big` that contain matches from `little` are returned. */ + little: <<QueryDslSpanQuery>> +} +---- + + + +[discrete] +[[QueryDslSpanFieldMaskingQuery]] +=== QueryDslSpanFieldMaskingQuery + +[source,ts,subs=+macros] +---- +interface QueryDslSpanFieldMaskingQuery extends <<QueryDslQueryBase>> { + field: <<Field>> + query: <<QueryDslSpanQuery>> +} +---- + + + +[discrete] +[[QueryDslSpanFirstQuery]] +=== QueryDslSpanFirstQuery + +[source,ts,subs=+macros] +---- +interface QueryDslSpanFirstQuery extends <<QueryDslQueryBase>> { + pass:[/**] @property end Controls the maximum end position permitted in a match. */ + end: <<integer>> + pass:[/**] @property match Can be any other span type query. */ + match: <<QueryDslSpanQuery>> +} +---- + + + +[discrete] +[[QueryDslSpanGapQuery]] +=== QueryDslSpanGapQuery + +[source,ts,subs=+macros] +---- +type QueryDslSpanGapQuery = Partial<Record<<<Field>>, <<integer>>>> +---- + + + +[discrete] +[[QueryDslSpanMultiTermQuery]] +=== QueryDslSpanMultiTermQuery + +[source,ts,subs=+macros] +---- +interface QueryDslSpanMultiTermQuery extends <<QueryDslQueryBase>> { + pass:[/**] @property match Should be a multi term query (one of `wildcard`, `fuzzy`, `prefix`, `range`, or `regexp` query). */ + match: <<QueryDslQueryContainer>> +} +---- + + + +[discrete] +[[QueryDslSpanNearQuery]] +=== QueryDslSpanNearQuery + +[source,ts,subs=+macros] +---- +interface QueryDslSpanNearQuery extends <<QueryDslQueryBase>> { + pass:[/**] @property clauses Array of one or more other span type queries. */ + clauses: <<QueryDslSpanQuery>>[] + pass:[/**] @property in_order Controls whether matches are required to be in-order. */ + in_order?: boolean + pass:[/**] @property slop Controls the maximum number of intervening unmatched positions permitted. */ + slop?: <<integer>> +} +---- + + + +[discrete] +[[QueryDslSpanNotQuery]] +=== QueryDslSpanNotQuery + +[source,ts,subs=+macros] +---- +interface QueryDslSpanNotQuery extends <<QueryDslQueryBase>> { + pass:[/**] @property dist The number of tokens from within the include span that can’t have overlap with the exclude span. Equivalent to setting both `pre` and `post`. */ + dist?: <<integer>> + pass:[/**] @property exclude Span query whose matches must not overlap those returned. */ + exclude: <<QueryDslSpanQuery>> + pass:[/**] @property include Span query whose matches are filtered. */ + include: <<QueryDslSpanQuery>> + pass:[/**] @property post The number of tokens after the include span that can’t have overlap with the exclude span. */ + post?: <<integer>> + pass:[/**] @property pre The number of tokens before the include span that can’t have overlap with the exclude span. */ + pre?: <<integer>> +} +---- + + + +[discrete] +[[QueryDslSpanOrQuery]] +=== QueryDslSpanOrQuery + +[source,ts,subs=+macros] +---- +interface QueryDslSpanOrQuery extends <<QueryDslQueryBase>> { + pass:[/**] @property clauses Array of one or more other span type queries. */ + clauses: <<QueryDslSpanQuery>>[] +} +---- + + + +[discrete] +[[QueryDslSpanQuery]] +=== QueryDslSpanQuery + +[source,ts,subs=+macros] +---- +interface QueryDslSpanQuery { + pass:[/**] @property span_containing Accepts a list of span queries, but only returns those spans which also match a second span query. 
*/ + span_containing?: <<QueryDslSpanContainingQuery>> + pass:[/**] @property span_field_masking Allows queries like `span_near` or `span_or` across different fields. */ + span_field_masking?: <<QueryDslSpanFieldMaskingQuery>> + pass:[/**] @property span_first Accepts another span query whose matches must appear within the first N positions of the field. */ + span_first?: <<QueryDslSpanFirstQuery>> + span_gap?: <<QueryDslSpanGapQuery>> + pass:[/**] @property span_multi Wraps a `term`, `range`, `prefix`, `wildcard`, `regexp`, or `fuzzy` query. */ + span_multi?: <<QueryDslSpanMultiTermQuery>> + pass:[/**] @property span_near Accepts multiple span queries whose matches must be within the specified distance of each other, and possibly in the same order. */ + span_near?: <<QueryDslSpanNearQuery>> + pass:[/**] @property span_not Wraps another span query, and excludes any documents which match that query. */ + span_not?: <<QueryDslSpanNotQuery>> + pass:[/**] @property span_or Combines multiple span queries and returns documents which match any of the specified queries. */ + span_or?: <<QueryDslSpanOrQuery>> + pass:[/**] @property span_term The equivalent of the `term` query but for use with other span queries. */ + span_term?: Partial<Record<<<Field>>, <<QueryDslSpanTermQuery>> | string>> + pass:[/**] @property span_within The result from a single span query is returned as long as its span falls within the spans returned by a list of other span queries. */ + span_within?: <<QueryDslSpanWithinQuery>> +} +---- + + + +[discrete] +[[QueryDslSpanTermQuery]] +=== QueryDslSpanTermQuery + +[source,ts,subs=+macros] +---- +interface QueryDslSpanTermQuery extends <<QueryDslQueryBase>> { + value: string +} +---- + + + +[discrete] +[[QueryDslSpanWithinQuery]] +=== QueryDslSpanWithinQuery + +[source,ts,subs=+macros] +---- +interface QueryDslSpanWithinQuery extends <<QueryDslQueryBase>> { + pass:[/**] @property big Can be any span query. Matching spans from `little` that are enclosed within `big` are returned. */ + big: <<QueryDslSpanQuery>> + pass:[/**] @property little Can be any span query. Matching spans from `little` that are enclosed within `big` are returned. */ + little: <<QueryDslSpanQuery>> +} +---- + + + +[discrete] +[[QueryDslSparseVectorQuery]] +=== QueryDslSparseVectorQuery + +[source,ts,subs=+macros] +---- +interface QueryDslSparseVectorQuery extends <<QueryDslQueryBase>> { + pass:[/**] @property field The name of the field that contains the token-weight pairs to be searched against. This field must be a mapped sparse_vector field. */ + field: <<Field>> + pass:[/**] @property query_vector Dictionary of precomputed sparse vectors and their associated weights. Only one of inference_id or query_vector may be supplied in a request. */ + query_vector?: Record<string, <<float>>> + pass:[/**] @property inference_id The inference ID to use to convert the query text into token-weight pairs. It must be the same inference ID that was used to create the tokens from the input text. Only one of inference_id and query_vector is allowed. If inference_id is specified, query must also be specified. Only one of inference_id or query_vector may be supplied in a request. */ + inference_id?: <<Id>> + pass:[/**] @property query The query text you want to use for search. If inference_id is specified, query must also be specified. */ + query?: string + pass:[/**] @property prune Whether to perform pruning, omitting the non-significant tokens from the query to improve query performance.
If prune is true but the pruning_config is not specified, pruning will occur but default values will be used. Default: false */ + prune?: boolean + pass:[/**] @property pruning_config Optional pruning configuration. If enabled, this will omit non-significant tokens from the query in order to improve query performance. This is only used if prune is set to true. If prune is set to true but pruning_config is not specified, default values will be used. */ + pruning_config?: <<QueryDslTokenPruningConfig>> +} +---- + + + +[discrete] +[[QueryDslTermQuery]] +=== QueryDslTermQuery + +[source,ts,subs=+macros] +---- +interface QueryDslTermQuery extends <<QueryDslQueryBase>> { + pass:[/**] @property value Term you wish to find in the provided field. */ + value: <<FieldValue>> + pass:[/**] @property case_insensitive Allows ASCII case insensitive matching of the value with the indexed field values when set to `true`. When `false`, the case sensitivity of matching depends on the underlying field’s mapping. */ + case_insensitive?: boolean +} +---- + + + +[discrete] +[[QueryDslTermRangeQuery]] +=== QueryDslTermRangeQuery + +[source,ts,subs=+macros] +---- +interface QueryDslTermRangeQuery extends <<QueryDslRangeQueryBase>><string> {} +---- + + + +[discrete] +[[QueryDslTermsLookup]] +=== QueryDslTermsLookup + +[source,ts,subs=+macros] +---- +interface QueryDslTermsLookup { + index: <<IndexName>> + id: <<Id>> + path: <<Field>> + routing?: <<Routing>> +} +---- + + + +[discrete] +[[QueryDslTermsQuery]] +=== QueryDslTermsQuery + +[source,ts,subs=+macros] +---- +interface QueryDslTermsQueryKeys extends <<QueryDslQueryBase>> {} +type QueryDslTermsQuery = QueryDslTermsQueryKeys + & { [property: string]: <<QueryDslTermsQueryField>> | <<float>> | string } +---- + + + +[discrete] +[[QueryDslTermsQueryField]] +=== QueryDslTermsQueryField + +[source,ts,subs=+macros] +---- +type QueryDslTermsQueryField = <<FieldValue>>[] | <<QueryDslTermsLookup>> +---- + + + +[discrete] +[[QueryDslTermsSetQuery]] +=== QueryDslTermsSetQuery + +[source,ts,subs=+macros] +---- +interface QueryDslTermsSetQuery extends <<QueryDslQueryBase>> { + pass:[/**] @property minimum_should_match Specification describing number of matching terms required to return a document. */ + minimum_should_match?: <<MinimumShouldMatch>> + pass:[/**] @property minimum_should_match_field Numeric field containing the number of matching terms required to return a document. */ + minimum_should_match_field?: <<Field>> + pass:[/**] @property minimum_should_match_script Custom script containing the number of matching terms required to return a document. */ + minimum_should_match_script?: <<Script>> | string + pass:[/**] @property terms Array of terms you wish to find in the provided field. 
*/ + terms: <<FieldValue>>[] +} +---- + + + +[discrete] +[[QueryDslTextExpansionQuery]] +=== QueryDslTextExpansionQuery + +[source,ts,subs=+macros] +---- +interface QueryDslTextExpansionQuery extends <<QueryDslQueryBase>> { + pass:[/**] @property model_id The text expansion NLP model to use */ + model_id: string + pass:[/**] @property model_text The query text */ + model_text: string + pass:[/**] @property pruning_config Token pruning configurations */ + pruning_config?: <<QueryDslTokenPruningConfig>> +} +---- + + + +[discrete] +[[QueryDslTextQueryType]] +=== QueryDslTextQueryType + +[source,ts,subs=+macros] +---- +type QueryDslTextQueryType = 'best_fields' | 'most_fields' | 'cross_fields' | 'phrase' | 'phrase_prefix' | 'bool_prefix' +---- + + + +[discrete] +[[QueryDslTokenPruningConfig]] +=== QueryDslTokenPruningConfig + +[source,ts,subs=+macros] +---- +interface QueryDslTokenPruningConfig { + pass:[/**] @property tokens_freq_ratio_threshold Tokens whose frequency is more than this threshold times the average frequency of all tokens in the specified field are considered outliers and pruned. */ + tokens_freq_ratio_threshold?: <<integer>> + pass:[/**] @property tokens_weight_threshold Tokens whose weight is less than this threshold are considered nonsignificant and pruned. */ + tokens_weight_threshold?: <<float>> + pass:[/**] @property only_score_pruned_tokens Whether to only score pruned tokens, vs only scoring kept tokens. */ + only_score_pruned_tokens?: boolean +} +---- + + + +[discrete] +[[QueryDslTypeQuery]] +=== QueryDslTypeQuery + +[source,ts,subs=+macros] +---- +interface QueryDslTypeQuery extends <<QueryDslQueryBase>> { + value: string +} +---- + + + +[discrete] +[[QueryDslUntypedDecayFunction]] +=== QueryDslUntypedDecayFunction + +[source,ts,subs=+macros] +---- +interface QueryDslUntypedDecayFunctionKeys extends <<QueryDslDecayFunctionBase>><any, any> {} +type QueryDslUntypedDecayFunction = QueryDslUntypedDecayFunctionKeys + & { [property: string]: <<QueryDslDecayPlacement>> | <<QueryDslMultiValueMode>> } +---- + + + +[discrete] +[[QueryDslUntypedDistanceFeatureQuery]] +=== QueryDslUntypedDistanceFeatureQuery + +[source,ts,subs=+macros] +---- +interface QueryDslUntypedDistanceFeatureQuery extends <<QueryDslDistanceFeatureQueryBase>><any, any> {} +---- + + + +[discrete] +[[QueryDslUntypedRangeQuery]] +=== QueryDslUntypedRangeQuery + +[source,ts,subs=+macros] +---- +interface QueryDslUntypedRangeQuery extends <<QueryDslRangeQueryBase>><any> { + pass:[/**] @property format Date format used to convert `date` values in the query. */ + format?: <<DateFormat>> + pass:[/**] @property time_zone Coordinated Universal Time (UTC) offset or IANA time zone used to convert `date` values in the query to UTC. */ + time_zone?: <<TimeZone>> +} +---- + + + +[discrete] +[[QueryDslWeightedTokensQuery]] +=== QueryDslWeightedTokensQuery + +[source,ts,subs=+macros] +---- +interface QueryDslWeightedTokensQuery extends <<QueryDslQueryBase>> { + pass:[/**] @property tokens The tokens representing this query */ + tokens: Record<string, <<float>>> + pass:[/**] @property pruning_config Token pruning configurations */ + pruning_config?: <<QueryDslTokenPruningConfig>> +} +---- + + + +[discrete] +[[QueryDslWildcardQuery]] +=== QueryDslWildcardQuery + +[source,ts,subs=+macros] +---- +interface QueryDslWildcardQuery extends <<QueryDslQueryBase>> { + pass:[/**] @property case_insensitive Allows case insensitive matching of the pattern with the indexed field values when set to true. 
Default is false which means the case sensitivity of matching depends on the underlying field’s mapping. */ + case_insensitive?: boolean + pass:[/**] @property rewrite Method used to rewrite the query. */ + rewrite?: <<MultiTermQueryRewrite>> + pass:[/**] @property value Wildcard pattern for terms you wish to find in the provided field. Required, when wildcard is not set. */ + value?: string + pass:[/**] @property wildcard Wildcard pattern for terms you wish to find in the provided field. Required, when value is not set. */ + wildcard?: string +} +---- + + + +[discrete] +[[QueryDslWrapperQuery]] +=== QueryDslWrapperQuery + +[source,ts,subs=+macros] +---- +interface QueryDslWrapperQuery extends <<QueryDslQueryBase>> { + pass:[/**] @property query A base64 encoded query. The binary data format can be any of JSON, YAML, CBOR or SMILE encodings */ + query: string +} +---- + + + +[discrete] +[[QueryDslZeroTermsQuery]] +=== QueryDslZeroTermsQuery + +[source,ts,subs=+macros] +---- +type QueryDslZeroTermsQuery = 'all' | 'none' +---- + + + + +[discrete] +== Other shared types + +* <<reference-shared-types-global-knn-search-types>> +* <<reference-shared-types-global-search-types>> +* <<reference-shared-types-global-search-mvt-types>> +* <<reference-shared-types-async-search-types>> +* <<reference-shared-types-autoscaling-types>> +* <<reference-shared-types-cat-types>> +* <<reference-shared-types-ccr-types>> +* <<reference-shared-types-cluster-types>> +* <<reference-shared-types-connector-types>> +* <<reference-shared-types-enrich-types>> +* <<reference-shared-types-eql-types>> +* <<reference-shared-types-esql-types>> +* <<reference-shared-types-features-types>> +* <<reference-shared-types-fleet-types>> +* <<reference-shared-types-graph-types>> +* <<reference-shared-types-ilm-types>> +* <<reference-shared-types-indices-types>> +* <<reference-shared-types-indices-forcemerge-types>> +* <<reference-shared-types-inference-types>> +* <<reference-shared-types-ingest-types>> +* <<reference-shared-types-license-types>> +* <<reference-shared-types-logstash-types>> +* <<reference-shared-types-ml-types>> +* <<reference-shared-types-nodes-types>> +* <<reference-shared-types-query-rules-types>> +* <<reference-shared-types-rollup-types>> +* <<reference-shared-types-search-application-types>> +* <<reference-shared-types-searchable-snapshots-types>> +* <<reference-shared-types-security-types>> +* <<reference-shared-types-shutdown-types>> +* <<reference-shared-types-slm-types>> +* <<reference-shared-types-snapshot-types>> +* <<reference-shared-types-synonyms-types>> +* <<reference-shared-types-tasks-types>> +* <<reference-shared-types-transform-types>> +* <<reference-shared-types-watcher-types>> + + +include::global-knn-search-types.asciidoc[] +include::global-search-types.asciidoc[] +include::global-search-mvt-types.asciidoc[] +include::async-search-types.asciidoc[] +include::autoscaling-types.asciidoc[] +include::cat-types.asciidoc[] +include::ccr-types.asciidoc[] +include::cluster-types.asciidoc[] +include::connector-types.asciidoc[] +include::enrich-types.asciidoc[] +include::eql-types.asciidoc[] +include::esql-types.asciidoc[] +include::features-types.asciidoc[] +include::fleet-types.asciidoc[] +include::graph-types.asciidoc[] +include::ilm-types.asciidoc[] +include::indices-types.asciidoc[] +include::indices-forcemerge-types.asciidoc[] +include::inference-types.asciidoc[] +include::ingest-types.asciidoc[] +include::license-types.asciidoc[] +include::logstash-types.asciidoc[] +include::ml-types.asciidoc[] 
+include::nodes-types.asciidoc[] +include::query-rules-types.asciidoc[] +include::rollup-types.asciidoc[] +include::search-application-types.asciidoc[] +include::searchable-snapshots-types.asciidoc[] +include::security-types.asciidoc[] +include::shutdown-types.asciidoc[] +include::slm-types.asciidoc[] +include::snapshot-types.asciidoc[] +include::synonyms-types.asciidoc[] +include::tasks-types.asciidoc[] +include::transform-types.asciidoc[] +include::watcher-types.asciidoc[] +include::global-bulk.asciidoc[] +include::global-clear-scroll.asciidoc[] +include::global-close-point-in-time.asciidoc[] +include::global-count.asciidoc[] +include::global-create.asciidoc[] +include::global-delete.asciidoc[] +include::global-delete-by-query.asciidoc[] +include::global-delete-by-query-rethrottle.asciidoc[] +include::global-delete-script.asciidoc[] +include::global-exists.asciidoc[] +include::global-exists-source.asciidoc[] +include::global-explain.asciidoc[] +include::global-field-caps.asciidoc[] +include::global-get.asciidoc[] +include::global-get-script.asciidoc[] +include::global-get-script-context.asciidoc[] +include::global-get-script-languages.asciidoc[] +include::global-get-source.asciidoc[] +include::global-health-report.asciidoc[] +include::global-index.asciidoc[] +include::global-info.asciidoc[] +include::global-knn-search.asciidoc[] +include::global-mget.asciidoc[] +include::global-msearch.asciidoc[] +include::global-msearch-template.asciidoc[] +include::global-mtermvectors.asciidoc[] +include::global-open-point-in-time.asciidoc[] +include::global-ping.asciidoc[] +include::global-put-script.asciidoc[] +include::global-rank-eval.asciidoc[] +include::global-reindex.asciidoc[] +include::global-reindex-rethrottle.asciidoc[] +include::global-render-search-template.asciidoc[] +include::global-scripts-painless-execute.asciidoc[] +include::global-scroll.asciidoc[] +include::global-search.asciidoc[] +include::global-search-mvt.asciidoc[] +include::global-search-shards.asciidoc[] +include::global-search-template.asciidoc[] +include::global-terms-enum.asciidoc[] +include::global-termvectors.asciidoc[] +include::global-update.asciidoc[] +include::global-update-by-query.asciidoc[] +include::global-update-by-query-rethrottle.asciidoc[] diff --git a/docs/reference/shared-types/indices-forcemerge-types.asciidoc b/docs/reference/shared-types/indices-forcemerge-types.asciidoc new file mode 100644 index 000000000..59379c8e3 --- /dev/null +++ b/docs/reference/shared-types/indices-forcemerge-types.asciidoc @@ -0,0 +1,50 @@ +[[reference-shared-types-indices-forcemerge-types]] + +=== `Indices` types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[IndicesForceMergeResponseBody]] +=== IndicesForceMergeResponseBody + +[source,ts,subs=+macros] +---- +interface IndicesForcemergeForceMergeResponseBody extends <<ShardsOperationResponseBase>> { + pass:[/**] @property task task contains a task id returned when wait_for_completion=false, you can use the task_id to get the status of the task at _tasks/<task_id> */ + task?: string +} +---- + + diff --git a/docs/reference/shared-types/indices-types.asciidoc b/docs/reference/shared-types/indices-types.asciidoc new file mode 100644 index 000000000..273cd4b05 --- /dev/null +++ b/docs/reference/shared-types/indices-types.asciidoc @@ -0,0 +1,1259 @@ +[[reference-shared-types-indices-types]] + +=== `Indices` types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[IndicesAlias]] +=== IndicesAlias + +[source,ts,subs=+macros] +---- +interface IndicesAlias { + pass:[/**] @property filter Query used to limit documents the alias can access. */ + filter?: <<QueryDslQueryContainer>> + pass:[/**] @property index_routing Value used to route indexing operations to a specific shard. If specified, this overwrites the `routing` value for indexing operations. */ + index_routing?: <<Routing>> + pass:[/**] @property is_hidden If `true`, the alias is hidden. All indices for the alias must have the same `is_hidden` value. */ + is_hidden?: boolean + pass:[/**] @property is_write_index If `true`, the index is the write index for the alias. */ + is_write_index?: boolean + pass:[/**] @property routing Value used to route indexing and search operations to a specific shard. */ + routing?: <<Routing>> + pass:[/**] @property search_routing Value used to route search operations to a specific shard. If specified, this overwrites the `routing` value for search operations. 
*/ + search_routing?: <<Routing>> +} +---- + + +[discrete] +[[IndicesAliasDefinition]] +=== IndicesAliasDefinition + +[source,ts,subs=+macros] +---- +interface IndicesAliasDefinition { + pass:[/**] @property filter Query used to limit documents the alias can access. */ + filter?: <<QueryDslQueryContainer>> + pass:[/**] @property index_routing Value used to route indexing operations to a specific shard. If specified, this overwrites the `routing` value for indexing operations. */ + index_routing?: string + pass:[/**] @property is_write_index If `true`, the index is the write index for the alias. */ + is_write_index?: boolean + pass:[/**] @property routing Value used to route indexing and search operations to a specific shard. */ + routing?: string + pass:[/**] @property search_routing Value used to route search operations to a specific shard. If specified, this overwrites the `routing` value for search operations. */ + search_routing?: string + pass:[/**] @property is_hidden If `true`, the alias is hidden. All indices for the alias must have the same `is_hidden` value. */ + is_hidden?: boolean +} +---- + + +[discrete] +[[IndicesCacheQueries]] +=== IndicesCacheQueries + +[source,ts,subs=+macros] +---- +interface IndicesCacheQueries { + enabled: boolean +} +---- + + +[discrete] +[[IndicesDataStream]] +=== IndicesDataStream + +[source,ts,subs=+macros] +---- +interface IndicesDataStream { + pass:[/**] @property _meta Custom metadata for the stream, copied from the `_meta` object of the stream’s matching index template. If empty, the response omits this property. */ + _meta?: <<Metadata>> + pass:[/**] @property allow_custom_routing If `true`, the data stream allows custom routing on write request. */ + allow_custom_routing?: boolean + pass:[/**] @property failure_store Information about failure store backing indices */ + failure_store?: <<IndicesFailureStore>> + pass:[/**] @property generation Current generation for the data stream. This number acts as a cumulative count of the stream’s rollovers, starting at 1. */ + generation: <<integer>> + pass:[/**] @property hidden If `true`, the data stream is hidden. */ + hidden: boolean + pass:[/**] @property ilm_policy <<Name>> of the current ILM lifecycle policy in the stream’s matching index template. This lifecycle policy is set in the `index.lifecycle.name` setting. If the template does not include a lifecycle policy, this property is not included in the response. NOTE: A data stream’s backing indices may be assigned different lifecycle policies. To retrieve the lifecycle policy for individual backing indices, use the get index settings API. */ + ilm_policy?: <<Name>> + pass:[/**] @property next_generation_managed_by <<Name>> of the lifecycle system that'll manage the next generation of the data stream. */ + next_generation_managed_by: <<IndicesManagedBy>> + pass:[/**] @property prefer_ilm Indicates if ILM should take precedence over DSL in case both are configured to managed this data stream. */ + prefer_ilm: boolean + pass:[/**] @property indices Array of objects containing information about the data stream’s backing indices. The last item in this array contains information about the stream’s current write index. */ + indices: <<IndicesDataStreamIndex>>[] + pass:[/**] @property lifecycle Contains the configuration for the data stream lifecycle of this data stream. */ + lifecycle?: <<IndicesDataStreamLifecycleWithRollover>> + pass:[/**] @property name <<Name>> of the data stream. 
*/ + name: <<DataStreamName>> + pass:[/**] @property replicated If `true`, the data stream is created and managed by cross-cluster replication and the local cluster can not write into this data stream or change its mappings. */ + replicated?: boolean + pass:[/**] @property rollover_on_write If `true`, the next write to this data stream will trigger a rollover first and the document will be indexed in the new backing index. If the rollover fails the indexing request will fail too. */ + rollover_on_write: boolean + pass:[/**] @property status Health status of the data stream. This health status is based on the state of the primary and replica shards of the stream’s backing indices. */ + status: <<HealthStatus>> + pass:[/**] @property system If `true`, the data stream is created and managed by an Elastic stack component and cannot be modified through normal user interaction. */ + system?: boolean + pass:[/**] @property template <<Name>> of the index template used to create the data stream’s backing indices. The template’s index pattern must match the name of this data stream. */ + template: <<Name>> + pass:[/**] @property timestamp_field Information about the `@timestamp` field in the data stream. */ + timestamp_field: <<IndicesDataStreamTimestampField>> +} +---- + + +[discrete] +[[IndicesDataStreamIndex]] +=== IndicesDataStreamIndex + +[source,ts,subs=+macros] +---- +interface IndicesDataStreamIndex { + pass:[/**] @property index_name <<Name>> of the backing index. */ + index_name: <<IndexName>> + pass:[/**] @property index_uuid Universally unique identifier (UUID) for the index. */ + index_uuid: <<Uuid>> + pass:[/**] @property ilm_policy <<Name>> of the current ILM lifecycle policy configured for this backing index. */ + ilm_policy?: <<Name>> + pass:[/**] @property managed_by <<Name>> of the lifecycle system that's currently managing this backing index. */ + managed_by?: <<IndicesManagedBy>> + pass:[/**] @property prefer_ilm Indicates if ILM should take precedence over DSL in case both are configured to manage this index. */ + prefer_ilm?: boolean +} +---- + + +[discrete] +[[IndicesDataStreamLifecycle]] +=== IndicesDataStreamLifecycle + +[source,ts,subs=+macros] +---- +interface IndicesDataStreamLifecycle { + pass:[/**] @property data_retention If defined, every document added to this data stream will be stored at least for this time frame. Any time after this duration the document could be deleted. When empty, every document in this data stream will be stored indefinitely. */ + data_retention?: <<Duration>> + pass:[/**] @property downsampling The downsampling configuration to execute for the managed backing index after rollover. */ + downsampling?: <<IndicesDataStreamLifecycleDownsampling>> + pass:[/**] @property enabled If defined, it turns data stream lifecycle on/off (`true`/`false`) for this data stream. A data stream lifecycle that's disabled (enabled: `false`) will have no effect on the data stream. 
*/ + enabled?: boolean +} +---- + + +[discrete] +[[IndicesDataStreamLifecycleDownsampling]] +=== IndicesDataStreamLifecycleDownsampling + +[source,ts,subs=+macros] +---- +interface IndicesDataStreamLifecycleDownsampling { + pass:[/**] @property rounds The list of downsampling rounds to execute as part of this downsampling configuration */ + rounds: <<IndicesDownsamplingRound>>[] +} +---- + + +[discrete] +[[IndicesDataStreamLifecycleRolloverConditions]] +=== IndicesDataStreamLifecycleRolloverConditions + +[source,ts,subs=+macros] +---- +interface IndicesDataStreamLifecycleRolloverConditions { + min_age?: <<Duration>> + max_age?: string + min_docs?: <<long>> + max_docs?: <<long>> + min_size?: <<ByteSize>> + max_size?: <<ByteSize>> + min_primary_shard_size?: <<ByteSize>> + max_primary_shard_size?: <<ByteSize>> + min_primary_shard_docs?: <<long>> + max_primary_shard_docs?: <<long>> +} +---- + + +[discrete] +[[IndicesDataStreamLifecycleWithRollover]] +=== IndicesDataStreamLifecycleWithRollover + +[source,ts,subs=+macros] +---- +interface IndicesDataStreamLifecycleWithRollover extends <<IndicesDataStreamLifecycle>> { + pass:[/**] @property rollover The conditions which will trigger the rollover of a backing index as configured by the cluster setting `cluster.lifecycle.default.rollover`. This property is an implementation detail and it will only be retrieved when the query param `include_defaults` is set to true. The contents of this field are subject to change. */ + rollover?: <<IndicesDataStreamLifecycleRolloverConditions>> +} +---- + + +[discrete] +[[IndicesDataStreamTimestampField]] +=== IndicesDataStreamTimestampField + +[source,ts,subs=+macros] +---- +interface IndicesDataStreamTimestampField { + pass:[/**] @property name <<Name>> of the timestamp field for the data stream, which must be `@timestamp`. The `@timestamp` field must be included in every document indexed to the data stream. */ + name: <<Field>> +} +---- + + +[discrete] +[[IndicesDataStreamVisibility]] +=== IndicesDataStreamVisibility + +[source,ts,subs=+macros] +---- +interface IndicesDataStreamVisibility { + hidden?: boolean + allow_custom_routing?: boolean +} +---- + + +[discrete] +[[IndicesDownsampleConfig]] +=== IndicesDownsampleConfig + +[source,ts,subs=+macros] +---- +interface IndicesDownsampleConfig { + pass:[/**] @property fixed_interval The interval at which to aggregate the original time series index. */ + fixed_interval: <<DurationLarge>> +} +---- + + +[discrete] +[[IndicesDownsamplingRound]] +=== IndicesDownsamplingRound + +[source,ts,subs=+macros] +---- +interface IndicesDownsamplingRound { + pass:[/**] @property after The duration since rollover when this downsampling round should execute */ + after: <<Duration>> + pass:[/**] @property config The downsample configuration to execute. 
*/ + config: <<IndicesDownsampleConfig>> +} +---- + + +[discrete] +[[IndicesFailureStore]] +=== IndicesFailureStore + +[source,ts,subs=+macros] +---- +interface IndicesFailureStore { + enabled: boolean + indices: <<IndicesDataStreamIndex>>[] + rollover_on_write: boolean +} +---- + + +[discrete] +[[IndicesFielddataFrequencyFilter]] +=== IndicesFielddataFrequencyFilter + +[source,ts,subs=+macros] +---- +interface IndicesFielddataFrequencyFilter { + max: <<double>> + min: <<double>> + min_segment_size: <<integer>> +} +---- + + +[discrete] +[[IndicesIndexCheckOnStartup]] +=== IndicesIndexCheckOnStartup + +[source,ts,subs=+macros] +---- +type IndicesIndexCheckOnStartup = boolean | 'true' | 'false' | 'checksum' +---- + + +[discrete] +[[IndicesIndexRouting]] +=== IndicesIndexRouting + +[source,ts,subs=+macros] +---- +interface IndicesIndexRouting { + allocation?: <<IndicesIndexRoutingAllocation>> + rebalance?: <<IndicesIndexRoutingRebalance>> +} +---- + + +[discrete] +[[IndicesIndexRoutingAllocation]] +=== IndicesIndexRoutingAllocation + +[source,ts,subs=+macros] +---- +interface IndicesIndexRoutingAllocation { + enable?: <<IndicesIndexRoutingAllocationOptions>> + include?: <<IndicesIndexRoutingAllocationInclude>> + initial_recovery?: <<IndicesIndexRoutingAllocationInitialRecovery>> + disk?: <<IndicesIndexRoutingAllocationDisk>> +} +---- + + +[discrete] +[[IndicesIndexRoutingAllocationDisk]] +=== IndicesIndexRoutingAllocationDisk + +[source,ts,subs=+macros] +---- +interface IndicesIndexRoutingAllocationDisk { + threshold_enabled?: boolean | string +} +---- + + +[discrete] +[[IndicesIndexRoutingAllocationInclude]] +=== IndicesIndexRoutingAllocationInclude + +[source,ts,subs=+macros] +---- +interface IndicesIndexRoutingAllocationInclude { + _tier_preference?: string + _id?: <<Id>> +} +---- + + +[discrete] +[[IndicesIndexRoutingAllocationInitialRecovery]] +=== IndicesIndexRoutingAllocationInitialRecovery + +[source,ts,subs=+macros] +---- +interface IndicesIndexRoutingAllocationInitialRecovery { + _id?: <<Id>> +} +---- + + +[discrete] +[[IndicesIndexRoutingAllocationOptions]] +=== IndicesIndexRoutingAllocationOptions + +[source,ts,subs=+macros] +---- +type IndicesIndexRoutingAllocationOptions = 'all' | 'primaries' | 'new_primaries' | 'none' +---- + + +[discrete] +[[IndicesIndexRoutingRebalance]] +=== IndicesIndexRoutingRebalance + +[source,ts,subs=+macros] +---- +interface IndicesIndexRoutingRebalance { + enable: <<IndicesIndexRoutingRebalanceOptions>> +} +---- + + +[discrete] +[[IndicesIndexRoutingRebalanceOptions]] +=== IndicesIndexRoutingRebalanceOptions + +[source,ts,subs=+macros] +---- +type IndicesIndexRoutingRebalanceOptions = 'all' | 'primaries' | 'replicas' | 'none' +---- + + +[discrete] +[[IndicesIndexSegmentSort]] +=== IndicesIndexSegmentSort + +[source,ts,subs=+macros] +---- +interface IndicesIndexSegmentSort { + field?: <<Fields>> + order?: <<IndicesSegmentSortOrder>> | <<IndicesSegmentSortOrder>>[] + mode?: <<IndicesSegmentSortMode>> | <<IndicesSegmentSortMode>>[] + missing?: <<IndicesSegmentSortMissing>> | <<IndicesSegmentSortMissing>>[] +} +---- + + +[discrete] +[[IndicesIndexSettingBlocks]] +=== IndicesIndexSettingBlocks + +[source,ts,subs=+macros] +---- +interface IndicesIndexSettingBlocks { + read_only?: <<SpecUtilsStringified>><boolean> + read_only_allow_delete?: <<SpecUtilsStringified>><boolean> + read?: <<SpecUtilsStringified>><boolean> + write?: <<SpecUtilsStringified>><boolean> + metadata?: <<SpecUtilsStringified>><boolean> +} +---- + + +[discrete] +[[IndicesIndexSettings]] 
+=== IndicesIndexSettings + +[source,ts,subs=+macros] +---- +interface IndicesIndexSettingsKeys { + index?: <<IndicesIndexSettings>> + mode?: string + routing_path?: string | string[] + soft_deletes?: <<IndicesSoftDeletes>> + sort?: <<IndicesIndexSegmentSort>> + number_of_shards?: <<integer>> | string + number_of_replicas?: <<integer>> | string + number_of_routing_shards?: <<integer>> + check_on_startup?: <<IndicesIndexCheckOnStartup>> + codec?: string + routing_partition_size?: <<SpecUtilsStringified>><<<integer>>> + load_fixed_bitset_filters_eagerly?: boolean + hidden?: boolean | string + auto_expand_replicas?: string + merge?: <<IndicesMerge>> + search?: <<IndicesSettingsSearch>> + refresh_interval?: <<Duration>> + max_result_window?: <<integer>> + max_inner_result_window?: <<integer>> + max_rescore_window?: <<integer>> + max_docvalue_fields_search?: <<integer>> + max_script_fields?: <<integer>> + max_ngram_diff?: <<integer>> + max_shingle_diff?: <<integer>> + blocks?: <<IndicesIndexSettingBlocks>> + max_refresh_listeners?: <<integer>> + analyze?: <<IndicesSettingsAnalyze>> + highlight?: <<IndicesSettingsHighlight>> + max_terms_count?: <<integer>> + max_regex_length?: <<integer>> + routing?: <<IndicesIndexRouting>> + gc_deletes?: <<Duration>> + default_pipeline?: <<PipelineName>> + final_pipeline?: <<PipelineName>> + lifecycle?: <<IndicesIndexSettingsLifecycle>> + provided_name?: <<Name>> + creation_date?: <<SpecUtilsStringified>><<<EpochTime>><<<UnitMillis>>>> + creation_date_string?: <<DateTime>> + uuid?: <<Uuid>> + version?: <<IndicesIndexVersioning>> + verified_before_close?: boolean | string + format?: string | <<integer>> + max_slices_per_scroll?: <<integer>> + translog?: <<IndicesTranslog>> + query_string?: <<IndicesSettingsQueryString>> + priority?: <<integer>> | string + top_metrics_max_size?: <<integer>> + analysis?: <<IndicesIndexSettingsAnalysis>> + settings?: <<IndicesIndexSettings>> + time_series?: <<IndicesIndexSettingsTimeSeries>> + queries?: <<IndicesQueries>> + similarity?: Record<string, <<IndicesSettingsSimilarity>>> + mapping?: <<IndicesMappingLimitSettings>> + 'indexing.slowlog'?: <<IndicesIndexingSlowlogSettings>> + indexing_pressure?: <<IndicesIndexingPressure>> + store?: <<IndicesStorage>> +} +type IndicesIndexSettings = IndicesIndexSettingsKeys + & { [property: string]: any } +---- + + +[discrete] +[[IndicesIndexSettingsAnalysis]] +=== IndicesIndexSettingsAnalysis + +[source,ts,subs=+macros] +---- +interface IndicesIndexSettingsAnalysis { + analyzer?: Record<string, <<AnalysisAnalyzer>>> + char_filter?: Record<string, <<AnalysisCharFilter>>> + filter?: Record<string, <<AnalysisTokenFilter>>> + normalizer?: Record<string, <<AnalysisNormalizer>>> + tokenizer?: Record<string, <<AnalysisTokenizer>>> +} +---- + + +[discrete] +[[IndicesIndexSettingsLifecycle]] +=== IndicesIndexSettingsLifecycle + +[source,ts,subs=+macros] +---- +interface IndicesIndexSettingsLifecycle { + pass:[/**] @property name The name of the policy to use to manage the index. For information about how Elasticsearch applies policy changes, see Policy updates. */ + name?: <<Name>> + pass:[/**] @property indexing_complete Indicates whether or not the index has been rolled over. Automatically set to true when ILM completes the rollover action. You can explicitly set it to skip rollover. */ + indexing_complete?: <<SpecUtilsStringified>><boolean> + pass:[/**] @property origination_date If specified, this is the timestamp used to calculate the index age for its phase transitions. 
Use this setting if you create a new index that contains old data and want to use the original creation date to calculate the index age. Specified as a Unix epoch value in milliseconds. */ + origination_date?: <<long>> + pass:[/**] @property parse_origination_date Set to true to parse the origination date from the index name. This origination date is used to calculate the index age for its phase transitions. The index name must match the pattern ^.*-{date_format}-\\d+, where the date_format is yyyy.MM.dd and the trailing digits are optional. An index that was rolled over would normally match the full format, for example logs-2016.10.31-000002). If the index name doesn’t match the pattern, index creation fails. */ + parse_origination_date?: boolean + step?: <<IndicesIndexSettingsLifecycleStep>> + pass:[/**] @property rollover_alias The index alias to update when the index rolls over. Specify when using a policy that contains a rollover action. When the index rolls over, the alias is updated to reflect that the index is no longer the write index. For more information about rolling indices, see Rollover. */ + rollover_alias?: string +} +---- + + +[discrete] +[[IndicesIndexSettingsLifecycleStep]] +=== IndicesIndexSettingsLifecycleStep + +[source,ts,subs=+macros] +---- +interface IndicesIndexSettingsLifecycleStep { + pass:[/**] @property wait_time_threshold Time to wait for the cluster to resolve allocation issues during an ILM shrink action. Must be greater than 1h (1 hour). See Shard allocation for shrink. */ + wait_time_threshold?: <<Duration>> +} +---- + + +[discrete] +[[IndicesIndexSettingsTimeSeries]] +=== IndicesIndexSettingsTimeSeries + +[source,ts,subs=+macros] +---- +interface IndicesIndexSettingsTimeSeries { + end_time?: <<DateTime>> + start_time?: <<DateTime>> +} +---- + + +[discrete] +[[IndicesIndexState]] +=== IndicesIndexState + +[source,ts,subs=+macros] +---- +interface IndicesIndexState { + aliases?: Record<<<IndexName>>, <<IndicesAlias>>> + mappings?: <<MappingTypeMapping>> + settings?: <<IndicesIndexSettings>> + pass:[/**] @property defaults Default settings, included when the request's `include_default` is `true`. */ + defaults?: <<IndicesIndexSettings>> + data_stream?: <<DataStreamName>> + pass:[/**] @property lifecycle Data stream lifecycle applicable if this is a data stream. */ + lifecycle?: <<IndicesDataStreamLifecycle>> +} +---- + + +[discrete] +[[IndicesIndexTemplate]] +=== IndicesIndexTemplate + +[source,ts,subs=+macros] +---- +interface IndicesIndexTemplate { + pass:[/**] @property index_patterns <<Name>> of the index template. */ + index_patterns: <<Names>> + pass:[/**] @property composed_of An ordered list of component template names. Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence. */ + composed_of: <<Name>>[] + pass:[/**] @property template Template to be applied. It may optionally include an `aliases`, `mappings`, or `settings` configuration. */ + template?: <<IndicesIndexTemplateSummary>> + pass:[/**] @property version Version number used to manage index templates externally. This number is not automatically generated by Elasticsearch. */ + version?: <<VersionNumber>> + pass:[/**] @property priority Priority to determine index template precedence when a new data stream or index is created. The index template with the highest priority is chosen. If no priority is specified the template is treated as though it is of priority 0 (lowest priority). 
This number is not automatically generated by Elasticsearch. */ + priority?: <<long>> + pass:[/**] @property _meta Optional user metadata about the index template. May have any contents. This map is not automatically generated by Elasticsearch. */ + _meta?: <<Metadata>> + allow_auto_create?: boolean + pass:[/**] @property data_stream If this object is included, the template is used to create data streams and their backing indices. Supports an empty object. Data streams require a matching index template with a `data_stream` object. */ + data_stream?: <<IndicesIndexTemplateDataStreamConfiguration>> + pass:[/**] @property deprecated Marks this index template as deprecated. When creating or updating a non-deprecated index template that uses deprecated components, Elasticsearch will emit a deprecation warning. */ + deprecated?: boolean + pass:[/**] @property ignore_missing_component_templates A list of component template names that are allowed to be absent. */ + ignore_missing_component_templates?: <<Names>> +} +---- + + +[discrete] +[[IndicesIndexTemplateDataStreamConfiguration]] +=== IndicesIndexTemplateDataStreamConfiguration + +[source,ts,subs=+macros] +---- +interface IndicesIndexTemplateDataStreamConfiguration { + pass:[/**] @property hidden If true, the data stream is hidden. */ + hidden?: boolean + pass:[/**] @property allow_custom_routing If true, the data stream supports custom routing. */ + allow_custom_routing?: boolean +} +---- + + +[discrete] +[[IndicesIndexTemplateSummary]] +=== IndicesIndexTemplateSummary + +[source,ts,subs=+macros] +---- +interface IndicesIndexTemplateSummary { + pass:[/**] @property aliases Aliases to add. If the index template includes a `data_stream` object, these are data stream aliases. Otherwise, these are index aliases. Data stream aliases ignore the `index_routing`, `routing`, and `search_routing` options. */ + aliases?: Record<<<IndexName>>, <<IndicesAlias>>> + pass:[/**] @property mappings Mapping for fields in the index. If specified, this mapping can include field names, field data types, and mapping parameters. */ + mappings?: <<MappingTypeMapping>> + pass:[/**] @property settings Configuration options for the index. */ + settings?: <<IndicesIndexSettings>> + lifecycle?: <<IndicesDataStreamLifecycleWithRollover>> +} +---- + + +[discrete] +[[IndicesIndexVersioning]] +=== IndicesIndexVersioning + +[source,ts,subs=+macros] +---- +interface IndicesIndexVersioning { + created?: <<VersionString>> + created_string?: string +} +---- + + +[discrete] +[[IndicesIndexingPressure]] +=== IndicesIndexingPressure + +[source,ts,subs=+macros] +---- +interface IndicesIndexingPressure { + memory: <<IndicesIndexingPressureMemory>> +} +---- + + +[discrete] +[[IndicesIndexingPressureMemory]] +=== IndicesIndexingPressureMemory + +[source,ts,subs=+macros] +---- +interface IndicesIndexingPressureMemory { + pass:[/**] @property limit Number of outstanding bytes that may be consumed by indexing requests. When this limit is reached or exceeded, the node will reject new coordinating and primary operations. When replica operations consume 1.5x this limit, the node will reject new replica operations. Defaults to 10% of the heap. 
*/ + limit?: <<integer>> +} +---- + + +[discrete] +[[IndicesIndexingSlowlogSettings]] +=== IndicesIndexingSlowlogSettings + +[source,ts,subs=+macros] +---- +interface IndicesIndexingSlowlogSettings { + level?: string + source?: <<integer>> + reformat?: boolean + threshold?: <<IndicesIndexingSlowlogTresholds>> +} +---- + + +[discrete] +[[IndicesIndexingSlowlogTresholds]] +=== IndicesIndexingSlowlogTresholds + +[source,ts,subs=+macros] +---- +interface IndicesIndexingSlowlogTresholds { + pass:[/**] @property index The indexing slow log, similar in functionality to the search slow log. The log file name ends with `_index_indexing_slowlog.json`. Log and the thresholds are configured in the same way as the search slowlog. */ + index?: <<IndicesSlowlogTresholdLevels>> +} +---- + + +[discrete] +[[IndicesManagedBy]] +=== IndicesManagedBy + +[source,ts,subs=+macros] +---- +type IndicesManagedBy = 'Index Lifecycle Management' | 'Data stream lifecycle' | 'Unmanaged' +---- + + +[discrete] +[[IndicesMappingLimitSettings]] +=== IndicesMappingLimitSettings + +[source,ts,subs=+macros] +---- +interface IndicesMappingLimitSettings { + coerce?: boolean + total_fields?: <<IndicesMappingLimitSettingsTotalFields>> + depth?: <<IndicesMappingLimitSettingsDepth>> + nested_fields?: <<IndicesMappingLimitSettingsNestedFields>> + nested_objects?: <<IndicesMappingLimitSettingsNestedObjects>> + field_name_length?: <<IndicesMappingLimitSettingsFieldNameLength>> + dimension_fields?: <<IndicesMappingLimitSettingsDimensionFields>> + ignore_malformed?: boolean +} +---- + + +[discrete] +[[IndicesMappingLimitSettingsDepth]] +=== IndicesMappingLimitSettingsDepth + +[source,ts,subs=+macros] +---- +interface IndicesMappingLimitSettingsDepth { + pass:[/**] @property limit The maximum depth for a field, which is measured as the number of inner objects. For instance, if all fields are defined at the root object level, then the depth is 1. If there is one object mapping, then the depth is 2, etc. */ + limit?: <<long>> +} +---- + + +[discrete] +[[IndicesMappingLimitSettingsDimensionFields]] +=== IndicesMappingLimitSettingsDimensionFields + +[source,ts,subs=+macros] +---- +interface IndicesMappingLimitSettingsDimensionFields { + pass:[/**] @property limit [preview] This functionality is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features. */ + limit?: <<long>> +} +---- + + +[discrete] +[[IndicesMappingLimitSettingsFieldNameLength]] +=== IndicesMappingLimitSettingsFieldNameLength + +[source,ts,subs=+macros] +---- +interface IndicesMappingLimitSettingsFieldNameLength { + pass:[/**] @property limit Setting for the maximum length of a field name. This setting isn’t really something that addresses mappings explosion but might still be useful if you want to limit the field length. It usually shouldn’t be necessary to set this setting. The default is okay unless a user starts to add a huge number of fields with really <<long>> names. Default is `Long.MAX_VALUE` (no limit). */ + limit?: <<long>> +} +---- + + +[discrete] +[[IndicesMappingLimitSettingsNestedFields]] +=== IndicesMappingLimitSettingsNestedFields + +[source,ts,subs=+macros] +---- +interface IndicesMappingLimitSettingsNestedFields { + pass:[/**] @property limit The maximum number of distinct nested mappings in an index. 
The nested type should only be used in special cases, when arrays of objects need to be queried independently of each other. To safeguard against poorly designed mappings, this setting limits the number of unique nested types per index. */ + limit?: <<long>> +} +---- + + +[discrete] +[[IndicesMappingLimitSettingsNestedObjects]] +=== IndicesMappingLimitSettingsNestedObjects + +[source,ts,subs=+macros] +---- +interface IndicesMappingLimitSettingsNestedObjects { + pass:[/**] @property limit The maximum number of nested JSON objects that a single document can contain across all nested types. This limit helps to prevent out of memory errors when a document contains too many nested objects. */ + limit?: <<long>> +} +---- + + +[discrete] +[[IndicesMappingLimitSettingsTotalFields]] +=== IndicesMappingLimitSettingsTotalFields + +[source,ts,subs=+macros] +---- +interface IndicesMappingLimitSettingsTotalFields { + pass:[/**] @property limit The maximum number of fields in an index. <<Field>> and object mappings, as well as field aliases count towards this limit. The limit is in place to prevent mappings and searches from becoming too large. Higher values can lead to performance degradations and memory issues, especially in clusters with a high load or few resources. */ + limit?: <<long>> | string + pass:[/**] @property ignore_dynamic_beyond_limit This setting determines what happens when a dynamically mapped field would exceed the total fields limit. When set to false (the default), the index request of the document that tries to add a dynamic field to the mapping will fail with the message Limit of total fields [X] has been exceeded. When set to true, the index request will not fail. Instead, fields that would exceed the limit are not added to the mapping, similar to dynamic: false. The fields that were not added to the mapping will be added to the _ignored field. 
*/ + ignore_dynamic_beyond_limit?: boolean | string +} +---- + + +[discrete] +[[IndicesMerge]] +=== IndicesMerge + +[source,ts,subs=+macros] +---- +interface IndicesMerge { + scheduler?: <<IndicesMergeScheduler>> +} +---- + + +[discrete] +[[IndicesMergeScheduler]] +=== IndicesMergeScheduler + +[source,ts,subs=+macros] +---- +interface IndicesMergeScheduler { + max_thread_count?: <<SpecUtilsStringified>><<<integer>>> + max_merge_count?: <<SpecUtilsStringified>><<<integer>>> +} +---- + + +[discrete] +[[IndicesNumericFielddata]] +=== IndicesNumericFielddata + +[source,ts,subs=+macros] +---- +interface IndicesNumericFielddata { + format: <<IndicesNumericFielddataFormat>> +} +---- + + +[discrete] +[[IndicesNumericFielddataFormat]] +=== IndicesNumericFielddataFormat + +[source,ts,subs=+macros] +---- +type IndicesNumericFielddataFormat = 'array' | 'disabled' +---- + + +[discrete] +[[IndicesQueries]] +=== IndicesQueries + +[source,ts,subs=+macros] +---- +interface IndicesQueries { + cache?: <<IndicesCacheQueries>> +} +---- + + +[discrete] +[[IndicesRetentionLease]] +=== IndicesRetentionLease + +[source,ts,subs=+macros] +---- +interface IndicesRetentionLease { + period: <<Duration>> +} +---- + + +[discrete] +[[IndicesSearchIdle]] +=== IndicesSearchIdle + +[source,ts,subs=+macros] +---- +interface IndicesSearchIdle { + after?: <<Duration>> +} +---- + + +[discrete] +[[IndicesSegmentSortMissing]] +=== IndicesSegmentSortMissing + +[source,ts,subs=+macros] +---- +type IndicesSegmentSortMissing = '_last' | '_first' +---- + + +[discrete] +[[IndicesSegmentSortMode]] +=== IndicesSegmentSortMode + +[source,ts,subs=+macros] +---- +type IndicesSegmentSortMode = 'min' | 'MIN' | 'max' | 'MAX' +---- + + +[discrete] +[[IndicesSegmentSortOrder]] +=== IndicesSegmentSortOrder + +[source,ts,subs=+macros] +---- +type IndicesSegmentSortOrder = 'asc' | 'ASC' | 'desc' | 'DESC' +---- + + +[discrete] +[[IndicesSettingsAnalyze]] +=== IndicesSettingsAnalyze + +[source,ts,subs=+macros] +---- +interface IndicesSettingsAnalyze { + max_token_count?: <<SpecUtilsStringified>><<<integer>>> +} +---- + + +[discrete] +[[IndicesSettingsHighlight]] +=== IndicesSettingsHighlight + +[source,ts,subs=+macros] +---- +interface IndicesSettingsHighlight { + max_analyzed_offset?: <<integer>> +} +---- + + +[discrete] +[[IndicesSettingsQueryString]] +=== IndicesSettingsQueryString + +[source,ts,subs=+macros] +---- +interface IndicesSettingsQueryString { + lenient: <<SpecUtilsStringified>><boolean> +} +---- + + +[discrete] +[[IndicesSettingsSearch]] +=== IndicesSettingsSearch + +[source,ts,subs=+macros] +---- +interface IndicesSettingsSearch { + idle?: <<IndicesSearchIdle>> + slowlog?: <<IndicesSlowlogSettings>> +} +---- + + +[discrete] +[[IndicesSettingsSimilarity]] +=== IndicesSettingsSimilarity + +[source,ts,subs=+macros] +---- +type IndicesSettingsSimilarity = IndicesSettingsSimilarityBm25 | <<IndicesSettingsSimilarityBoolean>> | <<IndicesSettingsSimilarityDfi>> | <<IndicesSettingsSimilarityDfr>> | <<IndicesSettingsSimilarityIb>> | <<IndicesSettingsSimilarityLmd>> | <<IndicesSettingsSimilarityLmj>> | <<IndicesSettingsSimilarityScripted>> +---- + + +[discrete] +[[IndicesSettingsSimilarityBm25]] +=== IndicesSettingsSimilarityBm25 + +[source,ts,subs=+macros] +---- +interface IndicesSettingsSimilarityBm25 { + type: 'BM25' + b?: <<double>> + discount_overlaps?: boolean + k1?: <<double>> +} +---- + + +[discrete] +[[IndicesSettingsSimilarityBoolean]] +=== IndicesSettingsSimilarityBoolean + +[source,ts,subs=+macros] +---- +interface 
IndicesSettingsSimilarityBoolean { + type: 'boolean' +} +---- + + +[discrete] +[[IndicesSettingsSimilarityDfi]] +=== IndicesSettingsSimilarityDfi + +[source,ts,subs=+macros] +---- +interface IndicesSettingsSimilarityDfi { + type: 'DFI' + independence_measure: <<DFIIndependenceMeasure>> +} +---- + + +[discrete] +[[IndicesSettingsSimilarityDfr]] +=== IndicesSettingsSimilarityDfr + +[source,ts,subs=+macros] +---- +interface IndicesSettingsSimilarityDfr { + type: 'DFR' + after_effect: <<DFRAfterEffect>> + basic_model: <<DFRBasicModel>> + normalization: <<Normalization>> +} +---- + + +[discrete] +[[IndicesSettingsSimilarityIb]] +=== IndicesSettingsSimilarityIb + +[source,ts,subs=+macros] +---- +interface IndicesSettingsSimilarityIb { + type: 'IB' + distribution: <<IBDistribution>> + lambda: <<IBLambda>> + normalization: <<Normalization>> +} +---- + + +[discrete] +[[IndicesSettingsSimilarityLmd]] +=== IndicesSettingsSimilarityLmd + +[source,ts,subs=+macros] +---- +interface IndicesSettingsSimilarityLmd { + type: 'LMDirichlet' + mu?: <<double>> +} +---- + + +[discrete] +[[IndicesSettingsSimilarityLmj]] +=== IndicesSettingsSimilarityLmj + +[source,ts,subs=+macros] +---- +interface IndicesSettingsSimilarityLmj { + type: 'LMJelinekMercer' + lambda?: <<double>> +} +---- + + +[discrete] +[[IndicesSettingsSimilarityScripted]] +=== IndicesSettingsSimilarityScripted + +[source,ts,subs=+macros] +---- +interface IndicesSettingsSimilarityScripted { + type: 'scripted' + script: <<Script>> | string + weight_script?: <<Script>> | string +} +---- + + +[discrete] +[[IndicesSlowlogSettings]] +=== IndicesSlowlogSettings + +[source,ts,subs=+macros] +---- +interface IndicesSlowlogSettings { + level?: string + source?: <<integer>> + reformat?: boolean + threshold?: <<IndicesSlowlogTresholds>> +} +---- + + +[discrete] +[[IndicesSlowlogTresholdLevels]] +=== IndicesSlowlogTresholdLevels + +[source,ts,subs=+macros] +---- +interface IndicesSlowlogTresholdLevels { + warn?: <<Duration>> + info?: <<Duration>> + debug?: <<Duration>> + trace?: <<Duration>> +} +---- + + +[discrete] +[[IndicesSlowlogTresholds]] +=== IndicesSlowlogTresholds + +[source,ts,subs=+macros] +---- +interface IndicesSlowlogTresholds { + query?: <<IndicesSlowlogTresholdLevels>> + fetch?: <<IndicesSlowlogTresholdLevels>> +} +---- + + +[discrete] +[[IndicesSoftDeletes]] +=== IndicesSoftDeletes + +[source,ts,subs=+macros] +---- +interface IndicesSoftDeletes { + pass:[/**] @property enabled Indicates whether soft deletes are enabled on the index. */ + enabled?: boolean + pass:[/**] @property retention_lease The maximum period to retain a shard history retention lease before it is considered expired. Shard history retention leases ensure that soft deletes are retained during merges on the Lucene index. If a soft delete is merged away before it can be replicated to a follower the following process will fail due to incomplete history on the leader. */ + retention_lease?: <<IndicesRetentionLease>> +} +---- + + +[discrete] +[[IndicesStorage]] +=== IndicesStorage + +[source,ts,subs=+macros] +---- +interface IndicesStorage { + type: <<IndicesStorageType>> + pass:[/**] @property allow_mmap You can restrict the use of the mmapfs and the related hybridfs store type via the setting node.store.allow_mmap. This is a boolean setting indicating whether or not memory-mapping is allowed. The default is to allow it. 
This setting is useful, for example, if you are in an environment where you cannot control the ability to create a lot of memory maps so you need to disable the ability to use memory-mapping. */ + allow_mmap?: boolean +} +---- + + +[discrete] +[[IndicesStorageType]] +=== IndicesStorageType + +[source,ts,subs=+macros] +---- +type IndicesStorageType = 'fs' | 'niofs' | 'mmapfs' | 'hybridfs' | string +---- + + +[discrete] +[[IndicesTemplateMapping]] +=== IndicesTemplateMapping + +[source,ts,subs=+macros] +---- +interface IndicesTemplateMapping { + aliases: Record<<<IndexName>>, <<IndicesAlias>>> + index_patterns: <<Name>>[] + mappings: <<MappingTypeMapping>> + order: <<integer>> + settings: Record<string, any> + version?: <<VersionNumber>> +} +---- + + +[discrete] +[[IndicesTranslog]] +=== IndicesTranslog + +[source,ts,subs=+macros] +---- +interface IndicesTranslog { + pass:[/**] @property sync_interval How often the translog is fsynced to disk and committed, regardless of write operations. Values less than 100ms are not allowed. */ + sync_interval?: <<Duration>> + pass:[/**] @property durability Whether or not to `fsync` and commit the translog after every index, delete, update, or bulk request. */ + durability?: <<IndicesTranslogDurability>> + pass:[/**] @property flush_threshold_size The translog stores all operations that are not yet safely persisted in Lucene (i.e., are not part of a Lucene commit point). Although these operations are available for reads, they will need to be replayed if the shard was stopped and had to be recovered. This setting controls the maximum total size of these operations, to prevent recoveries from taking too long. Once the maximum size has been reached a flush will happen, generating a new Lucene commit point. */ + flush_threshold_size?: <<ByteSize>> + retention?: <<IndicesTranslogRetention>> +} +---- + + +[discrete] +[[IndicesTranslogDurability]] +=== IndicesTranslogDurability + +[source,ts,subs=+macros] +---- +type IndicesTranslogDurability = 'request' | 'REQUEST' | 'async' | 'ASYNC' +---- + + +[discrete] +[[IndicesTranslogRetention]] +=== IndicesTranslogRetention + +[source,ts,subs=+macros] +---- +interface IndicesTranslogRetention { + pass:[/**] @property size This controls the total size of translog files to keep for each shard. Keeping more translog files increases the chance of performing an operation based sync when recovering a replica. If the translog files are not sufficient, replica recovery will fall back to a file based sync. This setting is ignored, and should not be set, if soft deletes are enabled. Soft deletes are enabled by default in indices created in Elasticsearch versions 7.0.0 and later. */ + size?: <<ByteSize>> + pass:[/**] @property age This controls the maximum duration for which translog files are kept by each shard. Keeping more translog files increases the chance of performing an operation based sync when recovering replicas. If the translog files are not sufficient, replica recovery will fall back to a file based sync. This setting is ignored, and should not be set, if soft deletes are enabled. Soft deletes are enabled by default in indices created in Elasticsearch versions 7.0.0 and later. 
*/ + age?: <<Duration>> +} +---- + + diff --git a/docs/reference/shared-types/inference-types.asciidoc b/docs/reference/shared-types/inference-types.asciidoc new file mode 100644 index 000000000..d471e5ec7 --- /dev/null +++ b/docs/reference/shared-types/inference-types.asciidoc @@ -0,0 +1,219 @@ +[[reference-shared-types-inference-types]] + +=== `Inference` types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[InferenceCompletionResult]] +=== InferenceCompletionResult + +[source,ts,subs=+macros] +---- +interface InferenceCompletionResult { + result: string +} +---- + + +[discrete] +[[InferenceDeleteInferenceEndpointResult]] +=== InferenceDeleteInferenceEndpointResult + +[source,ts,subs=+macros] +---- +interface InferenceDeleteInferenceEndpointResult extends <<AcknowledgedResponseBase>> { + pipelines: string[] +} +---- + + +[discrete] +[[InferenceDenseByteVector]] +=== InferenceDenseByteVector + +[source,ts,subs=+macros] +---- +type InferenceDenseByteVector = <<byte>>[] +---- + + +[discrete] +[[InferenceDenseVector]] +=== InferenceDenseVector + +[source,ts,subs=+macros] +---- +type InferenceDenseVector = <<float>>[] +---- + + +[discrete] +[[InferenceInferenceEndpoint]] +=== InferenceInferenceEndpoint + +[source,ts,subs=+macros] +---- +interface InferenceInferenceEndpoint { + pass:[/**] @property service The service type */ + service: string + pass:[/**] @property service_settings Settings specific to the service */ + service_settings: <<InferenceServiceSettings>> + pass:[/**] @property task_settings Task settings specific to the service and task type */ + task_settings?: <<InferenceTaskSettings>> +} +---- + + +[discrete] +[[InferenceInferenceEndpointInfo]] +=== InferenceInferenceEndpointInfo + +[source,ts,subs=+macros] +---- +interface InferenceInferenceEndpointInfo extends <<InferenceInferenceEndpoint>> { + pass:[/**] @property inference_id The inference <<Id>> */ + inference_id: string + pass:[/**] @property task_type The task type */ + task_type: <<InferenceTaskType>> +} +---- + + +[discrete] +[[InferenceInferenceResult]] +=== InferenceInferenceResult + +[source,ts,subs=+macros] +---- +interface InferenceInferenceResult { + text_embedding_bytes?: <<InferenceTextEmbeddingByteResult>>[] + text_embedding?: <<InferenceTextEmbeddingResult>>[] + sparse_embedding?: <<InferenceSparseEmbeddingResult>>[] + completion?: <<InferenceCompletionResult>>[] + rerank?: <<InferenceRankedDocument>>[] +} +---- + + +[discrete] 
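+Here is a minimal sketch of how one of these results might be read from the client, assuming an 8.x client that exposes the `inference.inference` API and a pre-configured `text_embedding` inference endpoint; the endpoint ID `my-embedding-endpoint` and the node URL are placeholders.
+
+[source,ts]
+----
+import { Client } from '@elastic/elasticsearch'
+
+const client = new Client({ node: 'http://localhost:9200' })
+
+async function run () {
+  // The response is typed as InferenceInferenceResult: the task-specific
+  // arrays are all optional, and the one matching the endpoint's task type
+  // (here text_embedding) is the one expected to be populated.
+  const result = await client.inference.inference({
+    inference_id: 'my-embedding-endpoint',
+    input: ['the quick brown fox']
+  })
+
+  if (result.text_embedding != null) {
+    // One InferenceTextEmbeddingResult per input string
+    console.log(result.text_embedding[0].embedding.length)
+  }
+}
+
+run().catch(console.log)
+----
+
+[discrete]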
+[[InferenceRankedDocument]] +=== InferenceRankedDocument + +[source,ts,subs=+macros] +---- +interface InferenceRankedDocument { + index: <<integer>> + score: <<float>> + text?: string +} +---- + + +[discrete] +[[InferenceServiceSettings]] +=== InferenceServiceSettings + +[source,ts,subs=+macros] +---- +type InferenceServiceSettings = any +---- + + +[discrete] +[[InferenceSparseEmbeddingResult]] +=== InferenceSparseEmbeddingResult + +[source,ts,subs=+macros] +---- +interface InferenceSparseEmbeddingResult { + embedding: <<InferenceSparseVector>> +} +---- + + +[discrete] +[[InferenceSparseVector]] +=== InferenceSparseVector + +[source,ts,subs=+macros] +---- +type InferenceSparseVector = Record<string, <<float>>> +---- + + +[discrete] +[[InferenceTaskSettings]] +=== InferenceTaskSettings + +[source,ts,subs=+macros] +---- +type InferenceTaskSettings = any +---- + + +[discrete] +[[InferenceTaskType]] +=== InferenceTaskType + +[source,ts,subs=+macros] +---- +type InferenceTaskType = 'sparse_embedding' | 'text_embedding' | 'rerank' | 'completion' +---- + + +[discrete] +[[InferenceTextEmbeddingByteResult]] +=== InferenceTextEmbeddingByteResult + +[source,ts,subs=+macros] +---- +interface InferenceTextEmbeddingByteResult { + embedding: <<InferenceDenseByteVector>> +} +---- + + +[discrete] +[[InferenceTextEmbeddingResult]] +=== InferenceTextEmbeddingResult + +[source,ts,subs=+macros] +---- +interface InferenceTextEmbeddingResult { + embedding: <<InferenceDenseVector>> +} +---- + + diff --git a/docs/reference/shared-types/ingest-types.asciidoc b/docs/reference/shared-types/ingest-types.asciidoc new file mode 100644 index 000000000..cf4841ff0 --- /dev/null +++ b/docs/reference/shared-types/ingest-types.asciidoc @@ -0,0 +1,1255 @@ +[[reference-shared-types-ingest-types]] + +=== `Ingest` types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[IngestAppendProcessor]] +=== IngestAppendProcessor + +[source,ts,subs=+macros] +---- +interface IngestAppendProcessor extends <<IngestProcessorBase>> { + pass:[/**] @property field The field to be appended to. Supports template snippets. */ + field: <<Field>> + pass:[/**] @property value The value to be appended. Supports template snippets. */ + value: any | any[] + pass:[/**] @property allow_duplicates If `false`, the processor does not append values already present in the field. 
*/ + allow_duplicates?: boolean +} +---- + + +[discrete] +[[IngestAttachmentProcessor]] +=== IngestAttachmentProcessor + +[source,ts,subs=+macros] +---- +interface IngestAttachmentProcessor extends <<IngestProcessorBase>> { + pass:[/**] @property field The field to get the base64 encoded field from. */ + field: <<Field>> + pass:[/**] @property ignore_missing If `true` and field does not exist, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean + pass:[/**] @property indexed_chars The number of chars being used for extraction to prevent huge fields. Use `-1` for no limit. */ + indexed_chars?: <<long>> + pass:[/**] @property indexed_chars_field <<Field>> name from which you can overwrite the number of chars being used for extraction. */ + indexed_chars_field?: <<Field>> + pass:[/**] @property properties Array of properties to select to be stored. Can be `content`, `title`, `name`, `author`, `keywords`, `date`, `content_type`, `content_length`, `language`. */ + properties?: string[] + pass:[/**] @property target_field The field that will hold the attachment information. */ + target_field?: <<Field>> + pass:[/**] @property remove_binary If true, the binary field will be removed from the document */ + remove_binary?: boolean + pass:[/**] @property resource_name <<Field>> containing the name of the resource to decode. If specified, the processor passes this resource name to the underlying Tika library to enable Resource <<Name>> Based Detection. */ + resource_name?: string +} +---- + + +[discrete] +[[IngestBytesProcessor]] +=== IngestBytesProcessor + +[source,ts,subs=+macros] +---- +interface IngestBytesProcessor extends <<IngestProcessorBase>> { + pass:[/**] @property field The field to convert. */ + field: <<Field>> + pass:[/**] @property ignore_missing If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean + pass:[/**] @property target_field The field to assign the converted value to. By default, the field is updated in-place. */ + target_field?: <<Field>> +} +---- + + +[discrete] +[[IngestCircleProcessor]] +=== IngestCircleProcessor + +[source,ts,subs=+macros] +---- +interface IngestCircleProcessor extends <<IngestProcessorBase>> { + pass:[/**] @property error_distance The difference between the resulting inscribed distance from center to side and the circle’s radius (measured in meters for `geo_shape`, unit-less for `shape`). */ + error_distance: <<double>> + pass:[/**] @property field The field to interpret as a circle. Either a string in WKT format or a map for GeoJSON. */ + field: <<Field>> + pass:[/**] @property ignore_missing If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean + pass:[/**] @property shape_type Which field mapping type is to be used when processing the circle: `geo_shape` or `shape`. */ + shape_type: <<IngestShapeType>> + pass:[/**] @property target_field The field to assign the polygon shape to By default, the field is updated in-place. */ + target_field?: <<Field>> +} +---- + + +[discrete] +[[IngestCommunityIDProcessor]] +=== IngestCommunityIDProcessor + +[source,ts,subs=+macros] +---- +interface IngestCommunityIDProcessor extends <<IngestProcessorBase>> { + pass:[/**] @property source_ip <<Field>> containing the source IP address. */ + source_ip?: <<Field>> + pass:[/**] @property source_port <<Field>> containing the source port. 
*/ + source_port?: <<Field>> + pass:[/**] @property destination_ip <<Field>> containing the destination IP address. */ + destination_ip?: <<Field>> + pass:[/**] @property destination_port <<Field>> containing the destination port. */ + destination_port?: <<Field>> + pass:[/**] @property iana_number <<Field>> containing the IANA number. */ + iana_number?: <<Field>> + pass:[/**] @property icmp_type <<Field>> containing the ICMP type. */ + icmp_type?: <<Field>> + pass:[/**] @property icmp_code <<Field>> containing the ICMP code. */ + icmp_code?: <<Field>> + pass:[/**] @property transport <<Field>> containing the transport protocol name or number. Used only when the iana_number field is not present. The following protocol names are currently supported: eigrp, gre, icmp, icmpv6, igmp, ipv6-icmp, ospf, pim, sctp, tcp, udp */ + transport?: <<Field>> + pass:[/**] @property target_field Output field for the community ID. */ + target_field?: <<Field>> + pass:[/**] @property seed Seed for the community ID hash. Must be between 0 and 65535 (inclusive). The seed can prevent hash collisions between network domains, such as a staging and production network that use the same addressing scheme. */ + seed?: <<integer>> + pass:[/**] @property ignore_missing If true and any required fields are missing, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean +} +---- + + +[discrete] +[[IngestConvertProcessor]] +=== IngestConvertProcessor + +[source,ts,subs=+macros] +---- +interface IngestConvertProcessor extends <<IngestProcessorBase>> { + pass:[/**] @property field The field whose value is to be converted. */ + field: <<Field>> + pass:[/**] @property ignore_missing If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean + pass:[/**] @property target_field The field to assign the converted value to. By default, the `field` is updated in-place. */ + target_field?: <<Field>> + pass:[/**] @property type The type to convert the existing value to. */ + type: <<IngestConvertType>> +} +---- + + +[discrete] +[[IngestConvertType]] +=== IngestConvertType + +[source,ts,subs=+macros] +---- +type IngestConvertType = '<<integer>>' | '<<long>>' | '<<double>>' | '<<float>>' | 'boolean' | 'ip' | 'string' | 'auto' +---- + + +[discrete] +[[IngestCsvProcessor]] +=== IngestCsvProcessor + +[source,ts,subs=+macros] +---- +interface IngestCsvProcessor extends <<IngestProcessorBase>> { + pass:[/**] @property empty_value Value used to fill empty fields. Empty fields are skipped if this is not provided. An empty field is one with no value (2 consecutive separators) or empty quotes (`""`). */ + empty_value?: any + pass:[/**] @property field The field to extract data from. */ + field: <<Field>> + pass:[/**] @property ignore_missing If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean + pass:[/**] @property quote Quote used in CSV, has to be single character string. */ + quote?: string + pass:[/**] @property separator Separator used in CSV, has to be single character string. */ + separator?: string + pass:[/**] @property target_fields The array of fields to assign extracted values to. */ + target_fields: <<Fields>> + pass:[/**] @property trim Trim whitespaces in unquoted fields. 
*/ + trim?: boolean +} +---- + + +[discrete] +[[IngestDatabaseConfiguration]] +=== IngestDatabaseConfiguration + +[source,ts,subs=+macros] +---- +interface IngestDatabaseConfiguration { + pass:[/**] @property name The provider-assigned name of the IP geolocation database to download. */ + name: <<Name>> + pass:[/**] @property maxmind The configuration necessary to identify which IP geolocation provider to use to download the database, as well as any provider-specific configuration necessary for such downloading. At present, the only supported provider is maxmind, and the maxmind provider requires that an account_id (string) is configured. */ + maxmind: <<IngestMaxmind>> +} +---- + + +[discrete] +[[IngestDateIndexNameProcessor]] +=== IngestDateIndexNameProcessor + +[source,ts,subs=+macros] +---- +interface IngestDateIndexNameProcessor extends <<IngestProcessorBase>> { + pass:[/**] @property date_formats An array of the expected date formats for parsing dates / timestamps in the document being preprocessed. Can be a java time pattern or one of the following formats: ISO8601, UNIX, UNIX_MS, or TAI64N. */ + date_formats: string[] + pass:[/**] @property date_rounding How to round the date when formatting the date into the index name. Valid values are: `y` (year), `M` (month), `w` (week), `d` (day), `h` (hour), `m` (minute) and `s` (second). Supports template snippets. */ + date_rounding: string + pass:[/**] @property field The field to get the date or timestamp from. */ + field: <<Field>> + pass:[/**] @property index_name_format The format to be used when printing the parsed date into the index name. A valid java time pattern is expected here. Supports template snippets. */ + index_name_format?: string + pass:[/**] @property index_name_prefix A prefix of the index name to be prepended before the printed date. Supports template snippets. */ + index_name_prefix?: string + pass:[/**] @property locale The locale to use when parsing the date from the document being preprocessed, relevant when parsing month names or week days. */ + locale?: string + pass:[/**] @property timezone The timezone to use when parsing the date and when date math index supports resolves expressions into concrete index names. */ + timezone?: string +} +---- + + +[discrete] +[[IngestDateProcessor]] +=== IngestDateProcessor + +[source,ts,subs=+macros] +---- +interface IngestDateProcessor extends <<IngestProcessorBase>> { + pass:[/**] @property field The field to get the date from. */ + field: <<Field>> + pass:[/**] @property formats An array of the expected date formats. Can be a java time pattern or one of the following formats: ISO8601, UNIX, UNIX_MS, or TAI64N. */ + formats: string[] + pass:[/**] @property locale The locale to use when parsing the date, relevant when parsing month names or week days. Supports template snippets. */ + locale?: string + pass:[/**] @property target_field The field that will hold the parsed date. */ + target_field?: <<Field>> + pass:[/**] @property timezone The timezone to use when parsing the date. Supports template snippets. */ + timezone?: string + pass:[/**] @property output_format The format to use when writing the date to target_field. Must be a valid java time pattern. */ + output_format?: string +} +---- + + +[discrete] +[[IngestDissectProcessor]] +=== IngestDissectProcessor + +[source,ts,subs=+macros] +---- +interface IngestDissectProcessor extends <<IngestProcessorBase>> { + pass:[/**] @property append_separator The character(s) that separate the appended fields. 
*/ + append_separator?: string + pass:[/**] @property field The field to dissect. */ + field: <<Field>> + pass:[/**] @property ignore_missing If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean + pass:[/**] @property pattern The pattern to apply to the field. */ + pattern: string +} +---- + + +[discrete] +[[IngestDotExpanderProcessor]] +=== IngestDotExpanderProcessor + +[source,ts,subs=+macros] +---- +interface IngestDotExpanderProcessor extends <<IngestProcessorBase>> { + pass:[/**] @property field The field to expand into an object field. If set to `*`, all top-level fields will be expanded. */ + field: <<Field>> + pass:[/**] @property override Controls the behavior when there is already an existing nested object that conflicts with the expanded field. When `false`, the processor will merge conflicts by combining the old and the new values into an array. When `true`, the value from the expanded field will overwrite the existing value. */ + override?: boolean + pass:[/**] @property path The field that contains the field to expand. Only required if the field to expand is part of another object field, because the `field` option can only understand leaf fields. */ + path?: string +} +---- + + +[discrete] +[[IngestDropProcessor]] +=== IngestDropProcessor + +[source,ts,subs=+macros] +---- +interface IngestDropProcessor extends <<IngestProcessorBase>> {} +---- + + +[discrete] +[[IngestEnrichProcessor]] +=== IngestEnrichProcessor + +[source,ts,subs=+macros] +---- +interface IngestEnrichProcessor extends <<IngestProcessorBase>> { + pass:[/**] @property field The field in the input document that matches the policy's `match_field` used to retrieve the enrichment data. Supports template snippets. */ + field: <<Field>> + pass:[/**] @property ignore_missing If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean + pass:[/**] @property max_matches The maximum number of matched documents to include under the configured target field. The `target_field` will be turned into a JSON array if `max_matches` is higher than 1, otherwise `target_field` will become a JSON object. In order to avoid documents getting too large, the maximum allowed value is 128. */ + max_matches?: <<integer>> + pass:[/**] @property override If `true`, the processor updates fields that already have a non-null value. When set to `false`, such fields will not be touched. */ + override?: boolean + pass:[/**] @property policy_name The name of the enrich policy to use. */ + policy_name: string + pass:[/**] @property shape_relation A spatial relation operator used to match the geoshape of incoming documents to documents in the enrich index. This option is only used for `geo_match` enrich policy types. */ + shape_relation?: <<GeoShapeRelation>> + pass:[/**] @property target_field <<Field>> added to incoming documents to contain enrich data. This field contains both the `match_field` and `enrich_fields` specified in the enrich policy. Supports template snippets. */ + target_field: <<Field>> +} +---- + + +[discrete] +[[IngestFailProcessor]] +=== IngestFailProcessor + +[source,ts,subs=+macros] +---- +interface IngestFailProcessor extends <<IngestProcessorBase>> { + pass:[/**] @property message The error message thrown by the processor. Supports template snippets.
*/ + message: string +} +---- + + +[discrete] +[[IngestFingerprintDigest]] +=== IngestFingerprintDigest + +[source,ts,subs=+macros] +---- +type IngestFingerprintDigest = 'MD5' | 'SHA-1' | 'SHA-256' | 'SHA-512' | 'MurmurHash3' +---- + + +[discrete] +[[IngestFingerprintProcessor]] +=== IngestFingerprintProcessor + +[source,ts,subs=+macros] +---- +interface IngestFingerprintProcessor extends <<IngestProcessorBase>> { + pass:[/**] @property fields Array of fields to include in the fingerprint. For objects, the processor hashes both the field key and value. For other fields, the processor hashes only the field value. */ + fields: <<Fields>> + pass:[/**] @property target_field Output field for the fingerprint. */ + target_field?: <<Field>> + pass:[/**] @property salt Salt value for the hash function. */ + salt?: string + pass:[/**] @property method The hash method used to compute the fingerprint. Must be one of MD5, SHA-1, SHA-256, SHA-512, or MurmurHash3. */ + method?: <<IngestFingerprintDigest>> + pass:[/**] @property ignore_missing If true, the processor ignores any missing fields. If all fields are missing, the processor silently exits without modifying the document. */ + ignore_missing?: boolean +} +---- + + +[discrete] +[[IngestForeachProcessor]] +=== IngestForeachProcessor + +[source,ts,subs=+macros] +---- +interface IngestForeachProcessor extends <<IngestProcessorBase>> { + pass:[/**] @property field <<Field>> containing array or object values. */ + field: <<Field>> + pass:[/**] @property ignore_missing If `true`, the processor silently exits without changing the document if the `field` is `null` or missing. */ + ignore_missing?: boolean + pass:[/**] @property processor Ingest processor to run on each element. */ + processor: <<IngestProcessorContainer>> +} +---- + + +[discrete] +[[IngestGeoGridProcessor]] +=== IngestGeoGridProcessor + +[source,ts,subs=+macros] +---- +interface IngestGeoGridProcessor extends <<IngestProcessorBase>> { + pass:[/**] @property field The field to interpret as a geo-tile. The field format is determined by the `tile_type`. */ + field: string + pass:[/**] @property tile_type Three tile formats are understood: geohash, geotile and geohex. */ + tile_type: <<IngestGeoGridTileType>> + pass:[/**] @property target_field The field to assign the polygon shape to. By default, the `field` is updated in-place. */ + target_field?: <<Field>> + pass:[/**] @property parent_field If specified and a parent tile exists, save that tile address to this field. */ + parent_field?: <<Field>> + pass:[/**] @property children_field If specified and children tiles exist, save those tile addresses to this field as an array of strings. */ + children_field?: <<Field>> + pass:[/**] @property non_children_field If specified and intersecting non-child tiles exist, save their addresses to this field as an array of strings. */ + non_children_field?: <<Field>> + pass:[/**] @property precision_field If specified, save the tile precision (zoom) as an <<integer>> to this field. */ + precision_field?: <<Field>> + pass:[/**] @property ignore_missing If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean + pass:[/**] @property target_format Which format to save the generated polygon in.
*/ + target_format?: <<IngestGeoGridTargetFormat>> +} +---- + + +[discrete] +[[IngestGeoGridTargetFormat]] +=== IngestGeoGridTargetFormat + +[source,ts,subs=+macros] +---- +type IngestGeoGridTargetFormat = 'geojson' | 'wkt' +---- + + +[discrete] +[[IngestGeoGridTileType]] +=== IngestGeoGridTileType + +[source,ts,subs=+macros] +---- +type IngestGeoGridTileType = 'geotile' | 'geohex' | 'geohash' +---- + + +[discrete] +[[IngestGeoIpProcessor]] +=== IngestGeoIpProcessor + +[source,ts,subs=+macros] +---- +interface IngestGeoIpProcessor extends <<IngestProcessorBase>> { + pass:[/**] @property database_file The database filename referring to a database the module ships with (GeoLite2-City.mmdb, GeoLite2-Country.mmdb, or GeoLite2-ASN.mmdb) or a custom database in the ingest-geoip config directory. */ + database_file?: string + pass:[/**] @property field The field to get the IP address from for the geographical lookup. */ + field: <<Field>> + pass:[/**] @property first_only If `true`, only the first found geoip data will be returned, even if the field contains an array. */ + first_only?: boolean + pass:[/**] @property ignore_missing If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean + pass:[/**] @property properties Controls what properties are added to the `target_field` based on the geoip lookup. */ + properties?: string[] + pass:[/**] @property target_field The field that will hold the geographical information looked up from the MaxMind database. */ + target_field?: <<Field>> + pass:[/**] @property download_database_on_pipeline_creation If `true` (and if `ingest.geoip.downloader.eager.download` is `false`), the missing database is downloaded when the pipeline is created. Otherwise, the download is triggered when the pipeline is used as the `default_pipeline` or `final_pipeline` in an index. */ + download_database_on_pipeline_creation?: boolean +} +---- + + +[discrete] +[[IngestGrokProcessor]] +=== IngestGrokProcessor + +[source,ts,subs=+macros] +---- +interface IngestGrokProcessor extends <<IngestProcessorBase>> { + pass:[/**] @property ecs_compatibility Must be disabled or v1. If v1, the processor uses patterns with Elastic Common Schema (ECS) field names. */ + ecs_compatibility?: string + pass:[/**] @property field The field to use for grok expression parsing. */ + field: <<Field>> + pass:[/**] @property ignore_missing If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean + pass:[/**] @property pattern_definitions A map of pattern-name and pattern tuples defining custom patterns to be used by the current processor. Patterns matching existing names will override the pre-existing definition. */ + pattern_definitions?: Record<string, string> + pass:[/**] @property patterns An ordered list of grok expressions to match and extract named captures with. Returns on the first expression in the list that matches. */ + patterns: <<GrokPattern>>[] + pass:[/**] @property trace_match When `true`, `_ingest._grok_match_index` will be inserted into your matched document’s metadata with the index into the pattern found in `patterns` that matched. */ + trace_match?: boolean +} +---- + + +[discrete] +[[IngestGsubProcessor]] +=== IngestGsubProcessor + +[source,ts,subs=+macros] +---- +interface IngestGsubProcessor extends <<IngestProcessorBase>> { + pass:[/**] @property field The field to apply the replacement to.
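For example (an illustrative sketch; the field name is hypothetical), `{ gsub: { field: 'path', pattern: '/', replacement: '-' } }` replaces every `/` in the `path` field with `-`.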
*/ + field: <<Field>> + pass:[/**] @property ignore_missing If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean + pass:[/**] @property pattern The pattern to be replaced. */ + pattern: string + pass:[/**] @property replacement The string to replace the matching patterns with. */ + replacement: string + pass:[/**] @property target_field The field to assign the converted value to. By default, the `field` is updated in-place. */ + target_field?: <<Field>> +} +---- + + +[discrete] +[[IngestHtmlStripProcessor]] +=== IngestHtmlStripProcessor + +[source,ts,subs=+macros] +---- +interface IngestHtmlStripProcessor extends <<IngestProcessorBase>> { + pass:[/**] @property field The string-valued field to remove HTML tags from. */ + field: <<Field>> + pass:[/**] @property ignore_missing If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean + pass:[/**] @property target_field The field to assign the converted value to. By default, the `field` is updated in-place. */ + target_field?: <<Field>> +} +---- + + +[discrete] +[[IngestInferenceConfig]] +=== IngestInferenceConfig + +[source,ts,subs=+macros] +---- +interface IngestInferenceConfig { + pass:[/**] @property regression Regression configuration for inference. */ + regression?: <<IngestInferenceConfigRegression>> + pass:[/**] @property classification Classification configuration for inference. */ + classification?: <<IngestInferenceConfigClassification>> +} +---- + + +[discrete] +[[IngestInferenceConfigClassification]] +=== IngestInferenceConfigClassification + +[source,ts,subs=+macros] +---- +interface IngestInferenceConfigClassification { + pass:[/**] @property num_top_classes Specifies the number of top class predictions to return. */ + num_top_classes?: <<integer>> + pass:[/**] @property num_top_feature_importance_values Specifies the maximum number of feature importance values per document. */ + num_top_feature_importance_values?: <<integer>> + pass:[/**] @property results_field The field that is added to incoming documents to contain the inference prediction. */ + results_field?: <<Field>> + pass:[/**] @property top_classes_results_field Specifies the field to which the top classes are written. */ + top_classes_results_field?: <<Field>> + pass:[/**] @property prediction_field_type Specifies the type of the predicted field to write. Valid values are: `string`, `number`, `boolean`. */ + prediction_field_type?: string +} +---- + + +[discrete] +[[IngestInferenceConfigRegression]] +=== IngestInferenceConfigRegression + +[source,ts,subs=+macros] +---- +interface IngestInferenceConfigRegression { + pass:[/**] @property results_field The field that is added to incoming documents to contain the inference prediction. */ + results_field?: <<Field>> + pass:[/**] @property num_top_feature_importance_values Specifies the maximum number of feature importance values per document. */ + num_top_feature_importance_values?: <<integer>> +} +---- + + +[discrete] +[[IngestInferenceProcessor]] +=== IngestInferenceProcessor + +[source,ts,subs=+macros] +---- +interface IngestInferenceProcessor extends <<IngestProcessorBase>> { + pass:[/**] @property model_id The ID or alias for the trained model, or the ID of the deployment. */ + model_id: <<Id>> + pass:[/**] @property target_field <<Field>> added to incoming documents to contain results objects.
*/ + target_field?: <<Field>> + pass:[/**] @property field_map Maps the document field names to the known field names of the model. This mapping takes precedence over any default mappings provided in the model configuration. */ + field_map?: Record<<<Field>>, any> + pass:[/**] @property inference_config Contains the inference type and its options. */ + inference_config?: <<IngestInferenceConfig>> +} +---- + + +[discrete] +[[IngestIpLocationProcessor]] +=== IngestIpLocationProcessor + +[source,ts,subs=+macros] +---- +interface IngestIpLocationProcessor extends <<IngestProcessorBase>> { + pass:[/**] @property database_file The database filename referring to a database the module ships with (GeoLite2-City.mmdb, GeoLite2-Country.mmdb, or GeoLite2-ASN.mmdb) or a custom database in the ingest-geoip config directory. */ + database_file?: string + pass:[/**] @property field The field to get the IP address from for the geographical lookup. */ + field: <<Field>> + pass:[/**] @property first_only If `true`, only the first found IP location data will be returned, even if the field contains an array. */ + first_only?: boolean + pass:[/**] @property ignore_missing If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean + pass:[/**] @property properties Controls what properties are added to the `target_field` based on the IP location lookup. */ + properties?: string[] + pass:[/**] @property target_field The field that will hold the geographical information looked up from the MaxMind database. */ + target_field?: <<Field>> + pass:[/**] @property download_database_on_pipeline_creation If `true` (and if `ingest.geoip.downloader.eager.download` is `false`), the missing database is downloaded when the pipeline is created. Otherwise, the download is triggered when the pipeline is used as the `default_pipeline` or `final_pipeline` in an index. */ + download_database_on_pipeline_creation?: boolean +} +---- + + +[discrete] +[[IngestJoinProcessor]] +=== IngestJoinProcessor + +[source,ts,subs=+macros] +---- +interface IngestJoinProcessor extends <<IngestProcessorBase>> { + pass:[/**] @property field <<Field>> containing array values to join. */ + field: <<Field>> + pass:[/**] @property separator The separator character. */ + separator: string + pass:[/**] @property target_field The field to assign the joined value to. By default, the field is updated in-place. */ + target_field?: <<Field>> +} +---- + + +[discrete] +[[IngestJsonProcessor]] +=== IngestJsonProcessor + +[source,ts,subs=+macros] +---- +interface IngestJsonProcessor extends <<IngestProcessorBase>> { + pass:[/**] @property add_to_root Flag that forces the parsed JSON to be added at the top level of the document. `target_field` must not be set when this option is chosen. */ + add_to_root?: boolean + pass:[/**] @property add_to_root_conflict_strategy When set to `replace`, root fields that conflict with fields from the parsed JSON will be overridden. When set to `merge`, conflicting fields will be merged. Only applicable if `add_to_root` is set to `true`. */ + add_to_root_conflict_strategy?: <<IngestJsonProcessorConflictStrategy>> + pass:[/**] @property allow_duplicate_keys When set to `true`, the JSON parser will not fail if the JSON contains duplicate keys. Instead, the last encountered value for any duplicate key wins. */ + allow_duplicate_keys?: boolean + pass:[/**] @property field The field to be parsed.
*/ + field: <<Field>> + pass:[/**] @property target_field The field that the converted structured object will be written into. Any existing content in this field will be overwritten. */ + target_field?: <<Field>> +} +---- + + +[discrete] +[[IngestJsonProcessorConflictStrategy]] +=== IngestJsonProcessorConflictStrategy + +[source,ts,subs=+macros] +---- +type IngestJsonProcessorConflictStrategy = 'replace' | 'merge' +---- + + +[discrete] +[[IngestKeyValueProcessor]] +=== IngestKeyValueProcessor + +[source,ts,subs=+macros] +---- +interface IngestKeyValueProcessor extends <<IngestProcessorBase>> { + pass:[/**] @property exclude_keys List of keys to exclude from the document. */ + exclude_keys?: string[] + pass:[/**] @property field The field to be parsed. Supports template snippets. */ + field: <<Field>> + pass:[/**] @property field_split Regex pattern to use for splitting key-value pairs. */ + field_split: string + pass:[/**] @property ignore_missing If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean + pass:[/**] @property include_keys List of keys to filter and insert into the document. Defaults to including all keys. */ + include_keys?: string[] + pass:[/**] @property prefix Prefix to be added to extracted keys. */ + prefix?: string + pass:[/**] @property strip_brackets If `true`, strip brackets `()`, `<>`, `[]` as well as quotes `'` and `"` from extracted values. */ + strip_brackets?: boolean + pass:[/**] @property target_field The field to insert the extracted keys into. Defaults to the root of the document. Supports template snippets. */ + target_field?: <<Field>> + pass:[/**] @property trim_key String of characters to trim from extracted keys. */ + trim_key?: string + pass:[/**] @property trim_value String of characters to trim from extracted values. */ + trim_value?: string + pass:[/**] @property value_split Regex pattern to use for splitting the key from the value within a key-value pair. */ + value_split: string +} +---- + + +[discrete] +[[IngestLowercaseProcessor]] +=== IngestLowercaseProcessor + +[source,ts,subs=+macros] +---- +interface IngestLowercaseProcessor extends <<IngestProcessorBase>> { + pass:[/**] @property field The field to make lowercase. */ + field: <<Field>> + pass:[/**] @property ignore_missing If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean + pass:[/**] @property target_field The field to assign the converted value to. By default, the field is updated in-place. */ + target_field?: <<Field>> +} +---- + + +[discrete] +[[IngestMaxmind]] +=== IngestMaxmind + +[source,ts,subs=+macros] +---- +interface IngestMaxmind { + account_id: <<Id>> +} +---- + + +[discrete] +[[IngestNetworkDirectionProcessor]] +=== IngestNetworkDirectionProcessor + +[source,ts,subs=+macros] +---- +interface IngestNetworkDirectionProcessor extends <<IngestProcessorBase>> { + pass:[/**] @property source_ip <<Field>> containing the source IP address. */ + source_ip?: <<Field>> + pass:[/**] @property destination_ip <<Field>> containing the destination IP address. */ + destination_ip?: <<Field>> + pass:[/**] @property target_field Output field for the network direction. */ + target_field?: <<Field>> + pass:[/**] @property internal_networks List of internal networks. Supports IPv4 and IPv6 addresses and ranges in CIDR notation. Also supports the named ranges listed below. These may be constructed with template snippets.
Must specify only one of internal_networks or internal_networks_field. */ + internal_networks?: string[] + pass:[/**] @property internal_networks_field A field on the given document to read the internal_networks configuration from. */ + internal_networks_field?: <<Field>> + pass:[/**] @property ignore_missing If true and any required fields are missing, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean +} +---- + + +[discrete] +[[IngestPipeline]] +=== IngestPipeline + +[source,ts,subs=+macros] +---- +interface IngestPipeline { + pass:[/**] @property description Description of the ingest pipeline. */ + description?: string + pass:[/**] @property on_failure Processors to run immediately after a processor failure. */ + on_failure?: <<IngestProcessorContainer>>[] + pass:[/**] @property processors Processors used to perform transformations on documents before indexing. Processors run sequentially in the order specified. */ + processors?: <<IngestProcessorContainer>>[] + pass:[/**] @property version Version number used by external systems to track ingest pipelines. */ + version?: <<VersionNumber>> + pass:[/**] @property deprecated Marks this ingest pipeline as deprecated. When a deprecated ingest pipeline is referenced as the default or final pipeline when creating or updating a non-deprecated index template, Elasticsearch will emit a deprecation warning. */ + deprecated?: boolean + pass:[/**] @property _meta Arbitrary metadata about the ingest pipeline. This map is not automatically generated by Elasticsearch. */ + _meta?: <<Metadata>> +} +---- + + +[discrete] +[[IngestPipelineConfig]] +=== IngestPipelineConfig + +[source,ts,subs=+macros] +---- +interface IngestPipelineConfig { + pass:[/**] @property description Description of the ingest pipeline. */ + description?: string + pass:[/**] @property version Version number used by external systems to track ingest pipelines. */ + version?: <<VersionNumber>> + pass:[/**] @property processors Processors used to perform transformations on documents before indexing. Processors run sequentially in the order specified. */ + processors: <<IngestProcessorContainer>>[] +} +---- + + +[discrete] +[[IngestPipelineProcessor]] +=== IngestPipelineProcessor + +[source,ts,subs=+macros] +---- +interface IngestPipelineProcessor extends <<IngestProcessorBase>> { + pass:[/**] @property name The name of the pipeline to execute. Supports template snippets. */ + name: <<Name>> + pass:[/**] @property ignore_missing_pipeline Whether to ignore missing pipelines instead of failing. */ + ignore_missing_pipeline?: boolean +} +---- + + +[discrete] +[[IngestProcessorBase]] +=== IngestProcessorBase + +[source,ts,subs=+macros] +---- +interface IngestProcessorBase { + pass:[/**] @property description Description of the processor. Useful for describing the purpose of the processor or its configuration. */ + description?: string + pass:[/**] @property if Conditionally execute the processor. */ + if?: string + pass:[/**] @property ignore_failure Ignore failures for the processor. */ + ignore_failure?: boolean + pass:[/**] @property on_failure Handle failures for the processor. */ + on_failure?: <<IngestProcessorContainer>>[] + pass:[/**] @property tag Identifier for the processor. Useful for debugging and metrics. 
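As an illustrative sketch (assuming an instantiated `client`; the pipeline id and field name are hypothetical), these base options are passed alongside a processor's own options when creating a pipeline, e.g. `await client.ingest.putPipeline({ id: 'my-pipeline', processors: [{ lowercase: { field: 'user.name', tag: 'lowercase-username', ignore_failure: true } }] })`, where the `tag` identifies this processor instance in errors and metrics.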
*/ + tag?: string +} +---- + + +[discrete] +[[IngestProcessorContainer]] +=== IngestProcessorContainer + +[source,ts,subs=+macros] +---- +interface IngestProcessorContainer { + pass:[/**] @property append Appends one or more values to an existing array if the field already exists and it is an array. Converts a scalar to an array and appends one or more values to it if the field exists and it is a scalar. Creates an array containing the provided values if the field doesn’t exist. Accepts a single value or an array of values. */ + append?: <<IngestAppendProcessor>> + pass:[/**] @property attachment The attachment processor lets Elasticsearch extract file attachments in common formats (such as PPT, XLS, and PDF) by using the Apache text extraction library Tika. */ + attachment?: <<IngestAttachmentProcessor>> + pass:[/**] @property bytes Converts a human readable <<byte>> value (for example `1kb`) to its value in bytes (for example `1024`). If the field is an array of strings, all members of the array will be converted. Supported human readable units are "b", "kb", "mb", "gb", "tb", "pb" case insensitive. An error will occur if the field is not a supported format or resultant value exceeds 2^63. */ + bytes?: <<IngestBytesProcessor>> + pass:[/**] @property circle Converts circle definitions of shapes to regular polygons which approximate them. */ + circle?: <<IngestCircleProcessor>> + pass:[/**] @property community_id Computes the Community ID for network flow data as defined in the Community ID Specification. You can use a community ID to correlate network events related to a single flow. */ + community_id?: <<IngestCommunityIDProcessor>> + pass:[/**] @property convert Converts a field in the currently ingested document to a different type, such as converting a string to an <<integer>>. If the field value is an array, all members will be converted. */ + convert?: <<IngestConvertProcessor>> + pass:[/**] @property csv Extracts fields from CSV line out of a single text field within a document. Any empty field in CSV will be skipped. */ + csv?: <<IngestCsvProcessor>> + pass:[/**] @property date Parses dates from fields, and then uses the date or timestamp as the timestamp for the document. */ + date?: <<IngestDateProcessor>> + pass:[/**] @property date_index_name The purpose of this processor is to point documents to the right time based index based on a date or timestamp field in a document by using the date math index name support. */ + date_index_name?: <<IngestDateIndexNameProcessor>> + pass:[/**] @property dissect Extracts structured fields out of a single text field by matching the text field against a delimiter-based pattern. */ + dissect?: <<IngestDissectProcessor>> + pass:[/**] @property dot_expander Expands a field with dots into an object field. This processor allows fields with dots in the name to be accessible by other processors in the pipeline. Otherwise these fields can’t be accessed by any processor. */ + dot_expander?: <<IngestDotExpanderProcessor>> + pass:[/**] @property drop Drops the document without raising any errors. This is useful to prevent the document from getting indexed based on some condition. */ + drop?: <<IngestDropProcessor>> + pass:[/**] @property enrich The `enrich` processor can enrich documents with data from another index. */ + enrich?: <<IngestEnrichProcessor>> + pass:[/**] @property fail Raises an exception. This is useful for when you expect a pipeline to fail and want to relay a specific message to the requester. 
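For example (an illustrative sketch; the condition and message are hypothetical), `{ fail: { if: 'ctx.tags == null', message: 'The document is missing tags' } }` rejects documents that have no `tags` field.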
*/ + fail?: <<IngestFailProcessor>> + pass:[/**] @property fingerprint Computes a hash of the document’s content. You can use this hash for content fingerprinting. */ + fingerprint?: <<IngestFingerprintProcessor>> + pass:[/**] @property foreach Runs an ingest processor on each element of an array or object. */ + foreach?: <<IngestForeachProcessor>> + pass:[/**] @property ip_location Currently an undocumented alias for GeoIP Processor. */ + ip_location?: <<IngestIpLocationProcessor>> + pass:[/**] @property geo_grid Converts geo-grid definitions of grid tiles or cells to regular bounding boxes or polygons which describe their shape. This is useful if there is a need to interact with the tile shapes as spatially indexable fields. */ + geo_grid?: <<IngestGeoGridProcessor>> + pass:[/**] @property geoip The `geoip` processor adds information about the geographical location of an IPv4 or IPv6 address. */ + geoip?: <<IngestGeoIpProcessor>> + pass:[/**] @property grok Extracts structured fields out of a single text field within a document. You choose which field to extract matched fields from, as well as the grok pattern you expect will match. A grok pattern is like a regular expression that supports aliased expressions that can be reused. */ + grok?: <<IngestGrokProcessor>> + pass:[/**] @property gsub Converts a string field by applying a regular expression and a replacement. If the field is an array of string, all members of the array will be converted. If any non-string values are encountered, the processor will throw an exception. */ + gsub?: <<IngestGsubProcessor>> + pass:[/**] @property html_strip Removes HTML tags from the field. If the field is an array of strings, HTML tags will be removed from all members of the array. */ + html_strip?: <<IngestHtmlStripProcessor>> + pass:[/**] @property inference Uses a pre-trained data frame analytics model or a model deployed for natural language processing tasks to infer against the data that is being ingested in the pipeline. */ + inference?: <<IngestInferenceProcessor>> + pass:[/**] @property join Joins each element of an array into a single string using a separator character between each element. Throws an error when the field is not an array. */ + join?: <<IngestJoinProcessor>> + pass:[/**] @property json Converts a JSON string into a structured JSON object. */ + json?: <<IngestJsonProcessor>> + pass:[/**] @property kv This processor helps automatically parse messages (or specific event fields) which are of the `foo=bar` variety. */ + kv?: <<IngestKeyValueProcessor>> + pass:[/**] @property lowercase Converts a string to its lowercase equivalent. If the field is an array of strings, all members of the array will be converted. */ + lowercase?: <<IngestLowercaseProcessor>> + pass:[/**] @property network_direction Calculates the network direction given a source IP address, destination IP address, and a list of internal networks. */ + network_direction?: <<IngestNetworkDirectionProcessor>> + pass:[/**] @property pipeline Executes another pipeline. */ + pipeline?: <<IngestPipelineProcessor>> + pass:[/**] @property redact The Redact processor uses the Grok rules engine to obscure text in the input document matching the given Grok patterns. The processor can be used to obscure Personal Identifying Information (PII) by configuring it to detect known patterns such as email or IP addresses. 
Text that matches a Grok pattern is replaced with a configurable string such as `<EMAIL>` where an email address is matched or simply replace all matches with the text `<REDACTED>` if preferred. */ + redact?: <<IngestRedactProcessor>> + pass:[/**] @property registered_domain Extracts the registered domain (also known as the effective top-level domain or eTLD), sub-domain, and top-level domain from a fully qualified domain name (FQDN). Uses the registered domains defined in the Mozilla Public Suffix List. */ + registered_domain?: <<IngestRegisteredDomainProcessor>> + pass:[/**] @property remove Removes existing fields. If one field doesn’t exist, an exception will be thrown. */ + remove?: <<IngestRemoveProcessor>> + pass:[/**] @property rename Renames an existing field. If the field doesn’t exist or the new name is already used, an exception will be thrown. */ + rename?: <<IngestRenameProcessor>> + pass:[/**] @property reroute Routes a document to another target index or data stream. When setting the `destination` option, the target is explicitly specified and the dataset and namespace options can’t be set. When the `destination` option is not set, this processor is in a data stream mode. Note that in this mode, the reroute processor can only be used on data streams that follow the data stream naming scheme. */ + reroute?: <<IngestRerouteProcessor>> + pass:[/**] @property script Runs an inline or stored script on incoming documents. The script runs in the `ingest` context. */ + script?: <<IngestScriptProcessor>> + pass:[/**] @property set Adds a field with the specified value. If the field already exists, its value will be replaced with the provided one. */ + set?: <<IngestSetProcessor>> + pass:[/**] @property set_security_user Sets user-related details (such as `username`, `roles`, `email`, `full_name`, `metadata`, `api_key`, `realm` and `authentication_type`) from the current authenticated user to the current document by pre-processing the ingest. */ + set_security_user?: <<IngestSetSecurityUserProcessor>> + pass:[/**] @property sort Sorts the elements of an array ascending or descending. Homogeneous arrays of numbers will be sorted numerically, while arrays of strings or heterogeneous arrays of strings + numbers will be sorted lexicographically. Throws an error when the field is not an array. */ + sort?: <<IngestSortProcessor>> + pass:[/**] @property split Splits a field into an array using a separator character. Only works on string fields. */ + split?: <<IngestSplitProcessor>> + pass:[/**] @property terminate Terminates the current ingest pipeline, causing no further processors to be run. This will normally be executed conditionally, using the `if` option. */ + terminate?: <<IngestTerminateProcessor>> + pass:[/**] @property trim Trims whitespace from a field. If the field is an array of strings, all members of the array will be trimmed. This only works on leading and trailing whitespace. */ + trim?: <<IngestTrimProcessor>> + pass:[/**] @property uppercase Converts a string to its uppercase equivalent. If the field is an array of strings, all members of the array will be converted. */ + uppercase?: <<IngestUppercaseProcessor>> + pass:[/**] @property urldecode URL-decodes a string. If the field is an array of strings, all members of the array will be decoded. */ + urldecode?: <<IngestUrlDecodeProcessor>> + pass:[/**] @property uri_parts Parses a Uniform Resource Identifier (URI) string and extracts its components as an object. 
This URI object includes properties for the URI’s domain, path, fragment, port, query, scheme, user info, username, and password. */ + uri_parts?: <<IngestUriPartsProcessor>> + pass:[/**] @property user_agent The `user_agent` processor extracts details from the user agent string a browser sends with its web requests. This processor adds this information by default under the `user_agent` field. */ + user_agent?: <<IngestUserAgentProcessor>> +} +---- + + +[discrete] +[[IngestRedactProcessor]] +=== IngestRedactProcessor + +[source,ts,subs=+macros] +---- +interface IngestRedactProcessor extends <<IngestProcessorBase>> { + pass:[/**] @property field The field to be redacted */ + field: <<Field>> + pass:[/**] @property patterns A list of grok expressions to match and redact named captures with */ + patterns: <<GrokPattern>>[] + pattern_definitions?: Record<string, string> + pass:[/**] @property prefix Start a redacted section with this token */ + prefix?: string + pass:[/**] @property suffix End a redacted section with this token */ + suffix?: string + pass:[/**] @property ignore_missing If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean + pass:[/**] @property skip_if_unlicensed If `true` and the current license does not support running redact processors, then the processor quietly exits without modifying the document */ + skip_if_unlicensed?: boolean + pass:[/**] @property trace_redact If `true` then ingest metadata `_ingest._redact._is_redacted` is set to `true` if the document has been redacted */ + trace_redact?: boolean +} +---- + + +[discrete] +[[IngestRegisteredDomainProcessor]] +=== IngestRegisteredDomainProcessor + +[source,ts,subs=+macros] +---- +interface IngestRegisteredDomainProcessor extends <<IngestProcessorBase>> { + pass:[/**] @property field <<Field>> containing the source FQDN. */ + field: <<Field>> + pass:[/**] @property target_field Object field containing extracted domain components. If an empty string, the processor adds components to the document’s root. */ + target_field?: <<Field>> + pass:[/**] @property ignore_missing If true and any required fields are missing, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean +} +---- + + +[discrete] +[[IngestRemoveProcessor]] +=== IngestRemoveProcessor + +[source,ts,subs=+macros] +---- +interface IngestRemoveProcessor extends <<IngestProcessorBase>> { + pass:[/**] @property field <<Fields>> to be removed. Supports template snippets. */ + field: <<Fields>> + pass:[/**] @property keep <<Fields>> to be kept. When set, all fields other than those specified are removed. */ + keep?: <<Fields>> + pass:[/**] @property ignore_missing If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean +} +---- + + +[discrete] +[[IngestRenameProcessor]] +=== IngestRenameProcessor + +[source,ts,subs=+macros] +---- +interface IngestRenameProcessor extends <<IngestProcessorBase>> { + pass:[/**] @property field The field to be renamed. Supports template snippets. */ + field: <<Field>> + pass:[/**] @property ignore_missing If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean + pass:[/**] @property target_field The new name of the field. Supports template snippets. 
*/ + target_field: <<Field>> +} +---- + + +[discrete] +[[IngestRerouteProcessor]] +=== IngestRerouteProcessor + +[source,ts,subs=+macros] +---- +interface IngestRerouteProcessor extends <<IngestProcessorBase>> { + pass:[/**] @property destination A static value for the target. Can’t be set when the dataset or namespace option is set. */ + destination?: string + pass:[/**] @property dataset <<Field>> references or a static value for the dataset part of the data stream name. In addition to the criteria for index names, cannot contain - and must be no longer than 100 characters. Example values are nginx.access and nginx.error. Supports field references with a mustache-like syntax (denoted as {{<<double>>}} or {{{triple}}} curly braces). When resolving field references, the processor replaces invalid characters with _. Uses the <dataset> part of the index name as a fallback if all field references resolve to a null, missing, or non-string value. default {{data_stream.dataset}} */ + dataset?: string | string[] + pass:[/**] @property namespace <<Field>> references or a static value for the namespace part of the data stream name. See the criteria for index names for allowed characters. Must be no longer than 100 characters. Supports field references with a mustache-like syntax (denoted as {{<<double>>}} or {{{triple}}} curly braces). When resolving field references, the processor replaces invalid characters with _. Uses the <namespace> part of the index name as a fallback if all field references resolve to a null, missing, or non-string value. default {{data_stream.namespace}} */ + namespace?: string | string[] +} +---- + + +[discrete] +[[IngestScriptProcessor]] +=== IngestScriptProcessor + +[source,ts,subs=+macros] +---- +interface IngestScriptProcessor extends <<IngestProcessorBase>> { + pass:[/**] @property id ID of a stored script. If no `source` is specified, this parameter is required. */ + id?: <<Id>> + pass:[/**] @property lang <<Script>> language. */ + lang?: string + pass:[/**] @property params Object containing parameters for the script. */ + params?: Record<string, any> + pass:[/**] @property source Inline script. If no `id` is specified, this parameter is required. */ + source?: string +} +---- + + +[discrete] +[[IngestSetProcessor]] +=== IngestSetProcessor + +[source,ts,subs=+macros] +---- +interface IngestSetProcessor extends <<IngestProcessorBase>> { + pass:[/**] @property copy_from The origin field which will be copied to `field`, cannot set `value` simultaneously. Supported data types are `boolean`, `number`, `array`, `object`, `string`, `date`, etc. */ + copy_from?: <<Field>> + pass:[/**] @property field The field to insert, upsert, or update. Supports template snippets. */ + field: <<Field>> + pass:[/**] @property ignore_empty_value If `true` and `value` is a template snippet that evaluates to `null` or the empty string, the processor quietly exits without modifying the document. */ + ignore_empty_value?: boolean + pass:[/**] @property media_type The media type for encoding `value`. Applies only when value is a template snippet. Must be one of `application/json`, `text/plain`, or `application/x-www-form-urlencoded`. */ + media_type?: string + pass:[/**] @property override If `true` processor will update fields with pre-existing non-null-valued field. When set to `false`, such fields will not be touched. */ + override?: boolean + pass:[/**] @property value The value to be set for the field. Supports template snippets. May specify only one of `value` or `copy_from`. 
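For example (an illustrative sketch; field names are hypothetical), `{ set: { field: 'environment', value: 'staging' } }` sets a literal value, while `{ set: { field: 'user.id', copy_from: 'session.user_id' } }` copies the value from another field.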
*/ + value?: any +} +---- + + +[discrete] +[[IngestSetSecurityUserProcessor]] +=== IngestSetSecurityUserProcessor + +[source,ts,subs=+macros] +---- +interface IngestSetSecurityUserProcessor extends <<IngestProcessorBase>> { + pass:[/**] @property field The field to store the user information into. */ + field: <<Field>> + pass:[/**] @property properties Controls what user related properties are added to the field. */ + properties?: string[] +} +---- + + +[discrete] +[[IngestShapeType]] +=== IngestShapeType + +[source,ts,subs=+macros] +---- +type IngestShapeType = 'geo_shape' | 'shape' +---- + + +[discrete] +[[IngestSortProcessor]] +=== IngestSortProcessor + +[source,ts,subs=+macros] +---- +interface IngestSortProcessor extends <<IngestProcessorBase>> { + pass:[/**] @property field The field to be sorted. */ + field: <<Field>> + pass:[/**] @property order The sort order to use. Accepts `"asc"` or `"desc"`. */ + order?: <<SortOrder>> + pass:[/**] @property target_field The field to assign the sorted value to. By default, the field is updated in-place. */ + target_field?: <<Field>> +} +---- + + +[discrete] +[[IngestSplitProcessor]] +=== IngestSplitProcessor + +[source,ts,subs=+macros] +---- +interface IngestSplitProcessor extends <<IngestProcessorBase>> { + pass:[/**] @property field The field to split. */ + field: <<Field>> + pass:[/**] @property ignore_missing If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean + pass:[/**] @property preserve_trailing Preserves empty trailing fields, if any. */ + preserve_trailing?: boolean + pass:[/**] @property separator A regex which matches the separator, for example, `,` or `\s+`. */ + separator: string + pass:[/**] @property target_field The field to assign the split value to. By default, the field is updated in-place. */ + target_field?: <<Field>> +} +---- + + +[discrete] +[[IngestTerminateProcessor]] +=== IngestTerminateProcessor + +[source,ts,subs=+macros] +---- +interface IngestTerminateProcessor extends <<IngestProcessorBase>> {} +---- + + +[discrete] +[[IngestTrimProcessor]] +=== IngestTrimProcessor + +[source,ts,subs=+macros] +---- +interface IngestTrimProcessor extends <<IngestProcessorBase>> { + pass:[/**] @property field The string-valued field to trim whitespace from. */ + field: <<Field>> + pass:[/**] @property ignore_missing If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean + pass:[/**] @property target_field The field to assign the trimmed value to. By default, the field is updated in-place. */ + target_field?: <<Field>> +} +---- + + +[discrete] +[[IngestUppercaseProcessor]] +=== IngestUppercaseProcessor + +[source,ts,subs=+macros] +---- +interface IngestUppercaseProcessor extends <<IngestProcessorBase>> { + pass:[/**] @property field The field to make uppercase. */ + field: <<Field>> + pass:[/**] @property ignore_missing If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean + pass:[/**] @property target_field The field to assign the converted value to. By default, the field is updated in-place. */ + target_field?: <<Field>> +} +---- + + +[discrete] +[[IngestUriPartsProcessor]] +=== IngestUriPartsProcessor + +[source,ts,subs=+macros] +---- +interface IngestUriPartsProcessor extends <<IngestProcessorBase>> { + pass:[/**] @property field <<Field>> containing the URI string. 
*/ + field: <<Field>> + pass:[/**] @property ignore_missing If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean + pass:[/**] @property keep_original If `true`, the processor copies the unparsed URI to `<target_field>.original`. */ + keep_original?: boolean + pass:[/**] @property remove_if_successful If `true`, the processor removes the `field` after parsing the URI string. If parsing fails, the processor does not remove the `field`. */ + remove_if_successful?: boolean + pass:[/**] @property target_field Output field for the URI object. */ + target_field?: <<Field>> +} +---- + + +[discrete] +[[IngestUrlDecodeProcessor]] +=== IngestUrlDecodeProcessor + +[source,ts,subs=+macros] +---- +interface IngestUrlDecodeProcessor extends <<IngestProcessorBase>> { + pass:[/**] @property field The field to decode. */ + field: <<Field>> + pass:[/**] @property ignore_missing If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean + pass:[/**] @property target_field The field to assign the converted value to. By default, the field is updated in-place. */ + target_field?: <<Field>> +} +---- + + +[discrete] +[[IngestUserAgentProcessor]] +=== IngestUserAgentProcessor + +[source,ts,subs=+macros] +---- +interface IngestUserAgentProcessor extends <<IngestProcessorBase>> { + pass:[/**] @property field The field containing the user agent string. */ + field: <<Field>> + pass:[/**] @property ignore_missing If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean + pass:[/**] @property regex_file The name of the file in the `config/ingest-user-agent` directory containing the regular expressions for parsing the user agent string. Both the directory and the file have to be created before starting Elasticsearch. If not specified, ingest-user-agent will use the `regexes.yaml` from uap-core it ships with. */ + regex_file?: string + pass:[/**] @property target_field The field that will be filled with the user agent details. */ + target_field?: <<Field>> + pass:[/**] @property properties Controls what properties are added to `target_field`. */ + properties?: <<IngestUserAgentProperty>>[] + pass:[/**] @property extract_device_type Extracts device type from the user agent string on a best-effort basis. 
*/ + extract_device_type?: boolean +} +---- + + +[discrete] +[[IngestUserAgentProperty]] +=== IngestUserAgentProperty + +[source,ts,subs=+macros] +---- +type IngestUserAgentProperty = 'name' | 'os' | 'device' | 'original' | 'version' +---- + + diff --git a/docs/reference/shared-types/license-types.asciidoc b/docs/reference/shared-types/license-types.asciidoc new file mode 100644 index 000000000..0052ac970 --- /dev/null +++ b/docs/reference/shared-types/license-types.asciidoc @@ -0,0 +1,78 @@ +[[reference-shared-types-license-types]] + +=== `License` types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[LicenseLicense]] +=== LicenseLicense + +[source,ts,subs=+macros] +---- +interface LicenseLicense { + expiry_date_in_millis: <<EpochTime>><<<UnitMillis>>> + issue_date_in_millis: <<EpochTime>><<<UnitMillis>>> + start_date_in_millis?: <<EpochTime>><<<UnitMillis>>> + issued_to: string + issuer: string + max_nodes?: <<long>> | null + max_resource_units?: <<long>> + signature: string + type: <<LicenseLicenseType>> + uid: string +} +---- + + +[discrete] +[[LicenseLicenseStatus]] +=== LicenseLicenseStatus + +[source,ts,subs=+macros] +---- +type LicenseLicenseStatus = 'active' | 'valid' | 'invalid' | 'expired' +---- + + +[discrete] +[[LicenseLicenseType]] +=== LicenseLicenseType + +[source,ts,subs=+macros] +---- +type LicenseLicenseType = 'missing' | 'trial' | 'basic' | 'standard' | 'dev' | 'silver' | 'gold' | 'platinum' | 'enterprise' +---- + + diff --git a/docs/reference/shared-types/logstash-types.asciidoc b/docs/reference/shared-types/logstash-types.asciidoc new file mode 100644 index 000000000..a918a9238 --- /dev/null +++ b/docs/reference/shared-types/logstash-types.asciidoc @@ -0,0 +1,98 @@ +[[reference-shared-types-logstash-types]] + +=== `Logstash` types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[LogstashPipeline]] +=== LogstashPipeline + +[source,ts,subs=+macros] +---- +interface LogstashPipeline { + pass:[/**] @property description Description of the pipeline. This description is not used by Elasticsearch or Logstash. */ + description: string + pass:[/**] @property last_modified Date the pipeline was last updated. Must be in the `yyyy-MM-dd'T'HH:mm:ss.SSSZZ` strict_date_time format. */ + last_modified: <<DateTime>> + pass:[/**] @property pipeline_metadata Optional metadata about the pipeline. May have any contents. This metadata is not generated or used by Elasticsearch or Logstash. */ + pipeline_metadata: <<LogstashPipelineMetadata>> + pass:[/**] @property username User who last updated the pipeline. */ + username: string + pass:[/**] @property pipeline Configuration for the pipeline. */ + pipeline: string + pass:[/**] @property pipeline_settings Settings for the pipeline. Supports only flat keys in dot notation. */ + pipeline_settings: <<LogstashPipelineSettings>> +} +---- + + +[discrete] +[[LogstashPipelineMetadata]] +=== LogstashPipelineMetadata + +[source,ts,subs=+macros] +---- +interface LogstashPipelineMetadata { + type: string + version: string +} +---- + + +[discrete] +[[LogstashPipelineSettings]] +=== LogstashPipelineSettings + +[source,ts,subs=+macros] +---- +interface LogstashPipelineSettings { + pass:[/**] @property 'pipeline.workers' The number of workers that will, in parallel, execute the filter and output stages of the pipeline. */ + 'pipeline.workers': <<integer>> + pass:[/**] @property 'pipeline.batch.size' The maximum number of events an individual worker thread will collect from inputs before attempting to execute its filters and outputs. */ + 'pipeline.batch.size': <<integer>> + pass:[/**] @property 'pipeline.batch.delay' When creating pipeline event batches, how <<long>> in milliseconds to wait for each event before dispatching an undersized batch to pipeline workers. */ + 'pipeline.batch.delay': <<integer>> + pass:[/**] @property 'queue.type' The internal queuing model to use for event buffering. */ + 'queue.type': string + pass:[/**] @property 'queue.max_bytes.number' The total capacity of the queue (`queue.type: persisted`) in number of bytes. */ + 'queue.max_bytes.number': <<integer>> + pass:[/**] @property 'queue.max_bytes.units' The total capacity of the queue (`queue.type: persisted`) in terms of units of bytes. */ + 'queue.max_bytes.units': string + pass:[/**] @property 'queue.checkpoint.writes' The maximum number of written events before forcing a checkpoint when persistent queues are enabled (`queue.type: persisted`). 
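For example (an illustrative sketch; the values are hypothetical), a persisted-queue configuration might look like `{ 'pipeline.workers': 2, 'pipeline.batch.size': 125, 'pipeline.batch.delay': 50, 'queue.type': 'persisted', 'queue.max_bytes.number': 1, 'queue.max_bytes.units': 'gb', 'queue.checkpoint.writes': 1024 }`.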
*/ + 'queue.checkpoint.writes': <<integer>> +} +---- + + diff --git a/docs/reference/shared-types/ml-types.asciidoc b/docs/reference/shared-types/ml-types.asciidoc new file mode 100644 index 000000000..18c3dd319 --- /dev/null +++ b/docs/reference/shared-types/ml-types.asciidoc @@ -0,0 +1,3165 @@ +[[reference-shared-types-ml-types]] + +=== `Ml` types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[MlAnalysisConfig]] +=== MlAnalysisConfig + +[source,ts,subs=+macros] +---- +interface MlAnalysisConfig { + pass:[/**] @property bucket_span The size of the interval that the analysis is aggregated into, typically between `5m` and `1h`. This value should be either a whole number of days or equate to a whole number of buckets in one day. If the anomaly detection job uses a datafeed with aggregations, this value must also be divisible by the interval of the date histogram aggregation. */ + bucket_span?: <<Duration>> + pass:[/**] @property categorization_analyzer If `categorization_field_name` is specified, you can also define the analyzer that is used to interpret the categorization field. This property cannot be used at the same time as `categorization_filters`. The categorization analyzer specifies how the `categorization_field` is interpreted by the categorization process. The `categorization_analyzer` field can be specified either as a string or as an object. If it is a string, it must refer to a built-in analyzer or one added by another plugin. */ + categorization_analyzer?: <<MlCategorizationAnalyzer>> + pass:[/**] @property categorization_field_name If this property is specified, the values of the specified field will be categorized. The resulting categories must be used in a detector by setting `by_field_name`, `over_field_name`, or `partition_field_name` to the keyword `mlcategory`. */ + categorization_field_name?: <<Field>> + pass:[/**] @property categorization_filters If `categorization_field_name` is specified, you can also define optional filters. This property expects an array of regular expressions. The expressions are used to filter out matching sequences from the categorization field values. You can use this functionality to fine tune the categorization by excluding sequences from consideration when categories are defined. For example, you can exclude SQL statements that appear in your log files. This property cannot be used at the same time as `categorization_analyzer`. 
If you only want to define simple regular expression filters that are applied prior to tokenization, setting this property is the easiest method. If you also want to customize the tokenizer or post-tokenization filtering, use the `categorization_analyzer` property instead and include the filters as pattern_replace character filters. The effect is exactly the same. */ + categorization_filters?: string[] + pass:[/**] @property detectors Detector configuration objects specify which data fields a job analyzes. They also specify which analytical functions are used. You can specify multiple detectors for a job. If the detectors array does not contain at least one detector, no analysis can occur and an error is returned. */ + detectors: <<MlDetector>>[] + pass:[/**] @property influencers A comma separated list of influencer field names. Typically these can be the by, over, or partition fields that are used in the detector configuration. You might also want to use a field name that is not specifically named in a detector, but is available as part of the input data. When you use multiple detectors, the use of influencers is recommended as it aggregates results for each influencer entity. */ + influencers?: <<Field>>[] + pass:[/**] @property latency The size of the window in which to expect data that is out of time order. If you specify a non-zero value, it must be greater than or equal to one second. NOTE: Latency is applicable only when you send data by using the post data API. */ + latency?: <<Duration>> + pass:[/**] @property model_prune_window Advanced configuration option. Affects the pruning of models that have not been updated for the given time duration. The value must be set to a multiple of the `bucket_span`. If set too low, important information may be removed from the model. For jobs created in 8.1 and later, the default value is the greater of `30d` or 20 times `bucket_span`. */ + model_prune_window?: <<Duration>> + pass:[/**] @property multivariate_by_fields This functionality is reserved for internal use. It is not supported for use in customer environments and is not subject to the support SLA of official GA features. If set to `true`, the analysis will automatically find correlations between metrics for a given by field value and report anomalies when those correlations cease to hold. For example, suppose CPU and memory usage on host A is usually highly correlated with the same metrics on host B. Perhaps this correlation occurs because they are running a load-balanced application. If you enable this property, anomalies will be reported when, for example, CPU usage on host A is high and the value of CPU usage on host B is low. That is to say, you’ll see an anomaly when the CPU of host A is unusual given the CPU of host B. To use the `multivariate_by_fields` property, you must also specify `by_field_name` in your detector. */ + multivariate_by_fields?: boolean + pass:[/**] @property per_partition_categorization Settings related to how categorization interacts with partition fields. */ + per_partition_categorization?: <<MlPerPartitionCategorization>> + pass:[/**] @property summary_count_field_name If this property is specified, the data that is fed to the job is expected to be pre-summarized. This property value is the name of the field that contains the count of raw data points that have been summarized. The same `summary_count_field_name` applies to all detectors in the job. NOTE: The `summary_count_field_name` property cannot be used with the `metric` function. 
*/ + summary_count_field_name?: <<Field>> +} +---- + + +[discrete] +[[MlAnalysisConfigRead]] +=== MlAnalysisConfigRead + +[source,ts,subs=+macros] +---- +interface MlAnalysisConfigRead { + pass:[/**] @property bucket_span The size of the interval that the analysis is aggregated into, typically between `5m` and `1h`. */ + bucket_span: <<Duration>> + pass:[/**] @property categorization_analyzer If `categorization_field_name` is specified, you can also define the analyzer that is used to interpret the categorization field. This property cannot be used at the same time as `categorization_filters`. The categorization analyzer specifies how the `categorization_field` is interpreted by the categorization process. */ + categorization_analyzer?: <<MlCategorizationAnalyzer>> + pass:[/**] @property categorization_field_name If this property is specified, the values of the specified field will be categorized. The resulting categories must be used in a detector by setting `by_field_name`, `over_field_name`, or `partition_field_name` to the keyword `mlcategory`. */ + categorization_field_name?: <<Field>> + pass:[/**] @property categorization_filters If `categorization_field_name` is specified, you can also define optional filters. This property expects an array of regular expressions. The expressions are used to filter out matching sequences from the categorization field values. */ + categorization_filters?: string[] + pass:[/**] @property detectors An array of detector configuration objects. Detector configuration objects specify which data fields a job analyzes. They also specify which analytical functions are used. You can specify multiple detectors for a job. */ + detectors: <<MlDetectorRead>>[] + pass:[/**] @property influencers A comma separated list of influencer field names. Typically these can be the by, over, or partition fields that are used in the detector configuration. You might also want to use a field name that is not specifically named in a detector, but is available as part of the input data. When you use multiple detectors, the use of influencers is recommended as it aggregates results for each influencer entity. */ + influencers: <<Field>>[] + pass:[/**] @property model_prune_window Advanced configuration option. Affects the pruning of models that have not been updated for the given time duration. The value must be set to a multiple of the `bucket_span`. If set too low, important information may be removed from the model. Typically, set to `30d` or longer. If not set, model pruning only occurs if the model memory status reaches the soft limit or the hard limit. For jobs created in 8.1 and later, the default value is the greater of `30d` or 20 times `bucket_span`. */ + model_prune_window?: <<Duration>> + pass:[/**] @property latency The size of the window in which to expect data that is out of time order. Defaults to no latency. If you specify a non-zero value, it must be greater than or equal to one second. */ + latency?: <<Duration>> + pass:[/**] @property multivariate_by_fields This functionality is reserved for internal use. It is not supported for use in customer environments and is not subject to the support SLA of official GA features. If set to `true`, the analysis will automatically find correlations between metrics for a given by field value and report anomalies when those correlations cease to hold. */ + multivariate_by_fields?: boolean + pass:[/**] @property per_partition_categorization Settings related to how categorization interacts with partition fields. 
*/ + per_partition_categorization?: <<MlPerPartitionCategorization>> + pass:[/**] @property summary_count_field_name If this property is specified, the data that is fed to the job is expected to be pre-summarized. This property value is the name of the field that contains the count of raw data points that have been summarized. The same `summary_count_field_name` applies to all detectors in the job. */ + summary_count_field_name?: <<Field>> +} +---- + + +[discrete] +[[MlAnalysisLimits]] +=== MlAnalysisLimits + +[source,ts,subs=+macros] +---- +interface MlAnalysisLimits { + pass:[/**] @property categorization_examples_limit The maximum number of examples stored per category in memory and in the results data store. If you increase this value, more examples are available, however it requires that you have more storage available. If you set this value to 0, no examples are stored. NOTE: The `categorization_examples_limit` applies only to analysis that uses categorization. */ + categorization_examples_limit?: <<long>> + pass:[/**] @property model_memory_limit The approximate maximum amount of memory resources that are required for analytical processing. Once this limit is approached, data pruning becomes more aggressive. Upon exceeding this limit, new entities are not modeled. If the `xpack.ml.max_model_memory_limit` setting has a value greater than 0 and less than 1024mb, that value is used instead of the default. The default value is relatively small to ensure that high resource usage is a conscious decision. If you have jobs that are expected to analyze high cardinality fields, you will likely need to use a higher value. If you specify a number instead of a string, the units are assumed to be MiB. Specifying a string is recommended for clarity. If you specify a <<byte>> size unit of `b` or `kb` and the number does not equate to a discrete number of megabytes, it is rounded down to the closest MiB. The minimum valid value is 1 MiB. If you specify a value less than 1 MiB, an error occurs. If you specify a value for the `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to create jobs that have `model_memory_limit` values greater than that setting value. */ + model_memory_limit?: string +} +---- + + +[discrete] +[[MlAnalysisMemoryLimit]] +=== MlAnalysisMemoryLimit + +[source,ts,subs=+macros] +---- +interface MlAnalysisMemoryLimit { + pass:[/**] @property model_memory_limit Limits can be applied for the resources required to hold the mathematical models in memory. These limits are approximate and can be set per job. They do not control the memory used by other processes, for example the Elasticsearch Java processes. */ + model_memory_limit: string +} +---- + + +[discrete] +[[MlAnomaly]] +=== MlAnomaly + +[source,ts,subs=+macros] +---- +interface MlAnomaly { + pass:[/**] @property actual The actual value for the bucket. */ + actual?: <<double>>[] + pass:[/**] @property anomaly_score_explanation Information about the factors impacting the initial anomaly score. */ + anomaly_score_explanation?: <<MlAnomalyExplanation>> + pass:[/**] @property bucket_span The length of the bucket in seconds. This value matches the `bucket_span` that is specified in the job. */ + bucket_span: <<DurationValue>><<<UnitSeconds>>> + pass:[/**] @property by_field_name The field used to split the data. In particular, this property is used for analyzing the splits with respect to their own history. It is used for finding unusual values in the context of the split. 
*/ + by_field_name?: string + pass:[/**] @property by_field_value The value of `by_field_name`. */ + by_field_value?: string + pass:[/**] @property causes For population analysis, an over field must be specified in the detector. This property contains an array of anomaly records that are the causes for the anomaly that has been identified for the over field. This sub-resource contains the most anomalous records for the `over_field_name`. For scalability reasons, a maximum of the 10 most significant causes of the anomaly are returned. As part of the core analytical modeling, these low-level anomaly records are aggregated for their parent over field record. The `causes` resource contains similar elements to the record resource, namely `actual`, `typical`, `geo_results.actual_point`, `geo_results.typical_point`, `*_field_name` and `*_field_value`. Probability and scores are not applicable to causes. */ + causes?: <<MlAnomalyCause>>[] + pass:[/**] @property detector_index A unique identifier for the detector. */ + detector_index: <<integer>> + pass:[/**] @property field_name Certain functions require a field to operate on, for example, `sum()`. For those functions, this value is the name of the field to be analyzed. */ + field_name?: string + pass:[/**] @property function The function in which the anomaly occurs, as specified in the detector configuration. For example, `max`. */ + function?: string + pass:[/**] @property function_description The description of the function in which the anomaly occurs, as specified in the detector configuration. */ + function_description?: string + pass:[/**] @property geo_results If the detector function is `lat_long`, this object contains comma delimited strings for the latitude and longitude of the actual and typical values. */ + geo_results?: <<MlGeoResults>> + pass:[/**] @property influencers If influencers were specified in the detector configuration, this array contains influencers that contributed to or were to blame for an anomaly. */ + influencers?: <<MlInfluence>>[] + pass:[/**] @property initial_record_score A normalized score between 0-100, which is based on the probability of the anomalousness of this record. This is the initial value that was calculated at the time the bucket was processed. */ + initial_record_score: <<double>> + pass:[/**] @property is_interim If true, this is an interim result. In other words, the results are calculated based on partial input data. */ + is_interim: boolean + pass:[/**] @property job_id Identifier for the anomaly detection job. */ + job_id: string + pass:[/**] @property over_field_name The field used to split the data. In particular, this property is used for analyzing the splits with respect to the history of all splits. It is used for finding unusual values in the population of all splits. */ + over_field_name?: string + pass:[/**] @property over_field_value The value of `over_field_name`. */ + over_field_value?: string + pass:[/**] @property partition_field_name The field used to segment the analysis. When you use this property, you have completely independent baselines for each value of this field. */ + partition_field_name?: string + pass:[/**] @property partition_field_value The value of `partition_field_name`. */ + partition_field_value?: string + pass:[/**] @property probability The probability of the individual anomaly occurring, in the range 0 to 1. For example, `0.0000772031`. 
This value can be held to a high precision of over 300 decimal places, so the `record_score` is provided as a human-readable and friendly interpretation of this. */ + probability: <<double>> + pass:[/**] @property record_score A normalized score between 0-100, which is based on the probability of the anomalousness of this record. Unlike `initial_record_score`, this value will be updated by a re-normalization process as new data is analyzed. */ + record_score: <<double>> + pass:[/**] @property result_type Internal. This is always set to `record`. */ + result_type: string + pass:[/**] @property timestamp The start time of the bucket for which these results were calculated. */ + timestamp: <<EpochTime>><<<UnitMillis>>> + pass:[/**] @property typical The typical value for the bucket, according to analytical modeling. */ + typical?: <<double>>[] +} +---- + + +[discrete] +[[MlAnomalyCause]] +=== MlAnomalyCause + +[source,ts,subs=+macros] +---- +interface MlAnomalyCause { + actual: <<double>>[] + by_field_name: <<Name>> + by_field_value: string + correlated_by_field_value: string + field_name: <<Field>> + function: string + function_description: string + influencers: <<MlInfluence>>[] + over_field_name: <<Name>> + over_field_value: string + partition_field_name: string + partition_field_value: string + probability: <<double>> + typical: <<double>>[] +} +---- + + +[discrete] +[[MlAnomalyExplanation]] +=== MlAnomalyExplanation + +[source,ts,subs=+macros] +---- +interface MlAnomalyExplanation { + pass:[/**] @property anomaly_characteristics_impact Impact from the duration and magnitude of the detected anomaly relative to the historical average. */ + anomaly_characteristics_impact?: <<integer>> + pass:[/**] @property anomaly_length Length of the detected anomaly in the number of buckets. */ + anomaly_length?: <<integer>> + pass:[/**] @property anomaly_type Type of the detected anomaly: `spike` or `dip`. */ + anomaly_type?: string + pass:[/**] @property high_variance_penalty Indicates reduction of anomaly score for the bucket with large confidence intervals. If a bucket has large confidence intervals, the score is reduced. */ + high_variance_penalty?: boolean + pass:[/**] @property incomplete_bucket_penalty If the bucket contains fewer samples than expected, the score is reduced. */ + incomplete_bucket_penalty?: boolean + pass:[/**] @property lower_confidence_bound Lower bound of the 95% confidence interval. */ + lower_confidence_bound?: <<double>> + pass:[/**] @property multi_bucket_impact Impact of the deviation between actual and typical values in the past 12 buckets. */ + multi_bucket_impact?: <<integer>> + pass:[/**] @property single_bucket_impact Impact of the deviation between actual and typical values in the current bucket. */ + single_bucket_impact?: <<integer>> + pass:[/**] @property typical_value Typical (expected) value for this bucket. */ + typical_value?: <<double>> + pass:[/**] @property upper_confidence_bound Upper bound of the 95% confidence interval. */ + upper_confidence_bound?: <<double>> +} +---- + + +[discrete] +[[MlApiKeyAuthorization]] +=== MlApiKeyAuthorization + +[source,ts,subs=+macros] +---- +interface MlApiKeyAuthorization { + pass:[/**] @property id The identifier for the API key. */ + id: string + pass:[/**] @property name The name of the API key. 
*/ + name: string +} +---- + + +[discrete] +[[MlAppliesTo]] +=== MlAppliesTo + +[source,ts,subs=+macros] +---- +type MlAppliesTo = 'actual' | 'typical' | 'diff_from_typical' | 'time' +---- + + +[discrete] +[[MlBucketInfluencer]] +=== MlBucketInfluencer + +[source,ts,subs=+macros] +---- +interface MlBucketInfluencer { + pass:[/**] @property anomaly_score A normalized score between 0-100, which is calculated for each bucket influencer. This score might be updated as newer data is analyzed. */ + anomaly_score: <<double>> + pass:[/**] @property bucket_span The length of the bucket in seconds. This value matches the bucket span that is specified in the job. */ + bucket_span: <<DurationValue>><<<UnitSeconds>>> + pass:[/**] @property influencer_field_name The field name of the influencer. */ + influencer_field_name: <<Field>> + pass:[/**] @property initial_anomaly_score The score between 0-100 for each bucket influencer. This score is the initial value that was calculated at the time the bucket was processed. */ + initial_anomaly_score: <<double>> + pass:[/**] @property is_interim If true, this is an interim result. In other words, the results are calculated based on partial input data. */ + is_interim: boolean + pass:[/**] @property job_id Identifier for the anomaly detection job. */ + job_id: <<Id>> + pass:[/**] @property probability The probability that the bucket has this behavior, in the range 0 to 1. This value can be held to a high precision of over 300 decimal places, so the `anomaly_score` is provided as a human-readable and friendly interpretation of this. */ + probability: <<double>> + pass:[/**] @property raw_anomaly_score Internal. */ + raw_anomaly_score: <<double>> + pass:[/**] @property result_type Internal. This value is always set to `bucket_influencer`. */ + result_type: string + pass:[/**] @property timestamp The start time of the bucket for which these results were calculated. */ + timestamp: <<EpochTime>><<<UnitMillis>>> + pass:[/**] @property timestamp_string The start time of the bucket for which these results were calculated. */ + timestamp_string?: <<DateTime>> +} +---- + + +[discrete] +[[MlBucketSummary]] +=== MlBucketSummary + +[source,ts,subs=+macros] +---- +interface MlBucketSummary { + pass:[/**] @property anomaly_score The maximum anomaly score, between 0-100, for any of the bucket influencers. This is an overall, rate-limited score for the job. All the anomaly records in the bucket contribute to this score. This value might be updated as new data is analyzed. */ + anomaly_score: <<double>> + bucket_influencers: <<MlBucketInfluencer>>[] + pass:[/**] @property bucket_span The length of the bucket in seconds. This value matches the bucket span that is specified in the job. */ + bucket_span: <<DurationValue>><<<UnitSeconds>>> + pass:[/**] @property event_count The number of input data records processed in this bucket. */ + event_count: <<long>> + pass:[/**] @property initial_anomaly_score The maximum anomaly score for any of the bucket influencers. This is the initial value that was calculated at the time the bucket was processed. */ + initial_anomaly_score: <<double>> + pass:[/**] @property is_interim If true, this is an interim result. In other words, the results are calculated based on partial input data. */ + is_interim: boolean + pass:[/**] @property job_id Identifier for the anomaly detection job. */ + job_id: <<Id>> + pass:[/**] @property processing_time_ms The amount of time, in milliseconds, that it took to analyze the bucket contents and calculate results. 
*/ + processing_time_ms: <<DurationValue>><<<UnitMillis>>> + pass:[/**] @property result_type Internal. This value is always set to bucket. */ + result_type: string + pass:[/**] @property timestamp The start time of the bucket. This timestamp uniquely identifies the bucket. Events that occur exactly at the timestamp of the bucket are included in the results for the bucket. */ + timestamp: <<EpochTime>><<<UnitMillis>>> + pass:[/**] @property timestamp_string The start time of the bucket. This timestamp uniquely identifies the bucket. Events that occur exactly at the timestamp of the bucket are included in the results for the bucket. */ + timestamp_string?: <<DateTime>> +} +---- + + +[discrete] +[[MlCalendarEvent]] +=== MlCalendarEvent + +[source,ts,subs=+macros] +---- +interface MlCalendarEvent { + pass:[/**] @property calendar_id A string that uniquely identifies a calendar. */ + calendar_id?: <<Id>> + event_id?: <<Id>> + pass:[/**] @property description A description of the scheduled event. */ + description: string + pass:[/**] @property end_time The timestamp for the end of the scheduled event in milliseconds since the epoch or ISO 8601 format. */ + end_time: <<DateTime>> + pass:[/**] @property start_time The timestamp for the beginning of the scheduled event in milliseconds since the epoch or ISO 8601 format. */ + start_time: <<DateTime>> + pass:[/**] @property skip_result When true the model will not create results for this calendar period. */ + skip_result?: boolean + pass:[/**] @property skip_model_update When true the model will not be updated for this calendar period. */ + skip_model_update?: boolean + pass:[/**] @property force_time_shift Shift time by this many seconds. For example adjust time for daylight savings changes */ + force_time_shift?: <<integer>> +} +---- + + +[discrete] +[[MlCategorizationAnalyzer]] +=== MlCategorizationAnalyzer + +[source,ts,subs=+macros] +---- +type MlCategorizationAnalyzer = string | <<MlCategorizationAnalyzerDefinition>> +---- + + +[discrete] +[[MlCategorizationAnalyzerDefinition]] +=== MlCategorizationAnalyzerDefinition + +[source,ts,subs=+macros] +---- +interface MlCategorizationAnalyzerDefinition { + pass:[/**] @property char_filter One or more character filters. In addition to the built-in character filters, other plugins can provide more character filters. If this property is not specified, no character filters are applied prior to categorization. If you are customizing some other aspect of the analyzer and you need to achieve the equivalent of `categorization_filters` (which are not permitted when some other aspect of the analyzer is customized), add them here as pattern replace character filters. */ + char_filter?: <<AnalysisCharFilter>>[] + pass:[/**] @property filter One or more token filters. In addition to the built-in token filters, other plugins can provide more token filters. If this property is not specified, no token filters are applied prior to categorization. */ + filter?: <<AnalysisTokenFilter>>[] + pass:[/**] @property tokenizer The name or definition of the tokenizer to use after character filters are applied. This property is compulsory if `categorization_analyzer` is specified as an object. Machine learning provides a tokenizer called `ml_standard` that tokenizes in a way that has been determined to produce good categorization results on a variety of log file formats for logs in English. 
If you want to use that tokenizer but change the character or token filters, specify "tokenizer": "ml_standard" in your `categorization_analyzer`. Additionally, the `ml_classic` tokenizer is available, which tokenizes in the same way as the non-customizable tokenizer in old versions of the product (before 6.2). `ml_classic` was the default categorization tokenizer in versions 6.2 to 7.13, so if you need categorization identical to the default for jobs created in these versions, specify "tokenizer": "ml_classic" in your `categorization_analyzer`. */ + tokenizer?: <<AnalysisTokenizer>> +} +---- + + +[discrete] +[[MlCategorizationStatus]] +=== MlCategorizationStatus + +[source,ts,subs=+macros] +---- +type MlCategorizationStatus = 'ok' | 'warn' +---- + + +[discrete] +[[MlCategory]] +=== MlCategory + +[source,ts,subs=+macros] +---- +interface MlCategory { + pass:[/**] @property category_id A unique identifier for the category. category_id is unique at the job level, even when per-partition categorization is enabled. */ + category_id: <<ulong>> + pass:[/**] @property examples A list of examples of actual values that matched the category. */ + examples: string[] + pass:[/**] @property grok_pattern [experimental] A Grok pattern that could be used in Logstash or an ingest pipeline to extract fields from messages that match the category. This field is experimental and may be changed or removed in a future release. The Grok patterns that are found are not optimal, but are often a good starting point for manual tweaking. */ + grok_pattern?: <<GrokPattern>> + pass:[/**] @property job_id Identifier for the anomaly detection job. */ + job_id: <<Id>> + pass:[/**] @property max_matching_length The maximum length of the fields that matched the category. The value is increased by 10% to enable matching for similar fields that have not been analyzed. */ + max_matching_length: <<ulong>> + pass:[/**] @property partition_field_name If per-partition categorization is enabled, this property identifies the field used to segment the categorization. It is not present when per-partition categorization is disabled. */ + partition_field_name?: string + pass:[/**] @property partition_field_value If per-partition categorization is enabled, this property identifies the value of the partition_field_name for the category. It is not present when per-partition categorization is disabled. */ + partition_field_value?: string + pass:[/**] @property regex A regular expression that is used to search for values that match the category. */ + regex: string + pass:[/**] @property terms A space separated list of the common tokens that are matched in values of the category. */ + terms: string + pass:[/**] @property num_matches The number of messages that have been matched by this category. This is only guaranteed to have the latest accurate count after a job _flush or _close */ + num_matches?: <<long>> + pass:[/**] @property preferred_to_categories A list of category_id entries that this current category encompasses. Any new message that is processed by the categorizer will match against this category and not any of the categories in this list. 
This is only guaranteed to have the latest accurate list of categories after a job _flush or _close */ + preferred_to_categories?: <<Id>>[] + p?: string + result_type: string + mlcategory: string +} +---- + + +[discrete] +[[MlChunkingConfig]] +=== MlChunkingConfig + +[source,ts,subs=+macros] +---- +interface MlChunkingConfig { + pass:[/**] @property mode If the mode is `auto`, the chunk size is dynamically calculated; this is the recommended value when the datafeed does not use aggregations. If the mode is `manual`, chunking is applied according to the specified `time_span`; use this mode when the datafeed uses aggregations. If the mode is `off`, no chunking is applied. */ + mode: <<MlChunkingMode>> + pass:[/**] @property time_span The time span that each search will be querying. This setting is applicable only when the `mode` is set to `manual`. */ + time_span?: <<Duration>> +} +---- + + +[discrete] +[[MlChunkingMode]] +=== MlChunkingMode + +[source,ts,subs=+macros] +---- +type MlChunkingMode = 'auto' | 'manual' | 'off' +---- + + +[discrete] +[[MlClassificationInferenceOptions]] +=== MlClassificationInferenceOptions + +[source,ts,subs=+macros] +---- +interface MlClassificationInferenceOptions { + pass:[/**] @property num_top_classes Specifies the number of top class predictions to return. Defaults to 0. */ + num_top_classes?: <<integer>> + pass:[/**] @property num_top_feature_importance_values Specifies the maximum number of feature importance values per document. */ + num_top_feature_importance_values?: <<integer>> + pass:[/**] @property prediction_field_type Specifies the type of the predicted field to write. Acceptable values are: string, number, boolean. When boolean is provided 1.0 is transformed to true and 0.0 to false. */ + prediction_field_type?: string + pass:[/**] @property results_field The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ + results_field?: string + pass:[/**] @property top_classes_results_field Specifies the field to which the top classes are written. Defaults to top_classes. */ + top_classes_results_field?: string +} +---- + + +[discrete] +[[MlConditionOperator]] +=== MlConditionOperator + +[source,ts,subs=+macros] +---- +type MlConditionOperator = 'gt' | 'gte' | 'lt' | 'lte' +---- + + +[discrete] +[[MlCustomSettings]] +=== MlCustomSettings + +[source,ts,subs=+macros] +---- +type MlCustomSettings = any +---- + + +[discrete] +[[MlDataCounts]] +=== MlDataCounts + +[source,ts,subs=+macros] +---- +interface MlDataCounts { + bucket_count: <<long>> + earliest_record_timestamp?: <<long>> + empty_bucket_count: <<long>> + input_bytes: <<long>> + input_field_count: <<long>> + input_record_count: <<long>> + invalid_date_count: <<long>> + job_id: <<Id>> + last_data_time?: <<long>> + latest_empty_bucket_timestamp?: <<long>> + latest_record_timestamp?: <<long>> + latest_sparse_bucket_timestamp?: <<long>> + latest_bucket_timestamp?: <<long>> + log_time?: <<long>> + missing_field_count: <<long>> + out_of_order_timestamp_count: <<long>> + processed_field_count: <<long>> + processed_record_count: <<long>> + sparse_bucket_count: <<long>> +} +---- + + +[discrete] +[[MlDataDescription]] +=== MlDataDescription + +[source,ts,subs=+macros] +---- +interface MlDataDescription { + pass:[/**] @property format Only JSON format is supported at this time. */ + format?: string + pass:[/**] @property time_field The name of the field that contains the timestamp. 
*/ + time_field?: <<Field>> + pass:[/**] @property time_format The time format, which can be `epoch`, `epoch_ms`, or a custom pattern. The value `epoch` refers to UNIX or Epoch time (the number of seconds since 1 Jan 1970). The value `epoch_ms` indicates that time is measured in milliseconds since the epoch. The `epoch` and `epoch_ms` time formats accept either <<integer>> or real values. Custom patterns must conform to the Java DateTimeFormatter class. When you use date-time formatting patterns, it is recommended that you provide the full date, time and time zone. For example: `yyyy-MM-dd'T'HH:mm:ssX`. If the pattern that you specify is not sufficient to produce a complete timestamp, job creation fails. */ + time_format?: string + field_delimiter?: string +} +---- + + +[discrete] +[[MlDatafeed]] +=== MlDatafeed + +[source,ts,subs=+macros] +---- +interface MlDatafeed { + aggregations?: Record<string, <<AggregationsAggregationContainer>>> + aggs?: Record<string, <<AggregationsAggregationContainer>>> + pass:[/**] @property authorization The security privileges that the datafeed uses to run its queries. If Elastic Stack security features were disabled at the time of the most recent update to the datafeed, this property is omitted. */ + authorization?: <<MlDatafeedAuthorization>> + chunking_config?: <<MlChunkingConfig>> + datafeed_id: <<Id>> + frequency?: <<Duration>> + indices: string[] + indexes?: string[] + job_id: <<Id>> + max_empty_searches?: <<integer>> + query: <<QueryDslQueryContainer>> + query_delay?: <<Duration>> + script_fields?: Record<string, <<ScriptField>>> + scroll_size?: <<integer>> + delayed_data_check_config: <<MlDelayedDataCheckConfig>> + runtime_mappings?: <<MappingRuntimeFields>> + indices_options?: <<IndicesOptions>> +} +---- + + +[discrete] +[[MlDatafeedAuthorization]] +=== MlDatafeedAuthorization + +[source,ts,subs=+macros] +---- +interface MlDatafeedAuthorization { + pass:[/**] @property api_key If an API key was used for the most recent update to the datafeed, its name and identifier are listed in the response. */ + api_key?: <<MlApiKeyAuthorization>> + pass:[/**] @property roles If a user ID was used for the most recent update to the datafeed, its roles at the time of the update are listed in the response. */ + roles?: string[] + pass:[/**] @property service_account If a service account was used for the most recent update to the datafeed, the account name is listed in the response. */ + service_account?: string +} +---- + + +[discrete] +[[MlDatafeedConfig]] +=== MlDatafeedConfig + +[source,ts,subs=+macros] +---- +interface MlDatafeedConfig { + pass:[/**] @property aggregations If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data. */ + aggregations?: Record<string, <<AggregationsAggregationContainer>>> + pass:[/**] @property aggs If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data. */ + aggs?: Record<string, <<AggregationsAggregationContainer>>> + pass:[/**] @property chunking_config Datafeeds might be required to search over <<long>> time periods, for several months or years. This search is split into time chunks in order to ensure the load on Elasticsearch is managed. Chunking configuration controls how the size of these time chunks are calculated and is an advanced configuration option. 
*/ + chunking_config?: <<MlChunkingConfig>> + pass:[/**] @property datafeed_id A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. The default value is the job identifier. */ + datafeed_id?: <<Id>> + pass:[/**] @property delayed_data_check_config Specifies whether the datafeed checks for missing data and the size of the window. The datafeed can optionally search over indices that have already been read in an effort to determine whether any data has subsequently been added to the index. If missing data is found, it is a good indication that the `query_delay` option is set too low and the data is being indexed after the datafeed has passed that moment in time. This check runs only on real-time datafeeds. */ + delayed_data_check_config?: <<MlDelayedDataCheckConfig>> + pass:[/**] @property frequency The interval at which scheduled queries are made while the datafeed runs in real time. The default value is either the bucket span for <<short>> bucket spans, or, for longer bucket spans, a sensible fraction of the bucket span. For example: `150s`. When `frequency` is shorter than the bucket span, interim results for the last (partial) bucket are written then eventually overwritten by the full bucket results. If the datafeed uses aggregations, this value must be divisible by the interval of the date histogram aggregation. */ + frequency?: <<Duration>> + pass:[/**] @property indices An array of index names. Wildcards are supported. If any indices are in remote clusters, the machine learning nodes must have the `remote_cluster_client` role. */ + indices?: <<Indices>> + pass:[/**] @property indexes An array of index names. Wildcards are supported. If any indices are in remote clusters, the machine learning nodes must have the `remote_cluster_client` role. */ + indexes?: <<Indices>> + pass:[/**] @property indices_options Specifies index expansion options that are used during search. */ + indices_options?: <<IndicesOptions>> + job_id?: <<Id>> + pass:[/**] @property max_empty_searches If a real-time datafeed has never seen any data (including during any initial training period) then it will automatically stop itself and close its associated job after this many real-time searches that return no documents. In other words, it will stop after `frequency` times `max_empty_searches` of real-time operation. If not set then a datafeed with no end time that sees no data will remain started until it is explicitly stopped. */ + max_empty_searches?: <<integer>> + pass:[/**] @property query The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this object is passed verbatim to Elasticsearch. */ + query?: <<QueryDslQueryContainer>> + pass:[/**] @property query_delay The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default value is randomly selected between `60s` and `120s`. This randomness improves the query performance when there are multiple jobs running on the same node. */ + query_delay?: <<Duration>> + pass:[/**] @property runtime_mappings Specifies runtime fields for the datafeed search. 
*/ + runtime_mappings?: <<MappingRuntimeFields>> + pass:[/**] @property script_fields Specifies scripts that evaluate custom expressions and returns script fields to the datafeed. The detector configuration objects in a job can contain functions that use these script fields. */ + script_fields?: Record<string, <<ScriptField>>> + pass:[/**] @property scroll_size The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. The maximum value is the value of `index.max_result_window`, which is 10,000 by default. */ + scroll_size?: <<integer>> +} +---- + + +[discrete] +[[MlDatafeedRunningState]] +=== MlDatafeedRunningState + +[source,ts,subs=+macros] +---- +interface MlDatafeedRunningState { + pass:[/**] @property real_time_configured Indicates if the datafeed is "real-time"; meaning that the datafeed has no configured `end` time. */ + real_time_configured: boolean + pass:[/**] @property real_time_running Indicates whether the datafeed has finished running on the available past data. For datafeeds without a configured `end` time, this means that the datafeed is now running on "real-time" data. */ + real_time_running: boolean + pass:[/**] @property search_interval Provides the latest time interval the datafeed has searched. */ + search_interval?: <<MlRunningStateSearchInterval>> +} +---- + + +[discrete] +[[MlDatafeedState]] +=== MlDatafeedState + +[source,ts,subs=+macros] +---- +type MlDatafeedState = 'started' | 'stopped' | 'starting' | 'stopping' +---- + + +[discrete] +[[MlDatafeedStats]] +=== MlDatafeedStats + +[source,ts,subs=+macros] +---- +interface MlDatafeedStats { + pass:[/**] @property assignment_explanation For started datafeeds only, contains messages relating to the selection of a node. */ + assignment_explanation?: string + pass:[/**] @property datafeed_id A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. */ + datafeed_id: <<Id>> + pass:[/**] @property node For started datafeeds only, this information pertains to the node upon which the datafeed is started. */ + node?: <<MlDiscoveryNode>> + pass:[/**] @property state The status of the datafeed, which can be one of the following values: `starting`, `started`, `stopping`, `stopped`. */ + state: <<MlDatafeedState>> + pass:[/**] @property timing_stats An object that provides statistical information about timing aspect of this datafeed. */ + timing_stats: <<MlDatafeedTimingStats>> + pass:[/**] @property running_state An object containing the running state for this datafeed. It is only provided if the datafeed is started. */ + running_state?: <<MlDatafeedRunningState>> +} +---- + + +[discrete] +[[MlDatafeedTimingStats]] +=== MlDatafeedTimingStats + +[source,ts,subs=+macros] +---- +interface MlDatafeedTimingStats { + pass:[/**] @property bucket_count The number of buckets processed. */ + bucket_count: <<long>> + pass:[/**] @property exponential_average_search_time_per_hour_ms The exponential average search time per hour, in milliseconds. */ + exponential_average_search_time_per_hour_ms: <<DurationValue>><<<UnitFloatMillis>>> + pass:[/**] @property job_id Identifier for the anomaly detection job. */ + job_id: <<Id>> + pass:[/**] @property search_count The number of searches run by the datafeed. */ + search_count: <<long>> + pass:[/**] @property total_search_time_ms The total time the datafeed spent searching, in milliseconds. 
*/ + total_search_time_ms: <<DurationValue>><<<UnitFloatMillis>>> + pass:[/**] @property average_search_time_per_bucket_ms The average search time per bucket, in milliseconds. */ + average_search_time_per_bucket_ms?: <<DurationValue>><<<UnitFloatMillis>>> +} +---- + + +[discrete] +[[MlDataframeAnalysis]] +=== MlDataframeAnalysis + +[source,ts,subs=+macros] +---- +interface MlDataframeAnalysis { + pass:[/**] @property alpha Advanced configuration option. Machine learning uses loss guided tree growing, which means that the decision trees grow where the regularized loss decreases most quickly. This parameter affects loss calculations by acting as a multiplier of the tree depth. Higher alpha values result in shallower trees and faster training times. By default, this value is calculated during hyperparameter optimization. It must be greater than or equal to zero. */ + alpha?: <<double>> + pass:[/**] @property dependent_variable Defines which field of the document is to be predicted. It must match one of the fields in the index being used to train. If this field is missing from a document, then that document will not be used for training, but a prediction with the trained model will be generated for it. It is also known as continuous target variable. For classification analysis, the data type of the field must be numeric (`<<integer>>`, `<<short>>`, `<<long>>`, `<<byte>>`), categorical (`ip` or `keyword`), or `boolean`. There must be no more than 30 different values in this field. For regression analysis, the data type of the field must be numeric. */ + dependent_variable: string + pass:[/**] @property downsample_factor Advanced configuration option. Controls the fraction of data that is used to compute the derivatives of the loss function for tree training. A small value results in the use of a small fraction of the data. If this value is set to be less than 1, accuracy typically improves. However, too small a value may result in poor convergence for the ensemble and so require more trees. By default, this value is calculated during hyperparameter optimization. It must be greater than zero and less than or equal to 1. */ + downsample_factor?: <<double>> + pass:[/**] @property early_stopping_enabled Advanced configuration option. Specifies whether the training process should finish if it is not finding any better performing models. If disabled, the training process can take significantly longer and the chance of finding a better performing model is unremarkable. */ + early_stopping_enabled?: boolean + pass:[/**] @property eta Advanced configuration option. The shrinkage applied to the weights. Smaller values result in larger forests which have a better generalization error. However, larger forests cause slower training. By default, this value is calculated during hyperparameter optimization. It must be a value between 0.001 and 1. */ + eta?: <<double>> + pass:[/**] @property eta_growth_rate_per_tree Advanced configuration option. Specifies the rate at which `eta` increases for each new tree that is added to the forest. For example, a rate of 1.05 increases `eta` by 5% for each extra tree. By default, this value is calculated during hyperparameter optimization. It must be between 0.5 and 2. */ + eta_growth_rate_per_tree?: <<double>> + pass:[/**] @property feature_bag_fraction Advanced configuration option. Defines the fraction of features that will be used when selecting a random bag for each candidate split. By default, this value is calculated during hyperparameter optimization. 
*/ + feature_bag_fraction?: <<double>> + pass:[/**] @property feature_processors Advanced configuration option. A collection of feature preprocessors that modify one or more included fields. The analysis uses the resulting one or more features instead of the original document field. However, these features are ephemeral; they are not stored in the destination index. Multiple `feature_processors` entries can refer to the same document fields. Automatic categorical feature encoding still occurs for the fields that are unprocessed by a custom processor or that have categorical values. Use this property only if you want to override the automatic feature encoding of the specified fields. */ + feature_processors?: <<MlDataframeAnalysisFeatureProcessor>>[] + pass:[/**] @property gamma Advanced configuration option. Regularization parameter to prevent overfitting on the training data set. Multiplies a linear penalty associated with the size of individual trees in the forest. A high gamma value causes training to prefer small trees. A small gamma value results in larger individual trees and slower training. By default, this value is calculated during hyperparameter optimization. It must be a nonnegative value. */ + gamma?: <<double>> + pass:[/**] @property lambda Advanced configuration option. Regularization parameter to prevent overfitting on the training data set. Multiplies an L2 regularization term which applies to leaf weights of the individual trees in the forest. A high lambda value causes training to favor small leaf weights. This behavior makes the prediction function smoother at the expense of potentially not being able to capture relevant relationships between the features and the dependent variable. A small lambda value results in large individual trees and slower training. By default, this value is calculated during hyperparameter optimization. It must be a nonnegative value. */ + lambda?: <<double>> + pass:[/**] @property max_optimization_rounds_per_hyperparameter Advanced configuration option. A multiplier responsible for determining the maximum number of hyperparameter optimization steps in the Bayesian optimization procedure. The maximum number of steps is determined based on the number of undefined hyperparameters times the maximum optimization rounds per hyperparameter. By default, this value is calculated during hyperparameter optimization. */ + max_optimization_rounds_per_hyperparameter?: <<integer>> + pass:[/**] @property max_trees Advanced configuration option. Defines the maximum number of decision trees in the forest. The maximum value is 2000. By default, this value is calculated during hyperparameter optimization. */ + max_trees?: <<integer>> + pass:[/**] @property maximum_number_trees Advanced configuration option. Defines the maximum number of decision trees in the forest. The maximum value is 2000. By default, this value is calculated during hyperparameter optimization. */ + maximum_number_trees?: <<integer>> + pass:[/**] @property num_top_feature_importance_values Advanced configuration option. Specifies the maximum number of feature importance values per document to return. By default, no feature importance calculation occurs. */ + num_top_feature_importance_values?: <<integer>> + pass:[/**] @property prediction_field_name Defines the name of the prediction field in the results. Defaults to `<dependent_variable>_prediction`. 
*/ + prediction_field_name?: <<Field>> + pass:[/**] @property randomize_seed Defines the seed for the random generator that is used to pick training data. By default, it is randomly generated. Set it to a specific value to use the same training data each time you start a job (assuming other related parameters such as `source` and `analyzed_fields` are the same). */ + randomize_seed?: <<double>> + pass:[/**] @property soft_tree_depth_limit Advanced configuration option. Machine learning uses loss guided tree growing, which means that the decision trees grow where the regularized loss decreases most quickly. This soft limit combines with the `soft_tree_depth_tolerance` to penalize trees that exceed the specified depth; the regularized loss increases quickly beyond this depth. By default, this value is calculated during hyperparameter optimization. It must be greater than or equal to 0. */ + soft_tree_depth_limit?: <<integer>> + pass:[/**] @property soft_tree_depth_tolerance Advanced configuration option. This option controls how quickly the regularized loss increases when the tree depth exceeds `soft_tree_depth_limit`. By default, this value is calculated during hyperparameter optimization. It must be greater than or equal to 0.01. */ + soft_tree_depth_tolerance?: <<double>> + pass:[/**] @property training_percent Defines what percentage of the eligible documents that will be used for training. Documents that are ignored by the analysis (for example those that contain arrays with more than one value) won’t be included in the calculation for used percentage. */ + training_percent?: <<Percentage>> +} +---- + + +[discrete] +[[MlDataframeAnalysisAnalyzedFields]] +=== MlDataframeAnalysisAnalyzedFields + +[source,ts,subs=+macros] +---- +interface MlDataframeAnalysisAnalyzedFields { + pass:[/**] @property includes An array of strings that defines the fields that will be included in the analysis. */ + includes: string[] + pass:[/**] @property excludes An array of strings that defines the fields that will be excluded from the analysis. You do not need to add fields with unsupported data types to excludes, these fields are excluded from the analysis automatically. */ + excludes: string[] +} +---- + + +[discrete] +[[MlDataframeAnalysisClassification]] +=== MlDataframeAnalysisClassification + +[source,ts,subs=+macros] +---- +interface MlDataframeAnalysisClassification extends <<MlDataframeAnalysis>> { + class_assignment_objective?: string + pass:[/**] @property num_top_classes Defines the number of categories for which the predicted probabilities are reported. It must be non-negative or -1. If it is -1 or greater than the total number of categories, probabilities are reported for all categories; if you have a large number of categories, there could be a significant effect on the size of your destination index. NOTE: To use the AUC ROC evaluation method, `num_top_classes` must be set to -1 or a value greater than or equal to the total number of categories. */ + num_top_classes?: <<integer>> +} +---- + + +[discrete] +[[MlDataframeAnalysisContainer]] +=== MlDataframeAnalysisContainer + +[source,ts,subs=+macros] +---- +interface MlDataframeAnalysisContainer { + pass:[/**] @property classification The configuration information necessary to perform classification. */ + classification?: <<MlDataframeAnalysisClassification>> + pass:[/**] @property outlier_detection The configuration information necessary to perform outlier detection. NOTE: Advanced parameters are for fine-tuning classification analysis.
They are set automatically by hyperparameter optimization to give the minimum validation error. It is highly recommended to use the default values unless you fully understand the function of these parameters. */ + outlier_detection?: <<MlDataframeAnalysisOutlierDetection>> + pass:[/**] @property regression The configuration information necessary to perform regression. NOTE: Advanced parameters are for fine-tuning regression analysis. They are set automatically by hyperparameter optimization to give the minimum validation error. It is highly recommended to use the default values unless you fully understand the function of these parameters. */ + regression?: <<MlDataframeAnalysisRegression>> +} +---- + + +[discrete] +[[MlDataframeAnalysisFeatureProcessor]] +=== MlDataframeAnalysisFeatureProcessor + +[source,ts,subs=+macros] +---- +interface MlDataframeAnalysisFeatureProcessor { + pass:[/**] @property frequency_encoding The configuration information necessary to perform frequency encoding. */ + frequency_encoding?: <<MlDataframeAnalysisFeatureProcessorFrequencyEncoding>> + pass:[/**] @property multi_encoding The configuration information necessary to perform multi encoding. It allows multiple processors to be changed together. This way the output of a processor can then be passed to another as an input. */ + multi_encoding?: <<MlDataframeAnalysisFeatureProcessorMultiEncoding>> + pass:[/**] @property n_gram_encoding The configuration information necessary to perform n-gram encoding. Features created by this encoder have the following name format: <feature_prefix>.<ngram><string position>. For example, if the feature_prefix is f, the feature name for the second unigram in a string is f.11. */ + n_gram_encoding?: <<MlDataframeAnalysisFeatureProcessorNGramEncoding>> + pass:[/**] @property one_hot_encoding The configuration information necessary to perform one hot encoding. */ + one_hot_encoding?: <<MlDataframeAnalysisFeatureProcessorOneHotEncoding>> + pass:[/**] @property target_mean_encoding The configuration information necessary to perform target mean encoding. */ + target_mean_encoding?: <<MlDataframeAnalysisFeatureProcessorTargetMeanEncoding>> +} +---- + + +[discrete] +[[MlDataframeAnalysisFeatureProcessorFrequencyEncoding]] +=== MlDataframeAnalysisFeatureProcessorFrequencyEncoding + +[source,ts,subs=+macros] +---- +interface MlDataframeAnalysisFeatureProcessorFrequencyEncoding { + pass:[/**] @property feature_name The resulting feature name. */ + feature_name: <<Name>> + field: <<Field>> + pass:[/**] @property frequency_map The resulting frequency map for the field value. If the field value is missing from the frequency_map, the resulting value is 0. */ + frequency_map: Record<string, <<double>>> +} +---- + + +[discrete] +[[MlDataframeAnalysisFeatureProcessorMultiEncoding]] +=== MlDataframeAnalysisFeatureProcessorMultiEncoding + +[source,ts,subs=+macros] +---- +interface MlDataframeAnalysisFeatureProcessorMultiEncoding { + pass:[/**] @property processors The ordered array of custom processors to execute. Must be more than 1. */ + processors: <<integer>>[] +} +---- + + +[discrete] +[[MlDataframeAnalysisFeatureProcessorNGramEncoding]] +=== MlDataframeAnalysisFeatureProcessorNGramEncoding + +[source,ts,subs=+macros] +---- +interface MlDataframeAnalysisFeatureProcessorNGramEncoding { + pass:[/**] @property feature_prefix The feature name prefix. Defaults to ngram_<start>_<length>. */ + feature_prefix?: string + pass:[/**] @property field The name of the text field to encode. 
*/ + field: <<Field>> + pass:[/**] @property length Specifies the length of the n-gram substring. Defaults to 50. Must be greater than 0. */ + length?: <<integer>> + pass:[/**] @property n_grams Specifies which n-grams to gather. It’s an array of <<integer>> values where the minimum value is 1, and a maximum value is 5. */ + n_grams: <<integer>>[] + pass:[/**] @property start Specifies the zero-indexed start of the n-gram substring. Negative values are allowed for encoding n-grams of string suffixes. Defaults to 0. */ + start?: <<integer>> + custom?: boolean +} +---- + + +[discrete] +[[MlDataframeAnalysisFeatureProcessorOneHotEncoding]] +=== MlDataframeAnalysisFeatureProcessorOneHotEncoding + +[source,ts,subs=+macros] +---- +interface MlDataframeAnalysisFeatureProcessorOneHotEncoding { + pass:[/**] @property field The name of the field to encode. */ + field: <<Field>> + pass:[/**] @property hot_map The one hot map mapping the field value with the column name. */ + hot_map: string +} +---- + + +[discrete] +[[MlDataframeAnalysisFeatureProcessorTargetMeanEncoding]] +=== MlDataframeAnalysisFeatureProcessorTargetMeanEncoding + +[source,ts,subs=+macros] +---- +interface MlDataframeAnalysisFeatureProcessorTargetMeanEncoding { + pass:[/**] @property default_value The default value if field value is not found in the target_map. */ + default_value: <<integer>> + pass:[/**] @property feature_name The resulting feature name. */ + feature_name: <<Name>> + pass:[/**] @property field The name of the field to encode. */ + field: <<Field>> + pass:[/**] @property target_map The field value to target mean transition map. */ + target_map: Record<string, any> +} +---- + + +[discrete] +[[MlDataframeAnalysisOutlierDetection]] +=== MlDataframeAnalysisOutlierDetection + +[source,ts,subs=+macros] +---- +interface MlDataframeAnalysisOutlierDetection { + pass:[/**] @property compute_feature_influence Specifies whether the feature influence calculation is enabled. */ + compute_feature_influence?: boolean + pass:[/**] @property feature_influence_threshold The minimum outlier score that a document needs to have in order to calculate its feature influence score. Value range: 0-1. */ + feature_influence_threshold?: <<double>> + pass:[/**] @property method The method that outlier detection uses. Available methods are `lof`, `ldof`, `distance_kth_nn`, `distance_knn`, and `ensemble`. The default value is ensemble, which means that outlier detection uses an ensemble of different methods and normalises and combines their individual outlier scores to obtain the overall outlier score. */ + method?: string + pass:[/**] @property n_neighbors Defines the value for how many nearest neighbors each method of outlier detection uses to calculate its outlier score. When the value is not set, different values are used for different ensemble members. This default behavior helps improve the diversity in the ensemble; only override it if you are confident that the value you choose is appropriate for the data set. */ + n_neighbors?: <<integer>> + pass:[/**] @property outlier_fraction The proportion of the data set that is assumed to be outlying prior to outlier detection. For example, 0.05 means it is assumed that 5% of values are real outliers and 95% are inliers. */ + outlier_fraction?: <<double>> + pass:[/**] @property standardization_enabled If true, the following operation is performed on the columns before computing outlier scores: `(x_i - mean(x_i)) / sd(x_i)`. 
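// Hedged sketch: an outlier detection job whose analysis uses the
// MlDataframeAnalysisOutlierDetection options above. The job id, index names and
// option values are illustrative placeholders.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function run () {
  await client.ml.putDataFrameAnalytics({
    id: 'http-request-outliers',
    source: { index: 'http-logs' },
    dest: { index: 'http-request-outliers-results' },
    analysis: {
      outlier_detection: {
        method: 'lof', // lof, ldof, distance_kth_nn, distance_knn or ensemble
        n_neighbors: 20,
        compute_feature_influence: true,
        feature_influence_threshold: 0.1,
        standardization_enabled: true
      }
    }
  })
}

run().catch(console.log)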
*/ + standardization_enabled?: boolean +} +---- + + +[discrete] +[[MlDataframeAnalysisRegression]] +=== MlDataframeAnalysisRegression + +[source,ts,subs=+macros] +---- +interface MlDataframeAnalysisRegression extends <<MlDataframeAnalysis>> { + pass:[/**] @property loss_function The loss function used during regression. Available options are `mse` (mean squared error), `msle` (mean squared logarithmic error), `huber` (Pseudo-Huber loss). */ + loss_function?: string + pass:[/**] @property loss_function_parameter A positive number that is used as a parameter to the `loss_function`. */ + loss_function_parameter?: <<double>> +} +---- + + +[discrete] +[[MlDataframeAnalytics]] +=== MlDataframeAnalytics + +[source,ts,subs=+macros] +---- +interface MlDataframeAnalytics { + pass:[/**] @property analysis_stats An object containing information about the analysis job. */ + analysis_stats?: <<MlDataframeAnalyticsStatsContainer>> + pass:[/**] @property assignment_explanation For running jobs only, contains messages relating to the selection of a node to run the job. */ + assignment_explanation?: string + pass:[/**] @property data_counts An object that provides counts for the quantity of documents skipped, used in training, or available for testing. */ + data_counts: <<MlDataframeAnalyticsStatsDataCounts>> + pass:[/**] @property id The unique identifier of the data frame analytics job. */ + id: <<Id>> + pass:[/**] @property memory_usage An object describing memory usage of the analytics. It is present only after the job is started and memory usage is reported. */ + memory_usage: <<MlDataframeAnalyticsStatsMemoryUsage>> + pass:[/**] @property node Contains properties for the node that runs the job. This information is available only for running jobs. */ + node?: <<NodeAttributes>> + pass:[/**] @property progress The progress report of the data frame analytics job by phase. */ + progress: <<MlDataframeAnalyticsStatsProgress>>[] + pass:[/**] @property state The status of the data frame analytics job, which can be one of the following values: failed, started, starting, stopping, stopped. */ + state: <<MlDataframeState>> +} +---- + + +[discrete] +[[MlDataframeAnalyticsAuthorization]] +=== MlDataframeAnalyticsAuthorization + +[source,ts,subs=+macros] +---- +interface MlDataframeAnalyticsAuthorization { + pass:[/**] @property api_key If an API key was used for the most recent update to the job, its name and identifier are listed in the response. */ + api_key?: <<MlApiKeyAuthorization>> + pass:[/**] @property roles If a user ID was used for the most recent update to the job, its roles at the time of the update are listed in the response. */ + roles?: string[] + pass:[/**] @property service_account If a service account was used for the most recent update to the job, the account name is listed in the response. */ + service_account?: string +} +---- + + +[discrete] +[[MlDataframeAnalyticsDestination]] +=== MlDataframeAnalyticsDestination + +[source,ts,subs=+macros] +---- +interface MlDataframeAnalyticsDestination { + pass:[/**] @property index Defines the destination index to store the results of the data frame analytics job. */ + index: <<IndexName>> + pass:[/**] @property results_field Defines the name of the field in which to store the results of the analysis. Defaults to `ml`. 
*/ + results_field?: <<Field>> +} +---- + + +[discrete] +[[MlDataframeAnalyticsFieldSelection]] +=== MlDataframeAnalyticsFieldSelection + +[source,ts,subs=+macros] +---- +interface MlDataframeAnalyticsFieldSelection { + pass:[/**] @property is_included Whether the field is selected to be included in the analysis. */ + is_included: boolean + pass:[/**] @property is_required Whether the field is required. */ + is_required: boolean + pass:[/**] @property feature_type The feature type of this field for the analysis. May be categorical or numerical. */ + feature_type?: string + pass:[/**] @property mapping_types The mapping types of the field. */ + mapping_types: string[] + pass:[/**] @property name The field name. */ + name: <<Field>> + pass:[/**] @property reason The reason a field is not selected to be included in the analysis. */ + reason?: string +} +---- + + +[discrete] +[[MlDataframeAnalyticsMemoryEstimation]] +=== MlDataframeAnalyticsMemoryEstimation + +[source,ts,subs=+macros] +---- +interface MlDataframeAnalyticsMemoryEstimation { + pass:[/**] @property expected_memory_with_disk Estimated memory usage under the assumption that overflowing to disk is allowed during data frame analytics. expected_memory_with_disk is usually smaller than expected_memory_without_disk as using disk allows to limit the main memory needed to perform data frame analytics. */ + expected_memory_with_disk: string + pass:[/**] @property expected_memory_without_disk Estimated memory usage under the assumption that the whole data frame analytics should happen in memory (i.e. without overflowing to disk). */ + expected_memory_without_disk: string +} +---- + + +[discrete] +[[MlDataframeAnalyticsSource]] +=== MlDataframeAnalyticsSource + +[source,ts,subs=+macros] +---- +interface MlDataframeAnalyticsSource { + pass:[/**] @property index Index or indices on which to perform the analysis. It can be a single index or index pattern as well as an array of indices or patterns. NOTE: If your source indices contain documents with the same IDs, only the document that is indexed last appears in the destination index. */ + index: <<Indices>> + pass:[/**] @property query The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this object is passed verbatim to Elasticsearch. By default, this property has the following value: {"match_all": {}}. */ + query?: <<QueryDslQueryContainer>> + pass:[/**] @property runtime_mappings Definitions of runtime fields that will become part of the mapping of the destination index. */ + runtime_mappings?: <<MappingRuntimeFields>> + pass:[/**] @property _source Specify `includes` and/or `excludes patterns to select which fields will be present in the destination. <<Fields>> that are excluded cannot be included in the analysis. */ + _source?: <<MlDataframeAnalysisAnalyzedFields>> | string[] +} +---- + + +[discrete] +[[MlDataframeAnalyticsStatsContainer]] +=== MlDataframeAnalyticsStatsContainer + +[source,ts,subs=+macros] +---- +interface MlDataframeAnalyticsStatsContainer { + pass:[/**] @property classification_stats An object containing information about the classification analysis job. */ + classification_stats?: <<MlDataframeAnalyticsStatsHyperparameters>> + pass:[/**] @property outlier_detection_stats An object containing information about the outlier detection job. 
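// Sketch of where MlDataframeAnalyticsFieldSelection and
// MlDataframeAnalyticsMemoryEstimation usually appear: the explain data frame
// analytics API returns one field_selection entry per field plus a memory_estimation
// object. The source index and analysis below are placeholders.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function run () {
  const response = await client.ml.explainDataFrameAnalytics({
    source: { index: 'loan-applications' },
    analysis: { classification: { dependent_variable: 'approved' } }
  })
  for (const field of response.field_selection) {
    // is_included / is_required / reason explain why a field was (not) selected
    console.log(field.name, field.is_included, field.reason ?? '')
  }
  console.log(response.memory_estimation.expected_memory_without_disk)
}

run().catch(console.log)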
*/ + outlier_detection_stats?: <<MlDataframeAnalyticsStatsOutlierDetection>> + pass:[/**] @property regression_stats An object containing information about the regression analysis. */ + regression_stats?: <<MlDataframeAnalyticsStatsHyperparameters>> +} +---- + + +[discrete] +[[MlDataframeAnalyticsStatsDataCounts]] +=== MlDataframeAnalyticsStatsDataCounts + +[source,ts,subs=+macros] +---- +interface MlDataframeAnalyticsStatsDataCounts { + pass:[/**] @property skipped_docs_count The number of documents that are skipped during the analysis because they contained values that are not supported by the analysis. For example, outlier detection does not support missing fields so it skips documents with missing fields. Likewise, all types of analysis skip documents that contain arrays with more than one element. */ + skipped_docs_count: <<integer>> + pass:[/**] @property test_docs_count The number of documents that are not used for training the model and can be used for testing. */ + test_docs_count: <<integer>> + pass:[/**] @property training_docs_count The number of documents that are used for training the model. */ + training_docs_count: <<integer>> +} +---- + + +[discrete] +[[MlDataframeAnalyticsStatsHyperparameters]] +=== MlDataframeAnalyticsStatsHyperparameters + +[source,ts,subs=+macros] +---- +interface MlDataframeAnalyticsStatsHyperparameters { + pass:[/**] @property hyperparameters An object containing the parameters of the classification analysis job. */ + hyperparameters: <<MlHyperparameters>> + pass:[/**] @property iteration The number of iterations on the analysis. */ + iteration: <<integer>> + pass:[/**] @property timestamp The timestamp when the statistics were reported in milliseconds since the epoch. */ + timestamp: <<EpochTime>><<<UnitMillis>>> + pass:[/**] @property timing_stats An object containing time statistics about the data frame analytics job. */ + timing_stats: <<MlTimingStats>> + pass:[/**] @property validation_loss An object containing information about validation loss. */ + validation_loss: <<MlValidationLoss>> +} +---- + + +[discrete] +[[MlDataframeAnalyticsStatsMemoryUsage]] +=== MlDataframeAnalyticsStatsMemoryUsage + +[source,ts,subs=+macros] +---- +interface MlDataframeAnalyticsStatsMemoryUsage { + pass:[/**] @property memory_reestimate_bytes This value is present when the status is hard_limit and it is a new estimate of how much memory the job needs. */ + memory_reestimate_bytes?: <<long>> + pass:[/**] @property peak_usage_bytes The number of bytes used at the highest peak of memory usage. */ + peak_usage_bytes: <<long>> + pass:[/**] @property status The memory usage status. */ + status: string + pass:[/**] @property timestamp The timestamp when memory usage was calculated. */ + timestamp?: <<EpochTime>><<<UnitMillis>>> +} +---- + + +[discrete] +[[MlDataframeAnalyticsStatsOutlierDetection]] +=== MlDataframeAnalyticsStatsOutlierDetection + +[source,ts,subs=+macros] +---- +interface MlDataframeAnalyticsStatsOutlierDetection { + pass:[/**] @property parameters The list of job parameters specified by the user or determined by algorithmic heuristics. */ + parameters: <<MlOutlierDetectionParameters>> + pass:[/**] @property timestamp The timestamp when the statistics were reported in milliseconds since the epoch. */ + timestamp: <<EpochTime>><<<UnitMillis>>> + pass:[/**] @property timing_stats An object containing time statistics about the data frame analytics job. 
*/ + timing_stats: <<MlTimingStats>> +} +---- + + +[discrete] +[[MlDataframeAnalyticsStatsProgress]] +=== MlDataframeAnalyticsStatsProgress + +[source,ts,subs=+macros] +---- +interface MlDataframeAnalyticsStatsProgress { + pass:[/**] @property phase Defines the phase of the data frame analytics job. */ + phase: string + pass:[/**] @property progress_percent The progress that the data frame analytics job has made expressed in percentage. */ + progress_percent: <<integer>> +} +---- + + +[discrete] +[[MlDataframeAnalyticsSummary]] +=== MlDataframeAnalyticsSummary + +[source,ts,subs=+macros] +---- +interface MlDataframeAnalyticsSummary { + allow_lazy_start?: boolean + analysis: <<MlDataframeAnalysisContainer>> + analyzed_fields?: <<MlDataframeAnalysisAnalyzedFields>> | string[] + pass:[/**] @property authorization The security privileges that the job uses to run its queries. If Elastic Stack security features were disabled at the time of the most recent update to the job, this property is omitted. */ + authorization?: <<MlDataframeAnalyticsAuthorization>> + create_time?: <<EpochTime>><<<UnitMillis>>> + description?: string + dest: <<MlDataframeAnalyticsDestination>> + id: <<Id>> + max_num_threads?: <<integer>> + model_memory_limit?: string + source: <<MlDataframeAnalyticsSource>> + version?: <<VersionString>> +} +---- + + +[discrete] +[[MlDataframeEvaluationClassification]] +=== MlDataframeEvaluationClassification + +[source,ts,subs=+macros] +---- +interface MlDataframeEvaluationClassification { + pass:[/**] @property actual_field The field of the index which contains the ground truth. The data type of this field can be boolean or <<integer>>. If the data type is <<integer>>, the value has to be either 0 (false) or 1 (true). */ + actual_field: <<Field>> + pass:[/**] @property predicted_field The field in the index which contains the predicted value, in other words the results of the classification analysis. */ + predicted_field?: <<Field>> + pass:[/**] @property top_classes_field The field of the index which is an array of documents of the form { "class_name": XXX, "class_probability": YYY }. This field must be defined as nested in the mappings. */ + top_classes_field?: <<Field>> + pass:[/**] @property metrics Specifies the metrics that are used for the evaluation. */ + metrics?: <<MlDataframeEvaluationClassificationMetrics>> +} +---- + + +[discrete] +[[MlDataframeEvaluationClassificationMetrics]] +=== MlDataframeEvaluationClassificationMetrics + +[source,ts,subs=+macros] +---- +interface MlDataframeEvaluationClassificationMetrics extends <<MlDataframeEvaluationMetrics>> { + pass:[/**] @property accuracy Accuracy of predictions (per-class and overall). */ + accuracy?: Record<string, any> + pass:[/**] @property multiclass_confusion_matrix Multiclass confusion matrix. */ + multiclass_confusion_matrix?: Record<string, any> +} +---- + + +[discrete] +[[MlDataframeEvaluationClassificationMetricsAucRoc]] +=== MlDataframeEvaluationClassificationMetricsAucRoc + +[source,ts,subs=+macros] +---- +interface MlDataframeEvaluationClassificationMetricsAucRoc { + pass:[/**] @property class_name <<Name>> of the only class that is treated as positive during AUC ROC calculation. Other classes are treated as negative ("one-vs-all" strategy). All the evaluated documents must have class_name in the list of their top classes. */ + class_name?: <<Name>> + pass:[/**] @property include_curve Whether or not the curve should be returned in addition to the score. Default value is false. 
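// Hedged sketch: MlDataframeEvaluationClassification is the shape passed to the
// evaluate data frame API once a classification job has written its results. The
// destination index and field names are placeholders; `ml` is the default
// results_field of a job.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function run () {
  const response = await client.ml.evaluateDataFrame({
    index: 'loan-approval-classification-results',
    evaluation: {
      classification: {
        actual_field: 'approved',
        predicted_field: 'ml.approved_prediction',
        // MlDataframeEvaluationClassificationMetrics
        metrics: {
          accuracy: {},
          multiclass_confusion_matrix: {}
        }
      }
    }
  })
  console.log(response.classification)
}

run().catch(console.log)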
*/ + include_curve?: boolean +} +---- + + +[discrete] +[[MlDataframeEvaluationContainer]] +=== MlDataframeEvaluationContainer + +[source,ts,subs=+macros] +---- +interface MlDataframeEvaluationContainer { + pass:[/**] @property classification Classification evaluation evaluates the results of a classification analysis which outputs a prediction that identifies to which of the classes each document belongs. */ + classification?: <<MlDataframeEvaluationClassification>> + pass:[/**] @property outlier_detection Outlier detection evaluates the results of an outlier detection analysis which outputs the probability that each document is an outlier. */ + outlier_detection?: <<MlDataframeEvaluationOutlierDetection>> + pass:[/**] @property regression Regression evaluation evaluates the results of a regression analysis which outputs a prediction of values. */ + regression?: <<MlDataframeEvaluationRegression>> +} +---- + + +[discrete] +[[MlDataframeEvaluationMetrics]] +=== MlDataframeEvaluationMetrics + +[source,ts,subs=+macros] +---- +interface MlDataframeEvaluationMetrics { + pass:[/**] @property auc_roc The AUC ROC (area under the curve of the receiver operating characteristic) score and optionally the curve. It is calculated for a specific class (provided as "class_name") treated as positive. */ + auc_roc?: <<MlDataframeEvaluationClassificationMetricsAucRoc>> + pass:[/**] @property precision Precision of predictions (per-class and average). */ + precision?: Record<string, any> + pass:[/**] @property recall Recall of predictions (per-class and average). */ + recall?: Record<string, any> +} +---- + + +[discrete] +[[MlDataframeEvaluationOutlierDetection]] +=== MlDataframeEvaluationOutlierDetection + +[source,ts,subs=+macros] +---- +interface MlDataframeEvaluationOutlierDetection { + pass:[/**] @property actual_field The field of the index which contains the ground truth. The data type of this field can be boolean or <<integer>>. If the data type is <<integer>>, the value has to be either 0 (false) or 1 (true). */ + actual_field: <<Field>> + pass:[/**] @property predicted_probability_field The field of the index that defines the probability of whether the item belongs to the class in question or not. It’s the field that contains the results of the analysis. */ + predicted_probability_field: <<Field>> + pass:[/**] @property metrics Specifies the metrics that are used for the evaluation. */ + metrics?: <<MlDataframeEvaluationOutlierDetectionMetrics>> +} +---- + + +[discrete] +[[MlDataframeEvaluationOutlierDetectionMetrics]] +=== MlDataframeEvaluationOutlierDetectionMetrics + +[source,ts,subs=+macros] +---- +interface MlDataframeEvaluationOutlierDetectionMetrics extends <<MlDataframeEvaluationMetrics>> { + pass:[/**] @property confusion_matrix Accuracy of predictions (per-class and overall). */ + confusion_matrix?: Record<string, any> +} +---- + + +[discrete] +[[MlDataframeEvaluationRegression]] +=== MlDataframeEvaluationRegression + +[source,ts,subs=+macros] +---- +interface MlDataframeEvaluationRegression { + pass:[/**] @property actual_field The field of the index which contains the ground truth. The data type of this field must be numerical. */ + actual_field: <<Field>> + pass:[/**] @property predicted_field The field in the index that contains the predicted value, in other words the results of the regression analysis. */ + predicted_field: <<Field>> + pass:[/**] @property metrics Specifies the metrics that are used for the evaluation. 
For more information on mse, msle, and huber, consult the Jupyter notebook on regression loss functions. */ + metrics?: <<MlDataframeEvaluationRegressionMetrics>> +} +---- + + +[discrete] +[[MlDataframeEvaluationRegressionMetrics]] +=== MlDataframeEvaluationRegressionMetrics + +[source,ts,subs=+macros] +---- +interface MlDataframeEvaluationRegressionMetrics { + pass:[/**] @property mse Average squared difference between the predicted values and the actual (ground truth) value. For more information, read this wiki article. */ + mse?: Record<string, any> + pass:[/**] @property msle Average squared difference between the logarithm of the predicted values and the logarithm of the actual (ground truth) value. */ + msle?: <<MlDataframeEvaluationRegressionMetricsMsle>> + pass:[/**] @property huber Pseudo Huber loss function. */ + huber?: <<MlDataframeEvaluationRegressionMetricsHuber>> + pass:[/**] @property r_squared Proportion of the variance in the dependent variable that is predictable from the independent variables. */ + r_squared?: Record<string, any> +} +---- + + +[discrete] +[[MlDataframeEvaluationRegressionMetricsHuber]] +=== MlDataframeEvaluationRegressionMetricsHuber + +[source,ts,subs=+macros] +---- +interface MlDataframeEvaluationRegressionMetricsHuber { + pass:[/**] @property delta Approximates 1/2 (prediction - actual)2 for values much less than delta and approximates a straight line with slope delta for values much larger than delta. Defaults to 1. Delta needs to be greater than 0. */ + delta?: <<double>> +} +---- + + +[discrete] +[[MlDataframeEvaluationRegressionMetricsMsle]] +=== MlDataframeEvaluationRegressionMetricsMsle + +[source,ts,subs=+macros] +---- +interface MlDataframeEvaluationRegressionMetricsMsle { + pass:[/**] @property offset Defines the transition point at which you switch from minimizing quadratic error to minimizing quadratic log error. Defaults to 1. */ + offset?: <<double>> +} +---- + + +[discrete] +[[MlDataframeState]] +=== MlDataframeState + +[source,ts,subs=+macros] +---- +type MlDataframeState = 'started' | 'stopped' | 'starting' | 'stopping' | 'failed' +---- + + +[discrete] +[[MlDelayedDataCheckConfig]] +=== MlDelayedDataCheckConfig + +[source,ts,subs=+macros] +---- +interface MlDelayedDataCheckConfig { + pass:[/**] @property check_window The window of time that is searched for late data. This window of time ends with the latest finalized bucket. It defaults to null, which causes an appropriate `check_window` to be calculated when the real-time datafeed runs. In particular, the default `check_window` span calculation is based on the maximum of `2h` or `8 * bucket_span`. */ + check_window?: <<Duration>> + pass:[/**] @property enabled Specifies whether the datafeed periodically checks for delayed data. */ + enabled: boolean +} +---- + + +[discrete] +[[MlDeploymentAllocationState]] +=== MlDeploymentAllocationState + +[source,ts,subs=+macros] +---- +type MlDeploymentAllocationState = 'started' | 'starting' | 'fully_allocated' +---- + + +[discrete] +[[MlDeploymentAssignmentState]] +=== MlDeploymentAssignmentState + +[source,ts,subs=+macros] +---- +type MlDeploymentAssignmentState = 'started' | 'starting' | 'stopping' | 'failed' +---- + + +[discrete] +[[MlDetectionRule]] +=== MlDetectionRule + +[source,ts,subs=+macros] +---- +interface MlDetectionRule { + pass:[/**] @property actions The set of actions to be triggered when the rule applies. If more than one action is specified the effects of all actions are combined. 
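// Sketch using MlDataframeEvaluationRegression and its metrics types with the
// evaluate data frame API; the index and field names are placeholders.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function run () {
  const response = await client.ml.evaluateDataFrame({
    index: 'house-price-regression-results',
    evaluation: {
      regression: {
        actual_field: 'price',
        predicted_field: 'ml.price_prediction',
        metrics: {
          mse: {}, // mean squared error
          r_squared: {},
          huber: { delta: 1.0 } // MlDataframeEvaluationRegressionMetricsHuber
        }
      }
    }
  })
  console.log(response.regression)
}

run().catch(console.log)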
*/ + actions?: <<MlRuleAction>>[] + pass:[/**] @property conditions An array of numeric conditions when the rule applies. A rule must either have a non-empty scope or at least one condition. Multiple conditions are combined together with a logical AND. */ + conditions?: <<MlRuleCondition>>[] + pass:[/**] @property scope A scope of series where the rule applies. A rule must either have a non-empty scope or at least one condition. By default, the scope includes all series. Scoping is allowed for any of the fields that are also specified in `by_field_name`, `over_field_name`, or `partition_field_name`. */ + scope?: Record<<<Field>>, <<MlFilterRef>>> +} +---- + + +[discrete] +[[MlDetector]] +=== MlDetector + +[source,ts,subs=+macros] +---- +interface MlDetector { + pass:[/**] @property by_field_name The field used to split the data. In particular, this property is used for analyzing the splits with respect to their own history. It is used for finding unusual values in the context of the split. */ + by_field_name?: <<Field>> + pass:[/**] @property custom_rules Custom rules enable you to customize the way detectors operate. For example, a rule may dictate conditions under which results should be skipped. Kibana refers to custom rules as job rules. */ + custom_rules?: <<MlDetectionRule>>[] + pass:[/**] @property detector_description A description of the detector. */ + detector_description?: string + pass:[/**] @property detector_index A unique identifier for the detector. This identifier is based on the order of the detectors in the `analysis_config`, starting at zero. If you specify a value for this property, it is ignored. */ + detector_index?: <<integer>> + pass:[/**] @property exclude_frequent If set, frequent entities are excluded from influencing the anomaly results. Entities can be considered frequent over time or frequent in a population. If you are working with both over and by fields, you can set `exclude_frequent` to `all` for both fields, or to `by` or `over` for those specific fields. */ + exclude_frequent?: <<MlExcludeFrequent>> + pass:[/**] @property field_name The field that the detector uses in the function. If you use an event rate function such as count or rare, do not specify this field. The `field_name` cannot contain <<double>> quotes or backslashes. */ + field_name?: <<Field>> + pass:[/**] @property function The analysis function that is used. For example, `count`, `rare`, `mean`, `min`, `max`, or `sum`. */ + function?: string + pass:[/**] @property over_field_name The field used to split the data. In particular, this property is used for analyzing the splits with respect to the history of all splits. It is used for finding unusual values in the population of all splits. */ + over_field_name?: <<Field>> + pass:[/**] @property partition_field_name The field used to segment the analysis. When you use this property, you have completely independent baselines for each value of this field. */ + partition_field_name?: <<Field>> + pass:[/**] @property use_null Defines whether a new series is used as the null series when there is no value for the by or partition fields. */ + use_null?: boolean +} +---- + + +[discrete] +[[MlDetectorRead]] +=== MlDetectorRead + +[source,ts,subs=+macros] +---- +interface MlDetectorRead { + pass:[/**] @property by_field_name The field used to split the data. In particular, this property is used for analyzing the splits with respect to their own history. It is used for finding unusual values in the context of the split. 
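// A sketch of an MlDetector carrying an MlDetectionRule (custom rule), as it would
// appear in the detectors array of an anomaly detection job's analysis_config.
// The field names, filter id and threshold are placeholders.
const detector = {
  function: 'mean',
  field_name: 'responsetime',
  by_field_name: 'airline',
  custom_rules: [
    {
      // skip results rather than skipping model updates
      actions: ['skip_result'],
      // numeric condition: the rule only applies when the actual value is below 10
      conditions: [{ applies_to: 'actual', operator: 'lt', value: 10 }],
      // scope via MlFilterRef: restrict the rule to airlines in a pre-created filter
      scope: {
        airline: { filter_id: 'airlines-of-interest', filter_type: 'include' }
      }
    }
  ]
}

console.log(detector)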
*/ + by_field_name?: <<Field>> + pass:[/**] @property custom_rules An array of custom rule objects, which enable you to customize the way detectors operate. For example, a rule may dictate to the detector conditions under which results should be skipped. Kibana refers to custom rules as job rules. */ + custom_rules?: <<MlDetectionRule>>[] + pass:[/**] @property detector_description A description of the detector. */ + detector_description?: string + pass:[/**] @property detector_index A unique identifier for the detector. This identifier is based on the order of the detectors in the `analysis_config`, starting at zero. */ + detector_index?: <<integer>> + pass:[/**] @property exclude_frequent Contains one of the following values: `all`, `none`, `by`, or `over`. If set, frequent entities are excluded from influencing the anomaly results. Entities can be considered frequent over time or frequent in a population. If you are working with both over and by fields, then you can set `exclude_frequent` to all for both fields, or to `by` or `over` for those specific fields. */ + exclude_frequent?: <<MlExcludeFrequent>> + pass:[/**] @property field_name The field that the detector uses in the function. If you use an event rate function such as `count` or `rare`, do not specify this field. */ + field_name?: <<Field>> + pass:[/**] @property function The analysis function that is used. For example, `count`, `rare`, `mean`, `min`, `max`, and `sum`. */ + function: string + pass:[/**] @property over_field_name The field used to split the data. In particular, this property is used for analyzing the splits with respect to the history of all splits. It is used for finding unusual values in the population of all splits. */ + over_field_name?: <<Field>> + pass:[/**] @property partition_field_name The field used to segment the analysis. When you use this property, you have completely independent baselines for each value of this field. */ + partition_field_name?: <<Field>> + pass:[/**] @property use_null Defines whether a new series is used as the null series when there is no value for the by or partition fields. */ + use_null?: boolean +} +---- + + +[discrete] +[[MlDiscoveryNode]] +=== MlDiscoveryNode + +[source,ts,subs=+macros] +---- +interface MlDiscoveryNode { + attributes: Record<string, string> + ephemeral_id: <<Id>> + id: <<Id>> + name: <<Name>> + transport_address: <<TransportAddress>> +} +---- + + +[discrete] +[[MlExcludeFrequent]] +=== MlExcludeFrequent + +[source,ts,subs=+macros] +---- +type MlExcludeFrequent = 'all' | 'none' | 'by' | 'over' +---- + + +[discrete] +[[MlFillMaskInferenceOptions]] +=== MlFillMaskInferenceOptions + +[source,ts,subs=+macros] +---- +interface MlFillMaskInferenceOptions { + pass:[/**] @property mask_token The string/token which will be removed from incoming documents and replaced with the inference prediction(s). In a response, this field contains the mask token for the specified model/tokenizer. Each model and tokenizer has a predefined mask token which cannot be changed. Thus, it is recommended not to set this value in requests. However, if this field is present in a request, its value must match the predefined value for that model/tokenizer, otherwise the request will fail. */ + mask_token?: string + pass:[/**] @property num_top_classes Specifies the number of top class predictions to return. Defaults to 0. 
*/ + num_top_classes?: <<integer>> + pass:[/**] @property tokenization The tokenization options to update when inferring */ + tokenization?: <<MlTokenizationConfigContainer>> + pass:[/**] @property results_field The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ + results_field?: string +} +---- + + +[discrete] +[[MlFillMaskInferenceUpdateOptions]] +=== MlFillMaskInferenceUpdateOptions + +[source,ts,subs=+macros] +---- +interface MlFillMaskInferenceUpdateOptions { + pass:[/**] @property num_top_classes Specifies the number of top class predictions to return. Defaults to 0. */ + num_top_classes?: <<integer>> + pass:[/**] @property tokenization The tokenization options to update when inferring */ + tokenization?: <<MlNlpTokenizationUpdateOptions>> + pass:[/**] @property results_field The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ + results_field?: string +} +---- + + +[discrete] +[[MlFilter]] +=== MlFilter + +[source,ts,subs=+macros] +---- +interface MlFilter { + pass:[/**] @property description A description of the filter. */ + description?: string + pass:[/**] @property filter_id A string that uniquely identifies a filter. */ + filter_id: <<Id>> + pass:[/**] @property items An array of strings which is the filter item list. */ + items: string[] +} +---- + + +[discrete] +[[MlFilterRef]] +=== MlFilterRef + +[source,ts,subs=+macros] +---- +interface MlFilterRef { + pass:[/**] @property filter_id The identifier for the filter. */ + filter_id: <<Id>> + pass:[/**] @property filter_type If set to `include`, the rule applies for values in the filter. If set to `exclude`, the rule applies for values not in the filter. */ + filter_type?: <<MlFilterType>> +} +---- + + +[discrete] +[[MlFilterType]] +=== MlFilterType + +[source,ts,subs=+macros] +---- +type MlFilterType = 'include' | 'exclude' +---- + + +[discrete] +[[MlGeoResults]] +=== MlGeoResults + +[source,ts,subs=+macros] +---- +interface MlGeoResults { + pass:[/**] @property actual_point The actual value for the bucket formatted as a `geo_point`. */ + actual_point: string + pass:[/**] @property typical_point The typical value for the bucket formatted as a `geo_point`. */ + typical_point: string +} +---- + + +[discrete] +[[MlHyperparameter]] +=== MlHyperparameter + +[source,ts,subs=+macros] +---- +interface MlHyperparameter { + pass:[/**] @property absolute_importance A positive number showing how much the parameter influences the variation of the loss function. For hyperparameters with values that are not specified by the user but tuned during hyperparameter optimization. */ + absolute_importance?: <<double>> + pass:[/**] @property name <<Name>> of the hyperparameter. */ + name: <<Name>> + pass:[/**] @property relative_importance A number between 0 and 1 showing the proportion of influence on the variation of the loss function among all tuned hyperparameters. For hyperparameters with values that are not specified by the user but tuned during hyperparameter optimization. */ + relative_importance?: <<double>> + pass:[/**] @property supplied Indicates if the hyperparameter is specified by the user (true) or optimized (false). */ + supplied: boolean + pass:[/**] @property value The value of the hyperparameter, either optimized or specified by the user. 
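// Hedged sketch: creating the filter that an MlFilterRef (for example in the scope of
// a detection rule) points at. The filter id, description and items are placeholders.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function run () {
  await client.ml.putFilter({
    filter_id: 'airlines-of-interest',
    description: 'Airlines that the detection rule should apply to',
    items: ['AAL', 'JAL', 'SWR']
  })
}

run().catch(console.log)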
*/ + value: <<double>> +} +---- + + +[discrete] +[[MlHyperparameters]] +=== MlHyperparameters + +[source,ts,subs=+macros] +---- +interface MlHyperparameters { + pass:[/**] @property alpha Advanced configuration option. Machine learning uses loss guided tree growing, which means that the decision trees grow where the regularized loss decreases most quickly. This parameter affects loss calculations by acting as a multiplier of the tree depth. Higher alpha values result in shallower trees and faster training times. By default, this value is calculated during hyperparameter optimization. It must be greater than or equal to zero. */ + alpha?: <<double>> + pass:[/**] @property lambda Advanced configuration option. Regularization parameter to prevent overfitting on the training data set. Multiplies an L2 regularization term which applies to leaf weights of the individual trees in the forest. A high lambda value causes training to favor small leaf weights. This behavior makes the prediction function smoother at the expense of potentially not being able to capture relevant relationships between the features and the dependent variable. A small lambda value results in large individual trees and slower training. By default, this value is calculated during hyperparameter optimization. It must be a nonnegative value. */ + lambda?: <<double>> + pass:[/**] @property gamma Advanced configuration option. Regularization parameter to prevent overfitting on the training data set. Multiplies a linear penalty associated with the size of individual trees in the forest. A high gamma value causes training to prefer small trees. A small gamma value results in larger individual trees and slower training. By default, this value is calculated during hyperparameter optimization. It must be a nonnegative value. */ + gamma?: <<double>> + pass:[/**] @property eta Advanced configuration option. The shrinkage applied to the weights. Smaller values result in larger forests which have a better generalization error. However, larger forests cause slower training. By default, this value is calculated during hyperparameter optimization. It must be a value between `0.001` and `1`. */ + eta?: <<double>> + pass:[/**] @property eta_growth_rate_per_tree Advanced configuration option. Specifies the rate at which `eta` increases for each new tree that is added to the forest. For example, a rate of 1.05 increases `eta` by 5% for each extra tree. By default, this value is calculated during hyperparameter optimization. It must be between `0.5` and `2`. */ + eta_growth_rate_per_tree?: <<double>> + pass:[/**] @property feature_bag_fraction Advanced configuration option. Defines the fraction of features that will be used when selecting a random bag for each candidate split. By default, this value is calculated during hyperparameter optimization. */ + feature_bag_fraction?: <<double>> + pass:[/**] @property downsample_factor Advanced configuration option. Controls the fraction of data that is used to compute the derivatives of the loss function for tree training. A small value results in the use of a small fraction of the data. If this value is set to be less than 1, accuracy typically improves. However, too small a value may result in poor convergence for the ensemble and so require more trees. By default, this value is calculated during hyperparameter optimization. It must be greater than zero and less than or equal to 1. 
*/ + downsample_factor?: <<double>> + pass:[/**] @property max_attempts_to_add_tree If the algorithm fails to determine a non-trivial tree (more than a single leaf), this parameter determines how many of such consecutive failures are tolerated. Once the number of attempts exceeds the threshold, the forest training stops. */ + max_attempts_to_add_tree?: <<integer>> + pass:[/**] @property max_optimization_rounds_per_hyperparameter Advanced configuration option. A multiplier responsible for determining the maximum number of hyperparameter optimization steps in the Bayesian optimization procedure. The maximum number of steps is determined based on the number of undefined hyperparameters times the maximum optimization rounds per hyperparameter. By default, this value is calculated during hyperparameter optimization. */ + max_optimization_rounds_per_hyperparameter?: <<integer>> + pass:[/**] @property max_trees Advanced configuration option. Defines the maximum number of decision trees in the forest. The maximum value is 2000. By default, this value is calculated during hyperparameter optimization. */ + max_trees?: <<integer>> + pass:[/**] @property num_folds The maximum number of folds for the cross-validation procedure. */ + num_folds?: <<integer>> + pass:[/**] @property num_splits_per_feature Determines the maximum number of splits for every feature that can occur in a decision tree when the tree is trained. */ + num_splits_per_feature?: <<integer>> + pass:[/**] @property soft_tree_depth_limit Advanced configuration option. Machine learning uses loss guided tree growing, which means that the decision trees grow where the regularized loss decreases most quickly. This soft limit combines with the `soft_tree_depth_tolerance` to penalize trees that exceed the specified depth; the regularized loss increases quickly beyond this depth. By default, this value is calculated during hyperparameter optimization. It must be greater than or equal to 0. */ + soft_tree_depth_limit?: <<integer>> + pass:[/**] @property soft_tree_depth_tolerance Advanced configuration option. This option controls how quickly the regularized loss increases when the tree depth exceeds `soft_tree_depth_limit`. By default, this value is calculated during hyperparameter optimization. It must be greater than or equal to 0.01. */ + soft_tree_depth_tolerance?: <<double>> +} +---- + + +[discrete] +[[MlInclude]] +=== MlInclude + +[source,ts,subs=+macros] +---- +type MlInclude = 'definition' | 'feature_importance_baseline' | 'hyperparameters' | 'total_feature_importance' | 'definition_status' +---- + + +[discrete] +[[MlInferenceConfigCreateContainer]] +=== MlInferenceConfigCreateContainer + +[source,ts,subs=+macros] +---- +interface MlInferenceConfigCreateContainer { + pass:[/**] @property regression Regression configuration for inference. */ + regression?: <<MlRegressionInferenceOptions>> + pass:[/**] @property classification Classification configuration for inference. */ + classification?: <<MlClassificationInferenceOptions>> + pass:[/**] @property text_classification Text classification configuration for inference. */ + text_classification?: <<MlTextClassificationInferenceOptions>> + pass:[/**] @property zero_shot_classification Zeroshot classification configuration for inference. */ + zero_shot_classification?: <<MlZeroShotClassificationInferenceOptions>> + pass:[/**] @property fill_mask Fill mask configuration for inference. 
*/ + fill_mask?: <<MlFillMaskInferenceOptions>> + pass:[/**] @property ner Named entity recognition configuration for inference. */ + ner?: <<MlNerInferenceOptions>> + pass:[/**] @property pass_through Pass through configuration for inference. */ + pass_through?: <<MlPassThroughInferenceOptions>> + pass:[/**] @property text_embedding Text embedding configuration for inference. */ + text_embedding?: <<MlTextEmbeddingInferenceOptions>> + pass:[/**] @property text_expansion Text expansion configuration for inference. */ + text_expansion?: <<MlTextExpansionInferenceOptions>> + pass:[/**] @property question_answering Question answering configuration for inference. */ + question_answering?: <<MlQuestionAnsweringInferenceOptions>> +} +---- + + +[discrete] +[[MlInferenceConfigUpdateContainer]] +=== MlInferenceConfigUpdateContainer + +[source,ts,subs=+macros] +---- +interface MlInferenceConfigUpdateContainer { + pass:[/**] @property regression Regression configuration for inference. */ + regression?: <<MlRegressionInferenceOptions>> + pass:[/**] @property classification Classification configuration for inference. */ + classification?: <<MlClassificationInferenceOptions>> + pass:[/**] @property text_classification Text classification configuration for inference. */ + text_classification?: <<MlTextClassificationInferenceUpdateOptions>> + pass:[/**] @property zero_shot_classification Zeroshot classification configuration for inference. */ + zero_shot_classification?: <<MlZeroShotClassificationInferenceUpdateOptions>> + pass:[/**] @property fill_mask Fill mask configuration for inference. */ + fill_mask?: <<MlFillMaskInferenceUpdateOptions>> + pass:[/**] @property ner Named entity recognition configuration for inference. */ + ner?: <<MlNerInferenceUpdateOptions>> + pass:[/**] @property pass_through Pass through configuration for inference. */ + pass_through?: <<MlPassThroughInferenceUpdateOptions>> + pass:[/**] @property text_embedding Text embedding configuration for inference. */ + text_embedding?: <<MlTextEmbeddingInferenceUpdateOptions>> + pass:[/**] @property text_expansion Text expansion configuration for inference. */ + text_expansion?: <<MlTextExpansionInferenceUpdateOptions>> + pass:[/**] @property question_answering Question answering configuration for inference */ + question_answering?: <<MlQuestionAnsweringInferenceUpdateOptions>> +} +---- + + +[discrete] +[[MlInferenceResponseResult]] +=== MlInferenceResponseResult + +[source,ts,subs=+macros] +---- +interface MlInferenceResponseResult { + pass:[/**] @property entities If the model is trained for named entity recognition (NER) tasks, the response contains the recognized entities. */ + entities?: <<MlTrainedModelEntities>>[] + pass:[/**] @property is_truncated Indicates whether the input text was truncated to meet the model's maximum sequence length limit. This property is present only when it is true. */ + is_truncated?: boolean + pass:[/**] @property predicted_value If the model is trained for a text classification or zero shot classification task, the response is the predicted class. For named entity recognition (NER) tasks, it contains the annotated text output. For fill mask tasks, it contains the top prediction for replacing the mask token. For text embedding tasks, it contains the raw numerical text embedding values. 
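// Sketch of where MlInferenceConfigUpdateContainer and MlInferenceResponseResult show
// up in practice: the infer trained model API takes the former as a per-request
// override and, assuming the response exposes the documented results under
// `inference_results`, returns the latter for every input document. The model id and
// input text are placeholders.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function run () {
  const response = await client.ml.inferTrainedModel({
    model_id: 'my-text-classification-model',
    docs: [{ text_field: 'The service was great, would order again.' }],
    // MlInferenceConfigUpdateContainer: request-time override of the stored config
    inference_config: {
      text_classification: { num_top_classes: 3 }
    }
  })
  for (const result of response.inference_results) {
    // MlInferenceResponseResult: predicted class plus per-class details
    console.log(result.predicted_value, result.prediction_probability, result.top_classes)
  }
}

run().catch(console.log)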
For regression models, its a numerical value For classification models, it may be an <<integer>>, <<double>>, boolean or string depending on prediction type */ + predicted_value?: <<MlPredictedValue>> | <<MlPredictedValue>>[] + pass:[/**] @property predicted_value_sequence For fill mask tasks, the response contains the input text sequence with the mask token replaced by the predicted value. Additionally */ + predicted_value_sequence?: string + pass:[/**] @property prediction_probability Specifies a probability for the predicted value. */ + prediction_probability?: <<double>> + pass:[/**] @property prediction_score Specifies a confidence score for the predicted value. */ + prediction_score?: <<double>> + pass:[/**] @property top_classes For fill mask, text classification, and zero shot classification tasks, the response contains a list of top class entries. */ + top_classes?: <<MlTopClassEntry>>[] + pass:[/**] @property warning If the request failed, the response contains the reason for the failure. */ + warning?: string + pass:[/**] @property feature_importance The feature importance for the inference results. Relevant only for classification or regression models */ + feature_importance?: <<MlTrainedModelInferenceFeatureImportance>>[] +} +---- + + +[discrete] +[[MlInfluence]] +=== MlInfluence + +[source,ts,subs=+macros] +---- +interface MlInfluence { + influencer_field_name: string + influencer_field_values: string[] +} +---- + + +[discrete] +[[MlInfluencer]] +=== MlInfluencer + +[source,ts,subs=+macros] +---- +interface MlInfluencer { + pass:[/**] @property bucket_span The length of the bucket in seconds. This value matches the bucket span that is specified in the job. */ + bucket_span: <<DurationValue>><<<UnitSeconds>>> + pass:[/**] @property influencer_score A normalized score between 0-100, which is based on the probability of the influencer in this bucket aggregated across detectors. Unlike `initial_influencer_score`, this value is updated by a re-normalization process as new data is analyzed. */ + influencer_score: <<double>> + pass:[/**] @property influencer_field_name The field name of the influencer. */ + influencer_field_name: <<Field>> + pass:[/**] @property influencer_field_value The entity that influenced, contributed to, or was to blame for the anomaly. */ + influencer_field_value: string + pass:[/**] @property initial_influencer_score A normalized score between 0-100, which is based on the probability of the influencer aggregated across detectors. This is the initial value that was calculated at the time the bucket was processed. */ + initial_influencer_score: <<double>> + pass:[/**] @property is_interim If true, this is an interim result. In other words, the results are calculated based on partial input data. */ + is_interim: boolean + pass:[/**] @property job_id Identifier for the anomaly detection job. */ + job_id: <<Id>> + pass:[/**] @property probability The probability that the influencer has this behavior, in the range 0 to 1. This value can be held to a high precision of over 300 decimal places, so the `influencer_score` is provided as a human-readable and friendly interpretation of this value. */ + probability: <<double>> + pass:[/**] @property result_type Internal. This value is always set to `influencer`. */ + result_type: string + pass:[/**] @property timestamp The start time of the bucket for which these results were calculated. 
*/ + timestamp: <<EpochTime>><<<UnitMillis>>> + pass:[/**] @property foo Additional influencer properties are added, depending on the fields being analyzed. For example, if it’s analyzing `user_name` as an influencer, a field `user_name` is added to the result document. This information enables you to filter the anomaly results more easily. */ + foo?: string +} +---- + + +[discrete] +[[MlJob]] +=== MlJob + +[source,ts,subs=+macros] +---- +interface MlJob { + pass:[/**] @property allow_lazy_open Advanced configuration option. Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node. */ + allow_lazy_open: boolean + pass:[/**] @property analysis_config The analysis configuration, which specifies how to analyze the data. After you create a job, you cannot change the analysis configuration; all the properties are informational. */ + analysis_config: <<MlAnalysisConfig>> + pass:[/**] @property analysis_limits Limits can be applied for the resources required to hold the mathematical models in memory. These limits are approximate and can be set per job. They do not control the memory used by other processes, for example the Elasticsearch Java processes. */ + analysis_limits?: <<MlAnalysisLimits>> + pass:[/**] @property background_persist_interval Advanced configuration option. The time between each periodic persistence of the model. The default value is a randomized value between 3 to 4 hours, which avoids all jobs persisting at exactly the same time. The smallest allowed value is 1 hour. */ + background_persist_interval?: <<Duration>> + blocked?: <<MlJobBlocked>> + create_time?: <<DateTime>> + pass:[/**] @property custom_settings Advanced configuration option. Contains custom metadata about the job. */ + custom_settings?: <<MlCustomSettings>> + pass:[/**] @property daily_model_snapshot_retention_after_days Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies a period of time (in days) after which only the first snapshot per day is retained. This period is relative to the timestamp of the most recent snapshot for this job. Valid values range from 0 to `model_snapshot_retention_days`. */ + daily_model_snapshot_retention_after_days?: <<long>> + pass:[/**] @property data_description The data description defines the format of the input data when you send data to the job by using the post data API. Note that when configuring a datafeed, these properties are automatically set. When data is received via the post data API, it is not stored in Elasticsearch. Only the results for anomaly detection are retained. */ + data_description: <<MlDataDescription>> + pass:[/**] @property datafeed_config The datafeed, which retrieves data from Elasticsearch for analysis by the job. You can associate only one datafeed with each anomaly detection job. */ + datafeed_config?: <<MlDatafeed>> + pass:[/**] @property deleting Indicates that the process of deleting the job is in progress but not yet completed. It is only reported when `true`. */ + deleting?: boolean + pass:[/**] @property description A description of the job. */ + description?: string + pass:[/**] @property finished_time If the job closed or failed, this is the time the job finished, otherwise it is `null`. This property is informational; you cannot change its value. */ + finished_time?: <<DateTime>> + pass:[/**] @property groups A list of job groups. A job can belong to no groups or many. 
*/ + groups?: string[] + pass:[/**] @property job_id Identifier for the anomaly detection job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. */ + job_id: <<Id>> + pass:[/**] @property job_type Reserved for future use, currently set to `anomaly_detector`. */ + job_type?: string + pass:[/**] @property job_version The machine learning configuration version number at which the the job was created. */ + job_version?: <<VersionString>> + pass:[/**] @property model_plot_config This advanced configuration option stores model information along with the results. It provides a more detailed view into anomaly detection. Model plot provides a simplified and indicative view of the model and its bounds. */ + model_plot_config?: <<MlModelPlotConfig>> + model_snapshot_id?: <<Id>> + pass:[/**] @property model_snapshot_retention_days Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies the maximum period of time (in days) that snapshots are retained. This period is relative to the timestamp of the most recent snapshot for this job. By default, snapshots ten days older than the newest snapshot are deleted. */ + model_snapshot_retention_days: <<long>> + pass:[/**] @property renormalization_window_days Advanced configuration option. The period over which adjustments to the score are applied, as new data is seen. The default value is the longer of 30 days or 100 `bucket_spans`. */ + renormalization_window_days?: <<long>> + pass:[/**] @property results_index_name A text string that affects the name of the machine learning results index. The default value is `shared`, which generates an index named `.ml-anomalies-shared`. */ + results_index_name: <<IndexName>> + pass:[/**] @property results_retention_days Advanced configuration option. The period of time (in days) that results are retained. Age is calculated relative to the timestamp of the latest bucket result. If this property has a non-null value, once per day at 00:30 (server time), results that are the specified number of days older than the latest bucket result are deleted from Elasticsearch. The default value is null, which means all results are retained. Annotations generated by the system also count as results for retention purposes; they are deleted after the same number of days as results. Annotations added by users are retained forever. */ + results_retention_days?: <<long>> +} +---- + + +[discrete] +[[MlJobBlocked]] +=== MlJobBlocked + +[source,ts,subs=+macros] +---- +interface MlJobBlocked { + reason: <<MlJobBlockedReason>> + task_id?: <<TaskId>> +} +---- + + +[discrete] +[[MlJobBlockedReason]] +=== MlJobBlockedReason + +[source,ts,subs=+macros] +---- +type MlJobBlockedReason = 'delete' | 'reset' | 'revert' +---- + + +[discrete] +[[MlJobConfig]] +=== MlJobConfig + +[source,ts,subs=+macros] +---- +interface MlJobConfig { + pass:[/**] @property allow_lazy_open Advanced configuration option. Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node. */ + allow_lazy_open?: boolean + pass:[/**] @property analysis_config The analysis configuration, which specifies how to analyze the data. After you create a job, you cannot change the analysis configuration; all the properties are informational. 
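// Hedged sketch: creating an anomaly detection job from an MlJobConfig-style request,
// that is an analysis_config with detectors plus a data_description. The job id,
// field names and bucket span are placeholders.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function run () {
  await client.ml.putJob({
    job_id: 'response-time-anomalies',
    description: 'Mean response time by airline',
    analysis_config: {
      bucket_span: '15m',
      detectors: [
        { function: 'mean', field_name: 'responsetime', by_field_name: 'airline' }
      ]
    },
    data_description: { time_field: 'timestamp' }
  })
}

run().catch(console.log)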
*/ + analysis_config: <<MlAnalysisConfig>> + pass:[/**] @property analysis_limits Limits can be applied for the resources required to hold the mathematical models in memory. These limits are approximate and can be set per job. They do not control the memory used by other processes, for example the Elasticsearch Java processes. */ + analysis_limits?: <<MlAnalysisLimits>> + pass:[/**] @property background_persist_interval Advanced configuration option. The time between each periodic persistence of the model. The default value is a randomized value between 3 to 4 hours, which avoids all jobs persisting at exactly the same time. The smallest allowed value is 1 hour. */ + background_persist_interval?: <<Duration>> + pass:[/**] @property custom_settings Advanced configuration option. Contains custom metadata about the job. */ + custom_settings?: <<MlCustomSettings>> + pass:[/**] @property daily_model_snapshot_retention_after_days Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies a period of time (in days) after which only the first snapshot per day is retained. This period is relative to the timestamp of the most recent snapshot for this job. */ + daily_model_snapshot_retention_after_days?: <<long>> + pass:[/**] @property data_description The data description defines the format of the input data when you send data to the job by using the post data API. Note that when configure a datafeed, these properties are automatically set. */ + data_description: <<MlDataDescription>> + pass:[/**] @property datafeed_config The datafeed, which retrieves data from Elasticsearch for analysis by the job. You can associate only one datafeed with each anomaly detection job. */ + datafeed_config?: <<MlDatafeedConfig>> + pass:[/**] @property description A description of the job. */ + description?: string + pass:[/**] @property groups A list of job groups. A job can belong to no groups or many. */ + groups?: string[] + pass:[/**] @property job_id Identifier for the anomaly detection job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. */ + job_id?: <<Id>> + pass:[/**] @property job_type Reserved for future use, currently set to `anomaly_detector`. */ + job_type?: string + pass:[/**] @property model_plot_config This advanced configuration option stores model information along with the results. It provides a more detailed view into anomaly detection. Model plot provides a simplified and indicative view of the model and its bounds. */ + model_plot_config?: <<MlModelPlotConfig>> + pass:[/**] @property model_snapshot_retention_days Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies the maximum period of time (in days) that snapshots are retained. This period is relative to the timestamp of the most recent snapshot for this job. The default value is `10`, which means snapshots ten days older than the newest snapshot are deleted. */ + model_snapshot_retention_days?: <<long>> + pass:[/**] @property renormalization_window_days Advanced configuration option. The period over which adjustments to the score are applied, as new data is seen. The default value is the longer of 30 days or 100 `bucket_spans`. */ + renormalization_window_days?: <<long>> + pass:[/**] @property results_index_name A text string that affects the name of the machine learning results index. 
The default value is `shared`, which generates an index named `.ml-anomalies-shared`. */ + results_index_name?: <<IndexName>> + pass:[/**] @property results_retention_days Advanced configuration option. The period of time (in days) that results are retained. Age is calculated relative to the timestamp of the latest bucket result. If this property has a non-null value, once per day at 00:30 (server time), results that are the specified number of days older than the latest bucket result are deleted from Elasticsearch. The default value is null, which means all results are retained. Annotations generated by the system also count as results for retention purposes; they are deleted after the same number of days as results. Annotations added by users are retained forever. */ + results_retention_days?: <<long>> +} +---- + + +[discrete] +[[MlJobForecastStatistics]] +=== MlJobForecastStatistics + +[source,ts,subs=+macros] +---- +interface MlJobForecastStatistics { + memory_bytes?: <<MlJobStatistics>> + processing_time_ms?: <<MlJobStatistics>> + records?: <<MlJobStatistics>> + status?: Record<string, <<long>>> + total: <<long>> + forecasted_jobs: <<integer>> +} +---- + + +[discrete] +[[MlJobState]] +=== MlJobState + +[source,ts,subs=+macros] +---- +type MlJobState = 'closing' | 'closed' | 'opened' | 'failed' | 'opening' +---- + + +[discrete] +[[MlJobStatistics]] +=== MlJobStatistics + +[source,ts,subs=+macros] +---- +interface MlJobStatistics { + avg: <<double>> + max: <<double>> + min: <<double>> + total: <<double>> +} +---- + + +[discrete] +[[MlJobStats]] +=== MlJobStats + +[source,ts,subs=+macros] +---- +interface MlJobStats { + pass:[/**] @property assignment_explanation For open anomaly detection jobs only, contains messages relating to the selection of a node to run the job. */ + assignment_explanation?: string + pass:[/**] @property data_counts An object that describes the quantity of input to the job and any related error counts. The `data_count` values are cumulative for the lifetime of a job. If a model snapshot is reverted or old results are deleted, the job counts are not reset. */ + data_counts: <<MlDataCounts>> + pass:[/**] @property forecasts_stats An object that provides statistical information about forecasts belonging to this job. Some statistics are omitted if no forecasts have been made. */ + forecasts_stats: <<MlJobForecastStatistics>> + pass:[/**] @property job_id Identifier for the anomaly detection job. */ + job_id: string + pass:[/**] @property model_size_stats An object that provides information about the size and contents of the model. */ + model_size_stats: <<MlModelSizeStats>> + pass:[/**] @property node Contains properties for the node that runs the job. This information is available only for open jobs. */ + node?: <<MlDiscoveryNode>> + pass:[/**] @property open_time For open jobs only, the elapsed time for which the job has been open. */ + open_time?: <<DateTime>> + pass:[/**] @property state The status of the anomaly detection job, which can be one of the following values: `closed`, `closing`, `failed`, `opened`, `opening`. */ + state: <<MlJobState>> + pass:[/**] @property timing_stats An object that provides statistical information about timing aspect of this job. */ + timing_stats: <<MlJobTimingStats>> + pass:[/**] @property deleting Indicates that the process of deleting the job is in progress but not yet completed. It is only reported when `true`. 
*/ + deleting?: boolean +} +---- + + +[discrete] +[[MlJobTimingStats]] +=== MlJobTimingStats + +[source,ts,subs=+macros] +---- +interface MlJobTimingStats { + average_bucket_processing_time_ms?: <<DurationValue>><<<UnitFloatMillis>>> + bucket_count: <<long>> + exponential_average_bucket_processing_time_ms?: <<DurationValue>><<<UnitFloatMillis>>> + exponential_average_bucket_processing_time_per_hour_ms: <<DurationValue>><<<UnitFloatMillis>>> + job_id: <<Id>> + total_bucket_processing_time_ms: <<DurationValue>><<<UnitFloatMillis>>> + maximum_bucket_processing_time_ms?: <<DurationValue>><<<UnitFloatMillis>>> + minimum_bucket_processing_time_ms?: <<DurationValue>><<<UnitFloatMillis>>> +} +---- + + +[discrete] +[[MlMemoryStatus]] +=== MlMemoryStatus + +[source,ts,subs=+macros] +---- +type MlMemoryStatus = 'ok' | 'soft_limit' | 'hard_limit' +---- + + +[discrete] +[[MlModelPlotConfig]] +=== MlModelPlotConfig + +[source,ts,subs=+macros] +---- +interface MlModelPlotConfig { + pass:[/**] @property annotations_enabled If true, enables calculation and storage of the model change annotations for each entity that is being analyzed. */ + annotations_enabled?: boolean + pass:[/**] @property enabled If true, enables calculation and storage of the model bounds for each entity that is being analyzed. */ + enabled?: boolean + pass:[/**] @property terms Limits data collection to this comma separated list of partition or by field values. If terms are not specified or it is an empty string, no filtering is applied. Wildcards are not supported. Only the specified terms can be viewed when using the Single Metric Viewer. */ + terms?: <<Field>> +} +---- + + +[discrete] +[[MlModelSizeStats]] +=== MlModelSizeStats + +[source,ts,subs=+macros] +---- +interface MlModelSizeStats { + bucket_allocation_failures_count: <<long>> + job_id: <<Id>> + log_time: <<DateTime>> + memory_status: <<MlMemoryStatus>> + model_bytes: <<ByteSize>> + model_bytes_exceeded?: <<ByteSize>> + model_bytes_memory_limit?: <<ByteSize>> + peak_model_bytes?: <<ByteSize>> + assignment_memory_basis?: string + result_type: string + total_by_field_count: <<long>> + total_over_field_count: <<long>> + total_partition_field_count: <<long>> + categorization_status: <<MlCategorizationStatus>> + categorized_doc_count: <<integer>> + dead_category_count: <<integer>> + failed_category_count: <<integer>> + frequent_category_count: <<integer>> + rare_category_count: <<integer>> + total_category_count: <<integer>> + timestamp?: <<long>> +} +---- + + +[discrete] +[[MlModelSnapshot]] +=== MlModelSnapshot + +[source,ts,subs=+macros] +---- +interface MlModelSnapshot { + pass:[/**] @property description An optional description of the job. */ + description?: string + pass:[/**] @property job_id A numerical character string that uniquely identifies the job that the snapshot was created for. */ + job_id: <<Id>> + pass:[/**] @property latest_record_time_stamp The timestamp of the latest processed record. */ + latest_record_time_stamp?: <<integer>> + pass:[/**] @property latest_result_time_stamp The timestamp of the latest bucket result. */ + latest_result_time_stamp?: <<integer>> + pass:[/**] @property min_version The minimum version required to be able to restore the model snapshot. */ + min_version: <<VersionString>> + pass:[/**] @property model_size_stats Summary information describing the model. 
*/ + model_size_stats?: <<MlModelSizeStats>> + pass:[/**] @property retain If true, this snapshot will not be deleted during automatic cleanup of snapshots older than model_snapshot_retention_days. However, this snapshot will be deleted when the job is deleted. The default value is false. */ + retain: boolean + pass:[/**] @property snapshot_doc_count For internal use only. */ + snapshot_doc_count: <<long>> + pass:[/**] @property snapshot_id A numerical character string that uniquely identifies the model snapshot. */ + snapshot_id: <<Id>> + pass:[/**] @property timestamp The creation timestamp for the snapshot. */ + timestamp: <<long>> +} +---- + + +[discrete] +[[MlModelSnapshotUpgrade]] +=== MlModelSnapshotUpgrade + +[source,ts,subs=+macros] +---- +interface MlModelSnapshotUpgrade { + job_id: <<Id>> + snapshot_id: <<Id>> + state: <<MlSnapshotUpgradeState>> + node: <<MlDiscoveryNode>> + assignment_explanation: string +} +---- + + +[discrete] +[[MlNerInferenceOptions]] +=== MlNerInferenceOptions + +[source,ts,subs=+macros] +---- +interface MlNerInferenceOptions { + pass:[/**] @property tokenization The tokenization options */ + tokenization?: <<MlTokenizationConfigContainer>> + pass:[/**] @property results_field The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ + results_field?: string + pass:[/**] @property classification_labels The token classification labels. Must be IOB formatted tags */ + classification_labels?: string[] + vocabulary?: <<MlVocabulary>> +} +---- + + +[discrete] +[[MlNerInferenceUpdateOptions]] +=== MlNerInferenceUpdateOptions + +[source,ts,subs=+macros] +---- +interface MlNerInferenceUpdateOptions { + pass:[/**] @property tokenization The tokenization options to update when inferring */ + tokenization?: <<MlNlpTokenizationUpdateOptions>> + pass:[/**] @property results_field The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ + results_field?: string +} +---- + + +[discrete] +[[MlNlpBertTokenizationConfig]] +=== MlNlpBertTokenizationConfig + +[source,ts,subs=+macros] +---- +interface MlNlpBertTokenizationConfig { + pass:[/**] @property do_lower_case Should the tokenizer lower case the text */ + do_lower_case?: boolean + pass:[/**] @property with_special_tokens Is tokenization completed with special tokens */ + with_special_tokens?: boolean + pass:[/**] @property max_sequence_length Maximum input sequence length for the model */ + max_sequence_length?: <<integer>> + pass:[/**] @property truncate Should tokenization input be automatically truncated before sending to the model for inference */ + truncate?: <<MlTokenizationTruncate>> + pass:[/**] @property span Tokenization spanning options. 
Special value of -1 indicates no spanning takes place */ + span?: <<integer>> +} +---- + + +[discrete] +[[MlNlpRobertaTokenizationConfig]] +=== MlNlpRobertaTokenizationConfig + +[source,ts,subs=+macros] +---- +interface MlNlpRobertaTokenizationConfig { + pass:[/**] @property add_prefix_space Should the tokenizer prefix input with a space character */ + add_prefix_space?: boolean + pass:[/**] @property with_special_tokens Is tokenization completed with special tokens */ + with_special_tokens?: boolean + pass:[/**] @property max_sequence_length Maximum input sequence length for the model */ + max_sequence_length?: <<integer>> + pass:[/**] @property truncate Should tokenization input be automatically truncated before sending to the model for inference */ + truncate?: <<MlTokenizationTruncate>> + pass:[/**] @property span Tokenization spanning options. Special value of -1 indicates no spanning takes place */ + span?: <<integer>> +} +---- + + +[discrete] +[[MlNlpTokenizationUpdateOptions]] +=== MlNlpTokenizationUpdateOptions + +[source,ts,subs=+macros] +---- +interface MlNlpTokenizationUpdateOptions { + pass:[/**] @property truncate Truncate options to apply */ + truncate?: <<MlTokenizationTruncate>> + pass:[/**] @property span Span options to apply */ + span?: <<integer>> +} +---- + + +[discrete] +[[MlOutlierDetectionParameters]] +=== MlOutlierDetectionParameters + +[source,ts,subs=+macros] +---- +interface MlOutlierDetectionParameters { + pass:[/**] @property compute_feature_influence Specifies whether the feature influence calculation is enabled. */ + compute_feature_influence?: boolean + pass:[/**] @property feature_influence_threshold The minimum outlier score that a document needs to have in order to calculate its feature influence score. Value range: 0-1 */ + feature_influence_threshold?: <<double>> + pass:[/**] @property method The method that outlier detection uses. Available methods are `lof`, `ldof`, `distance_kth_nn`, `distance_knn`, and `ensemble`. The default value is ensemble, which means that outlier detection uses an ensemble of different methods and normalises and combines their individual outlier scores to obtain the overall outlier score. */ + method?: string + pass:[/**] @property n_neighbors Defines the value for how many nearest neighbors each method of outlier detection uses to calculate its outlier score. When the value is not set, different values are used for different ensemble members. This default behavior helps improve the diversity in the ensemble; only override it if you are confident that the value you choose is appropriate for the data set. */ + n_neighbors?: <<integer>> + pass:[/**] @property outlier_fraction The proportion of the data set that is assumed to be outlying prior to outlier detection. For example, 0.05 means it is assumed that 5% of values are real outliers and 95% are inliers. */ + outlier_fraction?: <<double>> + pass:[/**] @property standardization_enabled If `true`, the following operation is performed on the columns before computing outlier scores: (x_i - mean(x_i)) / sd(x_i). */ + standardization_enabled?: boolean +} +---- + + +[discrete] +[[MlOverallBucket]] +=== MlOverallBucket + +[source,ts,subs=+macros] +---- +interface MlOverallBucket { + pass:[/**] @property bucket_span The length of the bucket in seconds. Matches the job with the longest bucket_span value. */ + bucket_span: <<DurationValue>><<<UnitSeconds>>> + pass:[/**] @property is_interim If true, this is an interim result. 
In other words, the results are calculated based on partial input data. */ + is_interim: boolean + pass:[/**] @property jobs An array of objects that contain the max_anomaly_score per job_id. */ + jobs: <<MlOverallBucketJob>>[] + pass:[/**] @property overall_score The top_n average of the maximum bucket anomaly_score per job. */ + overall_score: <<double>> + pass:[/**] @property result_type Internal. This is always set to overall_bucket. */ + result_type: string + pass:[/**] @property timestamp The start time of the bucket for which these results were calculated. */ + timestamp: <<EpochTime>><<<UnitMillis>>> + pass:[/**] @property timestamp_string The start time of the bucket for which these results were calculated. */ + timestamp_string: <<DateTime>> +} +---- + + +[discrete] +[[MlOverallBucketJob]] +=== MlOverallBucketJob + +[source,ts,subs=+macros] +---- +interface MlOverallBucketJob { + job_id: <<Id>> + max_anomaly_score: <<double>> +} +---- + + +[discrete] +[[MlPage]] +=== MlPage + +[source,ts,subs=+macros] +---- +interface MlPage { + pass:[/**] @property from Skips the specified number of items. */ + from?: <<integer>> + pass:[/**] @property size Specifies the maximum number of items to obtain. */ + size?: <<integer>> +} +---- + + +[discrete] +[[MlPassThroughInferenceOptions]] +=== MlPassThroughInferenceOptions + +[source,ts,subs=+macros] +---- +interface MlPassThroughInferenceOptions { + pass:[/**] @property tokenization The tokenization options */ + tokenization?: <<MlTokenizationConfigContainer>> + pass:[/**] @property results_field The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ + results_field?: string + vocabulary?: <<MlVocabulary>> +} +---- + + +[discrete] +[[MlPassThroughInferenceUpdateOptions]] +=== MlPassThroughInferenceUpdateOptions + +[source,ts,subs=+macros] +---- +interface MlPassThroughInferenceUpdateOptions { + pass:[/**] @property tokenization The tokenization options to update when inferring */ + tokenization?: <<MlNlpTokenizationUpdateOptions>> + pass:[/**] @property results_field The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ + results_field?: string +} +---- + + +[discrete] +[[MlPerPartitionCategorization]] +=== MlPerPartitionCategorization + +[source,ts,subs=+macros] +---- +interface MlPerPartitionCategorization { + pass:[/**] @property enabled To enable this setting, you must also set the `partition_field_name` property to the same value in every detector that uses the keyword `mlcategory`. Otherwise, job creation fails. */ + enabled?: boolean + pass:[/**] @property stop_on_warn This setting can be set to true only if per-partition categorization is enabled. If true, both categorization and subsequent anomaly detection stops for partitions where the categorization status changes to warn. This setting makes it viable to have a job where it is expected that categorization works well for some partitions but not others; you do not pay the cost of bad categorization forever in the partitions where it works badly. 
*/ + stop_on_warn?: boolean +} +---- + + +[discrete] +[[MlPredictedValue]] +=== MlPredictedValue + +[source,ts,subs=+macros] +---- +type MlPredictedValue = <<ScalarValue>> | <<ScalarValue>>[] +---- + + +[discrete] +[[MlQuestionAnsweringInferenceOptions]] +=== MlQuestionAnsweringInferenceOptions + +[source,ts,subs=+macros] +---- +interface MlQuestionAnsweringInferenceOptions { + pass:[/**] @property num_top_classes Specifies the number of top class predictions to return. Defaults to 0. */ + num_top_classes?: <<integer>> + pass:[/**] @property tokenization The tokenization options to update when inferring */ + tokenization?: <<MlTokenizationConfigContainer>> + pass:[/**] @property results_field The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ + results_field?: string + pass:[/**] @property max_answer_length The maximum answer length to consider */ + max_answer_length?: <<integer>> +} +---- + + +[discrete] +[[MlQuestionAnsweringInferenceUpdateOptions]] +=== MlQuestionAnsweringInferenceUpdateOptions + +[source,ts,subs=+macros] +---- +interface MlQuestionAnsweringInferenceUpdateOptions { + pass:[/**] @property question The question to answer given the inference context */ + question: string + pass:[/**] @property num_top_classes Specifies the number of top class predictions to return. Defaults to 0. */ + num_top_classes?: <<integer>> + pass:[/**] @property tokenization The tokenization options to update when inferring */ + tokenization?: <<MlNlpTokenizationUpdateOptions>> + pass:[/**] @property results_field The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ + results_field?: string + pass:[/**] @property max_answer_length The maximum answer length to consider for extraction */ + max_answer_length?: <<integer>> +} +---- + + +[discrete] +[[MlRegressionInferenceOptions]] +=== MlRegressionInferenceOptions + +[source,ts,subs=+macros] +---- +interface MlRegressionInferenceOptions { + pass:[/**] @property results_field The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ + results_field?: <<Field>> + pass:[/**] @property num_top_feature_importance_values Specifies the maximum number of feature importance values per document. */ + num_top_feature_importance_values?: <<integer>> +} +---- + + +[discrete] +[[MlRoutingState]] +=== MlRoutingState + +[source,ts,subs=+macros] +---- +type MlRoutingState = 'failed' | 'started' | 'starting' | 'stopped' | 'stopping' +---- + + +[discrete] +[[MlRuleAction]] +=== MlRuleAction + +[source,ts,subs=+macros] +---- +type MlRuleAction = 'skip_result' | 'skip_model_update' +---- + + +[discrete] +[[MlRuleCondition]] +=== MlRuleCondition + +[source,ts,subs=+macros] +---- +interface MlRuleCondition { + pass:[/**] @property applies_to Specifies the result property to which the condition applies. If your detector uses `lat_long`, `metric`, `rare`, or `freq_rare` functions, you can only specify conditions that apply to time. */ + applies_to: <<MlAppliesTo>> + pass:[/**] @property operator Specifies the condition operator. The available options are greater than, greater than or equals, less than, and less than or equals. */ + operator: <<MlConditionOperator>> + pass:[/**] @property value The value that is compared against the `applies_to` field using the operator. 
*/ + value: <<double>> +} +---- + + +[discrete] +[[MlRunningStateSearchInterval]] +=== MlRunningStateSearchInterval + +[source,ts,subs=+macros] +---- +interface MlRunningStateSearchInterval { + pass:[/**] @property end The end time. */ + end?: <<Duration>> + pass:[/**] @property end_ms The end time as an epoch in milliseconds. */ + end_ms: <<DurationValue>><<<UnitMillis>>> + pass:[/**] @property start The start time. */ + start?: <<Duration>> + pass:[/**] @property start_ms The start time as an epoch in milliseconds. */ + start_ms: <<DurationValue>><<<UnitMillis>>> +} +---- + + +[discrete] +[[MlSnapshotUpgradeState]] +=== MlSnapshotUpgradeState + +[source,ts,subs=+macros] +---- +type MlSnapshotUpgradeState = 'loading_old_state' | 'saving_new_state' | 'stopped' | 'failed' +---- + + +[discrete] +[[MlTextClassificationInferenceOptions]] +=== MlTextClassificationInferenceOptions + +[source,ts,subs=+macros] +---- +interface MlTextClassificationInferenceOptions { + pass:[/**] @property num_top_classes Specifies the number of top class predictions to return. Defaults to 0. */ + num_top_classes?: <<integer>> + pass:[/**] @property tokenization The tokenization options */ + tokenization?: <<MlTokenizationConfigContainer>> + pass:[/**] @property results_field The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ + results_field?: string + pass:[/**] @property classification_labels Classification labels to apply other than the stored labels. Must have the same dimensions as the default configured labels */ + classification_labels?: string[] +} +---- + + +[discrete] +[[MlTextClassificationInferenceUpdateOptions]] +=== MlTextClassificationInferenceUpdateOptions + +[source,ts,subs=+macros] +---- +interface MlTextClassificationInferenceUpdateOptions { + pass:[/**] @property num_top_classes Specifies the number of top class predictions to return. Defaults to 0. */ + num_top_classes?: <<integer>> + pass:[/**] @property tokenization The tokenization options to update when inferring */ + tokenization?: <<MlNlpTokenizationUpdateOptions>> + pass:[/**] @property results_field The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ + results_field?: string + pass:[/**] @property classification_labels Classification labels to apply other than the stored labels. Must have the same dimensions as the default configured labels */ + classification_labels?: string[] +} +---- + + +[discrete] +[[MlTextEmbeddingInferenceOptions]] +=== MlTextEmbeddingInferenceOptions + +[source,ts,subs=+macros] +---- +interface MlTextEmbeddingInferenceOptions { + pass:[/**] @property embedding_size The number of dimensions in the embedding output */ + embedding_size?: <<integer>> + pass:[/**] @property tokenization The tokenization options */ + tokenization?: <<MlTokenizationConfigContainer>> + pass:[/**] @property results_field The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ + results_field?: string +} +---- + + +[discrete] +[[MlTextEmbeddingInferenceUpdateOptions]] +=== MlTextEmbeddingInferenceUpdateOptions + +[source,ts,subs=+macros] +---- +interface MlTextEmbeddingInferenceUpdateOptions { + tokenization?: <<MlNlpTokenizationUpdateOptions>> + pass:[/**] @property results_field The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value.
*/ + results_field?: string +} +---- + + +[discrete] +[[MlTextExpansionInferenceOptions]] +=== MlTextExpansionInferenceOptions + +[source,ts,subs=+macros] +---- +interface MlTextExpansionInferenceOptions { + pass:[/**] @property tokenization The tokenization options */ + tokenization?: <<MlTokenizationConfigContainer>> + pass:[/**] @property results_field The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ + results_field?: string +} +---- + + +[discrete] +[[MlTextExpansionInferenceUpdateOptions]] +=== MlTextExpansionInferenceUpdateOptions + +[source,ts,subs=+macros] +---- +interface MlTextExpansionInferenceUpdateOptions { + tokenization?: <<MlNlpTokenizationUpdateOptions>> + pass:[/**] @property results_field The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ + results_field?: string +} +---- + + +[discrete] +[[MlTimingStats]] +=== MlTimingStats + +[source,ts,subs=+macros] +---- +interface MlTimingStats { + pass:[/**] @property elapsed_time Runtime of the analysis in milliseconds. */ + elapsed_time: <<DurationValue>><<<UnitMillis>>> + pass:[/**] @property iteration_time Runtime of the latest iteration of the analysis in milliseconds. */ + iteration_time?: <<DurationValue>><<<UnitMillis>>> +} +---- + + +[discrete] +[[MlTokenizationConfigContainer]] +=== MlTokenizationConfigContainer + +[source,ts,subs=+macros] +---- +interface MlTokenizationConfigContainer { + pass:[/**] @property bert Indicates BERT tokenization and its options */ + bert?: <<MlNlpBertTokenizationConfig>> + pass:[/**] @property mpnet Indicates MPNET tokenization and its options */ + mpnet?: <<MlNlpBertTokenizationConfig>> + pass:[/**] @property roberta Indicates RoBERTa tokenization and its options */ + roberta?: <<MlNlpRobertaTokenizationConfig>> +} +---- + + +[discrete] +[[MlTokenizationTruncate]] +=== MlTokenizationTruncate + +[source,ts,subs=+macros] +---- +type MlTokenizationTruncate = 'first' | 'second' | 'none' +---- + + +[discrete] +[[MlTopClassEntry]] +=== MlTopClassEntry + +[source,ts,subs=+macros] +---- +interface MlTopClassEntry { + class_name: string + class_probability: <<double>> + class_score: <<double>> +} +---- + + +[discrete] +[[MlTotalFeatureImportance]] +=== MlTotalFeatureImportance + +[source,ts,subs=+macros] +---- +interface MlTotalFeatureImportance { + pass:[/**] @property feature_name The feature for which this importance was calculated. */ + feature_name: <<Name>> + pass:[/**] @property importance A collection of feature importance statistics related to the training data set for this particular feature. */ + importance: <<MlTotalFeatureImportanceStatistics>>[] + pass:[/**] @property classes If the trained model is a classification model, feature importance statistics are gathered per target class value. */ + classes: <<MlTotalFeatureImportanceClass>>[] +} +---- + + +[discrete] +[[MlTotalFeatureImportanceClass]] +=== MlTotalFeatureImportanceClass + +[source,ts,subs=+macros] +---- +interface MlTotalFeatureImportanceClass { + pass:[/**] @property class_name The target class value. Could be a string, boolean, or number. */ + class_name: <<Name>> + pass:[/**] @property importance A collection of feature importance statistics related to the training data set for this particular feature. 
*/ + importance: <<MlTotalFeatureImportanceStatistics>>[] +} +---- + + +[discrete] +[[MlTotalFeatureImportanceStatistics]] +=== MlTotalFeatureImportanceStatistics + +[source,ts,subs=+macros] +---- +interface MlTotalFeatureImportanceStatistics { + pass:[/**] @property mean_magnitude The average magnitude of this feature across all the training data. This value is the average of the absolute values of the importance for this feature. */ + mean_magnitude: <<double>> + pass:[/**] @property max The maximum importance value across all the training data for this feature. */ + max: <<integer>> + pass:[/**] @property min The minimum importance value across all the training data for this feature. */ + min: <<integer>> +} +---- + + +[discrete] +[[MlTrainedModelAssignment]] +=== MlTrainedModelAssignment + +[source,ts,subs=+macros] +---- +interface MlTrainedModelAssignment { + pass:[/**] @property assignment_state The overall assignment state. */ + assignment_state: <<MlDeploymentAssignmentState>> + max_assigned_allocations?: <<integer>> + pass:[/**] @property routing_table The allocation state for each node. */ + routing_table: Record<string, <<MlTrainedModelAssignmentRoutingTable>>> + pass:[/**] @property start_time The timestamp when the deployment started. */ + start_time: <<DateTime>> + task_parameters: <<MlTrainedModelAssignmentTaskParameters>> +} +---- + + +[discrete] +[[MlTrainedModelAssignmentRoutingTable]] +=== MlTrainedModelAssignmentRoutingTable + +[source,ts,subs=+macros] +---- +interface MlTrainedModelAssignmentRoutingTable { + pass:[/**] @property reason The reason for the current state. It is usually populated only when the `routing_state` is `failed`. */ + reason: string + pass:[/**] @property routing_state The current routing state. */ + routing_state: <<MlRoutingState>> + pass:[/**] @property current_allocations Current number of allocations. */ + current_allocations: <<integer>> + pass:[/**] @property target_allocations Target number of allocations. */ + target_allocations: <<integer>> +} +---- + + +[discrete] +[[MlTrainedModelAssignmentTaskParameters]] +=== MlTrainedModelAssignmentTaskParameters + +[source,ts,subs=+macros] +---- +interface MlTrainedModelAssignmentTaskParameters { + pass:[/**] @property model_bytes The size of the trained model in bytes. */ + model_bytes: <<integer>> + pass:[/**] @property model_id The unique identifier for the trained model. */ + model_id: <<Id>> + pass:[/**] @property deployment_id The unique identifier for the trained model deployment. */ + deployment_id: <<Id>> + pass:[/**] @property cache_size The size of the trained model cache. */ + cache_size: <<ByteSize>> + pass:[/**] @property number_of_allocations The total number of allocations this model is assigned across ML nodes. */ + number_of_allocations: <<integer>> + priority: <<MlTrainingPriority>> + pass:[/**] @property queue_capacity The number of inference requests allowed in the queue at a time. */ + queue_capacity: <<integer>> + pass:[/**] @property threads_per_allocation Number of threads per allocation. */ + threads_per_allocation: <<integer>> +} +---- + + +[discrete] +[[MlTrainedModelConfig]] +=== MlTrainedModelConfig + +[source,ts,subs=+macros] +---- +interface MlTrainedModelConfig { + pass:[/**] @property model_id Identifier for the trained model. */ + model_id: <<Id>> + pass:[/**] @property model_type The model type */ + model_type?: <<MlTrainedModelType>> + pass:[/**] @property tags A comma delimited string of tags. A trained model can have many tags, or none.
*/ + tags: string[] + pass:[/**] @property version The Elasticsearch version number in which the trained model was created. */ + version?: <<VersionString>> + compressed_definition?: string + pass:[/**] @property created_by Information on the creator of the trained model. */ + created_by?: string + pass:[/**] @property create_time The time when the trained model was created. */ + create_time?: <<DateTime>> + pass:[/**] @property default_field_map Any field map described in the inference configuration takes precedence. */ + default_field_map?: Record<string, string> + pass:[/**] @property description The free-text description of the trained model. */ + description?: string + pass:[/**] @property estimated_heap_memory_usage_bytes The estimated heap usage in bytes to keep the trained model in memory. */ + estimated_heap_memory_usage_bytes?: <<integer>> + pass:[/**] @property estimated_operations The estimated number of operations to use the trained model. */ + estimated_operations?: <<integer>> + pass:[/**] @property fully_defined True if the full model definition is present. */ + fully_defined?: boolean + pass:[/**] @property inference_config The default configuration for inference. This can be either a regression, classification, or one of the many NLP focused configurations. It must match the underlying definition.trained_model's target_type. For pre-packaged models such as ELSER the config is not required. */ + inference_config?: <<MlInferenceConfigCreateContainer>> + pass:[/**] @property input The input field names for the model definition. */ + input: <<MlTrainedModelConfigInput>> + pass:[/**] @property license_level The license level of the trained model. */ + license_level?: string + pass:[/**] @property metadata An object containing metadata about the trained model. For example, models created by data frame analytics contain analysis_config and input objects. */ + metadata?: <<MlTrainedModelConfigMetadata>> + model_size_bytes?: <<ByteSize>> + location?: <<MlTrainedModelLocation>> + prefix_strings?: <<MlTrainedModelPrefixStrings>> +} +---- + + +[discrete] +[[MlTrainedModelConfigInput]] +=== MlTrainedModelConfigInput + +[source,ts,subs=+macros] +---- +interface MlTrainedModelConfigInput { + pass:[/**] @property field_names An array of input field names for the model. */ + field_names: <<Field>>[] +} +---- + + +[discrete] +[[MlTrainedModelConfigMetadata]] +=== MlTrainedModelConfigMetadata + +[source,ts,subs=+macros] +---- +interface MlTrainedModelConfigMetadata { + model_aliases?: string[] + pass:[/**] @property feature_importance_baseline An object that contains the baseline for feature importance values. For regression analysis, it is a single value. For classification analysis, there is a value for each class. */ + feature_importance_baseline?: Record<string, string> + pass:[/**] @property hyperparameters List of the available hyperparameters optimized during the fine_parameter_tuning phase as well as specified by the user. */ + hyperparameters?: <<MlHyperparameter>>[] + pass:[/**] @property total_feature_importance An array of the total feature importance for each feature used from the training data set. This array of objects is returned if data frame analytics trained the model and the request includes total_feature_importance in the include request parameter. 
*/ + total_feature_importance?: <<MlTotalFeatureImportance>>[] +} +---- + + +[discrete] +[[MlTrainedModelDeploymentAllocationStatus]] +=== MlTrainedModelDeploymentAllocationStatus + +[source,ts,subs=+macros] +---- +interface MlTrainedModelDeploymentAllocationStatus { + pass:[/**] @property allocation_count The current number of nodes where the model is allocated. */ + allocation_count: <<integer>> + pass:[/**] @property state The detailed allocation state related to the nodes. */ + state: <<MlDeploymentAllocationState>> + pass:[/**] @property target_allocation_count The desired number of nodes for model allocation. */ + target_allocation_count: <<integer>> +} +---- + + +[discrete] +[[MlTrainedModelDeploymentNodesStats]] +=== MlTrainedModelDeploymentNodesStats + +[source,ts,subs=+macros] +---- +interface MlTrainedModelDeploymentNodesStats { + pass:[/**] @property average_inference_time_ms The average time for each inference call to complete on this node. */ + average_inference_time_ms: <<DurationValue>><<<UnitFloatMillis>>> + pass:[/**] @property error_count The number of errors when evaluating the trained model. */ + error_count: <<integer>> + pass:[/**] @property inference_count The total number of inference calls made against this node for this model. */ + inference_count: <<integer>> + pass:[/**] @property last_access The epoch time stamp of the last inference call for the model on this node. */ + last_access: <<long>> + pass:[/**] @property node Information pertaining to the node. */ + node: <<MlDiscoveryNode>> + pass:[/**] @property number_of_allocations The number of allocations assigned to this node. */ + number_of_allocations: <<integer>> + pass:[/**] @property number_of_pending_requests The number of inference requests queued to be processed. */ + number_of_pending_requests: <<integer>> + pass:[/**] @property rejection_execution_count The number of inference requests that were not processed because the queue was full. */ + rejection_execution_count: <<integer>> + pass:[/**] @property routing_state The current routing state and reason for the current routing state for this allocation. */ + routing_state: <<MlTrainedModelAssignmentRoutingTable>> + pass:[/**] @property start_time The epoch timestamp when the allocation started. */ + start_time: <<EpochTime>><<<UnitMillis>>> + pass:[/**] @property threads_per_allocation The number of threads used by each allocation during inference. */ + threads_per_allocation: <<integer>> + pass:[/**] @property timeout_count The number of inference requests that timed out before being processed. */ + timeout_count: <<integer>> +} +---- + + +[discrete] +[[MlTrainedModelDeploymentStats]] +=== MlTrainedModelDeploymentStats + +[source,ts,subs=+macros] +---- +interface MlTrainedModelDeploymentStats { + pass:[/**] @property allocation_status The detailed allocation status for the deployment. */ + allocation_status: <<MlTrainedModelDeploymentAllocationStatus>> + cache_size?: <<ByteSize>> + pass:[/**] @property deployment_id The unique identifier for the trained model deployment. */ + deployment_id: <<Id>> + pass:[/**] @property error_count The sum of `error_count` for all nodes in the deployment. */ + error_count: <<integer>> + pass:[/**] @property inference_count The sum of `inference_count` for all nodes in the deployment. */ + inference_count: <<integer>> + pass:[/**] @property model_id The unique identifier for the trained model. */ + model_id: <<Id>> + pass:[/**] @property nodes The deployment stats for each node that currently has the model allocated. 
In serverless, stats are reported for a single unnamed virtual node. */ + nodes: <<MlTrainedModelDeploymentNodesStats>>[] + pass:[/**] @property number_of_allocations The number of allocations requested. */ + number_of_allocations: <<integer>> + pass:[/**] @property queue_capacity The number of inference requests that can be queued before new requests are rejected. */ + queue_capacity: <<integer>> + pass:[/**] @property rejected_execution_count The sum of `rejected_execution_count` for all nodes in the deployment. Individual nodes reject an inference request if the inference queue is full. The queue size is controlled by the `queue_capacity` setting in the start trained model deployment API. */ + rejected_execution_count: <<integer>> + pass:[/**] @property reason The reason for the current deployment state. Usually only populated when the model is not deployed to a node. */ + reason: string + pass:[/**] @property start_time The epoch timestamp when the deployment started. */ + start_time: <<EpochTime>><<<UnitMillis>>> + pass:[/**] @property state The overall state of the deployment. */ + state: <<MlDeploymentAssignmentState>> + pass:[/**] @property threads_per_allocation The number of threads used by each allocation during inference. */ + threads_per_allocation: <<integer>> + pass:[/**] @property timeout_count The sum of `timeout_count` for all nodes in the deployment. */ + timeout_count: <<integer>> +} +---- + + +[discrete] +[[MlTrainedModelEntities]] +=== MlTrainedModelEntities + +[source,ts,subs=+macros] +---- +interface MlTrainedModelEntities { + class_name: string + class_probability: <<double>> + entity: string + start_pos: <<integer>> + end_pos: <<integer>> +} +---- + + +[discrete] +[[MlTrainedModelInferenceClassImportance]] +=== MlTrainedModelInferenceClassImportance + +[source,ts,subs=+macros] +---- +interface MlTrainedModelInferenceClassImportance { + class_name: string + importance: <<double>> +} +---- + + +[discrete] +[[MlTrainedModelInferenceFeatureImportance]] +=== MlTrainedModelInferenceFeatureImportance + +[source,ts,subs=+macros] +---- +interface MlTrainedModelInferenceFeatureImportance { + feature_name: string + importance?: <<double>> + classes?: <<MlTrainedModelInferenceClassImportance>>[] +} +---- + + +[discrete] +[[MlTrainedModelInferenceStats]] +=== MlTrainedModelInferenceStats + +[source,ts,subs=+macros] +---- +interface MlTrainedModelInferenceStats { + pass:[/**] @property cache_miss_count The number of times the model was loaded for inference and was not retrieved from the cache. If this number is close to the `inference_count`, the cache is not being appropriately used. This can be solved by increasing the cache size or its time-to-live (TTL). Refer to general machine learning settings for the appropriate settings. */ + cache_miss_count: <<integer>> + pass:[/**] @property failure_count The number of failures when using the model for inference. */ + failure_count: <<integer>> + pass:[/**] @property inference_count The total number of times the model has been called for inference. This is across all inference contexts, including all pipelines. */ + inference_count: <<integer>> + pass:[/**] @property missing_all_fields_count The number of inference calls where all the training features for the model were missing. */ + missing_all_fields_count: <<integer>> + pass:[/**] @property timestamp The time when the statistics were last updated.
*/ + timestamp: <<EpochTime>><<<UnitMillis>>> +} +---- + + +[discrete] +[[MlTrainedModelLocation]] +=== MlTrainedModelLocation + +[source,ts,subs=+macros] +---- +interface MlTrainedModelLocation { + index: <<MlTrainedModelLocationIndex>> +} +---- + + +[discrete] +[[MlTrainedModelLocationIndex]] +=== MlTrainedModelLocationIndex + +[source,ts,subs=+macros] +---- +interface MlTrainedModelLocationIndex { + name: <<IndexName>> +} +---- + + +[discrete] +[[MlTrainedModelPrefixStrings]] +=== MlTrainedModelPrefixStrings + +[source,ts,subs=+macros] +---- +interface MlTrainedModelPrefixStrings { + pass:[/**] @property ingest String prepended to input at ingest */ + ingest?: string + pass:[/**] @property search String prepended to input at search */ + search?: string +} +---- + + +[discrete] +[[MlTrainedModelSizeStats]] +=== MlTrainedModelSizeStats + +[source,ts,subs=+macros] +---- +interface MlTrainedModelSizeStats { + pass:[/**] @property model_size_bytes The size of the model in bytes. */ + model_size_bytes: <<ByteSize>> + pass:[/**] @property required_native_memory_bytes The amount of memory required to load the model in bytes. */ + required_native_memory_bytes: <<ByteSize>> +} +---- + + +[discrete] +[[MlTrainedModelStats]] +=== MlTrainedModelStats + +[source,ts,subs=+macros] +---- +interface MlTrainedModelStats { + pass:[/**] @property deployment_stats A collection of deployment stats, which is present when the models are deployed. */ + deployment_stats?: <<MlTrainedModelDeploymentStats>> + pass:[/**] @property inference_stats A collection of inference stats fields. */ + inference_stats?: <<MlTrainedModelInferenceStats>> + pass:[/**] @property ingest A collection of ingest stats for the model across all nodes. The values are summations of the individual node statistics. The format matches the ingest section in the nodes stats API. */ + ingest?: Record<string, any> + pass:[/**] @property model_id The unique identifier of the trained model. */ + model_id: <<Id>> + pass:[/**] @property model_size_stats A collection of model size stats. */ + model_size_stats: <<MlTrainedModelSizeStats>> + pass:[/**] @property pipeline_count The number of ingest pipelines that currently refer to the model. */ + pipeline_count: <<integer>> +} +---- + + +[discrete] +[[MlTrainedModelType]] +=== MlTrainedModelType + +[source,ts,subs=+macros] +---- +type MlTrainedModelType = 'tree_ensemble' | 'lang_ident' | 'pytorch' +---- + + +[discrete] +[[MlTrainingPriority]] +=== MlTrainingPriority + +[source,ts,subs=+macros] +---- +type MlTrainingPriority = 'normal' | 'low' +---- + + +[discrete] +[[MlTransformAuthorization]] +=== MlTransformAuthorization + +[source,ts,subs=+macros] +---- +interface MlTransformAuthorization { + pass:[/**] @property api_key If an API key was used for the most recent update to the transform, its name and identifier are listed in the response. */ + api_key?: <<MlApiKeyAuthorization>> + pass:[/**] @property roles If a user ID was used for the most recent update to the transform, its roles at the time of the update are listed in the response. */ + roles?: string[] + pass:[/**] @property service_account If a service account was used for the most recent update to the transform, the account name is listed in the response. */ + service_account?: string +} +---- + + +[discrete] +[[MlValidationLoss]] +=== MlValidationLoss + +[source,ts,subs=+macros] +---- +interface MlValidationLoss { + pass:[/**] @property fold_values Validation loss values for every added decision tree during the forest growing procedure. 
*/ + fold_values: string[] + pass:[/**] @property loss_type The type of the loss metric. For example, binomial_logistic. */ + loss_type: string +} +---- + + +[discrete] +[[MlVocabulary]] +=== MlVocabulary + +[source,ts,subs=+macros] +---- +interface MlVocabulary { + index: <<IndexName>> +} +---- + + +[discrete] +[[MlZeroShotClassificationInferenceOptions]] +=== MlZeroShotClassificationInferenceOptions + +[source,ts,subs=+macros] +---- +interface MlZeroShotClassificationInferenceOptions { + pass:[/**] @property tokenization The tokenization options to update when inferring */ + tokenization?: <<MlTokenizationConfigContainer>> + pass:[/**] @property hypothesis_template Hypothesis template used when tokenizing labels for prediction */ + hypothesis_template?: string + pass:[/**] @property classification_labels The zero shot classification labels indicating entailment, neutral, and contradiction Must contain exactly and only entailment, neutral, and contradiction */ + classification_labels: string[] + pass:[/**] @property results_field The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ + results_field?: string + pass:[/**] @property multi_label Indicates if more than one true label exists. */ + multi_label?: boolean + pass:[/**] @property labels The labels to predict. */ + labels?: string[] +} +---- + + +[discrete] +[[MlZeroShotClassificationInferenceUpdateOptions]] +=== MlZeroShotClassificationInferenceUpdateOptions + +[source,ts,subs=+macros] +---- +interface MlZeroShotClassificationInferenceUpdateOptions { + pass:[/**] @property tokenization The tokenization options to update when inferring */ + tokenization?: <<MlNlpTokenizationUpdateOptions>> + pass:[/**] @property results_field The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ + results_field?: string + pass:[/**] @property multi_label Update the configured multi label option. Indicates if more than one true label exists. Defaults to the configured value. */ + multi_label?: boolean + pass:[/**] @property labels The labels to predict. */ + labels: string[] +} +---- + + diff --git a/docs/reference/shared-types/nodes-types.asciidoc b/docs/reference/shared-types/nodes-types.asciidoc new file mode 100644 index 000000000..f75cedea4 --- /dev/null +++ b/docs/reference/shared-types/nodes-types.asciidoc @@ -0,0 +1,1260 @@ +[[reference-shared-types-nodes-types]] + +=== `Nodes` types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[NodesAdaptiveSelection]] +=== NodesAdaptiveSelection + +[source,ts,subs=+macros] +---- +interface NodesAdaptiveSelection { + pass:[/**] @property avg_queue_size The exponentially weighted moving average queue size of search requests on the keyed node. */ + avg_queue_size?: <<long>> + pass:[/**] @property avg_response_time The exponentially weighted moving average response time of search requests on the keyed node. */ + avg_response_time?: <<Duration>> + pass:[/**] @property avg_response_time_ns The exponentially weighted moving average response time, in nanoseconds, of search requests on the keyed node. */ + avg_response_time_ns?: <<long>> + pass:[/**] @property avg_service_time The exponentially weighted moving average service time of search requests on the keyed node. */ + avg_service_time?: <<Duration>> + pass:[/**] @property avg_service_time_ns The exponentially weighted moving average service time, in nanoseconds, of search requests on the keyed node. */ + avg_service_time_ns?: <<long>> + pass:[/**] @property outgoing_searches The number of outstanding search requests to the keyed node from the node these stats are for. */ + outgoing_searches?: <<long>> + pass:[/**] @property rank The rank of this node; used for shard selection when routing search requests. */ + rank?: string +} +---- + + +[discrete] +[[NodesBreaker]] +=== NodesBreaker + +[source,ts,subs=+macros] +---- +interface NodesBreaker { + pass:[/**] @property estimated_size Estimated memory used for the operation. */ + estimated_size?: string + pass:[/**] @property estimated_size_in_bytes Estimated memory used, in bytes, for the operation. */ + estimated_size_in_bytes?: <<long>> + pass:[/**] @property limit_size Memory limit for the circuit breaker. */ + limit_size?: string + pass:[/**] @property limit_size_in_bytes Memory limit, in bytes, for the circuit breaker. */ + limit_size_in_bytes?: <<long>> + pass:[/**] @property overhead A constant that all estimates for the circuit breaker are multiplied with to calculate a final estimate. */ + overhead?: <<float>> + pass:[/**] @property tripped Total number of times the circuit breaker has been triggered and prevented an out of memory error. */ + tripped?: <<float>> +} +---- + + +[discrete] +[[NodesCgroup]] +=== NodesCgroup + +[source,ts,subs=+macros] +---- +interface NodesCgroup { + pass:[/**] @property cpuacct Contains statistics about `cpuacct` control group for the node. */ + cpuacct?: <<NodesCpuAcct>> + pass:[/**] @property cpu Contains statistics about `cpu` control group for the node. */ + cpu?: <<NodesCgroupCpu>> + pass:[/**] @property memory Contains statistics about the memory control group for the node. */ + memory?: <<NodesCgroupMemory>> +} +---- + + +[discrete] +[[NodesCgroupCpu]] +=== NodesCgroupCpu + +[source,ts,subs=+macros] +---- +interface NodesCgroupCpu { + pass:[/**] @property control_group The `cpu` control group to which the Elasticsearch process belongs. 
*/ + control_group?: string + pass:[/**] @property cfs_period_micros The period of time, in microseconds, for how regularly all tasks in the same cgroup as the Elasticsearch process should have their access to CPU resources reallocated. */ + cfs_period_micros?: <<integer>> + pass:[/**] @property cfs_quota_micros The total amount of time, in microseconds, for which all tasks in the same cgroup as the Elasticsearch process can run during one period `cfs_period_micros`. */ + cfs_quota_micros?: <<integer>> + pass:[/**] @property stat Contains CPU statistics for the node. */ + stat?: <<NodesCgroupCpuStat>> +} +---- + + +[discrete] +[[NodesCgroupCpuStat]] +=== NodesCgroupCpuStat + +[source,ts,subs=+macros] +---- +interface NodesCgroupCpuStat { + pass:[/**] @property number_of_elapsed_periods The number of reporting periods (as specified by `cfs_period_micros`) that have elapsed. */ + number_of_elapsed_periods?: <<long>> + pass:[/**] @property number_of_times_throttled The number of times all tasks in the same cgroup as the Elasticsearch process have been throttled. */ + number_of_times_throttled?: <<long>> + pass:[/**] @property time_throttled_nanos The total amount of time, in nanoseconds, for which all tasks in the same cgroup as the Elasticsearch process have been throttled. */ + time_throttled_nanos?: <<DurationValue>><<<UnitNanos>>> +} +---- + + +[discrete] +[[NodesCgroupMemory]] +=== NodesCgroupMemory + +[source,ts,subs=+macros] +---- +interface NodesCgroupMemory { + pass:[/**] @property control_group The `memory` control group to which the Elasticsearch process belongs. */ + control_group?: string + pass:[/**] @property limit_in_bytes The maximum amount of user memory (including file cache) allowed for all tasks in the same cgroup as the Elasticsearch process. This value can be too big to store in a `<<long>>`, so is returned as a string so that the value returned can exactly match what the underlying operating system interface returns. Any value that is too large to parse into a `<<long>>` almost certainly means no limit has been set for the cgroup. */ + limit_in_bytes?: string + pass:[/**] @property usage_in_bytes The total current memory usage by processes in the cgroup, in bytes, by all tasks in the same cgroup as the Elasticsearch process. This value is stored as a string for consistency with `limit_in_bytes`. */ + usage_in_bytes?: string +} +---- + + +[discrete] +[[NodesClient]] +=== NodesClient + +[source,ts,subs=+macros] +---- +interface NodesClient { + pass:[/**] @property id Unique ID for the HTTP client. */ + id?: <<long>> + pass:[/**] @property agent Reported agent for the HTTP client. If unavailable, this property is not included in the response. */ + agent?: string + pass:[/**] @property local_address Local address for the HTTP connection. */ + local_address?: string + pass:[/**] @property remote_address Remote address for the HTTP connection. */ + remote_address?: string + pass:[/**] @property last_uri The URI of the client’s most recent request. */ + last_uri?: string + pass:[/**] @property opened_time_millis Time at which the client opened the connection. */ + opened_time_millis?: <<long>> + pass:[/**] @property closed_time_millis Time at which the client closed the connection if the connection is closed. */ + closed_time_millis?: <<long>> + pass:[/**] @property last_request_time_millis Time of the most recent request from this client. */ + last_request_time_millis?: <<long>> + pass:[/**] @property request_count Number of requests from this client. 
*/ + request_count?: <<long>> + pass:[/**] @property request_size_bytes Cumulative size in bytes of all requests from this client. */ + request_size_bytes?: <<long>> + pass:[/**] @property x_opaque_id Value from the client’s `x-opaque-id` HTTP header. If unavailable, this property is not included in the response. */ + x_opaque_id?: string +} +---- + + +[discrete] +[[NodesClusterAppliedStats]] +=== NodesClusterAppliedStats + +[source,ts,subs=+macros] +---- +interface NodesClusterAppliedStats { + recordings?: <<NodesRecording>>[] +} +---- + + +[discrete] +[[NodesClusterStateQueue]] +=== NodesClusterStateQueue + +[source,ts,subs=+macros] +---- +interface NodesClusterStateQueue { + pass:[/**] @property total Total number of cluster states in queue. */ + total?: <<long>> + pass:[/**] @property pending Number of pending cluster states in queue. */ + pending?: <<long>> + pass:[/**] @property committed Number of committed cluster states in queue. */ + committed?: <<long>> +} +---- + + +[discrete] +[[NodesClusterStateUpdate]] +=== NodesClusterStateUpdate + +[source,ts,subs=+macros] +---- +interface NodesClusterStateUpdate { + pass:[/**] @property count The number of cluster state update attempts that did not change the cluster state since the node started. */ + count: <<long>> + pass:[/**] @property computation_time The cumulative amount of time spent computing no-op cluster state updates since the node started. */ + computation_time?: <<Duration>> + pass:[/**] @property computation_time_millis The cumulative amount of time, in milliseconds, spent computing no-op cluster state updates since the node started. */ + computation_time_millis?: <<DurationValue>><<<UnitMillis>>> + pass:[/**] @property publication_time The cumulative amount of time spent publishing cluster state updates which ultimately succeeded, which includes everything from the start of the publication (just after the computation of the new cluster state) until the publication has finished and the master node is ready to start processing the next state update. This includes the time measured by `context_construction_time`, `commit_time`, `completion_time` and `master_apply_time`. */ + publication_time?: <<Duration>> + pass:[/**] @property publication_time_millis The cumulative amount of time, in milliseconds, spent publishing cluster state updates which ultimately succeeded, which includes everything from the start of the publication (just after the computation of the new cluster state) until the publication has finished and the master node is ready to start processing the next state update. This includes the time measured by `context_construction_time`, `commit_time`, `completion_time` and `master_apply_time`. */ + publication_time_millis?: <<DurationValue>><<<UnitMillis>>> + pass:[/**] @property context_construction_time The cumulative amount of time spent constructing a publication context since the node started for publications that ultimately succeeded. This statistic includes the time spent computing the difference between the current and new cluster state preparing a serialized representation of this difference. */ + context_construction_time?: <<Duration>> + pass:[/**] @property context_construction_time_millis The cumulative amount of time, in milliseconds, spent constructing a publication context since the node started for publications that ultimately succeeded. This statistic includes the time spent computing the difference between the current and new cluster state preparing a serialized representation of this difference. 
*/ + context_construction_time_millis?: <<DurationValue>><<<UnitMillis>>> + pass:[/**] @property commit_time The cumulative amount of time spent waiting for a successful cluster state update to commit, which measures the time from the start of each publication until a majority of the master-eligible nodes have written the state to disk and confirmed the write to the elected master. */ + commit_time?: <<Duration>> + pass:[/**] @property commit_time_millis The cumulative amount of time, in milliseconds, spent waiting for a successful cluster state update to commit, which measures the time from the start of each publication until a majority of the master-eligible nodes have written the state to disk and confirmed the write to the elected master. */ + commit_time_millis?: <<DurationValue>><<<UnitMillis>>> + pass:[/**] @property completion_time The cumulative amount of time spent waiting for a successful cluster state update to complete, which measures the time from the start of each publication until all the other nodes have notified the elected master that they have applied the cluster state. */ + completion_time?: <<Duration>> + pass:[/**] @property completion_time_millis The cumulative amount of time, in milliseconds, spent waiting for a successful cluster state update to complete, which measures the time from the start of each publication until all the other nodes have notified the elected master that they have applied the cluster state. */ + completion_time_millis?: <<DurationValue>><<<UnitMillis>>> + pass:[/**] @property master_apply_time The cumulative amount of time spent successfully applying cluster state updates on the elected master since the node started. */ + master_apply_time?: <<Duration>> + pass:[/**] @property master_apply_time_millis The cumulative amount of time, in milliseconds, spent successfully applying cluster state updates on the elected master since the node started. */ + master_apply_time_millis?: <<DurationValue>><<<UnitMillis>>> + pass:[/**] @property notification_time The cumulative amount of time spent notifying listeners of a no-op cluster state update since the node started. */ + notification_time?: <<Duration>> + pass:[/**] @property notification_time_millis The cumulative amount of time, in milliseconds, spent notifying listeners of a no-op cluster state update since the node started. */ + notification_time_millis?: <<DurationValue>><<<UnitMillis>>> +} +---- + + +[discrete] +[[NodesContext]] +=== NodesContext + +[source,ts,subs=+macros] +---- +interface NodesContext { + context?: string + compilations?: <<long>> + cache_evictions?: <<long>> + compilation_limit_triggered?: <<long>> +} +---- + + +[discrete] +[[NodesCpu]] +=== NodesCpu + +[source,ts,subs=+macros] +---- +interface NodesCpu { + percent?: <<integer>> + sys?: <<Duration>> + sys_in_millis?: <<DurationValue>><<<UnitMillis>>> + total?: <<Duration>> + total_in_millis?: <<DurationValue>><<<UnitMillis>>> + user?: <<Duration>> + user_in_millis?: <<DurationValue>><<<UnitMillis>>> + load_average?: Record<string, <<double>>> +} +---- + + +[discrete] +[[NodesCpuAcct]] +=== NodesCpuAcct + +[source,ts,subs=+macros] +---- +interface NodesCpuAcct { + pass:[/**] @property control_group The `cpuacct` control group to which the Elasticsearch process belongs. */ + control_group?: string + pass:[/**] @property usage_nanos The total CPU time, in nanoseconds, consumed by all tasks in the same cgroup as the Elasticsearch process. 
*/ + usage_nanos?: <<DurationValue>><<<UnitNanos>>> +} +---- + + +[discrete] +[[NodesDataPathStats]] +=== NodesDataPathStats + +[source,ts,subs=+macros] +---- +interface NodesDataPathStats { + pass:[/**] @property available Total amount of disk space available to this Java virtual machine on this file store. */ + available?: string + pass:[/**] @property available_in_bytes Total number of bytes available to this Java virtual machine on this file store. */ + available_in_bytes?: <<long>> + disk_queue?: string + disk_reads?: <<long>> + disk_read_size?: string + disk_read_size_in_bytes?: <<long>> + disk_writes?: <<long>> + disk_write_size?: string + disk_write_size_in_bytes?: <<long>> + pass:[/**] @property free Total amount of unallocated disk space in the file store. */ + free?: string + pass:[/**] @property free_in_bytes Total number of unallocated bytes in the file store. */ + free_in_bytes?: <<long>> + pass:[/**] @property mount Mount point of the file store (for example: `/dev/sda2`). */ + mount?: string + pass:[/**] @property path Path to the file store. */ + path?: string + pass:[/**] @property total Total size of the file store. */ + total?: string + pass:[/**] @property total_in_bytes Total size of the file store in bytes. */ + total_in_bytes?: <<long>> + pass:[/**] @property type Type of the file store (for example: `ext4`). */ + type?: string +} +---- + + +[discrete] +[[NodesDiscovery]] +=== NodesDiscovery + +[source,ts,subs=+macros] +---- +interface NodesDiscovery { + pass:[/**] @property cluster_state_queue Contains statistics for the cluster state queue of the node. */ + cluster_state_queue?: <<NodesClusterStateQueue>> + pass:[/**] @property published_cluster_states Contains statistics for the published cluster states of the node. */ + published_cluster_states?: <<NodesPublishedClusterStates>> + pass:[/**] @property cluster_state_update Contains low-level statistics about how long various activities took during cluster state updates while the node was the elected master. Omitted if the node is not master-eligible. Every field whose name ends in `_time` within this object is also represented as a raw number of milliseconds in a field whose name ends in `_time_millis`. The human-readable fields with a `_time` suffix are only returned if requested with the `?human=true` query parameter. */ + cluster_state_update?: Record<string, <<NodesClusterStateUpdate>>> + serialized_cluster_states?: <<NodesSerializedClusterState>> + cluster_applier_stats?: <<NodesClusterAppliedStats>> +} +---- + + +[discrete] +[[NodesExtendedMemoryStats]] +=== NodesExtendedMemoryStats + +[source,ts,subs=+macros] +---- +interface NodesExtendedMemoryStats extends <<NodesMemoryStats>> { + pass:[/**] @property free_percent <<Percentage>> of free memory. */ + free_percent?: <<integer>> + pass:[/**] @property used_percent <<Percentage>> of used memory. */ + used_percent?: <<integer>> +} +---- + + +[discrete] +[[NodesFileSystem]] +=== NodesFileSystem + +[source,ts,subs=+macros] +---- +interface NodesFileSystem { + pass:[/**] @property data List of all file stores. */ + data?: <<NodesDataPathStats>>[] + pass:[/**] @property timestamp Last time the file stores statistics were refreshed. Recorded in milliseconds since the Unix Epoch. */ + timestamp?: <<long>> + pass:[/**] @property total Contains statistics for all file stores of the node. */ + total?: <<NodesFileSystemTotal>> + pass:[/**] @property io_stats Contains I/O statistics for the node.
*/ + io_stats?: <<NodesIoStats>> +} +---- + + +[discrete] +[[NodesFileSystemTotal]] +=== NodesFileSystemTotal + +[source,ts,subs=+macros] +---- +interface NodesFileSystemTotal { + pass:[/**] @property available Total disk space available to this Java virtual machine on all file stores. Depending on OS or process level restrictions, this might appear less than `free`. This is the actual amount of free disk space the Elasticsearch node can utilise. */ + available?: string + pass:[/**] @property available_in_bytes Total number of bytes available to this Java virtual machine on all file stores. Depending on OS or process level restrictions, this might appear less than `free_in_bytes`. This is the actual amount of free disk space the Elasticsearch node can utilise. */ + available_in_bytes?: <<long>> + pass:[/**] @property free Total unallocated disk space in all file stores. */ + free?: string + pass:[/**] @property free_in_bytes Total number of unallocated bytes in all file stores. */ + free_in_bytes?: <<long>> + pass:[/**] @property total Total size of all file stores. */ + total?: string + pass:[/**] @property total_in_bytes Total size of all file stores in bytes. */ + total_in_bytes?: <<long>> +} +---- + + +[discrete] +[[NodesGarbageCollector]] +=== NodesGarbageCollector + +[source,ts,subs=+macros] +---- +interface NodesGarbageCollector { + pass:[/**] @property collectors Contains statistics about JVM garbage collectors for the node. */ + collectors?: Record<string, <<NodesGarbageCollectorTotal>>> +} +---- + + +[discrete] +[[NodesGarbageCollectorTotal]] +=== NodesGarbageCollectorTotal + +[source,ts,subs=+macros] +---- +interface NodesGarbageCollectorTotal { + pass:[/**] @property collection_count Total number of JVM garbage collectors that collect objects. */ + collection_count?: <<long>> + pass:[/**] @property collection_time Total time spent by JVM collecting objects. */ + collection_time?: string + pass:[/**] @property collection_time_in_millis Total time, in milliseconds, spent by JVM collecting objects. */ + collection_time_in_millis?: <<long>> +} +---- + + +[discrete] +[[NodesHttp]] +=== NodesHttp + +[source,ts,subs=+macros] +---- +interface NodesHttp { + pass:[/**] @property current_open Current number of open HTTP connections for the node. */ + current_open?: <<integer>> + pass:[/**] @property total_opened Total number of HTTP connections opened for the node. */ + total_opened?: <<long>> + pass:[/**] @property clients Information on current and recently-closed HTTP client connections. Clients that have been closed longer than the `http.client_stats.closed_channels.max_age` setting will not be represented here. 
*/ + clients?: <<NodesClient>>[] + pass:[/**] @property routes Detailed HTTP stats broken down by route */ + routes: Record<string, <<NodesHttpRoute>>> +} +---- + + +[discrete] +[[NodesHttpRoute]] +=== NodesHttpRoute + +[source,ts,subs=+macros] +---- +interface NodesHttpRoute { + requests: <<NodesHttpRouteRequests>> + responses: <<NodesHttpRouteResponses>> +} +---- + + +[discrete] +[[NodesHttpRouteRequests]] +=== NodesHttpRouteRequests + +[source,ts,subs=+macros] +---- +interface NodesHttpRouteRequests { + count: <<long>> + total_size_in_bytes: <<long>> + size_histogram: <<NodesSizeHttpHistogram>>[] +} +---- + + +[discrete] +[[NodesHttpRouteResponses]] +=== NodesHttpRouteResponses + +[source,ts,subs=+macros] +---- +interface NodesHttpRouteResponses { + count: <<long>> + total_size_in_bytes: <<long>> + handling_time_histogram: <<NodesTimeHttpHistogram>>[] + size_histogram: <<NodesSizeHttpHistogram>>[] +} +---- + + +[discrete] +[[NodesIndexingPressure]] +=== NodesIndexingPressure + +[source,ts,subs=+macros] +---- +interface NodesIndexingPressure { + pass:[/**] @property memory Contains statistics for memory consumption from indexing load. */ + memory?: <<NodesIndexingPressureMemory>> +} +---- + + +[discrete] +[[NodesIndexingPressureMemory]] +=== NodesIndexingPressureMemory + +[source,ts,subs=+macros] +---- +interface NodesIndexingPressureMemory { + pass:[/**] @property limit Configured memory limit for the indexing requests. Replica requests have an automatic limit that is 1.5x this value. */ + limit?: <<ByteSize>> + pass:[/**] @property limit_in_bytes Configured memory limit, in bytes, for the indexing requests. Replica requests have an automatic limit that is 1.5x this value. */ + limit_in_bytes?: <<long>> + pass:[/**] @property current Contains statistics for current indexing load. */ + current?: <<NodesPressureMemory>> + pass:[/**] @property total Contains statistics for the cumulative indexing load since the node started. */ + total?: <<NodesPressureMemory>> +} +---- + + +[discrete] +[[NodesIngest]] +=== NodesIngest + +[source,ts,subs=+macros] +---- +interface NodesIngest { + pass:[/**] @property pipelines Contains statistics about ingest pipelines for the node. */ + pipelines?: Record<string, <<NodesIngestStats>>> + pass:[/**] @property total Contains statistics about ingest operations for the node. */ + total?: <<NodesIngestTotal>> +} +---- + + +[discrete] +[[NodesIngestStats]] +=== NodesIngestStats + +[source,ts,subs=+macros] +---- +interface NodesIngestStats { + pass:[/**] @property count Total number of documents ingested during the lifetime of this node. */ + count: <<long>> + pass:[/**] @property current Total number of documents currently being ingested. */ + current: <<long>> + pass:[/**] @property failed Total number of failed ingest operations during the lifetime of this node. */ + failed: <<long>> + pass:[/**] @property processors Total number of ingest processors. */ + processors: Record<string, <<NodesKeyedProcessor>>>[] + pass:[/**] @property time_in_millis Total time, in milliseconds, spent preprocessing ingest documents during the lifetime of this node. */ + time_in_millis: <<DurationValue>><<<UnitMillis>>> + pass:[/**] @property ingested_as_first_pipeline_in_bytes Total number of bytes of all documents ingested by the pipeline. This field is only present on pipelines which are the first to process a document. 
Thus, it is not present on pipelines which only serve as a final pipeline after a default pipeline, a pipeline run after a reroute processor, or pipelines in pipeline processors. */ + ingested_as_first_pipeline_in_bytes: <<long>> + pass:[/**] @property produced_as_first_pipeline_in_bytes Total number of bytes of all documents produced by the pipeline. This field is only present on pipelines which are the first to process a document. Thus, it is not present on pipelines which only serve as a final pipeline after a default pipeline, a pipeline run after a reroute processor, or pipelines in pipeline processors. In situations where there are subsequent pipelines, the value represents the size of the document after all pipelines have run. */ + produced_as_first_pipeline_in_bytes: <<long>> +} +---- + + +[discrete] +[[NodesIngestTotal]] +=== NodesIngestTotal + +[source,ts,subs=+macros] +---- +interface NodesIngestTotal { + pass:[/**] @property count Total number of documents ingested during the lifetime of this node. */ + count: <<long>> + pass:[/**] @property current Total number of documents currently being ingested. */ + current: <<long>> + pass:[/**] @property failed Total number of failed ingest operations during the lifetime of this node. */ + failed: <<long>> + pass:[/**] @property time_in_millis Total time, in milliseconds, spent preprocessing ingest documents during the lifetime of this node. */ + time_in_millis: <<DurationValue>><<<UnitMillis>>> +} +---- + + +[discrete] +[[NodesIoStatDevice]] +=== NodesIoStatDevice + +[source,ts,subs=+macros] +---- +interface NodesIoStatDevice { + pass:[/**] @property device_name The Linux device name. */ + device_name?: string + pass:[/**] @property operations The total number of read and write operations for the device completed since starting Elasticsearch. */ + operations?: <<long>> + pass:[/**] @property read_kilobytes The total number of kilobytes read for the device since starting Elasticsearch. */ + read_kilobytes?: <<long>> + pass:[/**] @property read_operations The total number of read operations for the device completed since starting Elasticsearch. */ + read_operations?: <<long>> + pass:[/**] @property write_kilobytes The total number of kilobytes written for the device since starting Elasticsearch. */ + write_kilobytes?: <<long>> + pass:[/**] @property write_operations The total number of write operations for the device completed since starting Elasticsearch. */ + write_operations?: <<long>> +} +---- + + +[discrete] +[[NodesIoStats]] +=== NodesIoStats + +[source,ts,subs=+macros] +---- +interface NodesIoStats { + pass:[/**] @property devices Array of disk metrics for each device that is backing an Elasticsearch data path. These disk metrics are probed periodically and averages between the last probe and the current probe are computed. */ + devices?: <<NodesIoStatDevice>>[] + pass:[/**] @property total The sum of the disk metrics for all devices that back an Elasticsearch data path. */ + total?: <<NodesIoStatDevice>> +} +---- + + +[discrete] +[[NodesJvm]] +=== NodesJvm + +[source,ts,subs=+macros] +---- +interface NodesJvm { + pass:[/**] @property buffer_pools Contains statistics about JVM buffer pools for the node. */ + buffer_pools?: Record<string, <<NodesNodeBufferPool>>> + pass:[/**] @property classes Contains statistics about classes loaded by JVM for the node. */ + classes?: <<NodesJvmClasses>> + pass:[/**] @property gc Contains statistics about JVM garbage collectors for the node. 
*/ + gc?: <<NodesGarbageCollector>> + pass:[/**] @property mem Contains JVM memory usage statistics for the node. */ + mem?: <<NodesJvmMemoryStats>> + pass:[/**] @property threads Contains statistics about JVM thread usage for the node. */ + threads?: <<NodesJvmThreads>> + pass:[/**] @property timestamp Last time JVM statistics were refreshed. */ + timestamp?: <<long>> + pass:[/**] @property uptime Human-readable JVM uptime. Only returned if the `human` query parameter is `true`. */ + uptime?: string + pass:[/**] @property uptime_in_millis JVM uptime in milliseconds. */ + uptime_in_millis?: <<long>> +} +---- + + +[discrete] +[[NodesJvmClasses]] +=== NodesJvmClasses + +[source,ts,subs=+macros] +---- +interface NodesJvmClasses { + pass:[/**] @property current_loaded_count Number of classes currently loaded by JVM. */ + current_loaded_count?: <<long>> + pass:[/**] @property total_loaded_count Total number of classes loaded since the JVM started. */ + total_loaded_count?: <<long>> + pass:[/**] @property total_unloaded_count Total number of classes unloaded since the JVM started. */ + total_unloaded_count?: <<long>> +} +---- + + +[discrete] +[[NodesJvmMemoryStats]] +=== NodesJvmMemoryStats + +[source,ts,subs=+macros] +---- +interface NodesJvmMemoryStats { + pass:[/**] @property heap_used_in_bytes Memory, in bytes, currently in use by the heap. */ + heap_used_in_bytes?: <<long>> + pass:[/**] @property heap_used_percent <<Percentage>> of memory currently in use by the heap. */ + heap_used_percent?: <<long>> + pass:[/**] @property heap_committed_in_bytes Amount of memory, in bytes, available for use by the heap. */ + heap_committed_in_bytes?: <<long>> + pass:[/**] @property heap_max_in_bytes Maximum amount of memory, in bytes, available for use by the heap. */ + heap_max_in_bytes?: <<long>> + pass:[/**] @property non_heap_used_in_bytes Non-heap memory used, in bytes. */ + non_heap_used_in_bytes?: <<long>> + pass:[/**] @property non_heap_committed_in_bytes Amount of non-heap memory available, in bytes. */ + non_heap_committed_in_bytes?: <<long>> + pass:[/**] @property pools Contains statistics about heap memory usage for the node. */ + pools?: Record<string, <<NodesPool>>> +} +---- + + +[discrete] +[[NodesJvmThreads]] +=== NodesJvmThreads + +[source,ts,subs=+macros] +---- +interface NodesJvmThreads { + pass:[/**] @property count Number of active threads in use by JVM. */ + count?: <<long>> + pass:[/**] @property peak_count Highest number of threads used by JVM. */ + peak_count?: <<long>> +} +---- + + +[discrete] +[[NodesKeyedProcessor]] +=== NodesKeyedProcessor + +[source,ts,subs=+macros] +---- +interface NodesKeyedProcessor { + stats?: <<NodesProcessor>> + type?: string +} +---- + + +[discrete] +[[NodesMemoryStats]] +=== NodesMemoryStats + +[source,ts,subs=+macros] +---- +interface NodesMemoryStats { + pass:[/**] @property adjusted_total_in_bytes If the amount of physical memory has been overridden using the `es`.`total_memory_bytes` system property then this reports the overridden value in bytes. Otherwise it reports the same value as `total_in_bytes`. */ + adjusted_total_in_bytes?: <<long>> + resident?: string + resident_in_bytes?: <<long>> + share?: string + share_in_bytes?: <<long>> + total_virtual?: string + total_virtual_in_bytes?: <<long>> + pass:[/**] @property total_in_bytes Total amount of physical memory in bytes. */ + total_in_bytes?: <<long>> + pass:[/**] @property free_in_bytes Amount of free physical memory in bytes. 
*/ + free_in_bytes?: <<long>> + pass:[/**] @property used_in_bytes Amount of used physical memory in bytes. */ + used_in_bytes?: <<long>> +} +---- + + +[discrete] +[[NodesNodeBufferPool]] +=== NodesNodeBufferPool + +[source,ts,subs=+macros] +---- +interface NodesNodeBufferPool { + pass:[/**] @property count Number of buffer pools. */ + count?: <<long>> + pass:[/**] @property total_capacity Total capacity of buffer pools. */ + total_capacity?: string + pass:[/**] @property total_capacity_in_bytes Total capacity of buffer pools in bytes. */ + total_capacity_in_bytes?: <<long>> + pass:[/**] @property used Size of buffer pools. */ + used?: string + pass:[/**] @property used_in_bytes Size of buffer pools in bytes. */ + used_in_bytes?: <<long>> +} +---- + + +[discrete] +[[NodesNodeReloadError]] +=== NodesNodeReloadError + +[source,ts,subs=+macros] +---- +interface NodesNodeReloadError { + name: <<Name>> + reload_exception?: <<ErrorCause>> +} +---- + + +[discrete] +[[NodesNodeReloadResult]] +=== NodesNodeReloadResult + +[source,ts,subs=+macros] +---- +type NodesNodeReloadResult = <<NodesStats>> | <<NodesNodeReloadError>> +---- + + +[discrete] +[[NodesNodesResponseBase]] +=== NodesNodesResponseBase + +[source,ts,subs=+macros] +---- +interface NodesNodesResponseBase { + pass:[/**] @property _nodes Contains statistics about the number of nodes selected by the request’s node filters. */ + _nodes?: <<NodeStatistics>> +} +---- + + +[discrete] +[[NodesOperatingSystem]] +=== NodesOperatingSystem + +[source,ts,subs=+macros] +---- +interface NodesOperatingSystem { + cpu?: <<NodesCpu>> + mem?: <<NodesExtendedMemoryStats>> + swap?: <<NodesMemoryStats>> + cgroup?: <<NodesCgroup>> + timestamp?: <<long>> +} +---- + + +[discrete] +[[NodesPool]] +=== NodesPool + +[source,ts,subs=+macros] +---- +interface NodesPool { + pass:[/**] @property used_in_bytes Memory, in bytes, used by the heap. */ + used_in_bytes?: <<long>> + pass:[/**] @property max_in_bytes Maximum amount of memory, in bytes, available for use by the heap. */ + max_in_bytes?: <<long>> + pass:[/**] @property peak_used_in_bytes Largest amount of memory, in bytes, historically used by the heap. */ + peak_used_in_bytes?: <<long>> + pass:[/**] @property peak_max_in_bytes Largest amount of memory, in bytes, historically used by the heap. */ + peak_max_in_bytes?: <<long>> +} +---- + + +[discrete] +[[NodesPressureMemory]] +=== NodesPressureMemory + +[source,ts,subs=+macros] +---- +interface NodesPressureMemory { + pass:[/**] @property all Memory consumed by indexing requests in the coordinating, primary, or replica stage. */ + all?: <<ByteSize>> + pass:[/**] @property all_in_bytes Memory consumed, in bytes, by indexing requests in the coordinating, primary, or replica stage. */ + all_in_bytes?: <<long>> + pass:[/**] @property combined_coordinating_and_primary Memory consumed by indexing requests in the coordinating or primary stage. This value is not the sum of coordinating and primary as a node can reuse the coordinating memory if the primary stage is executed locally. */ + combined_coordinating_and_primary?: <<ByteSize>> + pass:[/**] @property combined_coordinating_and_primary_in_bytes Memory consumed, in bytes, by indexing requests in the coordinating or primary stage. This value is not the sum of coordinating and primary as a node can reuse the coordinating memory if the primary stage is executed locally. 
*/ + combined_coordinating_and_primary_in_bytes?: <<long>> + pass:[/**] @property coordinating Memory consumed by indexing requests in the coordinating stage. */ + coordinating?: <<ByteSize>> + pass:[/**] @property coordinating_in_bytes Memory consumed, in bytes, by indexing requests in the coordinating stage. */ + coordinating_in_bytes?: <<long>> + pass:[/**] @property primary Memory consumed by indexing requests in the primary stage. */ + primary?: <<ByteSize>> + pass:[/**] @property primary_in_bytes Memory consumed, in bytes, by indexing requests in the primary stage. */ + primary_in_bytes?: <<long>> + pass:[/**] @property replica Memory consumed by indexing requests in the replica stage. */ + replica?: <<ByteSize>> + pass:[/**] @property replica_in_bytes Memory consumed, in bytes, by indexing requests in the replica stage. */ + replica_in_bytes?: <<long>> + pass:[/**] @property coordinating_rejections Number of indexing requests rejected in the coordinating stage. */ + coordinating_rejections?: <<long>> + pass:[/**] @property primary_rejections Number of indexing requests rejected in the primary stage. */ + primary_rejections?: <<long>> + pass:[/**] @property replica_rejections Number of indexing requests rejected in the replica stage. */ + replica_rejections?: <<long>> +} +---- + + +[discrete] +[[NodesProcess]] +=== NodesProcess + +[source,ts,subs=+macros] +---- +interface NodesProcess { + pass:[/**] @property cpu Contains CPU statistics for the node. */ + cpu?: <<NodesCpu>> + pass:[/**] @property mem Contains virtual memory statistics for the node. */ + mem?: <<NodesMemoryStats>> + pass:[/**] @property open_file_descriptors Number of opened file descriptors associated with the current process, or `-1` if not supported. */ + open_file_descriptors?: <<integer>> + pass:[/**] @property max_file_descriptors Maximum number of file descriptors allowed on the system, or `-1` if not supported. */ + max_file_descriptors?: <<integer>> + pass:[/**] @property timestamp Last time the statistics were refreshed. Recorded in milliseconds since the Unix Epoch. */ + timestamp?: <<long>> +} +---- + + +[discrete] +[[NodesProcessor]] +=== NodesProcessor + +[source,ts,subs=+macros] +---- +interface NodesProcessor { + pass:[/**] @property count Number of documents transformed by the processor. */ + count?: <<long>> + pass:[/**] @property current Number of documents currently being transformed by the processor. */ + current?: <<long>> + pass:[/**] @property failed Number of failed operations for the processor. */ + failed?: <<long>> + pass:[/**] @property time_in_millis Time, in milliseconds, spent by the processor transforming documents. */ + time_in_millis?: <<DurationValue>><<<UnitMillis>>> +} +---- + + +[discrete] +[[NodesPublishedClusterStates]] +=== NodesPublishedClusterStates + +[source,ts,subs=+macros] +---- +interface NodesPublishedClusterStates { + pass:[/**] @property full_states Number of published cluster states. */ + full_states?: <<long>> + pass:[/**] @property incompatible_diffs Number of incompatible differences between published cluster states. */ + incompatible_diffs?: <<long>> + pass:[/**] @property compatible_diffs Number of compatible differences between published cluster states.
*/ + compatible_diffs?: <<long>> +} +---- + + +[discrete] +[[NodesRecording]] +=== NodesRecording + +[source,ts,subs=+macros] +---- +interface NodesRecording { + name?: string + cumulative_execution_count?: <<long>> + cumulative_execution_time?: <<Duration>> + cumulative_execution_time_millis?: <<DurationValue>><<<UnitMillis>>> +} +---- + + +[discrete] +[[NodesRepositoryLocation]] +=== NodesRepositoryLocation + +[source,ts,subs=+macros] +---- +interface NodesRepositoryLocation { + base_path: string + pass:[/**] @property container Container name (Azure) */ + container?: string + pass:[/**] @property bucket Bucket name (GCP, S3) */ + bucket?: string +} +---- + + +[discrete] +[[NodesRepositoryMeteringInformation]] +=== NodesRepositoryMeteringInformation + +[source,ts,subs=+macros] +---- +interface NodesRepositoryMeteringInformation { + pass:[/**] @property repository_name Repository name. */ + repository_name: <<Name>> + pass:[/**] @property repository_type Repository type. */ + repository_type: string + pass:[/**] @property repository_location Represents a unique location within the repository. */ + repository_location: <<NodesRepositoryLocation>> + pass:[/**] @property repository_ephemeral_id An identifier that changes every time the repository is updated. */ + repository_ephemeral_id: <<Id>> + pass:[/**] @property repository_started_at Time the repository was created or updated. Recorded in milliseconds since the Unix Epoch. */ + repository_started_at: <<EpochTime>><<<UnitMillis>>> + pass:[/**] @property repository_stopped_at Time the repository was deleted or updated. Recorded in milliseconds since the Unix Epoch. */ + repository_stopped_at?: <<EpochTime>><<<UnitMillis>>> + pass:[/**] @property archived A flag that tells whether or not this object has been archived. When a repository is closed or updated the repository metering information is archived and kept for a certain period of time. This allows retrieving the repository metering information of previous repository instantiations. */ + archived: boolean + pass:[/**] @property cluster_version The cluster state version when this object was archived. This field can be used as a logical timestamp to delete all the archived metrics up to an observed version. This field is only present for archived repository metering information objects. The main purpose of this field is to avoid possible race conditions during repository metering information deletions, i.e. deleting archived repository metering information that we haven’t observed yet. */ + cluster_version?: <<VersionNumber>> + pass:[/**] @property request_counts An object with the number of requests performed against the repository, grouped by request type.
*/ + request_counts: <<NodesRequestCounts>> +} +---- + + +[discrete] +[[NodesRequestCounts]] +=== NodesRequestCounts + +[source,ts,subs=+macros] +---- +interface NodesRequestCounts { + pass:[/**] @property GetBlobProperties Number of Get Blob Properties requests (Azure) */ + GetBlobProperties?: <<long>> + pass:[/**] @property GetBlob Number of Get Blob requests (Azure) */ + GetBlob?: <<long>> + pass:[/**] @property ListBlobs Number of List Blobs requests (Azure) */ + ListBlobs?: <<long>> + pass:[/**] @property PutBlob Number of Put Blob requests (Azure) */ + PutBlob?: <<long>> + pass:[/**] @property PutBlock Number of Put Block requests (Azure) */ + PutBlock?: <<long>> + pass:[/**] @property PutBlockList Number of Put Block List requests */ + PutBlockList?: <<long>> + pass:[/**] @property GetObject Number of get object requests (GCP, S3) */ + GetObject?: <<long>> + pass:[/**] @property ListObjects Number of list objects requests (GCP, S3) */ + ListObjects?: <<long>> + pass:[/**] @property InsertObject Number of insert object requests, including simple, multipart and resumable uploads. Resumable uploads can perform multiple HTTP requests to insert a single object but they are considered a single request since they are billed as an individual operation. (GCP) */ + InsertObject?: <<long>> + pass:[/**] @property PutObject Number of PutObject requests (S3) */ + PutObject?: <<long>> + pass:[/**] @property PutMultipartObject Number of Multipart requests, including CreateMultipartUpload, UploadPart and CompleteMultipartUpload requests (S3) */ + PutMultipartObject?: <<long>> +} +---- + + +[discrete] +[[NodesScriptCache]] +=== NodesScriptCache + +[source,ts,subs=+macros] +---- +interface NodesScriptCache { + pass:[/**] @property cache_evictions Total number of times the script cache has evicted old data. */ + cache_evictions?: <<long>> + pass:[/**] @property compilation_limit_triggered Total number of times the script compilation circuit breaker has limited inline script compilations. */ + compilation_limit_triggered?: <<long>> + pass:[/**] @property compilations Total number of inline script compilations performed by the node. */ + compilations?: <<long>> + context?: string +} +---- + + +[discrete] +[[NodesScripting]] +=== NodesScripting + +[source,ts,subs=+macros] +---- +interface NodesScripting { + pass:[/**] @property cache_evictions Total number of times the script cache has evicted old data. */ + cache_evictions?: <<long>> + pass:[/**] @property compilations Total number of inline script compilations performed by the node. */ + compilations?: <<long>> + pass:[/**] @property compilations_history Contains the recent history of script compilations. */ + compilations_history?: Record<string, <<long>>> + pass:[/**] @property compilation_limit_triggered Total number of times the script compilation circuit breaker has limited inline script compilations. */ + compilation_limit_triggered?: <<long>> + contexts?: <<NodesContext>>[] +} +---- + + +[discrete] +[[NodesSerializedClusterState]] +=== NodesSerializedClusterState + +[source,ts,subs=+macros] +---- +interface NodesSerializedClusterState { + pass:[/**] @property full_states Number of published cluster states.
*/ + full_states?: <<NodesSerializedClusterStateDetail>> + diffs?: <<NodesSerializedClusterStateDetail>> +} +---- + + +[discrete] +[[NodesSerializedClusterStateDetail]] +=== NodesSerializedClusterStateDetail + +[source,ts,subs=+macros] +---- +interface NodesSerializedClusterStateDetail { + count?: <<long>> + uncompressed_size?: string + uncompressed_size_in_bytes?: <<long>> + compressed_size?: string + compressed_size_in_bytes?: <<long>> +} +---- + + +[discrete] +[[NodesSizeHttpHistogram]] +=== NodesSizeHttpHistogram + +[source,ts,subs=+macros] +---- +interface NodesSizeHttpHistogram { + count: <<long>> + ge_bytes?: <<long>> + lt_bytes?: <<long>> +} +---- + + +[discrete] +[[NodesStats]] +=== NodesStats + +[source,ts,subs=+macros] +---- +interface NodesStats { + pass:[/**] @property adaptive_selection Statistics about adaptive replica selection. */ + adaptive_selection?: Record<string, <<NodesAdaptiveSelection>>> + pass:[/**] @property breakers Statistics about the field data circuit breaker. */ + breakers?: Record<string, <<NodesBreaker>>> + pass:[/**] @property fs File system information, data path, free disk space, read/write stats. */ + fs?: <<NodesFileSystem>> + pass:[/**] @property host Network host for the node, based on the network host setting. */ + host?: <<Host>> + pass:[/**] @property http HTTP connection information. */ + http?: <<NodesHttp>> + pass:[/**] @property ingest Statistics about ingest preprocessing. */ + ingest?: <<NodesIngest>> + pass:[/**] @property ip IP address and port for the node. */ + ip?: <<Ip>> | <<Ip>>[] + pass:[/**] @property jvm JVM stats, memory pool information, garbage collection, buffer pools, number of loaded/unloaded classes. */ + jvm?: <<NodesJvm>> + pass:[/**] @property name Human-readable identifier for the node. Based on the node name setting. */ + name?: <<Name>> + pass:[/**] @property os Operating system stats, load average, mem, swap. */ + os?: <<NodesOperatingSystem>> + pass:[/**] @property process Process statistics, memory consumption, cpu usage, open file descriptors. */ + process?: <<NodesProcess>> + pass:[/**] @property roles Roles assigned to the node. */ + roles?: <<NodeRoles>> + pass:[/**] @property script Contains script statistics for the node. */ + script?: <<NodesScripting>> + script_cache?: Record<string, <<NodesScriptCache>> | <<NodesScriptCache>>[]> + pass:[/**] @property thread_pool Statistics about each thread pool, including current size, queue and rejected tasks. */ + thread_pool?: Record<string, <<NodesThreadCount>>> + timestamp?: <<long>> + pass:[/**] @property transport Transport statistics about sent and received bytes in cluster communication. */ + transport?: <<NodesTransport>> + pass:[/**] @property transport_address <<Host>> and port for the transport layer, used for internal communication between nodes in a cluster. */ + transport_address?: <<TransportAddress>> + pass:[/**] @property attributes Contains a list of attributes for the node. */ + attributes?: Record<<<Field>>, string> + pass:[/**] @property discovery Contains node discovery statistics for the node. */ + discovery?: <<NodesDiscovery>> + pass:[/**] @property indexing_pressure Contains indexing pressure statistics for the node. */ + indexing_pressure?: <<NodesIndexingPressure>> + pass:[/**] @property indices <<Indices>> stats about size, document count, indexing and deletion times, search times, field cache size, merges and flushes. 
*/ + indices?: IndicesStatsShardStats +} +---- + + +[discrete] +[[NodesThreadCount]] +=== NodesThreadCount + +[source,ts,subs=+macros] +---- +interface NodesThreadCount { + pass:[/**] @property active Number of active threads in the thread pool. */ + active?: <<long>> + pass:[/**] @property completed Number of tasks completed by the thread pool executor. */ + completed?: <<long>> + pass:[/**] @property largest Highest number of active threads in the thread pool. */ + largest?: <<long>> + pass:[/**] @property queue Number of tasks in queue for the thread pool. */ + queue?: <<long>> + pass:[/**] @property rejected Number of tasks rejected by the thread pool executor. */ + rejected?: <<long>> + pass:[/**] @property threads Number of threads in the thread pool. */ + threads?: <<long>> +} +---- + + +[discrete] +[[NodesTimeHttpHistogram]] +=== NodesTimeHttpHistogram + +[source,ts,subs=+macros] +---- +interface NodesTimeHttpHistogram { + count: <<long>> + ge_millis?: <<long>> + lt_millis?: <<long>> +} +---- + + +[discrete] +[[NodesTransport]] +=== NodesTransport + +[source,ts,subs=+macros] +---- +interface NodesTransport { + pass:[/**] @property inbound_handling_time_histogram The distribution of the time spent handling each inbound message on a transport thread, represented as a histogram. */ + inbound_handling_time_histogram?: <<NodesTransportHistogram>>[] + pass:[/**] @property outbound_handling_time_histogram The distribution of the time spent sending each outbound transport message on a transport thread, represented as a histogram. */ + outbound_handling_time_histogram?: <<NodesTransportHistogram>>[] + pass:[/**] @property rx_count Total number of RX (receive) packets received by the node during internal cluster communication. */ + rx_count?: <<long>> + pass:[/**] @property rx_size Size of RX packets received by the node during internal cluster communication. */ + rx_size?: string + pass:[/**] @property rx_size_in_bytes Size, in bytes, of RX packets received by the node during internal cluster communication. */ + rx_size_in_bytes?: <<long>> + pass:[/**] @property server_open Current number of inbound TCP connections used for internal communication between nodes. */ + server_open?: <<integer>> + pass:[/**] @property tx_count Total number of TX (transmit) packets sent by the node during internal cluster communication. */ + tx_count?: <<long>> + pass:[/**] @property tx_size Size of TX packets sent by the node during internal cluster communication. */ + tx_size?: string + pass:[/**] @property tx_size_in_bytes Size, in bytes, of TX packets sent by the node during internal cluster communication. */ + tx_size_in_bytes?: <<long>> + pass:[/**] @property total_outbound_connections The cumulative number of outbound transport connections that this node has opened since it started. Each transport connection may comprise multiple TCP connections but is only counted once in this statistic. Transport connections are typically long-lived so this statistic should remain constant in a stable cluster. */ + total_outbound_connections?: <<long>> +} +---- + + +[discrete] +[[NodesTransportHistogram]] +=== NodesTransportHistogram + +[source,ts,subs=+macros] +---- +interface NodesTransportHistogram { + pass:[/**] @property count The number of times a transport thread took a period of time within the bounds of this bucket to handle an inbound message. */ + count?: <<long>> + pass:[/**] @property lt_millis The exclusive upper bound of the bucket in milliseconds.
May be omitted on the last bucket if this bucket has no upper bound. */ + lt_millis?: <<long>> + pass:[/**] @property ge_millis The inclusive lower bound of the bucket in milliseconds. May be omitted on the first bucket if this bucket has no lower bound. */ + ge_millis?: <<long>> +} +---- + + diff --git a/docs/reference/shared-types/query-rules-types.asciidoc b/docs/reference/shared-types/query-rules-types.asciidoc new file mode 100644 index 000000000..0b91dd483 --- /dev/null +++ b/docs/reference/shared-types/query-rules-types.asciidoc @@ -0,0 +1,115 @@ +[[reference-shared-types-query-rules-types]] + +=== `QueryRules` types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[QueryRulesQueryRule]] +=== QueryRulesQueryRule + +[source,ts,subs=+macros] +---- +interface QueryRulesQueryRule { + rule_id: <<Id>> + type: <<QueryRulesQueryRuleType>> + criteria: <<QueryRulesQueryRuleCriteria>> | <<QueryRulesQueryRuleCriteria>>[] + actions: <<QueryRulesQueryRuleActions>> + priority?: <<integer>> +} +---- + + +[discrete] +[[QueryRulesQueryRuleActions]] +=== QueryRulesQueryRuleActions + +[source,ts,subs=+macros] +---- +interface QueryRulesQueryRuleActions { + ids?: <<Id>>[] + docs?: <<QueryDslPinnedDoc>>[] +} +---- + + +[discrete] +[[QueryRulesQueryRuleCriteria]] +=== QueryRulesQueryRuleCriteria + +[source,ts,subs=+macros] +---- +interface QueryRulesQueryRuleCriteria { + type: <<QueryRulesQueryRuleCriteriaType>> + metadata?: string + values?: any[] +} +---- + + +[discrete] +[[QueryRulesQueryRuleCriteriaType]] +=== QueryRulesQueryRuleCriteriaType + +[source,ts,subs=+macros] +---- +type QueryRulesQueryRuleCriteriaType = 'global' | 'exact' | 'exact_fuzzy' | 'fuzzy' | 'prefix' | 'suffix' | 'contains' | 'lt' | 'lte' | 'gt' | 'gte' | 'always' +---- + + +[discrete] +[[QueryRulesQueryRuleType]] +=== QueryRulesQueryRuleType + +[source,ts,subs=+macros] +---- +type QueryRulesQueryRuleType = 'pinned' | 'exclude' +---- + + +[discrete] +[[QueryRulesQueryRuleset]] +=== QueryRulesQueryRuleset + +[source,ts,subs=+macros] +---- +interface QueryRulesQueryRuleset { + pass:[/**] @property ruleset_id Query Ruleset unique identifier */ + ruleset_id: <<Id>> + pass:[/**] @property rules Rules associated with the query ruleset */ + rules: <<QueryRulesQueryRule>>[] +} +---- + + diff --git a/docs/reference/shared-types/rollup-types.asciidoc b/docs/reference/shared-types/rollup-types.asciidoc new file mode 100644 index 000000000..639191ecc --- /dev/null +++ 
b/docs/reference/shared-types/rollup-types.asciidoc @@ -0,0 +1,130 @@ +[[reference-shared-types-rollup-types]] + +=== `Rollup` types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[RollupDateHistogramGrouping]] +=== RollupDateHistogramGrouping + +[source,ts,subs=+macros] +---- +interface RollupDateHistogramGrouping { + pass:[/**] @property delay How <<long>> to wait before rolling up new documents. By default, the indexer attempts to roll up all data that is available. However, it is not uncommon for data to arrive out of order. The indexer is unable to deal with data that arrives after a time-span has been rolled up. You need to specify a delay that matches the longest period of time you expect out-of-order data to arrive. */ + delay?: <<Duration>> + pass:[/**] @property field The date field that is to be rolled up. */ + field: <<Field>> + format?: string + interval?: <<Duration>> + pass:[/**] @property calendar_interval The interval of time buckets to be generated when rolling up. */ + calendar_interval?: <<Duration>> + pass:[/**] @property fixed_interval The interval of time buckets to be generated when rolling up. */ + fixed_interval?: <<Duration>> + pass:[/**] @property time_zone Defines what `time_zone` the rollup documents are stored as. Unlike raw data, which can shift timezones on the fly, rolled documents have to be stored with a specific timezone. By default, rollup documents are stored in `UTC`. */ + time_zone?: <<TimeZone>> +} +---- + + +[discrete] +[[RollupFieldMetric]] +=== RollupFieldMetric + +[source,ts,subs=+macros] +---- +interface RollupFieldMetric { + pass:[/**] @property field The field to collect metrics for. This must be a numeric of some kind. */ + field: <<Field>> + pass:[/**] @property metrics An array of metrics to collect for the field. At least one metric must be configured. */ + metrics: <<RollupMetric>>[] +} +---- + + +[discrete] +[[RollupGroupings]] +=== RollupGroupings + +[source,ts,subs=+macros] +---- +interface RollupGroupings { + pass:[/**] @property date_histogram A date histogram group aggregates a date field into time-based buckets. This group is mandatory; you currently cannot roll up documents without a timestamp and a `date_histogram` group. */ + date_histogram?: <<RollupDateHistogramGrouping>> + pass:[/**] @property histogram The histogram group aggregates one or more numeric fields into numeric histogram intervals. 
*/ + histogram?: <<RollupHistogramGrouping>> + pass:[/**] @property terms The terms group can be used on keyword or numeric fields to allow bucketing via the terms aggregation at a later point. The indexer enumerates and stores all values of a field for each time-period. This can be potentially costly for high-cardinality groups such as IP addresses, especially if the time-bucket is particularly sparse. */ + terms?: <<RollupTermsGrouping>> +} +---- + + +[discrete] +[[RollupHistogramGrouping]] +=== RollupHistogramGrouping + +[source,ts,subs=+macros] +---- +interface RollupHistogramGrouping { + pass:[/**] @property fields The set of fields that you wish to build histograms for. All fields specified must be some kind of numeric. Order does not matter. */ + fields: <<Fields>> + pass:[/**] @property interval The interval of histogram buckets to be generated when rolling up. For example, a value of `5` creates buckets that are five units wide (`0-5`, `5-10`, etc). Note that only one interval can be specified in the histogram group, meaning that all fields being grouped via the histogram must share the same interval. */ + interval: <<long>> +} +---- + + +[discrete] +[[RollupMetric]] +=== RollupMetric + +[source,ts,subs=+macros] +---- +type RollupMetric = 'min' | 'max' | 'sum' | 'avg' | 'value_count' +---- + + +[discrete] +[[RollupTermsGrouping]] +=== RollupTermsGrouping + +[source,ts,subs=+macros] +---- +interface RollupTermsGrouping { + pass:[/**] @property fields The set of fields that you wish to collect terms for. This array can contain fields that are both keyword and numerics. Order does not matter. */ + fields: <<Fields>> +} +---- + + diff --git a/docs/reference/shared-types/search-application-types.asciidoc b/docs/reference/shared-types/search-application-types.asciidoc new file mode 100644 index 000000000..f781f1c79 --- /dev/null +++ b/docs/reference/shared-types/search-application-types.asciidoc @@ -0,0 +1,107 @@ +[[reference-shared-types-search-application-types]] + +=== `SearchApplication` types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[SearchApplicationAnalyticsCollection]] +=== SearchApplicationAnalyticsCollection + +[source,ts,subs=+macros] +---- +interface SearchApplicationAnalyticsCollection { + pass:[/**] @property event_data_stream Data stream for the collection. 
*/ + event_data_stream: <<SearchApplicationEventDataStream>> +} +---- + + +[discrete] +[[SearchApplicationEventDataStream]] +=== SearchApplicationEventDataStream + +[source,ts,subs=+macros] +---- +interface SearchApplicationEventDataStream { + name: <<IndexName>> +} +---- + + +[discrete] +[[SearchApplicationSearchApplication]] +=== SearchApplicationSearchApplication + +[source,ts,subs=+macros] +---- +interface SearchApplicationSearchApplication extends <<SearchApplicationSearchApplicationParameters>> { + pass:[/**] @property name Search Application name */ + name: <<Name>> + pass:[/**] @property updated_at_millis Last time the Search Application was updated. */ + updated_at_millis: <<EpochTime>><<<UnitMillis>>> +} +---- + + +[discrete] +[[SearchApplicationSearchApplicationParameters]] +=== SearchApplicationSearchApplicationParameters + +[source,ts,subs=+macros] +---- +interface SearchApplicationSearchApplicationParameters { + pass:[/**] @property indices <<Indices>> that are part of the Search Application. */ + indices: <<IndexName>>[] + pass:[/**] @property analytics_collection_name Analytics collection associated to the Search Application. */ + analytics_collection_name?: <<Name>> + pass:[/**] @property template Search template to use on search operations. */ + template?: <<SearchApplicationSearchApplicationTemplate>> +} +---- + + +[discrete] +[[SearchApplicationSearchApplicationTemplate]] +=== SearchApplicationSearchApplicationTemplate + +[source,ts,subs=+macros] +---- +interface SearchApplicationSearchApplicationTemplate { + pass:[/**] @property script The associated mustache template. */ + script: <<Script>> | string +} +---- + + diff --git a/docs/reference/shared-types/searchable-snapshots-types.asciidoc b/docs/reference/shared-types/searchable-snapshots-types.asciidoc new file mode 100644 index 000000000..48639757c --- /dev/null +++ b/docs/reference/shared-types/searchable-snapshots-types.asciidoc @@ -0,0 +1,47 @@ +[[reference-shared-types-searchable-snapshots-types]] + +=== `SearchableSnapshots` types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[SearchableSnapshotsStatsLevel]] +=== SearchableSnapshotsStatsLevel + +[source,ts,subs=+macros] +---- +type SearchableSnapshotsStatsLevel = 'cluster' | 'indices' | 'shards' +---- + + diff --git a/docs/reference/shared-types/security-types.asciidoc b/docs/reference/shared-types/security-types.asciidoc new file mode 100644 index 000000000..21648a2e9 --- /dev/null +++ b/docs/reference/shared-types/security-types.asciidoc @@ -0,0 +1,674 @@ +[[reference-shared-types-security-types]] + +=== `Security` types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[SecurityAccess]] +=== SecurityAccess + +[source,ts,subs=+macros] +---- +interface SecurityAccess { + pass:[/**] @property replication A list of indices permission entries for cross-cluster replication. */ + replication?: <<SecurityReplicationAccess>>[] + pass:[/**] @property search A list of indices permission entries for cross-cluster search. */ + search?: <<SecuritySearchAccess>>[] +} +---- + + +[discrete] +[[SecurityApiKey]] +=== SecurityApiKey + +[source,ts,subs=+macros] +---- +interface SecurityApiKey { + pass:[/**] @property id <<Id>> for the API key */ + id: <<Id>> + pass:[/**] @property name <<Name>> of the API key. */ + name: <<Name>> + pass:[/**] @property type The type of the API key (e.g. `rest` or `cross_cluster`). */ + type: <<SecurityApiKeyType>> + pass:[/**] @property creation Creation time for the API key in milliseconds. */ + creation: <<EpochTime>><<<UnitMillis>>> + pass:[/**] @property expiration Expiration time for the API key in milliseconds. */ + expiration?: <<EpochTime>><<<UnitMillis>>> + pass:[/**] @property invalidated Invalidation status for the API key. If the key has been invalidated, it has a value of `true`. Otherwise, it is `false`. */ + invalidated: boolean + pass:[/**] @property invalidation If the key has been invalidated, invalidation time in milliseconds. 
*/ + invalidation?: <<EpochTime>><<<UnitMillis>>> + pass:[/**] @property username Principal for which this API key was created. */ + username: <<Username>> + pass:[/**] @property realm Realm name of the principal for which this API key was created. */ + realm: string + pass:[/**] @property realm_type Realm type of the principal for which this API key was created. */ + realm_type?: string + pass:[/**] @property metadata <<Metadata>> of the API key. */ + metadata: <<Metadata>> + pass:[/**] @property role_descriptors The role descriptors assigned to this API key when it was created or last updated. An empty role descriptor means the API key inherits the owner user’s permissions. */ + role_descriptors?: Record<string, <<SecurityRoleDescriptor>>> + pass:[/**] @property limited_by The owner user’s permissions associated with the API key. It is a point-in-time snapshot captured at creation and subsequent updates. An API key’s effective permissions are an intersection of its assigned privileges and the owner user’s permissions. */ + limited_by?: Record<string, <<SecurityRoleDescriptor>>>[] + pass:[/**] @property access The access granted to cross-cluster API keys. The access is composed of permissions for cross cluster search and cross cluster replication. At least one of them must be specified. When specified, the new access assignment fully replaces the previously assigned access. */ + access?: <<SecurityAccess>> + pass:[/**] @property profile_uid The profile uid for the API key owner principal, if requested and if it exists. */ + profile_uid?: string + pass:[/**] @property _sort Sorting values when using the `sort` parameter with the `security.query_api_keys` API. */ + _sort?: <<SortResults>> +} +---- + + +[discrete] +[[SecurityApiKeyType]] +=== SecurityApiKeyType + +[source,ts,subs=+macros] +---- +type SecurityApiKeyType = 'rest' | 'cross_cluster' +---- + + +[discrete] +[[SecurityApplicationGlobalUserPrivileges]] +=== SecurityApplicationGlobalUserPrivileges + +[source,ts,subs=+macros] +---- +interface SecurityApplicationGlobalUserPrivileges { + manage: <<SecurityManageUserPrivileges>> +} +---- + + +[discrete] +[[SecurityApplicationPrivileges]] +=== SecurityApplicationPrivileges + +[source,ts,subs=+macros] +---- +interface SecurityApplicationPrivileges { + pass:[/**] @property application The name of the application to which this entry applies. */ + application: string + pass:[/**] @property privileges A list of strings, where each element is the name of an application privilege or action. */ + privileges: string[] + pass:[/**] @property resources A list of resources to which the privileges are applied.
*/ + resources: string[] +} +---- + + +[discrete] +[[SecurityBulkError]] +=== SecurityBulkError + +[source,ts,subs=+macros] +---- +interface SecurityBulkError { + pass:[/**] @property count The number of errors */ + count: <<integer>> + pass:[/**] @property details Details about the errors, keyed by role name */ + details: Record<string, <<ErrorCause>>> +} +---- + + +[discrete] +[[SecurityClusterNode]] +=== SecurityClusterNode + +[source,ts,subs=+macros] +---- +interface SecurityClusterNode { + name: <<Name>> +} +---- + + +[discrete] +[[SecurityClusterPrivilege]] +=== SecurityClusterPrivilege + +[source,ts,subs=+macros] +---- +type SecurityClusterPrivilege = 'all' | 'cancel_task' | 'create_snapshot' | 'cross_cluster_replication' | 'cross_cluster_search' | 'delegate_pki' | 'grant_api_key' | 'manage' | 'manage_api_key' | 'manage_autoscaling' | 'manage_behavioral_analytics' | 'manage_ccr' | 'manage_data_frame_transforms' | 'manage_data_stream_global_retention' | 'manage_enrich' | 'manage_ilm' | 'manage_index_templates' | 'manage_inference' | 'manage_ingest_pipelines' | 'manage_logstash_pipelines' | 'manage_ml' | 'manage_oidc' | 'manage_own_api_key' | 'manage_pipeline' | 'manage_rollup' | 'manage_saml' | 'manage_search_application' | 'manage_search_query_rules' | 'manage_search_synonyms' | 'manage_security' | 'manage_service_account' | 'manage_slm' | 'manage_token' | 'manage_transform' | 'manage_user_profile' | 'manage_watcher' | 'monitor' | 'monitor_data_frame_transforms' | 'monitor_data_stream_global_retention' | 'monitor_enrich' | 'monitor_inference' | 'monitor_ml' | 'monitor_rollup' | 'monitor_snapshot' | 'monitor_stats' | 'monitor_text_structure' | 'monitor_transform' | 'monitor_watcher' | 'none' | 'post_behavioral_analytics_event' | 'read_ccr' | 'read_fleet_secrets' | 'read_ilm' | 'read_pipeline' | 'read_security' | 'read_slm' | 'transport_client' | 'write_connector_secrets' | 'write_fleet_secrets' | string +---- + + +[discrete] +[[SecurityCreatedStatus]] +=== SecurityCreatedStatus + +[source,ts,subs=+macros] +---- +interface SecurityCreatedStatus { + created: boolean +} +---- + + +[discrete] +[[SecurityFieldRule]] +=== SecurityFieldRule + +[source,ts,subs=+macros] +---- +interface SecurityFieldRule { + username?: <<Names>> + dn?: <<Names>> + groups?: <<Names>> +} +---- + + +[discrete] +[[SecurityFieldSecurity]] +=== SecurityFieldSecurity + +[source,ts,subs=+macros] +---- +interface SecurityFieldSecurity { + except?: <<Fields>> + grant?: <<Fields>> +} +---- + + +[discrete] +[[SecurityGlobalPrivilege]] +=== SecurityGlobalPrivilege + +[source,ts,subs=+macros] +---- +interface SecurityGlobalPrivilege { + application: <<SecurityApplicationGlobalUserPrivileges>> +} +---- + + +[discrete] +[[SecurityGrantType]] +=== SecurityGrantType + +[source,ts,subs=+macros] +---- +type SecurityGrantType = 'password' | 'access_token' +---- + + +[discrete] +[[SecurityIndexPrivilege]] +=== SecurityIndexPrivilege + +[source,ts,subs=+macros] +---- +type SecurityIndexPrivilege = 'all' | 'auto_configure' | 'create' | 'create_doc' | 'create_index' | 'cross_cluster_replication' | 'cross_cluster_replication_internal' | 'delete' | 'delete_index' | 'index' | 'maintenance' | 'manage' | 'manage_data_stream_lifecycle' | 'manage_follow_index' | 'manage_ilm' | 'manage_leader_index' | 'monitor' | 'none' | 'read' | 'read_cross_cluster' | 'view_index_metadata' | 'write' | string +---- + + +[discrete] +[[SecurityIndicesPrivileges]] +=== SecurityIndicesPrivileges + +[source,ts,subs=+macros] +---- +interface 
SecurityIndicesPrivileges { + pass:[/**] @property field_security The document fields that the owners of the role have read access to. */ + field_security?: <<SecurityFieldSecurity>> + pass:[/**] @property names A list of indices (or index name patterns) to which the permissions in this entry apply. */ + names: <<IndexName>> | <<IndexName>>[] + pass:[/**] @property privileges The index level privileges that owners of the role have on the specified indices. */ + privileges: <<SecurityIndexPrivilege>>[] + pass:[/**] @property query A search query that defines the documents the owners of the role have access to. A document within the specified indices must match this query for it to be accessible by the owners of the role. */ + query?: <<SecurityIndicesPrivilegesQuery>> + pass:[/**] @property allow_restricted_indices Set to `true` if using wildcard or regular expressions for patterns that cover restricted indices. Implicitly, restricted indices have limited privileges that can cause pattern tests to fail. If restricted indices are explicitly included in the `names` list, Elasticsearch checks privileges against these indices regardless of the value set for `allow_restricted_indices`. */ + allow_restricted_indices?: boolean +} +---- + + +[discrete] +[[SecurityIndicesPrivilegesQuery]] +=== SecurityIndicesPrivilegesQuery + +[source,ts,subs=+macros] +---- +type SecurityIndicesPrivilegesQuery = string | <<QueryDslQueryContainer>> | <<SecurityRoleTemplateQuery>> +---- + + +[discrete] +[[SecurityManageUserPrivileges]] +=== SecurityManageUserPrivileges + +[source,ts,subs=+macros] +---- +interface SecurityManageUserPrivileges { + applications: string[] +} +---- + + +[discrete] +[[SecurityRealmInfo]] +=== SecurityRealmInfo + +[source,ts,subs=+macros] +---- +interface SecurityRealmInfo { + name: <<Name>> + type: string +} +---- + + +[discrete] +[[SecurityRemoteClusterPrivilege]] +=== SecurityRemoteClusterPrivilege + +[source,ts,subs=+macros] +---- +type SecurityRemoteClusterPrivilege = 'monitor_enrich' | 'monitor_stats' +---- + + +[discrete] +[[SecurityRemoteClusterPrivileges]] +=== SecurityRemoteClusterPrivileges + +[source,ts,subs=+macros] +---- +interface SecurityRemoteClusterPrivileges { + pass:[/**] @property clusters A list of cluster aliases to which the permissions in this entry apply. */ + clusters: <<Names>> + pass:[/**] @property privileges The cluster level privileges that owners of the role have on the remote cluster. */ + privileges: <<SecurityRemoteClusterPrivilege>>[] +} +---- + + +[discrete] +[[SecurityRemoteIndicesPrivileges]] +=== SecurityRemoteIndicesPrivileges + +[source,ts,subs=+macros] +---- +interface SecurityRemoteIndicesPrivileges { + pass:[/**] @property clusters A list of cluster aliases to which the permissions in this entry apply. */ + clusters: <<Names>> + pass:[/**] @property field_security The document fields that the owners of the role have read access to. */ + field_security?: <<SecurityFieldSecurity>> + pass:[/**] @property names A list of indices (or index name patterns) to which the permissions in this entry apply. */ + names: <<IndexName>> | <<IndexName>>[] + pass:[/**] @property privileges The index level privileges that owners of the role have on the specified indices. */ + privileges: <<SecurityIndexPrivilege>>[] + pass:[/**] @property query A search query that defines the documents the owners of the role have access to. A document within the specified indices must match this query for it to be accessible by the owners of the role. 
*/ + query?: <<SecurityIndicesPrivilegesQuery>> + pass:[/**] @property allow_restricted_indices Set to `true` if using wildcard or regular expressions for patterns that cover restricted indices. Implicitly, restricted indices have limited privileges that can cause pattern tests to fail. If restricted indices are explicitly included in the `names` list, Elasticsearch checks privileges against these indices regardless of the value set for `allow_restricted_indices`. */ + allow_restricted_indices?: boolean +} +---- + + +[discrete] +[[SecurityReplicationAccess]] +=== SecurityReplicationAccess + +[source,ts,subs=+macros] +---- +interface SecurityReplicationAccess { + pass:[/**] @property names A list of indices (or index name patterns) to which the permissions in this entry apply. */ + names: <<IndexName>> | <<IndexName>>[] + pass:[/**] @property allow_restricted_indices This needs to be set to true if the patterns in the names field should cover system indices. */ + allow_restricted_indices?: boolean +} +---- + + +[discrete] +[[SecurityRestriction]] +=== SecurityRestriction + +[source,ts,subs=+macros] +---- +interface SecurityRestriction { + workflows: <<SecurityRestrictionWorkflow>>[] +} +---- + + +[discrete] +[[SecurityRestrictionWorkflow]] +=== SecurityRestrictionWorkflow + +[source,ts,subs=+macros] +---- +type SecurityRestrictionWorkflow = 'search_application_query' | string +---- + + +[discrete] +[[SecurityRoleDescriptor]] +=== SecurityRoleDescriptor + +[source,ts,subs=+macros] +---- +interface SecurityRoleDescriptor { + pass:[/**] @property cluster A list of cluster privileges. These privileges define the cluster level actions that API keys are able to execute. */ + cluster?: <<SecurityClusterPrivilege>>[] + pass:[/**] @property indices A list of indices permissions entries. */ + indices?: <<SecurityIndicesPrivileges>>[] + pass:[/**] @property index A list of indices permissions entries. */ + index?: <<SecurityIndicesPrivileges>>[] + pass:[/**] @property remote_indices A list of indices permissions for remote clusters. */ + remote_indices?: <<SecurityRemoteIndicesPrivileges>>[] + pass:[/**] @property remote_cluster A list of cluster permissions for remote clusters. Note - this is limited to a subset of the cluster permissions. */ + remote_cluster?: <<SecurityRemoteClusterPrivileges>>[] + pass:[/**] @property global An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges. */ + global?: <<SecurityGlobalPrivilege>>[] | <<SecurityGlobalPrivilege>> + pass:[/**] @property applications A list of application privilege entries. */ + applications?: <<SecurityApplicationPrivileges>>[] + pass:[/**] @property metadata Optional meta-data. Within the metadata object, keys that begin with `_` are reserved for system usage. */ + metadata?: <<Metadata>> + pass:[/**] @property run_as A list of users that the API keys can impersonate. *Note*: in Serverless, the run-as feature is disabled. For API compatibility, you can still specify an empty `run_as` field, but a non-empty list will be rejected. */ + run_as?: string[] + pass:[/**] @property description Optional description of the role descriptor. */ + description?: string + pass:[/**] @property restriction Restriction for when the role descriptor is allowed to be effective. 
*/ + restriction?: <<SecurityRestriction>> + transient_metadata?: Record<string, any> +} +---- + + +[discrete] +[[SecurityRoleDescriptorRead]] +=== SecurityRoleDescriptorRead + +[source,ts,subs=+macros] +---- +interface SecurityRoleDescriptorRead { + pass:[/**] @property cluster A list of cluster privileges. These privileges define the cluster level actions that API keys are able to execute. */ + cluster: <<SecurityClusterPrivilege>>[] + pass:[/**] @property indices A list of indices permissions entries. */ + indices: <<SecurityIndicesPrivileges>>[] + pass:[/**] @property index A list of indices permissions entries. */ + index: <<SecurityIndicesPrivileges>>[] + pass:[/**] @property remote_indices A list of indices permissions for remote clusters. */ + remote_indices?: <<SecurityRemoteIndicesPrivileges>>[] + pass:[/**] @property remote_cluster A list of cluster permissions for remote clusters. Note - this is limited to a subset of the cluster permissions. */ + remote_cluster?: <<SecurityRemoteClusterPrivileges>>[] + pass:[/**] @property global An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges. */ + global?: <<SecurityGlobalPrivilege>>[] | <<SecurityGlobalPrivilege>> + pass:[/**] @property applications A list of application privilege entries. */ + applications?: <<SecurityApplicationPrivileges>>[] + pass:[/**] @property metadata Optional meta-data. Within the metadata object, keys that begin with `_` are reserved for system usage. */ + metadata?: <<Metadata>> + pass:[/**] @property run_as A list of users that the API keys can impersonate. */ + run_as?: string[] + pass:[/**] @property description Optional description of the role descriptor. */ + description?: string + pass:[/**] @property restriction Restriction for when the role descriptor is allowed to be effective. */ + restriction?: <<SecurityRestriction>> + transient_metadata?: Record<string, any> +} +---- + + +[discrete] +[[SecurityRoleMapping]] +=== SecurityRoleMapping + +[source,ts,subs=+macros] +---- +interface SecurityRoleMapping { + enabled: boolean + metadata: <<Metadata>> + roles?: string[] + role_templates?: <<SecurityRoleTemplate>>[] + rules: <<SecurityRoleMappingRule>> +} +---- + + +[discrete] +[[SecurityRoleMappingRule]] +=== SecurityRoleMappingRule + +[source,ts,subs=+macros] +---- +interface SecurityRoleMappingRule { + any?: <<SecurityRoleMappingRule>>[] + all?: <<SecurityRoleMappingRule>>[] + field?: <<SecurityFieldRule>> + except?: <<SecurityRoleMappingRule>> +} +---- + + +[discrete] +[[SecurityRoleTemplate]] +=== SecurityRoleTemplate + +[source,ts,subs=+macros] +---- +interface SecurityRoleTemplate { + format?: <<SecurityTemplateFormat>> + template: <<Script>> | string +} +---- + + +[discrete] +[[SecurityRoleTemplateInlineQuery]] +=== SecurityRoleTemplateInlineQuery + +[source,ts,subs=+macros] +---- +type SecurityRoleTemplateInlineQuery = string | <<QueryDslQueryContainer>> +---- + + +[discrete] +[[SecurityRoleTemplateQuery]] +=== SecurityRoleTemplateQuery + +[source,ts,subs=+macros] +---- +interface SecurityRoleTemplateQuery { + pass:[/**] @property template When you create a role, you can specify a query that defines the document level security permissions. You can optionally use Mustache templates in the role query to insert the username of the current authenticated user into the role. 
Like other places in Elasticsearch that support templating or scripting, you can specify inline, stored, or file-based templates and define custom parameters. You access the details for the current authenticated user through the _user parameter. */ + template?: <<SecurityRoleTemplateScript>> | <<SecurityRoleTemplateInlineQuery>> +} +---- + + +[discrete] +[[SecurityRoleTemplateScript]] +=== SecurityRoleTemplateScript + +[source,ts,subs=+macros] +---- +interface SecurityRoleTemplateScript { + source?: <<SecurityRoleTemplateInlineQuery>> + pass:[/**] @property id The `id` for a stored script. */ + id?: <<Id>> + pass:[/**] @property params Specifies any named parameters that are passed into the script as variables. Use parameters instead of hard-coded values to decrease compile time. */ + params?: Record<string, any> + pass:[/**] @property lang Specifies the language the script is written in. */ + lang?: <<ScriptLanguage>> + options?: Record<string, string> +} +---- + + +[discrete] +[[SecuritySearchAccess]] +=== SecuritySearchAccess + +[source,ts,subs=+macros] +---- +interface SecuritySearchAccess { + pass:[/**] @property field_security The document fields that the owners of the role have read access to. */ + field_security?: <<SecurityFieldSecurity>> + pass:[/**] @property names A list of indices (or index name patterns) to which the permissions in this entry apply. */ + names: <<IndexName>> | <<IndexName>>[] + pass:[/**] @property query A search query that defines the documents the owners of the role have access to. A document within the specified indices must match this query for it to be accessible by the owners of the role. */ + query?: <<SecurityIndicesPrivilegesQuery>> + pass:[/**] @property allow_restricted_indices Set to `true` if using wildcard or regular expressions for patterns that cover restricted indices. Implicitly, restricted indices have limited privileges that can cause pattern tests to fail. If restricted indices are explicitly included in the `names` list, Elasticsearch checks privileges against these indices regardless of the value set for `allow_restricted_indices`. */ + allow_restricted_indices?: boolean +} +---- + + +[discrete] +[[SecurityTemplateFormat]] +=== SecurityTemplateFormat + +[source,ts,subs=+macros] +---- +type SecurityTemplateFormat = 'string' | 'json' +---- + + +[discrete] +[[SecurityUser]] +=== SecurityUser + +[source,ts,subs=+macros] +---- +interface SecurityUser { + email?: string | null + full_name?: <<Name>> | null + metadata: <<Metadata>> + roles: string[] + username: <<Username>> + enabled: boolean + profile_uid?: <<SecurityUserProfileId>> +} +---- + + +[discrete] +[[SecurityUserIndicesPrivileges]] +=== SecurityUserIndicesPrivileges + +[source,ts,subs=+macros] +---- +interface SecurityUserIndicesPrivileges { + pass:[/**] @property field_security The document fields that the owners of the role have read access to. */ + field_security?: <<SecurityFieldSecurity>>[] + pass:[/**] @property names A list of indices (or index name patterns) to which the permissions in this entry apply. */ + names: <<IndexName>> | <<IndexName>>[] + pass:[/**] @property privileges The index level privileges that owners of the role have on the specified indices. */ + privileges: <<SecurityIndexPrivilege>>[] + pass:[/**] @property query Search queries that define the documents the user has access to. A document within the specified indices must match these queries for it to be accessible by the owners of the role. 
*/ + query?: <<SecurityIndicesPrivilegesQuery>>[] + pass:[/**] @property allow_restricted_indices Set to `true` if using wildcard or regular expressions for patterns that cover restricted indices. Implicitly, restricted indices have limited privileges that can cause pattern tests to fail. If restricted indices are explicitly included in the `names` list, Elasticsearch checks privileges against these indices regardless of the value set for `allow_restricted_indices`. */ + allow_restricted_indices: boolean +} +---- + + +[discrete] +[[SecurityUserProfile]] +=== SecurityUserProfile + +[source,ts,subs=+macros] +---- +interface SecurityUserProfile { + uid: <<SecurityUserProfileId>> + user: <<SecurityUserProfileUser>> + data: Record<string, any> + labels: Record<string, any> + enabled?: boolean +} +---- + + +[discrete] +[[SecurityUserProfileHitMetadata]] +=== SecurityUserProfileHitMetadata + +[source,ts,subs=+macros] +---- +interface SecurityUserProfileHitMetadata { + _primary_term: <<long>> + _seq_no: <<SequenceNumber>> +} +---- + + +[discrete] +[[SecurityUserProfileId]] +=== SecurityUserProfileId + +[source,ts,subs=+macros] +---- +type SecurityUserProfileId = string +---- + + +[discrete] +[[SecurityUserProfileUser]] +=== SecurityUserProfileUser + +[source,ts,subs=+macros] +---- +interface SecurityUserProfileUser { + email?: string | null + full_name?: <<Name>> | null + realm_name: <<Name>> + realm_domain?: <<Name>> + roles: string[] + username: <<Username>> +} +---- + + +[discrete] +[[SecurityUserProfileWithMetadata]] +=== SecurityUserProfileWithMetadata + +[source,ts,subs=+macros] +---- +interface SecurityUserProfileWithMetadata extends <<SecurityUserProfile>> { + last_synchronized: <<long>> + _doc: <<SecurityUserProfileHitMetadata>> +} +---- + + diff --git a/docs/reference/shared-types/shutdown-types.asciidoc b/docs/reference/shared-types/shutdown-types.asciidoc new file mode 100644 index 000000000..dd2ecb0ac --- /dev/null +++ b/docs/reference/shared-types/shutdown-types.asciidoc @@ -0,0 +1,47 @@ +[[reference-shared-types-shutdown-types]] + +=== `Shutdown` types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[ShutdownType]] +=== ShutdownType + +[source,ts,subs=+macros] +---- +type ShutdownType = 'restart' | 'remove' | 'replace' +---- + + diff --git a/docs/reference/shared-types/slm-types.asciidoc b/docs/reference/shared-types/slm-types.asciidoc new file mode 100644 index 000000000..cbb0b291f --- /dev/null +++ b/docs/reference/shared-types/slm-types.asciidoc @@ -0,0 +1,167 @@ +[[reference-shared-types-slm-types]] + +=== `Slm` types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[SlmConfiguration]] +=== SlmConfiguration + +[source,ts,subs=+macros] +---- +interface SlmConfiguration { + pass:[/**] @property ignore_unavailable If false, the snapshot fails if any data stream or index in indices is missing or closed. If true, the snapshot ignores missing or closed data streams and indices. */ + ignore_unavailable?: boolean + pass:[/**] @property indices A comma-separated list of data streams and indices to include in the snapshot. Multi-index syntax is supported. By default, a snapshot includes all data streams and indices in the cluster. If this argument is provided, the snapshot only includes the specified data streams and clusters. */ + indices?: <<Indices>> + pass:[/**] @property include_global_state If true, the current global state is included in the snapshot. */ + include_global_state?: boolean + pass:[/**] @property feature_states A list of feature states to be included in this snapshot. A list of features available for inclusion in the snapshot and their descriptions be can be retrieved using the get features API. Each feature state includes one or more system indices containing data necessary for the function of that feature. Providing an empty array will include no feature states in the snapshot, regardless of the value of include_global_state. 
By default, all available feature states will be included in the snapshot if include_global_state is true, or no feature states if include_global_state is false. */ + feature_states?: string[] + pass:[/**] @property metadata Attaches arbitrary metadata to the snapshot, such as a record of who took the snapshot, why it was taken, or any other useful data. <<Metadata>> must be less than 1024 bytes. */ + metadata?: <<Metadata>> + pass:[/**] @property partial If false, the entire snapshot will fail if one or more indices included in the snapshot do not have all primary shards available. */ + partial?: boolean +} +---- + + +[discrete] +[[SlmInProgress]] +=== SlmInProgress + +[source,ts,subs=+macros] +---- +interface SlmInProgress { + name: <<Name>> + start_time_millis: <<EpochTime>><<<UnitMillis>>> + state: string + uuid: <<Uuid>> +} +---- + + +[discrete] +[[SlmInvocation]] +=== SlmInvocation + +[source,ts,subs=+macros] +---- +interface SlmInvocation { + snapshot_name: <<Name>> + time: <<DateTime>> +} +---- + + +[discrete] +[[SlmPolicy]] +=== SlmPolicy + +[source,ts,subs=+macros] +---- +interface SlmPolicy { + config?: <<SlmConfiguration>> + name: <<Name>> + repository: string + retention?: <<SlmRetention>> + schedule: <<WatcherCronExpression>> +} +---- + + +[discrete] +[[SlmRetention]] +=== SlmRetention + +[source,ts,subs=+macros] +---- +interface SlmRetention { + pass:[/**] @property expire_after Time period after which a snapshot is considered expired and eligible for deletion. SLM deletes expired snapshots based on the slm.retention_schedule. */ + expire_after: <<Duration>> + pass:[/**] @property max_count Maximum number of snapshots to retain, even if the snapshots have not yet expired. If the number of snapshots in the repository exceeds this limit, the policy retains the most recent snapshots and deletes older snapshots. */ + max_count: <<integer>> + pass:[/**] @property min_count Minimum number of snapshots to retain, even if the snapshots have expired. 
*/ + min_count: <<integer>> +} +---- + + +[discrete] +[[SlmSnapshotLifecycle]] +=== SlmSnapshotLifecycle + +[source,ts,subs=+macros] +---- +interface SlmSnapshotLifecycle { + in_progress?: <<SlmInProgress>> + last_failure?: <<SlmInvocation>> + last_success?: <<SlmInvocation>> + modified_date?: <<DateTime>> + modified_date_millis: <<EpochTime>><<<UnitMillis>>> + next_execution?: <<DateTime>> + next_execution_millis: <<EpochTime>><<<UnitMillis>>> + policy: <<SlmPolicy>> + version: <<VersionNumber>> + stats: <<SlmStatistics>> +} +---- + + +[discrete] +[[SlmStatistics]] +=== SlmStatistics + +[source,ts,subs=+macros] +---- +interface SlmStatistics { + retention_deletion_time?: <<Duration>> + retention_deletion_time_millis?: <<DurationValue>><<<UnitMillis>>> + retention_failed?: <<long>> + retention_runs?: <<long>> + retention_timed_out?: <<long>> + policy?: <<Id>> + total_snapshots_deleted?: <<long>> + snapshots_deleted?: <<long>> + total_snapshot_deletion_failures?: <<long>> + snapshot_deletion_failures?: <<long>> + total_snapshots_failed?: <<long>> + snapshots_failed?: <<long>> + total_snapshots_taken?: <<long>> + snapshots_taken?: <<long>> +} +---- + + diff --git a/docs/reference/shared-types/snapshot-types.asciidoc b/docs/reference/shared-types/snapshot-types.asciidoc new file mode 100644 index 000000000..bfc5164ef --- /dev/null +++ b/docs/reference/shared-types/snapshot-types.asciidoc @@ -0,0 +1,465 @@ +[[reference-shared-types-snapshot-types]] + +=== `Snapshot` types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[SnapshotAzureRepository]] +=== SnapshotAzureRepository + +[source,ts,subs=+macros] +---- +interface SnapshotAzureRepository extends <<SnapshotRepositoryBase>> { + type: 'azure' + settings: <<SnapshotAzureRepositorySettings>> +} +---- + + +[discrete] +[[SnapshotAzureRepositorySettings]] +=== SnapshotAzureRepositorySettings + +[source,ts,subs=+macros] +---- +interface SnapshotAzureRepositorySettings extends <<SnapshotRepositorySettingsBase>> { + client?: string + container?: string + base_path?: string + readonly?: boolean + location_mode?: string +} +---- + + +[discrete] +[[SnapshotFileCountSnapshotStats]] +=== SnapshotFileCountSnapshotStats + +[source,ts,subs=+macros] +---- +interface SnapshotFileCountSnapshotStats { + file_count: <<integer>> + size_in_bytes: <<long>> +} +---- + + +[discrete] +[[SnapshotGcsRepository]] +=== SnapshotGcsRepository + +[source,ts,subs=+macros] +---- +interface SnapshotGcsRepository extends <<SnapshotRepositoryBase>> { + type: 'gcs' + settings: <<SnapshotGcsRepositorySettings>> +} +---- + + +[discrete] +[[SnapshotGcsRepositorySettings]] +=== SnapshotGcsRepositorySettings + +[source,ts,subs=+macros] +---- +interface SnapshotGcsRepositorySettings extends <<SnapshotRepositorySettingsBase>> { + bucket: string + client?: string + base_path?: string + readonly?: boolean + application_name?: string +} +---- + + +[discrete] +[[SnapshotIndexDetails]] +=== SnapshotIndexDetails + +[source,ts,subs=+macros] +---- +interface SnapshotIndexDetails { + shard_count: <<integer>> + size?: <<ByteSize>> + size_in_bytes: <<long>> + max_segments_per_shard: <<long>> +} +---- + + +[discrete] +[[SnapshotInfoFeatureState]] +=== SnapshotInfoFeatureState + +[source,ts,subs=+macros] +---- +interface SnapshotInfoFeatureState { + feature_name: string + indices: <<Indices>> +} +---- + + +[discrete] +[[SnapshotReadOnlyUrlRepository]] +=== SnapshotReadOnlyUrlRepository + +[source,ts,subs=+macros] +---- +interface SnapshotReadOnlyUrlRepository extends <<SnapshotRepositoryBase>> { + type: 'url' + settings: <<SnapshotReadOnlyUrlRepositorySettings>> +} +---- + + +[discrete] +[[SnapshotReadOnlyUrlRepositorySettings]] +=== SnapshotReadOnlyUrlRepositorySettings + +[source,ts,subs=+macros] +---- +interface SnapshotReadOnlyUrlRepositorySettings extends <<SnapshotRepositorySettingsBase>> { + http_max_retries?: <<integer>> + http_socket_timeout?: <<Duration>> + max_number_of_snapshots?: <<integer>> + url: string +} +---- + + +[discrete] +[[SnapshotRepository]] +=== SnapshotRepository + +[source,ts,subs=+macros] +---- +type SnapshotRepository = <<SnapshotAzureRepository>> | <<SnapshotGcsRepository>> | SnapshotS3Repository | <<SnapshotSharedFileSystemRepository>> | <<SnapshotReadOnlyUrlRepository>> | <<SnapshotSourceOnlyRepository>> +---- + + +[discrete] +[[SnapshotRepositoryBase]] +=== SnapshotRepositoryBase + +[source,ts,subs=+macros] +---- +interface SnapshotRepositoryBase { + uuid?: <<Uuid>> +} +---- + + +[discrete] +[[SnapshotRepositorySettingsBase]] +=== 
SnapshotRepositorySettingsBase + +[source,ts,subs=+macros] +---- +interface SnapshotRepositorySettingsBase { + chunk_size?: <<ByteSize>> + compress?: boolean + max_restore_bytes_per_sec?: <<ByteSize>> + max_snapshot_bytes_per_sec?: <<ByteSize>> +} +---- + + +[discrete] +[[SnapshotS3Repository]] +=== SnapshotS3Repository + +[source,ts,subs=+macros] +---- +interface SnapshotS3Repository extends <<SnapshotRepositoryBase>> { + type: 's3' + settings: SnapshotS3RepositorySettings +} +---- + + +[discrete] +[[SnapshotS3RepositorySettings]] +=== SnapshotS3RepositorySettings + +[source,ts,subs=+macros] +---- +interface SnapshotS3RepositorySettings extends <<SnapshotRepositorySettingsBase>> { + bucket: string + client?: string + base_path?: string + readonly?: boolean + server_side_encryption?: boolean + buffer_size?: <<ByteSize>> + canned_acl?: string + storage_class?: string +} +---- + + +[discrete] +[[SnapshotShardsStats]] +=== SnapshotShardsStats + +[source,ts,subs=+macros] +---- +interface SnapshotShardsStats { + done: <<long>> + failed: <<long>> + finalizing: <<long>> + initializing: <<long>> + started: <<long>> + total: <<long>> +} +---- + + +[discrete] +[[SnapshotShardsStatsStage]] +=== SnapshotShardsStatsStage + +[source,ts,subs=+macros] +---- +type SnapshotShardsStatsStage = 'DONE' | 'FAILURE' | 'FINALIZE' | 'INIT' | 'STARTED' +---- + + +[discrete] +[[SnapshotShardsStatsSummary]] +=== SnapshotShardsStatsSummary + +[source,ts,subs=+macros] +---- +interface SnapshotShardsStatsSummary { + incremental: <<SnapshotShardsStatsSummaryItem>> + total: <<SnapshotShardsStatsSummaryItem>> + start_time_in_millis: <<EpochTime>><<<UnitMillis>>> + time?: <<Duration>> + time_in_millis: <<DurationValue>><<<UnitMillis>>> +} +---- + + +[discrete] +[[SnapshotShardsStatsSummaryItem]] +=== SnapshotShardsStatsSummaryItem + +[source,ts,subs=+macros] +---- +interface SnapshotShardsStatsSummaryItem { + file_count: <<long>> + size_in_bytes: <<long>> +} +---- + + +[discrete] +[[SnapshotSharedFileSystemRepository]] +=== SnapshotSharedFileSystemRepository + +[source,ts,subs=+macros] +---- +interface SnapshotSharedFileSystemRepository extends <<SnapshotRepositoryBase>> { + type: 'fs' + settings: <<SnapshotSharedFileSystemRepositorySettings>> +} +---- + + +[discrete] +[[SnapshotSharedFileSystemRepositorySettings]] +=== SnapshotSharedFileSystemRepositorySettings + +[source,ts,subs=+macros] +---- +interface SnapshotSharedFileSystemRepositorySettings extends <<SnapshotRepositorySettingsBase>> { + location: string + max_number_of_snapshots?: <<integer>> + readonly?: boolean +} +---- + + +[discrete] +[[SnapshotSnapshotIndexStats]] +=== SnapshotSnapshotIndexStats + +[source,ts,subs=+macros] +---- +interface SnapshotSnapshotIndexStats { + shards: Record<string, <<SnapshotSnapshotShardsStatus>>> + shards_stats: <<SnapshotShardsStats>> + stats: <<SnapshotSnapshotStats>> +} +---- + + +[discrete] +[[SnapshotSnapshotInfo]] +=== SnapshotSnapshotInfo + +[source,ts,subs=+macros] +---- +interface SnapshotSnapshotInfo { + data_streams: string[] + duration?: <<Duration>> + duration_in_millis?: <<DurationValue>><<<UnitMillis>>> + end_time?: <<DateTime>> + end_time_in_millis?: <<EpochTime>><<<UnitMillis>>> + failures?: <<SnapshotSnapshotShardFailure>>[] + include_global_state?: boolean + indices?: <<IndexName>>[] + index_details?: Record<<<IndexName>>, <<SnapshotIndexDetails>>> + metadata?: <<Metadata>> + reason?: string + repository?: <<Name>> + snapshot: <<Name>> + shards?: <<ShardStatistics>> + start_time?: <<DateTime>> + 
start_time_in_millis?: <<EpochTime>><<<UnitMillis>>> + state?: string + uuid: <<Uuid>> + version?: <<VersionString>> + version_id?: <<VersionNumber>> + feature_states?: <<SnapshotInfoFeatureState>>[] +} +---- + + +[discrete] +[[SnapshotSnapshotShardFailure]] +=== SnapshotSnapshotShardFailure + +[source,ts,subs=+macros] +---- +interface SnapshotSnapshotShardFailure { + index: <<IndexName>> + node_id?: <<Id>> + reason: string + shard_id: <<Id>> + index_uuid: <<Id>> + status: string +} +---- + + +[discrete] +[[SnapshotSnapshotShardsStatus]] +=== SnapshotSnapshotShardsStatus + +[source,ts,subs=+macros] +---- +interface SnapshotSnapshotShardsStatus { + stage: <<SnapshotShardsStatsStage>> + stats: <<SnapshotShardsStatsSummary>> +} +---- + + +[discrete] +[[SnapshotSnapshotSort]] +=== SnapshotSnapshotSort + +[source,ts,subs=+macros] +---- +type SnapshotSnapshotSort = 'start_time' | 'duration' | 'name' | 'index_count' | 'repository' | 'shard_count' | 'failed_shard_count' +---- + + +[discrete] +[[SnapshotSnapshotStats]] +=== SnapshotSnapshotStats + +[source,ts,subs=+macros] +---- +interface SnapshotSnapshotStats { + incremental: <<SnapshotFileCountSnapshotStats>> + start_time_in_millis: <<EpochTime>><<<UnitMillis>>> + time?: <<Duration>> + time_in_millis: <<DurationValue>><<<UnitMillis>>> + total: <<SnapshotFileCountSnapshotStats>> +} +---- + + +[discrete] +[[SnapshotSourceOnlyRepository]] +=== SnapshotSourceOnlyRepository + +[source,ts,subs=+macros] +---- +interface SnapshotSourceOnlyRepository extends <<SnapshotRepositoryBase>> { + type: 'source' + settings: <<SnapshotSourceOnlyRepositorySettings>> +} +---- + + +[discrete] +[[SnapshotSourceOnlyRepositorySettings]] +=== SnapshotSourceOnlyRepositorySettings + +[source,ts,subs=+macros] +---- +interface SnapshotSourceOnlyRepositorySettings extends <<SnapshotRepositorySettingsBase>> { + delegate_type?: string + max_number_of_snapshots?: <<integer>> + read_only?: boolean + readonly?: boolean +} +---- + + +[discrete] +[[SnapshotStatus]] +=== SnapshotStatus + +[source,ts,subs=+macros] +---- +interface SnapshotStatus { + include_global_state: boolean + indices: Record<string, <<SnapshotSnapshotIndexStats>>> + repository: string + shards_stats: <<SnapshotShardsStats>> + snapshot: string + state: string + stats: <<SnapshotSnapshotStats>> + uuid: <<Uuid>> +} +---- + + diff --git a/docs/reference/shared-types/synonyms-types.asciidoc b/docs/reference/shared-types/synonyms-types.asciidoc new file mode 100644 index 000000000..1cbffa38b --- /dev/null +++ b/docs/reference/shared-types/synonyms-types.asciidoc @@ -0,0 +1,92 @@ +[[reference-shared-types-synonyms-types]] + +=== `Synonyms` types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[SynonymsSynonymRule]] +=== SynonymsSynonymRule + +[source,ts,subs=+macros] +---- +interface SynonymsSynonymRule { + pass:[/**] @property id Synonym Rule identifier */ + id?: <<Id>> + pass:[/**] @property synonyms Synonyms, in Solr format, that conform the synonym rule. See */ + synonyms: <<SynonymsSynonymString>> +} +---- + + +[discrete] +[[SynonymsSynonymRuleRead]] +=== SynonymsSynonymRuleRead + +[source,ts,subs=+macros] +---- +interface SynonymsSynonymRuleRead { + pass:[/**] @property id Synonym Rule identifier */ + id: <<Id>> + pass:[/**] @property synonyms Synonyms, in Solr format, that conform the synonym rule. See */ + synonyms: <<SynonymsSynonymString>> +} +---- + + +[discrete] +[[SynonymsSynonymString]] +=== SynonymsSynonymString + +[source,ts,subs=+macros] +---- +type SynonymsSynonymString = string +---- + + +[discrete] +[[SynonymsSynonymsUpdateResult]] +=== SynonymsSynonymsUpdateResult + +[source,ts,subs=+macros] +---- +interface SynonymsSynonymsUpdateResult { + pass:[/**] @property result Update operation result */ + result: <<Result>> + pass:[/**] @property reload_analyzers_details Updating synonyms in a synonym set reloads the associated analyzers. This is the analyzers reloading result */ + reload_analyzers_details: IndicesReloadSearchAnalyzersReloadResult +} +---- + + diff --git a/docs/reference/shared-types/tasks-types.asciidoc b/docs/reference/shared-types/tasks-types.asciidoc new file mode 100644 index 000000000..70d94f008 --- /dev/null +++ b/docs/reference/shared-types/tasks-types.asciidoc @@ -0,0 +1,129 @@ +[[reference-shared-types-tasks-types]] + +=== `Tasks` types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[TasksGroupBy]] +=== TasksGroupBy + +[source,ts,subs=+macros] +---- +type TasksGroupBy = 'nodes' | 'parents' | 'none' +---- + + +[discrete] +[[TasksNodeTasks]] +=== TasksNodeTasks + +[source,ts,subs=+macros] +---- +interface TasksNodeTasks { + name?: <<NodeId>> + transport_address?: <<TransportAddress>> + host?: <<Host>> + ip?: <<Ip>> + roles?: string[] + attributes?: Record<string, string> + tasks: Record<<<TaskId>>, <<TasksTaskInfo>>> +} +---- + + +[discrete] +[[TasksParentTaskInfo]] +=== TasksParentTaskInfo + +[source,ts,subs=+macros] +---- +interface TasksParentTaskInfo extends <<TasksTaskInfo>> { + children?: <<TasksTaskInfo>>[] +} +---- + + +[discrete] +[[TasksTaskInfo]] +=== TasksTaskInfo + +[source,ts,subs=+macros] +---- +interface TasksTaskInfo { + action: string + cancelled?: boolean + cancellable: boolean + description?: string + headers: Record<string, string> + id: <<long>> + node: <<NodeId>> + running_time?: <<Duration>> + running_time_in_nanos: <<DurationValue>><<<UnitNanos>>> + start_time_in_millis: <<EpochTime>><<<UnitMillis>>> + pass:[/**] @property status Task status information can vary wildly from task to task. */ + status?: any + type: string + parent_task_id?: <<TaskId>> +} +---- + + +[discrete] +[[TasksTaskInfos]] +=== TasksTaskInfos + +[source,ts,subs=+macros] +---- +type TasksTaskInfos = <<TasksTaskInfo>>[] | Record<string, <<TasksParentTaskInfo>>> +---- + + +[discrete] +[[TasksTaskListResponseBase]] +=== TasksTaskListResponseBase + +[source,ts,subs=+macros] +---- +interface TasksTaskListResponseBase { + node_failures?: <<ErrorCause>>[] + task_failures?: <<TaskFailure>>[] + pass:[/**] @property nodes Task information grouped by node, if `group_by` was set to `node` (the default). */ + nodes?: Record<string, <<TasksNodeTasks>>> + pass:[/**] @property tasks Either a flat list of tasks if `group_by` was set to `none`, or grouped by parents if `group_by` was set to `parents`. */ + tasks?: <<TasksTaskInfos>> +} +---- + + diff --git a/docs/reference/shared-types/transform-types.asciidoc b/docs/reference/shared-types/transform-types.asciidoc new file mode 100644 index 000000000..9e5086b83 --- /dev/null +++ b/docs/reference/shared-types/transform-types.asciidoc @@ -0,0 +1,195 @@ +[[reference-shared-types-transform-types]] + +=== `Transform` types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[TransformDestination]] +=== TransformDestination + +[source,ts,subs=+macros] +---- +interface TransformDestination { + pass:[/**] @property index The destination index for the transform. The mappings of the destination index are deduced based on the source fields when possible. If alternate mappings are required, use the create index API prior to starting the transform. */ + index?: <<IndexName>> + pass:[/**] @property pipeline The unique identifier for an ingest pipeline. */ + pipeline?: string +} +---- + + +[discrete] +[[TransformLatest]] +=== TransformLatest + +[source,ts,subs=+macros] +---- +interface TransformLatest { + pass:[/**] @property sort Specifies the date field that is used to identify the latest documents. */ + sort: <<Field>> + pass:[/**] @property unique_key Specifies an array of one or more fields that are used to group the data. */ + unique_key: <<Field>>[] +} +---- + + +[discrete] +[[TransformPivot]] +=== TransformPivot + +[source,ts,subs=+macros] +---- +interface TransformPivot { + pass:[/**] @property aggregations Defines how to aggregate the grouped data. The following aggregations are currently supported: average, bucket script, bucket selector, cardinality, filter, geo bounds, geo centroid, geo line, max, median absolute deviation, min, missing, percentiles, rare terms, scripted metric, stats, sum, terms, top metrics, value count, weighted average. */ + aggregations?: Record<string, <<AggregationsAggregationContainer>>> + pass:[/**] @property aggs Defines how to aggregate the grouped data. The following aggregations are currently supported: average, bucket script, bucket selector, cardinality, filter, geo bounds, geo centroid, geo line, max, median absolute deviation, min, missing, percentiles, rare terms, scripted metric, stats, sum, terms, top metrics, value count, weighted average. */ + aggs?: Record<string, <<AggregationsAggregationContainer>>> + pass:[/**] @property group_by Defines how to group the data. More than one grouping can be defined per pivot. The following groupings are currently supported: date histogram, geotile grid, histogram, terms. */ + group_by?: Record<string, <<TransformPivotGroupByContainer>>> +} +---- + + +[discrete] +[[TransformPivotGroupByContainer]] +=== TransformPivotGroupByContainer + +[source,ts,subs=+macros] +---- +interface TransformPivotGroupByContainer { + date_histogram?: <<AggregationsDateHistogramAggregation>> + geotile_grid?: <<AggregationsGeoTileGridAggregation>> + histogram?: <<AggregationsHistogramAggregation>> + terms?: <<AggregationsTermsAggregation>> +} +---- + + +[discrete] +[[TransformRetentionPolicy]] +=== TransformRetentionPolicy + +[source,ts,subs=+macros] +---- +interface TransformRetentionPolicy { + pass:[/**] @property field The date field that is used to calculate the age of the document. */ + field: <<Field>> + pass:[/**] @property max_age Specifies the maximum age of a document in the destination index. 
Documents that are older than the configured value are removed from the destination index. */ + max_age: <<Duration>> +} +---- + + +[discrete] +[[TransformRetentionPolicyContainer]] +=== TransformRetentionPolicyContainer + +[source,ts,subs=+macros] +---- +interface TransformRetentionPolicyContainer { + pass:[/**] @property time Specifies that the transform uses a time field to set the retention policy. */ + time?: <<TransformRetentionPolicy>> +} +---- + + +[discrete] +[[TransformSettings]] +=== TransformSettings + +[source,ts,subs=+macros] +---- +interface TransformSettings { + pass:[/**] @property align_checkpoints Specifies whether the transform checkpoint ranges should be optimized for performance. Such optimization can align checkpoint ranges with the date histogram interval when date histogram is specified as a group source in the transform config. As a result, fewer document updates in the destination index will be performed, thus improving overall performance. */ + align_checkpoints?: boolean + pass:[/**] @property dates_as_epoch_millis Defines if dates in the output should be written as ISO formatted strings or as millis since epoch. epoch_millis was the default for transforms created before version 7.11. For compatible output set this value to `true`. */ + dates_as_epoch_millis?: boolean + pass:[/**] @property deduce_mappings Specifies whether the transform should deduce the destination index mappings from the transform configuration. */ + deduce_mappings?: boolean + pass:[/**] @property docs_per_second Specifies a limit on the number of input documents per second. This setting throttles the transform by adding a wait time between search requests. The default value is null, which disables throttling. */ + docs_per_second?: <<float>> + pass:[/**] @property max_page_search_size Defines the initial page size to use for the composite aggregation for each checkpoint. If circuit breaker exceptions occur, the page size is dynamically adjusted to a lower value. The minimum value is `10` and the maximum is `65,536`. */ + max_page_search_size?: <<integer>> + pass:[/**] @property unattended If `true`, the transform runs in unattended mode. In unattended mode, the transform retries indefinitely in case of an error, which means the transform never fails. Setting the number of retries other than infinite fails in validation. */ + unattended?: boolean +} +---- + + +[discrete] +[[TransformSource]] +=== TransformSource + +[source,ts,subs=+macros] +---- +interface TransformSource { + pass:[/**] @property index The source indices for the transform. It can be a single index, an index pattern (for example, `"my-index-*"`), an array of indices (for example, `["my-index-000001", "my-index-000002"]`), or an array of index patterns (for example, `["my-index-*", "my-other-index-*"]`). For remote indices use the syntax `"remote_name:index_name"`. If any indices are in remote clusters then the master node and at least one transform node must have the `remote_cluster_client` node role. */ + index: <<Indices>> + pass:[/**] @property query A query clause that retrieves a subset of data from the source index. */ + query?: <<QueryDslQueryContainer>> + pass:[/**] @property runtime_mappings Definitions of search-time runtime fields that can be used by the transform. For search runtime fields all data nodes, including remote nodes, must be 7.12 or later. 
*/ + runtime_mappings?: <<MappingRuntimeFields>> +} +---- + + +[discrete] +[[TransformSyncContainer]] +=== TransformSyncContainer + +[source,ts,subs=+macros] +---- +interface TransformSyncContainer { + pass:[/**] @property time Specifies that the transform uses a time field to synchronize the source and destination indices. */ + time?: <<TransformTimeSync>> +} +---- + + +[discrete] +[[TransformTimeSync]] +=== TransformTimeSync + +[source,ts,subs=+macros] +---- +interface TransformTimeSync { + pass:[/**] @property delay The time delay between the current time and the latest input data time. */ + delay?: <<Duration>> + pass:[/**] @property field The date field that is used to identify new documents in the source. In general, it’s a good idea to use a field that contains the ingest timestamp. If you use a different field, you might need to set the delay such that it accounts for data transmission delays. */ + field: <<Field>> +} +---- + + diff --git a/docs/reference/shared-types/watcher-types.asciidoc b/docs/reference/shared-types/watcher-types.asciidoc new file mode 100644 index 000000000..1c136408b --- /dev/null +++ b/docs/reference/shared-types/watcher-types.asciidoc @@ -0,0 +1,1301 @@ +[[reference-shared-types-watcher-types]] + +=== `Watcher` types + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[WatcherAcknowledgeState]] +=== WatcherAcknowledgeState + +[source,ts,subs=+macros] +---- +interface WatcherAcknowledgeState { + state: <<WatcherAcknowledgementOptions>> + timestamp: <<DateTime>> +} +---- + + +[discrete] +[[WatcherAcknowledgementOptions]] +=== WatcherAcknowledgementOptions + +[source,ts,subs=+macros] +---- +type WatcherAcknowledgementOptions = 'awaits_successful_execution' | 'ackable' | 'acked' +---- + + +[discrete] +[[WatcherAction]] +=== WatcherAction + +[source,ts,subs=+macros] +---- +interface WatcherAction { + action_type?: <<WatcherActionType>> + condition?: <<WatcherConditionContainer>> + foreach?: string + max_iterations?: <<integer>> + name?: <<Name>> + throttle_period?: <<Duration>> + throttle_period_in_millis?: <<DurationValue>><<<UnitMillis>>> + transform?: <<TransformContainer>> + index?: <<WatcherIndexAction>> + logging?: <<WatcherLoggingAction>> + email?: <<WatcherEmailAction>> + pagerduty?: <<WatcherPagerDutyAction>> + slack?: <<WatcherSlackAction>> + webhook?: <<WatcherWebhookAction>> +} +---- + + +[discrete] +[[WatcherActionExecutionMode]] +=== WatcherActionExecutionMode + +[source,ts,subs=+macros] +---- +type WatcherActionExecutionMode = 'simulate' | 'force_simulate' | 'execute' | 'force_execute' | 'skip' +---- + + +[discrete] +[[WatcherActionStatus]] +=== WatcherActionStatus + +[source,ts,subs=+macros] +---- +interface WatcherActionStatus { + ack: <<WatcherAcknowledgeState>> + last_execution?: <<WatcherExecutionState>> + last_successful_execution?: <<WatcherExecutionState>> + last_throttle?: <<WatcherThrottleState>> +} +---- + + +[discrete] +[[WatcherActionStatusOptions]] +=== WatcherActionStatusOptions + +[source,ts,subs=+macros] +---- +type WatcherActionStatusOptions = 'success' | 'failure' | 'simulated' | 'throttled' +---- + + +[discrete] +[[WatcherActionType]] +=== WatcherActionType + +[source,ts,subs=+macros] +---- +type WatcherActionType = 'email' | 'webhook' | 'index' | 'logging' | 'slack' | 'pagerduty' +---- + + +[discrete] +[[WatcherActions]] +=== WatcherActions + +[source,ts,subs=+macros] +---- +type WatcherActions = Record<<<IndexName>>, <<WatcherActionStatus>>> +---- + + +[discrete] +[[WatcherActivationState]] +=== WatcherActivationState + +[source,ts,subs=+macros] +---- +interface WatcherActivationState { + active: boolean + timestamp: <<DateTime>> +} +---- + + +[discrete] +[[WatcherActivationStatus]] +=== WatcherActivationStatus + +[source,ts,subs=+macros] +---- +interface WatcherActivationStatus { + actions: <<WatcherActions>> + state: <<WatcherActivationState>> + version: <<VersionNumber>> +} +---- + + +[discrete] +[[WatcherAlwaysCondition]] +=== WatcherAlwaysCondition + +[source,ts,subs=+macros] +---- +interface WatcherAlwaysCondition {} +---- + + +[discrete] +[[WatcherArrayCompareCondition]] +=== WatcherArrayCompareCondition + +[source,ts,subs=+macros] +---- +interface WatcherArrayCompareConditionKeys { + path: string +} +type WatcherArrayCompareCondition = WatcherArrayCompareConditionKeys + & { [property: 
string]: <<WatcherArrayCompareOpParams>> | string } +---- + + +[discrete] +[[WatcherArrayCompareOpParams]] +=== WatcherArrayCompareOpParams + +[source,ts,subs=+macros] +---- +interface WatcherArrayCompareOpParams { + quantifier: <<WatcherQuantifier>> + value: <<FieldValue>> +} +---- + + +[discrete] +[[WatcherChainInput]] +=== WatcherChainInput + +[source,ts,subs=+macros] +---- +interface WatcherChainInput { + inputs: Partial<Record<string, <<WatcherInputContainer>>>>[] +} +---- + + +[discrete] +[[WatcherConditionContainer]] +=== WatcherConditionContainer + +[source,ts,subs=+macros] +---- +interface WatcherConditionContainer { + always?: <<WatcherAlwaysCondition>> + array_compare?: Partial<Record<string, <<WatcherArrayCompareCondition>>>> + compare?: Partial<Record<string, Partial<Record<<<WatcherConditionOp>>, <<FieldValue>>>>>> + never?: <<WatcherNeverCondition>> + script?: <<WatcherScriptCondition>> +} +---- + + +[discrete] +[[WatcherConditionOp]] +=== WatcherConditionOp + +[source,ts,subs=+macros] +---- +type WatcherConditionOp = 'not_eq' | 'eq' | 'lt' | 'gt' | 'lte' | 'gte' +---- + + +[discrete] +[[WatcherConditionType]] +=== WatcherConditionType + +[source,ts,subs=+macros] +---- +type WatcherConditionType = 'always' | 'never' | 'script' | 'compare' | 'array_compare' +---- + + +[discrete] +[[WatcherConnectionScheme]] +=== WatcherConnectionScheme + +[source,ts,subs=+macros] +---- +type WatcherConnectionScheme = 'http' | 'https' +---- + + +[discrete] +[[WatcherCronExpression]] +=== WatcherCronExpression + +[source,ts,subs=+macros] +---- +type WatcherCronExpression = string +---- + + +[discrete] +[[WatcherDailySchedule]] +=== WatcherDailySchedule + +[source,ts,subs=+macros] +---- +interface WatcherDailySchedule { + at: <<WatcherScheduleTimeOfDay>>[] +} +---- + + +[discrete] +[[WatcherDataAttachmentFormat]] +=== WatcherDataAttachmentFormat + +[source,ts,subs=+macros] +---- +type WatcherDataAttachmentFormat = 'json' | 'yaml' +---- + + +[discrete] +[[WatcherDataEmailAttachment]] +=== WatcherDataEmailAttachment + +[source,ts,subs=+macros] +---- +interface WatcherDataEmailAttachment { + format?: <<WatcherDataAttachmentFormat>> +} +---- + + +[discrete] +[[WatcherDay]] +=== WatcherDay + +[source,ts,subs=+macros] +---- +type WatcherDay = 'sunday' | 'monday' | 'tuesday' | 'wednesday' | 'thursday' | 'friday' | 'saturday' +---- + + +[discrete] +[[WatcherEmail]] +=== WatcherEmail + +[source,ts,subs=+macros] +---- +interface WatcherEmail { + id?: <<Id>> + bcc?: string[] + body?: <<WatcherEmailBody>> + cc?: string[] + from?: string + priority?: <<WatcherEmailPriority>> + reply_to?: string[] + sent_date?: <<DateTime>> + subject: string + to: string[] + attachments?: Record<string, <<WatcherEmailAttachmentContainer>>> +} +---- + + +[discrete] +[[WatcherEmailAction]] +=== WatcherEmailAction + +[source,ts,subs=+macros] +---- +interface WatcherEmailAction extends <<WatcherEmail>> {} +---- + + +[discrete] +[[WatcherEmailAttachmentContainer]] +=== WatcherEmailAttachmentContainer + +[source,ts,subs=+macros] +---- +interface WatcherEmailAttachmentContainer { + http?: <<WatcherHttpEmailAttachment>> + reporting?: <<WatcherReportingEmailAttachment>> + data?: <<WatcherDataEmailAttachment>> +} +---- + + +[discrete] +[[WatcherEmailBody]] +=== WatcherEmailBody + +[source,ts,subs=+macros] +---- +interface WatcherEmailBody { + html?: string + text?: string +} +---- + + +[discrete] +[[WatcherEmailPriority]] +=== WatcherEmailPriority + +[source,ts,subs=+macros] +---- +type WatcherEmailPriority = 'lowest' | 'low' | 
'normal' | 'high' | 'highest' +---- + + +[discrete] +[[WatcherEmailResult]] +=== WatcherEmailResult + +[source,ts,subs=+macros] +---- +interface WatcherEmailResult { + account?: string + message: <<WatcherEmail>> + reason?: string +} +---- + + +[discrete] +[[WatcherExecutionPhase]] +=== WatcherExecutionPhase + +[source,ts,subs=+macros] +---- +type WatcherExecutionPhase = 'awaits_execution' | 'started' | 'input' | 'condition' | 'actions' | 'watch_transform' | 'aborted' | 'finished' +---- + + +[discrete] +[[WatcherExecutionResult]] +=== WatcherExecutionResult + +[source,ts,subs=+macros] +---- +interface WatcherExecutionResult { + actions: <<WatcherExecutionResultAction>>[] + condition: <<WatcherExecutionResultCondition>> + execution_duration: <<DurationValue>><<<UnitMillis>>> + execution_time: <<DateTime>> + input: <<WatcherExecutionResultInput>> +} +---- + + +[discrete] +[[WatcherExecutionResultAction]] +=== WatcherExecutionResultAction + +[source,ts,subs=+macros] +---- +interface WatcherExecutionResultAction { + email?: <<WatcherEmailResult>> + id: <<Id>> + index?: <<WatcherIndexResult>> + logging?: <<WatcherLoggingResult>> + pagerduty?: <<WatcherPagerDutyResult>> + reason?: string + slack?: <<WatcherSlackResult>> + status: <<WatcherActionStatusOptions>> + type: <<WatcherActionType>> + webhook?: <<WatcherWebhookResult>> + error?: <<ErrorCause>> +} +---- + + +[discrete] +[[WatcherExecutionResultCondition]] +=== WatcherExecutionResultCondition + +[source,ts,subs=+macros] +---- +interface WatcherExecutionResultCondition { + met: boolean + status: <<WatcherActionStatusOptions>> + type: <<WatcherConditionType>> +} +---- + + +[discrete] +[[WatcherExecutionResultInput]] +=== WatcherExecutionResultInput + +[source,ts,subs=+macros] +---- +interface WatcherExecutionResultInput { + payload: Record<string, any> + status: <<WatcherActionStatusOptions>> + type: <<WatcherInputType>> +} +---- + + +[discrete] +[[WatcherExecutionState]] +=== WatcherExecutionState + +[source,ts,subs=+macros] +---- +interface WatcherExecutionState { + successful: boolean + timestamp: <<DateTime>> + reason?: string +} +---- + + +[discrete] +[[WatcherExecutionStatus]] +=== WatcherExecutionStatus + +[source,ts,subs=+macros] +---- +type WatcherExecutionStatus = 'awaits_execution' | 'checking' | 'execution_not_needed' | 'throttled' | 'executed' | 'failed' | 'deleted_while_queued' | 'not_executed_already_queued' +---- + + +[discrete] +[[WatcherExecutionThreadPool]] +=== WatcherExecutionThreadPool + +[source,ts,subs=+macros] +---- +interface WatcherExecutionThreadPool { + max_size: <<long>> + queue_size: <<long>> +} +---- + + +[discrete] +[[WatcherHourAndMinute]] +=== WatcherHourAndMinute + +[source,ts,subs=+macros] +---- +interface WatcherHourAndMinute { + hour: <<integer>>[] + minute: <<integer>>[] +} +---- + + +[discrete] +[[WatcherHourlySchedule]] +=== WatcherHourlySchedule + +[source,ts,subs=+macros] +---- +interface WatcherHourlySchedule { + minute: <<integer>>[] +} +---- + + +[discrete] +[[WatcherHttpEmailAttachment]] +=== WatcherHttpEmailAttachment + +[source,ts,subs=+macros] +---- +interface WatcherHttpEmailAttachment { + content_type?: string + inline?: boolean + request?: <<WatcherHttpInputRequestDefinition>> +} +---- + + +[discrete] +[[WatcherHttpInput]] +=== WatcherHttpInput + +[source,ts,subs=+macros] +---- +interface WatcherHttpInput { + extract?: string[] + request?: <<WatcherHttpInputRequestDefinition>> + response_content_type?: <<WatcherResponseContentType>> +} +---- + + +[discrete] 
+[[WatcherHttpInputAuthentication]] +=== WatcherHttpInputAuthentication + +[source,ts,subs=+macros] +---- +interface WatcherHttpInputAuthentication { + basic: <<WatcherHttpInputBasicAuthentication>> +} +---- + + +[discrete] +[[WatcherHttpInputBasicAuthentication]] +=== WatcherHttpInputBasicAuthentication + +[source,ts,subs=+macros] +---- +interface WatcherHttpInputBasicAuthentication { + password: <<Password>> + username: <<Username>> +} +---- + + +[discrete] +[[WatcherHttpInputMethod]] +=== WatcherHttpInputMethod + +[source,ts,subs=+macros] +---- +type WatcherHttpInputMethod = 'head' | 'get' | 'post' | 'put' | 'delete' +---- + + +[discrete] +[[WatcherHttpInputProxy]] +=== WatcherHttpInputProxy + +[source,ts,subs=+macros] +---- +interface WatcherHttpInputProxy { + host: <<Host>> + port: <<uint>> +} +---- + + +[discrete] +[[WatcherHttpInputRequestDefinition]] +=== WatcherHttpInputRequestDefinition + +[source,ts,subs=+macros] +---- +interface WatcherHttpInputRequestDefinition { + auth?: <<WatcherHttpInputAuthentication>> + body?: string + connection_timeout?: <<Duration>> + headers?: Record<string, string> + host?: <<Host>> + method?: <<WatcherHttpInputMethod>> + params?: Record<string, string> + path?: string + port?: <<uint>> + proxy?: <<WatcherHttpInputProxy>> + read_timeout?: <<Duration>> + scheme?: <<WatcherConnectionScheme>> + url?: string +} +---- + + +[discrete] +[[WatcherHttpInputRequestResult]] +=== WatcherHttpInputRequestResult + +[source,ts,subs=+macros] +---- +interface WatcherHttpInputRequestResult extends <<WatcherHttpInputRequestDefinition>> {} +---- + + +[discrete] +[[WatcherHttpInputResponseResult]] +=== WatcherHttpInputResponseResult + +[source,ts,subs=+macros] +---- +interface WatcherHttpInputResponseResult { + body: string + headers: <<HttpHeaders>> + status: <<integer>> +} +---- + + +[discrete] +[[WatcherIndexAction]] +=== WatcherIndexAction + +[source,ts,subs=+macros] +---- +interface WatcherIndexAction { + index: <<IndexName>> + doc_id?: <<Id>> + refresh?: <<Refresh>> + op_type?: <<OpType>> + timeout?: <<Duration>> + execution_time_field?: <<Field>> +} +---- + + +[discrete] +[[WatcherIndexResult]] +=== WatcherIndexResult + +[source,ts,subs=+macros] +---- +interface WatcherIndexResult { + response: <<WatcherIndexResultSummary>> +} +---- + + +[discrete] +[[WatcherIndexResultSummary]] +=== WatcherIndexResultSummary + +[source,ts,subs=+macros] +---- +interface WatcherIndexResultSummary { + created: boolean + id: <<Id>> + index: <<IndexName>> + result: <<Result>> + version: <<VersionNumber>> +} +---- + + +[discrete] +[[WatcherInputContainer]] +=== WatcherInputContainer + +[source,ts,subs=+macros] +---- +interface WatcherInputContainer { + chain?: <<WatcherChainInput>> + http?: <<WatcherHttpInput>> + search?: <<WatcherSearchInput>> + simple?: Record<string, any> +} +---- + + +[discrete] +[[WatcherInputType]] +=== WatcherInputType + +[source,ts,subs=+macros] +---- +type WatcherInputType = 'http' | 'search' | 'simple' +---- + + +[discrete] +[[WatcherLoggingAction]] +=== WatcherLoggingAction + +[source,ts,subs=+macros] +---- +interface WatcherLoggingAction { + level?: string + text: string + category?: string +} +---- + + +[discrete] +[[WatcherLoggingResult]] +=== WatcherLoggingResult + +[source,ts,subs=+macros] +---- +interface WatcherLoggingResult { + logged_text: string +} +---- + + +[discrete] +[[WatcherMonth]] +=== WatcherMonth + +[source,ts,subs=+macros] +---- +type WatcherMonth = 'january' | 'february' | 'march' | 'april' | 'may' | 'june' | 'july' | 'august' | 
'september' | 'october' | 'november' | 'december' +---- + + +[discrete] +[[WatcherNeverCondition]] +=== WatcherNeverCondition + +[source,ts,subs=+macros] +---- +interface WatcherNeverCondition {} +---- + + +[discrete] +[[WatcherPagerDutyAction]] +=== WatcherPagerDutyAction + +[source,ts,subs=+macros] +---- +interface WatcherPagerDutyAction extends <<WatcherPagerDutyEvent>> {} +---- + + +[discrete] +[[WatcherPagerDutyContext]] +=== WatcherPagerDutyContext + +[source,ts,subs=+macros] +---- +interface WatcherPagerDutyContext { + href?: string + src?: string + type: <<WatcherPagerDutyContextType>> +} +---- + + +[discrete] +[[WatcherPagerDutyContextType]] +=== WatcherPagerDutyContextType + +[source,ts,subs=+macros] +---- +type WatcherPagerDutyContextType = 'link' | 'image' +---- + + +[discrete] +[[WatcherPagerDutyEvent]] +=== WatcherPagerDutyEvent + +[source,ts,subs=+macros] +---- +interface WatcherPagerDutyEvent { + account?: string + attach_payload: boolean + client?: string + client_url?: string + contexts?: <<WatcherPagerDutyContext>>[] + context?: <<WatcherPagerDutyContext>>[] + description: string + event_type?: <<WatcherPagerDutyEventType>> + incident_key: string + proxy?: <<WatcherPagerDutyEventProxy>> +} +---- + + +[discrete] +[[WatcherPagerDutyEventProxy]] +=== WatcherPagerDutyEventProxy + +[source,ts,subs=+macros] +---- +interface WatcherPagerDutyEventProxy { + host?: <<Host>> + port?: <<integer>> +} +---- + + +[discrete] +[[WatcherPagerDutyEventType]] +=== WatcherPagerDutyEventType + +[source,ts,subs=+macros] +---- +type WatcherPagerDutyEventType = 'trigger' | 'resolve' | 'acknowledge' +---- + + +[discrete] +[[WatcherPagerDutyResult]] +=== WatcherPagerDutyResult + +[source,ts,subs=+macros] +---- +interface WatcherPagerDutyResult { + event: <<WatcherPagerDutyEvent>> + reason?: string + request?: <<WatcherHttpInputRequestResult>> + response?: <<WatcherHttpInputResponseResult>> +} +---- + + +[discrete] +[[WatcherQuantifier]] +=== WatcherQuantifier + +[source,ts,subs=+macros] +---- +type WatcherQuantifier = 'some' | 'all' +---- + + +[discrete] +[[WatcherQueryWatch]] +=== WatcherQueryWatch + +[source,ts,subs=+macros] +---- +interface WatcherQueryWatch { + _id: <<Id>> + status?: <<WatcherWatchStatus>> + watch?: <<WatcherWatch>> + _primary_term?: <<integer>> + _seq_no?: <<SequenceNumber>> +} +---- + + +[discrete] +[[WatcherReportingEmailAttachment]] +=== WatcherReportingEmailAttachment + +[source,ts,subs=+macros] +---- +interface WatcherReportingEmailAttachment { + url: string + inline?: boolean + retries?: <<integer>> + interval?: <<Duration>> + request?: <<WatcherHttpInputRequestDefinition>> +} +---- + + +[discrete] +[[WatcherResponseContentType]] +=== WatcherResponseContentType + +[source,ts,subs=+macros] +---- +type WatcherResponseContentType = 'json' | 'yaml' | 'text' +---- + + +[discrete] +[[WatcherScheduleContainer]] +=== WatcherScheduleContainer + +[source,ts,subs=+macros] +---- +interface WatcherScheduleContainer { + timezone?: string + cron?: <<WatcherCronExpression>> + daily?: <<WatcherDailySchedule>> + hourly?: <<WatcherHourlySchedule>> + interval?: <<Duration>> + monthly?: <<WatcherTimeOfMonth>> | <<WatcherTimeOfMonth>>[] + weekly?: <<WatcherTimeOfWeek>> | <<WatcherTimeOfWeek>>[] + yearly?: <<WatcherTimeOfYear>> | <<WatcherTimeOfYear>>[] +} +---- + + +[discrete] +[[WatcherScheduleTimeOfDay]] +=== WatcherScheduleTimeOfDay + +[source,ts,subs=+macros] +---- +type WatcherScheduleTimeOfDay = string | <<WatcherHourAndMinute>> +---- + + +[discrete] +[[WatcherScheduleTriggerEvent]] 
+=== WatcherScheduleTriggerEvent + +[source,ts,subs=+macros] +---- +interface WatcherScheduleTriggerEvent { + scheduled_time: <<DateTime>> + triggered_time?: <<DateTime>> +} +---- + + +[discrete] +[[WatcherScriptCondition]] +=== WatcherScriptCondition + +[source,ts,subs=+macros] +---- +interface WatcherScriptCondition { + lang?: string + params?: Record<string, any> + source?: string + id?: string +} +---- + + +[discrete] +[[WatcherSearchInput]] +=== WatcherSearchInput + +[source,ts,subs=+macros] +---- +interface WatcherSearchInput { + extract?: string[] + request: <<WatcherSearchInputRequestDefinition>> + timeout?: <<Duration>> +} +---- + + +[discrete] +[[WatcherSearchInputRequestBody]] +=== WatcherSearchInputRequestBody + +[source,ts,subs=+macros] +---- +interface WatcherSearchInputRequestBody { + query: <<QueryDslQueryContainer>> +} +---- + + +[discrete] +[[WatcherSearchInputRequestDefinition]] +=== WatcherSearchInputRequestDefinition + +[source,ts,subs=+macros] +---- +interface WatcherSearchInputRequestDefinition { + body?: <<WatcherSearchInputRequestBody>> + indices?: <<IndexName>>[] + indices_options?: <<IndicesOptions>> + search_type?: <<SearchType>> + template?: <<WatcherSearchTemplateRequestBody>> + rest_total_hits_as_int?: boolean +} +---- + + +[discrete] +[[WatcherSearchTemplateRequestBody]] +=== WatcherSearchTemplateRequestBody + +[source,ts,subs=+macros] +---- +interface WatcherSearchTemplateRequestBody { + explain?: boolean + pass:[/**] @property id ID of the search template to use. If no source is specified, this parameter is required. */ + id?: <<Id>> + params?: Record<string, any> + profile?: boolean + pass:[/**] @property source An inline search template. Supports the same parameters as the search API's request body. Also supports Mustache variables. If no id is specified, this parameter is required. 
*/ + source?: string +} +---- + + +[discrete] +[[WatcherSimulatedActions]] +=== WatcherSimulatedActions + +[source,ts,subs=+macros] +---- +interface WatcherSimulatedActions { + actions: string[] + all: <<WatcherSimulatedActions>> + use_all: boolean +} +---- + + +[discrete] +[[WatcherSlackAction]] +=== WatcherSlackAction + +[source,ts,subs=+macros] +---- +interface WatcherSlackAction { + account?: string + message: <<WatcherSlackMessage>> +} +---- + + +[discrete] +[[WatcherSlackAttachment]] +=== WatcherSlackAttachment + +[source,ts,subs=+macros] +---- +interface WatcherSlackAttachment { + author_icon?: string + author_link?: string + author_name: string + color?: string + fallback?: string + fields?: <<WatcherSlackAttachmentField>>[] + footer?: string + footer_icon?: string + image_url?: string + pretext?: string + text?: string + thumb_url?: string + title: string + title_link?: string + ts?: <<EpochTime>><<<UnitSeconds>>> +} +---- + + +[discrete] +[[WatcherSlackAttachmentField]] +=== WatcherSlackAttachmentField + +[source,ts,subs=+macros] +---- +interface WatcherSlackAttachmentField { + <<short>>: boolean + title: string + value: string +} +---- + + +[discrete] +[[WatcherSlackDynamicAttachment]] +=== WatcherSlackDynamicAttachment + +[source,ts,subs=+macros] +---- +interface WatcherSlackDynamicAttachment { + attachment_template: <<WatcherSlackAttachment>> + list_path: string +} +---- + + +[discrete] +[[WatcherSlackMessage]] +=== WatcherSlackMessage + +[source,ts,subs=+macros] +---- +interface WatcherSlackMessage { + attachments: <<WatcherSlackAttachment>>[] + dynamic_attachments?: <<WatcherSlackDynamicAttachment>> + from: string + icon?: string + text: string + to: string[] +} +---- + + +[discrete] +[[WatcherSlackResult]] +=== WatcherSlackResult + +[source,ts,subs=+macros] +---- +interface WatcherSlackResult { + account?: string + message: <<WatcherSlackMessage>> +} +---- + + +[discrete] +[[WatcherThrottleState]] +=== WatcherThrottleState + +[source,ts,subs=+macros] +---- +interface WatcherThrottleState { + reason: string + timestamp: <<DateTime>> +} +---- + + +[discrete] +[[WatcherTimeOfMonth]] +=== WatcherTimeOfMonth + +[source,ts,subs=+macros] +---- +interface WatcherTimeOfMonth { + at: string[] + on: <<integer>>[] +} +---- + + +[discrete] +[[WatcherTimeOfWeek]] +=== WatcherTimeOfWeek + +[source,ts,subs=+macros] +---- +interface WatcherTimeOfWeek { + at: string[] + on: <<WatcherDay>>[] +} +---- + + +[discrete] +[[WatcherTimeOfYear]] +=== WatcherTimeOfYear + +[source,ts,subs=+macros] +---- +interface WatcherTimeOfYear { + at: string[] + int: <<WatcherMonth>>[] + on: <<integer>>[] +} +---- + + +[discrete] +[[WatcherTriggerContainer]] +=== WatcherTriggerContainer + +[source,ts,subs=+macros] +---- +interface WatcherTriggerContainer { + schedule?: <<WatcherScheduleContainer>> +} +---- + + +[discrete] +[[WatcherTriggerEventContainer]] +=== WatcherTriggerEventContainer + +[source,ts,subs=+macros] +---- +interface WatcherTriggerEventContainer { + schedule?: <<WatcherScheduleTriggerEvent>> +} +---- + + +[discrete] +[[WatcherTriggerEventResult]] +=== WatcherTriggerEventResult + +[source,ts,subs=+macros] +---- +interface WatcherTriggerEventResult { + manual: <<WatcherTriggerEventContainer>> + triggered_time: <<DateTime>> + type: string +} +---- + + +[discrete] +[[WatcherWatch]] +=== WatcherWatch + +[source,ts,subs=+macros] +---- +interface WatcherWatch { + actions: Record<<<IndexName>>, <<WatcherAction>>> + condition: <<WatcherConditionContainer>> + input: <<WatcherInputContainer>> + metadata?: 
<<Metadata>> + status?: <<WatcherWatchStatus>> + throttle_period?: <<Duration>> + throttle_period_in_millis?: <<DurationValue>><<<UnitMillis>>> + transform?: <<TransformContainer>> + trigger: <<WatcherTriggerContainer>> +} +---- + + +[discrete] +[[WatcherWatchStatus]] +=== WatcherWatchStatus + +[source,ts,subs=+macros] +---- +interface WatcherWatchStatus { + actions: <<WatcherActions>> + last_checked?: <<DateTime>> + last_met_condition?: <<DateTime>> + state: <<WatcherActivationState>> + version: <<VersionNumber>> + execution_state?: string +} +---- + + +[discrete] +[[WatcherWebhookAction]] +=== WatcherWebhookAction + +[source,ts,subs=+macros] +---- +interface WatcherWebhookAction extends <<WatcherHttpInputRequestDefinition>> {} +---- + + +[discrete] +[[WatcherWebhookResult]] +=== WatcherWebhookResult + +[source,ts,subs=+macros] +---- +interface WatcherWebhookResult { + request: <<WatcherHttpInputRequestResult>> + response?: <<WatcherHttpInputResponseResult>> +} +---- + + diff --git a/docs/reference/shutdown.asciidoc b/docs/reference/shutdown.asciidoc new file mode 100644 index 000000000..1f7274dc9 --- /dev/null +++ b/docs/reference/shutdown.asciidoc @@ -0,0 +1,159 @@ +[[reference-shutdown]] +== client.shutdown + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.shutdown.deleteNode]] +== `client.shutdown.deleteNode()` + +Removes a node from the shutdown list. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. + +https://www.elastic.co/guide/en/elasticsearch/reference/current[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ShutdownDeleteNodeRequest, options?: TransportRequestOptions) => Promise<ShutdownDeleteNodeResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ShutdownDeleteNodeRequest extends <<RequestBase>> { + node_id: <<NodeId>> + master_timeout?: <<TimeUnit>> + timeout?: <<TimeUnit>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type ShutdownDeleteNodeResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.shutdown.getNode]] +== `client.shutdown.getNode()` + +Retrieve status of a node or nodes that are currently marked as shutting down. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. 
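+
+For illustration, a minimal usage sketch with placeholder connection and node details:
+
+[source,ts]
+----
+import { Client } from '@elastic/elasticsearch'
+
+// Placeholder connection details; adjust for your deployment.
+const client = new Client({
+  node: 'https://localhost:9200',
+  auth: { apiKey: 'REDACTED' }
+})
+
+// List all nodes currently registered for shutdown (pass `node_id` to filter to specific nodes).
+const status = await client.shutdown.getNode({})
+console.log(status.nodes)
+----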
+ +https://www.elastic.co/guide/en/elasticsearch/reference/current[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ShutdownGetNodeRequest, options?: TransportRequestOptions) => Promise<ShutdownGetNodeResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ShutdownGetNodeRequest extends <<RequestBase>> { + node_id?: <<NodeIds>> + master_timeout?: <<TimeUnit>> + timeout?: <<TimeUnit>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface ShutdownGetNodeResponse { + nodes: ShutdownGetNodeNodeShutdownStatus[] +} + +---- + + +[discrete] +[[client.shutdown.putNode]] +== `client.shutdown.putNode()` + +Adds a node to be shut down. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. + +https://www.elastic.co/guide/en/elasticsearch/reference/current[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: ShutdownPutNodeRequest, options?: TransportRequestOptions) => Promise<ShutdownPutNodeResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface ShutdownPutNodeRequest extends <<RequestBase>> { + node_id: <<NodeId>> + master_timeout?: <<TimeUnit>> + timeout?: <<TimeUnit>> + type: <<ShutdownType>> + reason: string + allocation_delay?: string + target_node_name?: string +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type ShutdownPutNodeResponse = <<AcknowledgedResponseBase>> + +---- + + diff --git a/docs/reference/simulate.asciidoc b/docs/reference/simulate.asciidoc new file mode 100644 index 000000000..6c751eceb --- /dev/null +++ b/docs/reference/simulate.asciidoc @@ -0,0 +1,51 @@ +[[reference-simulate]] +== client.simulate + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.simulate.ingest]] +== `client.simulate.ingest()` + +Simulates running ingest with example documents. 
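+
+A hypothetical usage sketch (assumes a configured `client`; the `docs` field mirrors the REST request body and is an assumption here, as are the index name and documents):
+
+[source,ts]
+----
+// Dry-run ingest for two example documents; nothing is written to the index.
+const result = await client.simulate.ingest({
+  index: 'my-index',
+  docs: [
+    { _source: { message: 'first sample document' } },
+    { _source: { message: 'second sample document' } }
+  ]
+})
+console.log(JSON.stringify(result, null, 2))
+----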
+ +{ref}/simulate-ingest-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SimulateIngestRequest, options?: TransportRequestOptions) => Promise<SimulateIngestResponse> +---- + diff --git a/docs/reference/slm.asciidoc b/docs/reference/slm.asciidoc new file mode 100644 index 000000000..8d3a82b57 --- /dev/null +++ b/docs/reference/slm.asciidoc @@ -0,0 +1,381 @@ +[[reference-slm]] +== client.slm + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.slm.deleteLifecycle]] +== `client.slm.deleteLifecycle()` + +Deletes an existing snapshot lifecycle policy. + +{ref}/slm-api-delete-policy.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SlmDeleteLifecycleRequest, options?: TransportRequestOptions) => Promise<SlmDeleteLifecycleResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SlmDeleteLifecycleRequest extends <<RequestBase>> { + policy_id: <<Name>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type SlmDeleteLifecycleResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.slm.executeLifecycle]] +== `client.slm.executeLifecycle()` + +Immediately creates a snapshot according to the lifecycle policy, without waiting for the scheduled time. + +{ref}/slm-api-execute-lifecycle.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SlmExecuteLifecycleRequest, options?: TransportRequestOptions) => Promise<SlmExecuteLifecycleResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SlmExecuteLifecycleRequest extends <<RequestBase>> { + policy_id: <<Name>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SlmExecuteLifecycleResponse { + snapshot_name: <<Name>> +} + +---- + + +[discrete] +[[client.slm.executeRetention]] +== `client.slm.executeRetention()` + +Deletes any snapshots that are expired according to the policy's retention rules. 
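+
+For illustration, a minimal call sketch (assumes a configured `client`; the request takes no parameters):
+
+[source,ts]
+----
+// Run SLM retention now instead of waiting for the scheduled run.
+const response = await client.slm.executeRetention()
+console.log(response.acknowledged)
+----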
+ +{ref}/slm-api-execute-retention.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SlmExecuteRetentionRequest, options?: TransportRequestOptions) => Promise<SlmExecuteRetentionResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SlmExecuteRetentionRequest extends <<RequestBase>> {} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type SlmExecuteRetentionResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.slm.getLifecycle]] +== `client.slm.getLifecycle()` + +Retrieves one or more snapshot lifecycle policy definitions and information about the latest snapshot attempts. + +{ref}/slm-api-get-policy.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SlmGetLifecycleRequest, options?: TransportRequestOptions) => Promise<SlmGetLifecycleResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SlmGetLifecycleRequest extends <<RequestBase>> { + policy_id?: <<Names>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type SlmGetLifecycleResponse = Record<<<Id>>, <<SlmSnapshotLifecycle>>> + +---- + + +[discrete] +[[client.slm.getStats]] +== `client.slm.getStats()` + +Returns global and policy-level statistics about actions taken by snapshot lifecycle management. + +{ref}/slm-api-get-stats.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SlmGetStatsRequest, options?: TransportRequestOptions) => Promise<SlmGetStatsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SlmGetStatsRequest extends <<RequestBase>> {} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SlmGetStatsResponse { + retention_deletion_time: <<Duration>> + retention_deletion_time_millis: <<DurationValue>><<<UnitMillis>>> + retention_failed: <<long>> + retention_runs: <<long>> + retention_timed_out: <<long>> + total_snapshots_deleted: <<long>> + total_snapshot_deletion_failures: <<long>> + total_snapshots_failed: <<long>> + total_snapshots_taken: <<long>> + policy_stats: string[] +} + +---- + + +[discrete] +[[client.slm.getStatus]] +== `client.slm.getStatus()` + +Retrieves the status of snapshot lifecycle management (SLM). + +{ref}/slm-api-get-status.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SlmGetStatusRequest, options?: TransportRequestOptions) => Promise<SlmGetStatusResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SlmGetStatusRequest extends <<RequestBase>> {} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SlmGetStatusResponse { + operation_mode: <<LifecycleOperationMode>> +} + +---- + + +[discrete] +[[client.slm.putLifecycle]] +== `client.slm.putLifecycle()` + +Creates or updates a snapshot lifecycle policy. 
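+
+An illustrative sketch of creating a daily policy (assumes a configured `client`; the repository name, schedule, and the `config`/`retention` values are placeholders):
+
+[source,ts]
+----
+await client.slm.putLifecycle({
+  policy_id: 'daily-snapshots',
+  name: '<daily-snap-{now/d}>',
+  schedule: '0 30 1 * * ?', // SLM cron expression: every day at 01:30
+  repository: 'my_repository',
+  config: { indices: ['*'] },
+  retention: { expire_after: '30d', min_count: 5, max_count: 50 }
+})
+----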
+ +{ref}/slm-api-put-policy.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SlmPutLifecycleRequest, options?: TransportRequestOptions) => Promise<SlmPutLifecycleResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SlmPutLifecycleRequest extends <<RequestBase>> { + policy_id: <<Name>> + master_timeout?: <<Duration>> + timeout?: <<Duration>> + config?: <<SlmConfiguration>> + name?: <<Name>> + repository?: string + retention?: <<SlmRetention>> + schedule?: <<WatcherCronExpression>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type SlmPutLifecycleResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.slm.start]] +== `client.slm.start()` + +Turns on snapshot lifecycle management (SLM). + +{ref}/slm-api-start.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SlmStartRequest, options?: TransportRequestOptions) => Promise<SlmStartResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SlmStartRequest extends <<RequestBase>> {} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type SlmStartResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.slm.stop]] +== `client.slm.stop()` + +Turns off snapshot lifecycle management (SLM). + +{ref}/slm-api-stop.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SlmStopRequest, options?: TransportRequestOptions) => Promise<SlmStopResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SlmStopRequest extends <<RequestBase>> {} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type SlmStopResponse = <<AcknowledgedResponseBase>> + +---- + + diff --git a/docs/reference/snapshot.asciidoc b/docs/reference/snapshot.asciidoc new file mode 100644 index 000000000..34f3c2450 --- /dev/null +++ b/docs/reference/snapshot.asciidoc @@ -0,0 +1,533 @@ +[[reference-snapshot]] +== client.snapshot + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.snapshot.cleanupRepository]] +== `client.snapshot.cleanupRepository()` + +Triggers the review of a snapshot repository’s contents and deletes any stale data not referenced by existing snapshots. 
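+
+For illustration, a minimal call sketch (assumes a configured `client`; `my_repository` is a placeholder repository name):
+
+[source,ts]
+----
+// Delete stale data that no existing snapshot references.
+const { results } = await client.snapshot.cleanupRepository({ name: 'my_repository' })
+console.log(results)
+----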
+ +{ref}/clean-up-snapshot-repo-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SnapshotCleanupRepositoryRequest, options?: TransportRequestOptions) => Promise<SnapshotCleanupRepositoryResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SnapshotCleanupRepositoryRequest extends <<RequestBase>> { + name: <<Name>> + master_timeout?: <<Duration>> + timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SnapshotCleanupRepositoryResponse { + results: SnapshotCleanupRepositoryCleanupRepositoryResults +} + +---- + + +[discrete] +[[client.snapshot.clone]] +== `client.snapshot.clone()` + +Clones indices from one snapshot into another snapshot in the same repository. + +{ref}/modules-snapshots.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SnapshotCloneRequest, options?: TransportRequestOptions) => Promise<SnapshotCloneResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SnapshotCloneRequest extends <<RequestBase>> { + repository: <<Name>> + snapshot: <<Name>> + target_snapshot: <<Name>> + master_timeout?: <<Duration>> + timeout?: <<Duration>> + indices: string +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type SnapshotCloneResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.snapshot.create]] +== `client.snapshot.create()` + +Creates a snapshot in a repository. + +{ref}/modules-snapshots.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SnapshotCreateRequest, options?: TransportRequestOptions) => Promise<SnapshotCreateResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SnapshotCreateRequest extends <<RequestBase>> { + repository: <<Name>> + snapshot: <<Name>> + master_timeout?: <<Duration>> + wait_for_completion?: boolean + ignore_unavailable?: boolean + include_global_state?: boolean + indices?: <<Indices>> + feature_states?: string[] + metadata?: <<Metadata>> + partial?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SnapshotCreateResponse { + accepted?: boolean + snapshot?: <<SnapshotSnapshotInfo>> +} + +---- + + +[discrete] +[[client.snapshot.createRepository]] +== `client.snapshot.createRepository()` + +Creates a repository. + +{ref}/modules-snapshots.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SnapshotCreateRepositoryRequest, options?: TransportRequestOptions) => Promise<SnapshotCreateRepositoryResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SnapshotCreateRepositoryRequest extends <<RequestBase>> { + name: <<Name>> + master_timeout?: <<Duration>> + timeout?: <<Duration>> + verify?: boolean + repository?: <<SnapshotRepository>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type SnapshotCreateRepositoryResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.snapshot.delete]] +== `client.snapshot.delete()` + +Deletes one or more snapshots. 
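+
+An illustrative call sketch (assumes a configured `client`; the repository and snapshot names are placeholders):
+
+[source,ts]
+----
+await client.snapshot.delete({
+  repository: 'my_repository',
+  snapshot: 'snapshot_2099.01.01'
+})
+----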
+ +{ref}/modules-snapshots.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SnapshotDeleteRequest, options?: TransportRequestOptions) => Promise<SnapshotDeleteResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SnapshotDeleteRequest extends <<RequestBase>> { + repository: <<Name>> + snapshot: <<Name>> + master_timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type SnapshotDeleteResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.snapshot.deleteRepository]] +== `client.snapshot.deleteRepository()` + +Deletes a repository. + +{ref}/modules-snapshots.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SnapshotDeleteRepositoryRequest, options?: TransportRequestOptions) => Promise<SnapshotDeleteRepositoryResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SnapshotDeleteRepositoryRequest extends <<RequestBase>> { + name: <<Names>> + master_timeout?: <<Duration>> + timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type SnapshotDeleteRepositoryResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.snapshot.get]] +== `client.snapshot.get()` + +Returns information about a snapshot. + +{ref}/modules-snapshots.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SnapshotGetRequest, options?: TransportRequestOptions) => Promise<SnapshotGetResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SnapshotGetRequest extends <<RequestBase>> { + repository: <<Name>> + snapshot: <<Names>> + ignore_unavailable?: boolean + master_timeout?: <<Duration>> + verbose?: boolean + index_details?: boolean + index_names?: boolean + include_repository?: boolean + sort?: <<SnapshotSnapshotSort>> + size?: <<integer>> + order?: <<SortOrder>> + after?: string + offset?: <<integer>> + from_sort_value?: string + slm_policy_filter?: <<Name>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SnapshotGetResponse { + responses?: SnapshotGetSnapshotResponseItem[] + snapshots?: <<SnapshotSnapshotInfo>>[] + total: <<integer>> + remaining: <<integer>> +} + +---- + + +[discrete] +[[client.snapshot.getRepository]] +== `client.snapshot.getRepository()` + +Returns information about a repository. 
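+
+An illustrative call sketch (assumes a configured `client`; omit `name` to list every registered repository):
+
+[source,ts]
+----
+const repositories = await client.snapshot.getRepository({ name: 'my_repository' })
+console.log(repositories['my_repository'])
+----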
+ +{ref}/modules-snapshots.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SnapshotGetRepositoryRequest, options?: TransportRequestOptions) => Promise<SnapshotGetRepositoryResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SnapshotGetRepositoryRequest extends <<RequestBase>> { + name?: <<Names>> + local?: boolean + master_timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type SnapshotGetRepositoryResponse = Record<string, <<SnapshotRepository>>> + +---- + + +[discrete] +[[client.snapshot.repositoryAnalyze]] +== `client.snapshot.repositoryAnalyze()` + +Analyzes a repository for correctness and performance + +{ref}/modules-snapshots.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SnapshotRepositoryAnalyzeRequest, options?: TransportRequestOptions) => Promise<SnapshotRepositoryAnalyzeResponse> +---- + +[discrete] +[[client.snapshot.restore]] +== `client.snapshot.restore()` + +Restores a snapshot. + +{ref}/modules-snapshots.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SnapshotRestoreRequest, options?: TransportRequestOptions) => Promise<SnapshotRestoreResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SnapshotRestoreRequest extends <<RequestBase>> { + repository: <<Name>> + snapshot: <<Name>> + master_timeout?: <<Duration>> + wait_for_completion?: boolean + feature_states?: string[] + ignore_index_settings?: string[] + ignore_unavailable?: boolean + include_aliases?: boolean + include_global_state?: boolean + index_settings?: <<IndicesIndexSettings>> + indices?: <<Indices>> + partial?: boolean + rename_pattern?: string + rename_replacement?: string +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SnapshotRestoreResponse { + accepted?: boolean + snapshot?: SnapshotRestoreSnapshotRestore +} + +---- + + +[discrete] +[[client.snapshot.status]] +== `client.snapshot.status()` + +Returns information about the status of a snapshot. + +{ref}/modules-snapshots.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SnapshotStatusRequest, options?: TransportRequestOptions) => Promise<SnapshotStatusResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SnapshotStatusRequest extends <<RequestBase>> { + repository?: <<Name>> + snapshot?: <<Names>> + ignore_unavailable?: boolean + master_timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SnapshotStatusResponse { + snapshots: <<SnapshotStatus>>[] +} + +---- + + +[discrete] +[[client.snapshot.verifyRepository]] +== `client.snapshot.verifyRepository()` + +Verifies a repository. 
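+
+For illustration (assumes a configured `client`; `my_repository` is a placeholder):
+
+[source,ts]
+----
+// Confirms that the cluster nodes can access the repository.
+const { nodes } = await client.snapshot.verifyRepository({ name: 'my_repository' })
+console.log(Object.keys(nodes)) // ids of the nodes that verified the repository
+----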
+ +{ref}/modules-snapshots.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SnapshotVerifyRepositoryRequest, options?: TransportRequestOptions) => Promise<SnapshotVerifyRepositoryResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SnapshotVerifyRepositoryRequest extends <<RequestBase>> { + name: <<Name>> + master_timeout?: <<Duration>> + timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SnapshotVerifyRepositoryResponse { + nodes: Record<string, SnapshotVerifyRepositoryCompactNodeInfo> +} + +---- + + diff --git a/docs/reference/sql.asciidoc b/docs/reference/sql.asciidoc new file mode 100644 index 000000000..de39c1dd6 --- /dev/null +++ b/docs/reference/sql.asciidoc @@ -0,0 +1,311 @@ +[[reference-sql]] +== client.sql + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.sql.clearCursor]] +== `client.sql.clearCursor()` + +Clear an SQL search cursor. + +{ref}/clear-sql-cursor-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SqlClearCursorRequest, options?: TransportRequestOptions) => Promise<SqlClearCursorResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SqlClearCursorRequest extends <<RequestBase>> { + cursor: string +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SqlClearCursorResponse { + succeeded: boolean +} + +---- + + +[discrete] +[[client.sql.deleteAsync]] +== `client.sql.deleteAsync()` + +Delete an async SQL search. Delete an async SQL search or a stored synchronous SQL search. If the search is still running, the API cancels it. + +{ref}/delete-async-sql-search-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SqlDeleteAsyncRequest, options?: TransportRequestOptions) => Promise<SqlDeleteAsyncResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SqlDeleteAsyncRequest extends <<RequestBase>> { + id: <<Id>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type SqlDeleteAsyncResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.sql.getAsync]] +== `client.sql.getAsync()` + +Get async SQL search results. Get the current status and available results for an async SQL search or stored synchronous SQL search. 
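+
+An illustrative sketch that starts an async SQL search and fetches its results later (assumes a configured `client`; the index, query, and timings are placeholders):
+
+[source,ts]
+----
+// Start the search, but hand back an id if it does not finish within 2 seconds.
+const started = await client.sql.query({
+  query: 'SELECT * FROM library ORDER BY page_count DESC',
+  wait_for_completion_timeout: '2s',
+  keep_on_completion: true
+})
+
+if (started.id != null) {
+  // Fetch the stored results (or current progress) later by id.
+  const results = await client.sql.getAsync({ id: started.id, keep_alive: '1h' })
+  console.log(results.is_running, results.rows)
+}
+----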
+ +{ref}/get-async-sql-search-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SqlGetAsyncRequest, options?: TransportRequestOptions) => Promise<SqlGetAsyncResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SqlGetAsyncRequest extends <<RequestBase>> { + id: <<Id>> + delimiter?: string + format?: string + keep_alive?: <<Duration>> + wait_for_completion_timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SqlGetAsyncResponse { + id: <<Id>> + is_running: boolean + is_partial: boolean + columns?: SqlColumn[] + cursor?: string + rows: SqlRow[] +} + +---- + + +[discrete] +[[client.sql.getAsyncStatus]] +== `client.sql.getAsyncStatus()` + +Get the async SQL search status. Get the current status of an async SQL search or a stored synchronous SQL search. + +{ref}/get-async-sql-search-status-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SqlGetAsyncStatusRequest, options?: TransportRequestOptions) => Promise<SqlGetAsyncStatusResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SqlGetAsyncStatusRequest extends <<RequestBase>> { + id: <<Id>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SqlGetAsyncStatusResponse { + id: string + is_running: boolean + is_partial: boolean + start_time_in_millis: <<EpochTime>><<<UnitMillis>>> + expiration_time_in_millis: <<EpochTime>><<<UnitMillis>>> + completion_status?: <<uint>> +} + +---- + + +[discrete] +[[client.sql.query]] +== `client.sql.query()` + +Get SQL search results. Run an SQL request. + +{ref}/sql-search-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SqlQueryRequest, options?: TransportRequestOptions) => Promise<SqlQueryResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SqlQueryRequest extends <<RequestBase>> { + format?: SqlQuerySqlFormat + catalog?: string + columnar?: boolean + cursor?: string + fetch_size?: <<integer>> + filter?: <<QueryDslQueryContainer>> + query?: string + request_timeout?: <<Duration>> + page_timeout?: <<Duration>> + time_zone?: <<TimeZone>> + field_multi_value_leniency?: boolean + runtime_mappings?: <<MappingRuntimeFields>> + wait_for_completion_timeout?: <<Duration>> + params?: Record<string, any> + keep_alive?: <<Duration>> + keep_on_completion?: boolean + index_using_frozen?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SqlQueryResponse { + id?: <<Id>> + is_running?: boolean + is_partial?: boolean + columns?: SqlColumn[] + cursor?: string + rows: SqlRow[] +} + +---- + + +[discrete] +[[client.sql.translate]] +== `client.sql.translate()` + +Translate SQL into Elasticsearch queries. Translate an SQL search into a search API request containing Query DSL. 
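+
+An illustrative call sketch (assumes a configured `client`; the SQL statement is a placeholder):
+
+[source,ts]
+----
+// Inspect the Query DSL that the SQL statement would run as, without executing it.
+const dsl = await client.sql.translate({
+  query: "SELECT * FROM library WHERE release_date < '2000-01-01'",
+  fetch_size: 10
+})
+console.log(dsl.query, dsl.sort, dsl.size)
+----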
+ +{ref}/sql-translate-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SqlTranslateRequest, options?: TransportRequestOptions) => Promise<SqlTranslateResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SqlTranslateRequest extends <<RequestBase>> { + fetch_size?: <<integer>> + filter?: <<QueryDslQueryContainer>> + query: string + time_zone?: <<TimeZone>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SqlTranslateResponse { + aggregations?: Record<string, <<AggregationsAggregationContainer>>> + size?: <<long>> + _source?: <<SearchSourceConfig>> + fields?: (<<QueryDslFieldAndFormat>> | <<Field>>)[] + query?: <<QueryDslQueryContainer>> + sort?: <<Sort>> +} + +---- + + diff --git a/docs/reference/ssl.asciidoc b/docs/reference/ssl.asciidoc new file mode 100644 index 000000000..640b248ba --- /dev/null +++ b/docs/reference/ssl.asciidoc @@ -0,0 +1,71 @@ +[[reference-ssl]] +== client.ssl + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.ssl.certificates]] +== `client.ssl.certificates()` + +Get SSL certificates. Get information about the X.509 certificates that are used to encrypt communications in the cluster. The API returns a list that includes certificates from all TLS contexts including: - Settings for transport and HTTP interfaces - TLS settings that are used within authentication realms - TLS settings for remote monitoring exporters The list includes certificates that are used for configuring trust, such as those configured in the `xpack.security.transport.ssl.truststore` and `xpack.security.transport.ssl.certificate_authorities` settings. It also includes certificates that are used for configuring server identity, such as `xpack.security.http.ssl.keystore` and `xpack.security.http.ssl.certificate settings`. The list does not include certificates that are sourced from the default SSL context of the Java Runtime Environment (JRE), even if those certificates are in use within Elasticsearch. NOTE: When a PKCS#11 token is configured as the truststore of the JRE, the API returns all the certificates that are included in the PKCS#11 token irrespective of whether these are used in the Elasticsearch TLS configuration. 
If Elasticsearch is configured to use a keystore or truststore, the API output includes all certificates in that store, even though some of the certificates might not be in active use within the cluster. + +{ref}/security-api-ssl.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SslCertificatesRequest, options?: TransportRequestOptions) => Promise<SslCertificatesResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SslCertificatesRequest extends <<RequestBase>> {} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type SslCertificatesResponse = SslCertificatesCertificateInformation[] + +---- + + diff --git a/docs/reference/synonyms.asciidoc b/docs/reference/synonyms.asciidoc new file mode 100644 index 000000000..fa1a981e6 --- /dev/null +++ b/docs/reference/synonyms.asciidoc @@ -0,0 +1,312 @@ +[[reference-synonyms]] +== client.synonyms + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.synonyms.deleteSynonym]] +== `client.synonyms.deleteSynonym()` + +Delete a synonym set. + +{ref}/delete-synonyms-set.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SynonymsDeleteSynonymRequest, options?: TransportRequestOptions) => Promise<SynonymsDeleteSynonymResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SynonymsDeleteSynonymRequest extends <<RequestBase>> { + id: <<Id>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type SynonymsDeleteSynonymResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.synonyms.deleteSynonymRule]] +== `client.synonyms.deleteSynonymRule()` + +Delete a synonym rule. Delete a synonym rule from a synonym set. + +{ref}/delete-synonym-rule.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SynonymsDeleteSynonymRuleRequest, options?: TransportRequestOptions) => Promise<SynonymsDeleteSynonymRuleResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SynonymsDeleteSynonymRuleRequest extends <<RequestBase>> { + set_id: <<Id>> + rule_id: <<Id>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type SynonymsDeleteSynonymRuleResponse = <<SynonymsSynonymsUpdateResult>> + +---- + + +[discrete] +[[client.synonyms.getSynonym]] +== `client.synonyms.getSynonym()` + +Get a synonym set. 
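+
+An illustrative call sketch (assumes a configured `client`; `my-synonym-set` is a placeholder id):
+
+[source,ts]
+----
+// Page through the rules of a synonym set, ten at a time.
+const { count, synonyms_set } = await client.synonyms.getSynonym({
+  id: 'my-synonym-set',
+  from: 0,
+  size: 10
+})
+console.log(count, synonyms_set)
+----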
+ +{ref}/get-synonyms-set.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SynonymsGetSynonymRequest, options?: TransportRequestOptions) => Promise<SynonymsGetSynonymResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SynonymsGetSynonymRequest extends <<RequestBase>> { + id: <<Id>> + from?: <<integer>> + size?: <<integer>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SynonymsGetSynonymResponse { + count: <<integer>> + synonyms_set: <<SynonymsSynonymRuleRead>>[] +} + +---- + + +[discrete] +[[client.synonyms.getSynonymRule]] +== `client.synonyms.getSynonymRule()` + +Get a synonym rule. Get a synonym rule from a synonym set. + +{ref}/get-synonym-rule.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SynonymsGetSynonymRuleRequest, options?: TransportRequestOptions) => Promise<SynonymsGetSynonymRuleResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SynonymsGetSynonymRuleRequest extends <<RequestBase>> { + set_id: <<Id>> + rule_id: <<Id>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type SynonymsGetSynonymRuleResponse = <<SynonymsSynonymRuleRead>> + +---- + + +[discrete] +[[client.synonyms.getSynonymsSets]] +== `client.synonyms.getSynonymsSets()` + +Get all synonym sets. Get a summary of all defined synonym sets. + +{ref}/list-synonyms-sets.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SynonymsGetSynonymsSetsRequest, options?: TransportRequestOptions) => Promise<SynonymsGetSynonymsSetsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SynonymsGetSynonymsSetsRequest extends <<RequestBase>> { + from?: <<integer>> + size?: <<integer>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SynonymsGetSynonymsSetsResponse { + count: <<integer>> + results: SynonymsGetSynonymsSetsSynonymsSetItem[] +} + +---- + + +[discrete] +[[client.synonyms.putSynonym]] +== `client.synonyms.putSynonym()` + +Create or update a synonym set. Synonyms sets are limited to a maximum of 10,000 synonym rules per set. If you need to manage more synonym rules, you can create multiple synonym sets. + +{ref}/put-synonyms-set.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SynonymsPutSynonymRequest, options?: TransportRequestOptions) => Promise<SynonymsPutSynonymResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SynonymsPutSynonymRequest extends <<RequestBase>> { + id: <<Id>> + synonyms_set: <<SynonymsSynonymRule>> | <<SynonymsSynonymRule>>[] +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface SynonymsPutSynonymResponse { + result: <<Result>> + reload_analyzers_details: IndicesReloadSearchAnalyzersReloadResult +} + +---- + + +[discrete] +[[client.synonyms.putSynonymRule]] +== `client.synonyms.putSynonymRule()` + +Create or update a synonym rule. Create or update a synonym rule in a synonym set. 
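+
+An illustrative call sketch (assumes a configured `client`; the set id, rule id, and synonyms string are placeholders):
+
+[source,ts]
+----
+await client.synonyms.putSynonymRule({
+  set_id: 'my-synonym-set',
+  rule_id: 'greetings',
+  synonyms: 'hello, hi, howdy'
+})
+----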
+ +{ref}/put-synonym-rule.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: SynonymsPutSynonymRuleRequest, options?: TransportRequestOptions) => Promise<SynonymsPutSynonymRuleResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface SynonymsPutSynonymRuleRequest extends <<RequestBase>> { + set_id: <<Id>> + rule_id: <<Id>> + synonyms: <<SynonymsSynonymString>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type SynonymsPutSynonymRuleResponse = <<SynonymsSynonymsUpdateResult>> + +---- + + diff --git a/docs/reference/tasks.asciidoc b/docs/reference/tasks.asciidoc new file mode 100644 index 000000000..48aed6963 --- /dev/null +++ b/docs/reference/tasks.asciidoc @@ -0,0 +1,165 @@ +[[reference-tasks]] +== client.tasks + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.tasks.cancel]] +== `client.tasks.cancel()` + +Cancels a task, if it can be cancelled through an API. + +{ref}/tasks.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: TasksCancelRequest, options?: TransportRequestOptions) => Promise<TasksCancelResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface TasksCancelRequest extends <<RequestBase>> { + task_id?: <<TaskId>> + actions?: string | string[] + nodes?: string[] + parent_task_id?: string + wait_for_completion?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type TasksCancelResponse = <<TasksTaskListResponseBase>> + +---- + + +[discrete] +[[client.tasks.get]] +== `client.tasks.get()` + +Get task information. Returns information about the tasks currently executing in the cluster. 
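+
+A usage sketch, assuming an already-configured `client`; the task ID is illustrative (task IDs have the form `<node_id>:<task_number>`):
+
+[source,ts]
+----
+// Look up a single task and return immediately, even if it is still running
+const response = await client.tasks.get({
+  task_id: 'oTUltX4IQMOUUVeiohTt8A:124',
+  wait_for_completion: false
+})
+console.log(response.completed, response.task)
+----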
+ +{ref}/tasks.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: TasksGetRequest, options?: TransportRequestOptions) => Promise<TasksGetResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface TasksGetRequest extends <<RequestBase>> { + task_id: <<Id>> + timeout?: <<Duration>> + wait_for_completion?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface TasksGetResponse { + completed: boolean + task: <<TasksTaskInfo>> + response?: any + error?: <<ErrorCause>> +} + +---- + + +[discrete] +[[client.tasks.list]] +== `client.tasks.list()` + +The task management API returns information about tasks currently executing on one or more nodes in the cluster. + +{ref}/tasks.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: TasksListRequest, options?: TransportRequestOptions) => Promise<TasksListResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface TasksListRequest extends <<RequestBase>> { + actions?: string | string[] + detailed?: boolean + group_by?: <<TasksGroupBy>> + nodes?: <<NodeIds>> + parent_task_id?: <<Id>> + master_timeout?: <<Duration>> + timeout?: <<Duration>> + wait_for_completion?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type TasksListResponse = <<TasksTaskListResponseBase>> + +---- + + diff --git a/docs/reference/terms_enum.asciidoc b/docs/reference/terms_enum.asciidoc new file mode 100644 index 000000000..bbfbc2a9f --- /dev/null +++ b/docs/reference/terms_enum.asciidoc @@ -0,0 +1,84 @@ +[[reference-terms_enum]] +== client.termsEnum + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.termsEnum]] +== `client.termsEnum()` + +Get terms in an index. Discover terms that match a partial string in an index. This "terms enum" API is designed for low-latency look-ups used in auto-complete scenarios. If the `complete` property in the response is false, the returned terms set may be incomplete and should be treated as approximate. This can occur due to a few reasons, such as a request timeout or a node error. NOTE: The terms enum API may return terms from deleted documents. Deleted documents are initially only marked as deleted. It is not until their segments are merged that documents are actually deleted. Until that happens, the terms enum API will return terms from these documents. 
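+
+A usage sketch, assuming an already-configured `client`; the index name, field name, and partial string are illustrative:
+
+[source,ts]
+----
+// Suggest up to 10 terms in the `tags` field that start with "kib"
+const response = await client.termsEnum({
+  index: 'my-index',
+  field: 'tags',
+  string: 'kib',
+  size: 10
+})
+if (!response.complete) {
+  console.warn('Term list may be incomplete')
+}
+console.log(response.terms)
+----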
+ +{ref}/search-terms-enum.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: TermsEnumRequest, options?: TransportRequestOptions) => Promise<TermsEnumResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface TermsEnumRequest extends <<RequestBase>> { + index: <<IndexName>> + field: <<Field>> + size?: <<integer>> + timeout?: <<Duration>> + case_insensitive?: boolean + index_filter?: <<QueryDslQueryContainer>> + string?: string + search_after?: string +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface TermsEnumResponse { + _shards: <<ShardStatistics>> + terms: string[] + complete: boolean +} + +---- + + diff --git a/docs/reference/termvectors.asciidoc b/docs/reference/termvectors.asciidoc new file mode 100644 index 000000000..c23ed346b --- /dev/null +++ b/docs/reference/termvectors.asciidoc @@ -0,0 +1,95 @@ +[[reference-termvectors]] +== client.termvectors + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.termvectors]] +== `client.termvectors()` + +Get term vector information. Get information and statistics about terms in the fields of a particular document. 
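+
+A usage sketch, assuming an already-configured `client`; the index name, document ID, and field are illustrative:
+
+[source,ts]
+----
+// Retrieve term and field statistics for the `text` field of document 1
+const response = await client.termvectors({
+  index: 'my-index',
+  id: '1',
+  fields: ['text'],
+  term_statistics: true,
+  field_statistics: true
+})
+console.log(response.term_vectors)
+----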
+ +{ref}/docs-termvectors.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: TermvectorsRequest, options?: TransportRequestOptions) => Promise<TermvectorsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface TermvectorsRequest<TDocument = unknown> extends <<RequestBase>> { + index: <<IndexName>> + id?: <<Id>> + fields?: <<Fields>> + field_statistics?: boolean + offsets?: boolean + payloads?: boolean + positions?: boolean + preference?: string + realtime?: boolean + routing?: <<Routing>> + term_statistics?: boolean + version?: <<VersionNumber>> + version_type?: <<VersionType>> + doc?: TDocument + filter?: <<TermvectorsFilter>> + per_field_analyzer?: Record<<<Field>>, string> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface TermvectorsResponse { + found: boolean + _id?: <<Id>> + _index: <<IndexName>> + term_vectors?: Record<<<Field>>, <<TermvectorsTermVector>>> + took: <<long>> + _version: <<VersionNumber>> +} + +---- + + diff --git a/docs/reference/text_structure.asciidoc b/docs/reference/text_structure.asciidoc new file mode 100644 index 000000000..e2c42b22a --- /dev/null +++ b/docs/reference/text_structure.asciidoc @@ -0,0 +1,182 @@ +[[reference-text_structure]] +== client.textStructure + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.textStructure.findFieldStructure]] +== `client.textStructure.findFieldStructure()` + +Finds the structure of a text field in an index. + +{ref}/find-field-structure.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: TextStructureFindFieldStructureRequest, options?: TransportRequestOptions) => Promise<TextStructureFindFieldStructureResponse> +---- + +[discrete] +[[client.textStructure.findMessageStructure]] +== `client.textStructure.findMessageStructure()` + +Finds the structure of a list of messages. The messages must contain data that is suitable to be ingested into Elasticsearch. + +{ref}/find-message-structure.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: TextStructureFindMessageStructureRequest, options?: TransportRequestOptions) => Promise<TextStructureFindMessageStructureResponse> +---- + +[discrete] +[[client.textStructure.findStructure]] +== `client.textStructure.findStructure()` + +Finds the structure of a text file. 
The text file must contain data that is suitable to be ingested into Elasticsearch. + +{ref}/find-structure.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: TextStructureFindStructureRequest, options?: TransportRequestOptions) => Promise<TextStructureFindStructureResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface TextStructureFindStructureRequest<TJsonDocument = unknown> { + charset?: string + column_names?: string + delimiter?: string + ecs_compatibility?: string + explain?: boolean + format?: string + grok_pattern?: <<GrokPattern>> + has_header_row?: boolean + line_merge_size_limit?: <<uint>> + lines_to_sample?: <<uint>> + quote?: string + should_trim_fields?: boolean + timeout?: <<Duration>> + timestamp_field?: <<Field>> + timestamp_format?: string + text_files?: TJsonDocument[] +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface TextStructureFindStructureResponse { + charset: string + has_header_row?: boolean + has_byte_order_marker: boolean + format: string + field_stats: Record<<<Field>>, TextStructureFindStructureFieldStat> + sample_start: string + num_messages_analyzed: <<integer>> + mappings: <<MappingTypeMapping>> + quote?: string + delimiter?: string + need_client_timezone: boolean + num_lines_analyzed: <<integer>> + column_names?: string[] + explanation?: string[] + grok_pattern?: <<GrokPattern>> + multiline_start_pattern?: string + exclude_lines_pattern?: string + java_timestamp_formats?: string[] + joda_timestamp_formats?: string[] + timestamp_field?: <<Field>> + should_trim_fields?: boolean + ingest_pipeline: <<IngestPipelineConfig>> +} + +---- + + +[discrete] +[[client.textStructure.testGrokPattern]] +== `client.textStructure.testGrokPattern()` + +Tests a Grok pattern on some text. + +{ref}/test-grok-pattern.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: TextStructureTestGrokPatternRequest, options?: TransportRequestOptions) => Promise<TextStructureTestGrokPatternResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface TextStructureTestGrokPatternRequest extends <<RequestBase>> { + ecs_compatibility?: string + grok_pattern: <<GrokPattern>> + text: string[] +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface TextStructureTestGrokPatternResponse { + matches: TextStructureTestGrokPatternMatchedText[] +} + +---- + + diff --git a/docs/reference/transform.asciidoc b/docs/reference/transform.asciidoc new file mode 100644 index 000000000..5a26c561c --- /dev/null +++ b/docs/reference/transform.asciidoc @@ -0,0 +1,537 @@ +[[reference-transform]] +== client.transform + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.transform.deleteTransform]] +== `client.transform.deleteTransform()` + +Delete a transform. Deletes a transform. + +{ref}/delete-transform.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: TransformDeleteTransformRequest, options?: TransportRequestOptions) => Promise<TransformDeleteTransformResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface TransformDeleteTransformRequest extends <<RequestBase>> { + transform_id: <<Id>> + force?: boolean + delete_dest_index?: boolean + timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type TransformDeleteTransformResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.transform.getNodeStats]] +== `client.transform.getNodeStats()` + +Retrieves transform usage information for transform nodes. +[discrete] +=== Function signature + +[source,ts] +---- +(request: TransformGetNodeStatsRequest, options?: TransportRequestOptions) => Promise<TransformGetNodeStatsResponse> +---- + +[discrete] +[[client.transform.getTransform]] +== `client.transform.getTransform()` + +Get transforms. Retrieves configuration information for transforms. + +{ref}/get-transform.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: TransformGetTransformRequest, options?: TransportRequestOptions) => Promise<TransformGetTransformResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface TransformGetTransformRequest extends <<RequestBase>> { + transform_id?: <<Names>> + allow_no_match?: boolean + from?: <<integer>> + size?: <<integer>> + exclude_generated?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface TransformGetTransformResponse { + count: <<long>> + transforms: TransformGetTransformTransformSummary[] +} + +---- + + +[discrete] +[[client.transform.getTransformStats]] +== `client.transform.getTransformStats()` + +Get transform stats. Retrieves usage information for transforms. + +{ref}/get-transform-stats.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: TransformGetTransformStatsRequest, options?: TransportRequestOptions) => Promise<TransformGetTransformStatsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface TransformGetTransformStatsRequest extends <<RequestBase>> { + transform_id: <<Names>> + allow_no_match?: boolean + from?: <<long>> + size?: <<long>> + timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface TransformGetTransformStatsResponse { + count: <<long>> + transforms: TransformGetTransformStatsTransformStats[] +} + +---- + + +[discrete] +[[client.transform.previewTransform]] +== `client.transform.previewTransform()` + +Preview a transform. Generates a preview of the results that you will get when you create a transform with the same configuration. 
It returns a maximum of 100 results. The calculations are based on all the current data in the source index. It also generates a list of mappings and settings for the destination index. These values are determined based on the field types of the source index and the transform aggregations. + +{ref}/preview-transform.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: TransformPreviewTransformRequest, options?: TransportRequestOptions) => Promise<TransformPreviewTransformResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface TransformPreviewTransformRequest extends <<RequestBase>> { + transform_id?: <<Id>> + timeout?: <<Duration>> + dest?: <<TransformDestination>> + description?: string + frequency?: <<Duration>> + pivot?: <<TransformPivot>> + source?: <<TransformSource>> + settings?: <<TransformSettings>> + sync?: <<TransformSyncContainer>> + retention_policy?: <<TransformRetentionPolicyContainer>> + latest?: <<TransformLatest>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface TransformPreviewTransformResponse<TTransform = unknown> { + generated_dest_index: <<IndicesIndexState>> + preview: TTransform[] +} + +---- + + +[discrete] +[[client.transform.putTransform]] +== `client.transform.putTransform()` + +Create a transform. Creates a transform. A transform copies data from source indices, transforms it, and persists it into an entity-centric destination index. You can also think of the destination index as a two-dimensional tabular data structure (known as a data frame). The ID for each document in the data frame is generated from a hash of the entity, so there is a unique row per entity. You must choose either the latest or pivot method for your transform; you cannot use both in a single transform. If you choose to use the pivot method for your transform, the entities are defined by the set of `group_by` fields in the pivot object. If you choose to use the latest method, the entities are defined by the `unique_key` field values in the latest object. You must have `create_index`, `index`, and `read` privileges on the destination index and `read` and `view_index_metadata` privileges on the source indices. When Elasticsearch security features are enabled, the transform remembers which roles the user that created it had at the time of creation and uses those same roles. If those roles do not have the required privileges on the source and destination indices, the transform fails when it attempts unauthorized operations. NOTE: You must use Kibana or this API to create a transform. Do not add a transform directly into any `.transform-internal*` indices using the Elasticsearch index API. If Elasticsearch security features are enabled, do not give users any privileges on `.transform-internal*` indices. If you used transforms prior to 7.5, also do not give users any privileges on `.data-frame-internal*` indices. 
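+
+A usage sketch of a continuous pivot transform, assuming an already-configured `client`; all index names, field names, and IDs are illustrative:
+
+[source,ts]
+----
+// Create a pivot transform that aggregates order totals per customer
+await client.transform.putTransform({
+  transform_id: 'orders-by-customer',
+  source: { index: 'orders' },
+  dest: { index: 'orders-by-customer' },
+  pivot: {
+    group_by: { customer_id: { terms: { field: 'customer_id' } } },
+    aggregations: { total_spent: { sum: { field: 'order_total' } } }
+  },
+  sync: { time: { field: 'order_date' } },
+  frequency: '5m'
+})
+----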
+ +{ref}/put-transform.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: TransformPutTransformRequest, options?: TransportRequestOptions) => Promise<TransformPutTransformResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface TransformPutTransformRequest extends <<RequestBase>> { + transform_id: <<Id>> + defer_validation?: boolean + timeout?: <<Duration>> + dest: <<TransformDestination>> + description?: string + frequency?: <<Duration>> + latest?: <<TransformLatest>> + _meta?: <<Metadata>> + pivot?: <<TransformPivot>> + retention_policy?: <<TransformRetentionPolicyContainer>> + settings?: <<TransformSettings>> + source: <<TransformSource>> + sync?: <<TransformSyncContainer>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type TransformPutTransformResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.transform.resetTransform]] +== `client.transform.resetTransform()` + +Reset a transform. Resets a transform. Before you can reset it, you must stop it; alternatively, use the `force` query parameter. If the destination index was created by the transform, it is deleted. + +{ref}/reset-transform.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: TransformResetTransformRequest, options?: TransportRequestOptions) => Promise<TransformResetTransformResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface TransformResetTransformRequest extends <<RequestBase>> { + transform_id: <<Id>> + force?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type TransformResetTransformResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.transform.scheduleNowTransform]] +== `client.transform.scheduleNowTransform()` + +Schedule a transform to start now. Instantly runs a transform to process data. If you _schedule_now a transform, it will process the new data instantly, without waiting for the configured frequency interval. After _schedule_now API is called, the transform will be processed again at now + frequency unless _schedule_now API is called again in the meantime. + +{ref}/schedule-now-transform.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: TransformScheduleNowTransformRequest, options?: TransportRequestOptions) => Promise<TransformScheduleNowTransformResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface TransformScheduleNowTransformRequest extends <<RequestBase>> { + transform_id: <<Id>> + timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type TransformScheduleNowTransformResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.transform.startTransform]] +== `client.transform.startTransform()` + +Start a transform. Starts a transform. When you start a transform, it creates the destination index if it does not already exist. The `number_of_shards` is set to `1` and the `auto_expand_replicas` is set to `0-1`. If it is a pivot transform, it deduces the mapping definitions for the destination index from the source indices and the transform aggregations. If fields in the destination index are derived from scripts (as in the case of `scripted_metric` or `bucket_script` aggregations), the transform uses dynamic mappings unless an index template exists. 
If it is a latest transform, it does not deduce mapping definitions; it uses dynamic mappings. To use explicit mappings, create the destination index before you start the transform. Alternatively, you can create an index template, though it does not affect the deduced mappings in a pivot transform. When the transform starts, a series of validations occur to ensure its success. If you deferred validation when you created the transform, they occur when you start the transform—with the exception of privilege checks. When Elasticsearch security features are enabled, the transform remembers which roles the user that created it had at the time of creation and uses those same roles. If those roles do not have the required privileges on the source and destination indices, the transform fails when it attempts unauthorized operations. + +{ref}/start-transform.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: TransformStartTransformRequest, options?: TransportRequestOptions) => Promise<TransformStartTransformResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface TransformStartTransformRequest extends <<RequestBase>> { + transform_id: <<Id>> + timeout?: <<Duration>> + from?: string +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type TransformStartTransformResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.transform.stopTransform]] +== `client.transform.stopTransform()` + +Stop transforms. Stops one or more transforms. + +{ref}/stop-transform.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: TransformStopTransformRequest, options?: TransportRequestOptions) => Promise<TransformStopTransformResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface TransformStopTransformRequest extends <<RequestBase>> { + transform_id: <<Name>> + allow_no_match?: boolean + force?: boolean + timeout?: <<Duration>> + wait_for_checkpoint?: boolean + wait_for_completion?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type TransformStopTransformResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.transform.updateTransform]] +== `client.transform.updateTransform()` + +Update a transform. Updates certain properties of a transform. All updated properties except `description` do not take effect until after the transform starts the next checkpoint, thus there is data consistency in each checkpoint. To use this API, you must have `read` and `view_index_metadata` privileges for the source indices. You must also have `index` and `read` privileges for the destination index. When Elasticsearch security features are enabled, the transform remembers which roles the user who updated it had at the time of update and runs with those privileges. 
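+
+A usage sketch, assuming an already-configured `client` and an existing transform; the transform ID and values are illustrative:
+
+[source,ts]
+----
+// Change the description and check frequency of an existing transform
+const updated = await client.transform.updateTransform({
+  transform_id: 'orders-by-customer',
+  description: 'Aggregates order totals per customer',
+  frequency: '10m'
+})
+console.log(updated.version)
+----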
+ +{ref}/update-transform.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: TransformUpdateTransformRequest, options?: TransportRequestOptions) => Promise<TransformUpdateTransformResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface TransformUpdateTransformRequest extends <<RequestBase>> { + transform_id: <<Id>> + defer_validation?: boolean + timeout?: <<Duration>> + dest?: <<TransformDestination>> + description?: string + frequency?: <<Duration>> + _meta?: <<Metadata>> + source?: <<TransformSource>> + settings?: <<TransformSettings>> + sync?: <<TransformSyncContainer>> + retention_policy?: <<TransformRetentionPolicyContainer>> | null +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface TransformUpdateTransformResponse { + authorization?: <<MlTransformAuthorization>> + create_time: <<long>> + description: string + dest: <<ReindexDestination>> + frequency?: <<Duration>> + id: <<Id>> + latest?: <<TransformLatest>> + pivot?: <<TransformPivot>> + retention_policy?: <<TransformRetentionPolicyContainer>> + settings: <<TransformSettings>> + source: <<ReindexSource>> + sync?: <<TransformSyncContainer>> + version: <<VersionString>> + _meta?: <<Metadata>> +} + +---- + + +[discrete] +[[client.transform.upgradeTransforms]] +== `client.transform.upgradeTransforms()` + +Upgrades all transforms. This API identifies transforms that have a legacy configuration format and upgrades them to the latest version. It also cleans up the internal data structures that store the transform state and checkpoints. The upgrade does not affect the source and destination indices. The upgrade also does not affect the roles that transforms use when Elasticsearch security features are enabled; the role used to read source data and write to the destination index remains unchanged. + +{ref}/upgrade-transforms.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: TransformUpgradeTransformsRequest, options?: TransportRequestOptions) => Promise<TransformUpgradeTransformsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface TransformUpgradeTransformsRequest extends <<RequestBase>> { + dry_run?: boolean + timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface TransformUpgradeTransformsResponse { + needs_update: <<integer>> + no_action: <<integer>> + updated: <<integer>> +} + +---- + + diff --git a/docs/reference/update.asciidoc b/docs/reference/update.asciidoc new file mode 100644 index 000000000..025adf4f1 --- /dev/null +++ b/docs/reference/update.asciidoc @@ -0,0 +1,92 @@ +[[reference-update]] +== client.update + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.update]] +== `client.update()` + +Update a document. Updates a document by running a script or passing a partial document. + +{ref}/docs-update.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: UpdateRequest, options?: TransportRequestOptions) => Promise<UpdateResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface UpdateRequest<TDocument = unknown, TPartialDocument = unknown> extends <<RequestBase>> { + id: <<Id>> + index: <<IndexName>> + if_primary_term?: <<long>> + if_seq_no?: <<SequenceNumber>> + lang?: string + refresh?: <<Refresh>> + require_alias?: boolean + retry_on_conflict?: <<integer>> + routing?: <<Routing>> + timeout?: <<Duration>> + wait_for_active_shards?: <<WaitForActiveShards>> + _source_excludes?: <<Fields>> + _source_includes?: <<Fields>> + detect_noop?: boolean + doc?: TPartialDocument + doc_as_upsert?: boolean + script?: <<Script>> | string + scripted_upsert?: boolean + _source?: <<SearchSourceConfig>> + upsert?: TDocument +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type UpdateResponse<TDocument = unknown> = <<UpdateUpdateWriteResponseBase>><TDocument> + +---- + + diff --git a/docs/reference/update_by_query.asciidoc b/docs/reference/update_by_query.asciidoc new file mode 100644 index 000000000..119b762c0 --- /dev/null +++ b/docs/reference/update_by_query.asciidoc @@ -0,0 +1,124 @@ +[[reference-update_by_query]] +== client.updateByQuery + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.updateByQuery]] +== `client.updateByQuery()` + +Update documents. Updates documents that match the specified query. If no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes. 
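+
+A usage sketch, assuming an already-configured `client`; the index, query, and script are illustrative:
+
+[source,ts]
+----
+// Update all documents matching the query, continuing past version conflicts
+const response = await client.updateByQuery({
+  index: 'my-index',
+  conflicts: 'proceed',
+  query: { term: { status: 'stale' } },
+  script: {
+    source: "ctx._source.status = 'refreshed'",
+    lang: 'painless'
+  }
+})
+console.log(response.updated, response.version_conflicts)
+----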
+ +{ref}/docs-update-by-query.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: UpdateByQueryRequest, options?: TransportRequestOptions) => Promise<UpdateByQueryResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface UpdateByQueryRequest extends <<RequestBase>> { + index: <<Indices>> + allow_no_indices?: boolean + analyzer?: string + analyze_wildcard?: boolean + default_operator?: <<QueryDslOperator>> + df?: string + expand_wildcards?: <<ExpandWildcards>> + from?: <<long>> + ignore_unavailable?: boolean + lenient?: boolean + pipeline?: string + preference?: string + q?: string + refresh?: boolean + request_cache?: boolean + requests_per_second?: <<float>> + routing?: <<Routing>> + scroll?: <<Duration>> + scroll_size?: <<long>> + search_timeout?: <<Duration>> + search_type?: <<SearchType>> + slices?: <<Slices>> + sort?: string[] + stats?: string[] + terminate_after?: <<long>> + timeout?: <<Duration>> + version?: boolean + version_type?: boolean + wait_for_active_shards?: <<WaitForActiveShards>> + wait_for_completion?: boolean + max_docs?: <<long>> + query?: <<QueryDslQueryContainer>> + script?: <<Script>> | string + slice?: <<SlicedScroll>> + conflicts?: <<Conflicts>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface UpdateByQueryResponse { + batches?: <<long>> + failures?: <<BulkIndexByScrollFailure>>[] + noops?: <<long>> + deleted?: <<long>> + requests_per_second?: <<float>> + retries?: <<Retries>> + task?: <<TaskId>> + timed_out?: boolean + took?: <<DurationValue>><<<UnitMillis>>> + total?: <<long>> + updated?: <<long>> + version_conflicts?: <<long>> + throttled?: <<Duration>> + throttled_millis?: <<DurationValue>><<<UnitMillis>>> + throttled_until?: <<Duration>> + throttled_until_millis?: <<DurationValue>><<<UnitMillis>>> +} + +---- + + diff --git a/docs/reference/update_by_query_rethrottle.asciidoc b/docs/reference/update_by_query_rethrottle.asciidoc new file mode 100644 index 000000000..6e8be02d4 --- /dev/null +++ b/docs/reference/update_by_query_rethrottle.asciidoc @@ -0,0 +1,76 @@ +[[reference-update_by_query_rethrottle]] +== client.updateByQueryRethrottle + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.updateByQueryRethrottle]] +== `client.updateByQueryRethrottle()` + +Throttle an update by query operation. Change the number of requests per second for a particular update by query operation. 
Rethrottling that speeds up the query takes effect immediately but rethrotting that slows down the query takes effect after completing the current batch to prevent scroll timeouts. + +{ref}/docs-update-by-query.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: UpdateByQueryRethrottleRequest, options?: TransportRequestOptions) => Promise<UpdateByQueryRethrottleResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface UpdateByQueryRethrottleRequest extends <<RequestBase>> { + task_id: <<Id>> + requests_per_second?: <<float>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface UpdateByQueryRethrottleResponse { + nodes: Record<string, <<UpdateByQueryRethrottleUpdateByQueryRethrottleNode>>> +} + +---- + + diff --git a/docs/reference/watcher.asciidoc b/docs/reference/watcher.asciidoc new file mode 100644 index 000000000..327f9a2e1 --- /dev/null +++ b/docs/reference/watcher.asciidoc @@ -0,0 +1,529 @@ +[[reference-watcher]] +== client.watcher + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.watcher.ackWatch]] +== `client.watcher.ackWatch()` + +Acknowledges a watch, manually throttling the execution of the watch's actions. + +{ref}/watcher-api-ack-watch.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: WatcherAckWatchRequest, options?: TransportRequestOptions) => Promise<WatcherAckWatchResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface WatcherAckWatchRequest extends <<RequestBase>> { + watch_id: <<Name>> + action_id?: <<Names>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface WatcherAckWatchResponse { + status: <<WatcherWatchStatus>> +} + +---- + + +[discrete] +[[client.watcher.activateWatch]] +== `client.watcher.activateWatch()` + +Activates a currently inactive watch. 
+ +{ref}/watcher-api-activate-watch.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: WatcherActivateWatchRequest, options?: TransportRequestOptions) => Promise<WatcherActivateWatchResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface WatcherActivateWatchRequest extends <<RequestBase>> { + watch_id: <<Name>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface WatcherActivateWatchResponse { + status: <<WatcherActivationStatus>> +} + +---- + + +[discrete] +[[client.watcher.deactivateWatch]] +== `client.watcher.deactivateWatch()` + +Deactivates a currently active watch. + +{ref}/watcher-api-deactivate-watch.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: WatcherDeactivateWatchRequest, options?: TransportRequestOptions) => Promise<WatcherDeactivateWatchResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface WatcherDeactivateWatchRequest extends <<RequestBase>> { + watch_id: <<Name>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface WatcherDeactivateWatchResponse { + status: <<WatcherActivationStatus>> +} + +---- + + +[discrete] +[[client.watcher.deleteWatch]] +== `client.watcher.deleteWatch()` + +Removes a watch from Watcher. + +{ref}/watcher-api-delete-watch.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: WatcherDeleteWatchRequest, options?: TransportRequestOptions) => Promise<WatcherDeleteWatchResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface WatcherDeleteWatchRequest extends <<RequestBase>> { + id: <<Name>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface WatcherDeleteWatchResponse { + found: boolean + _id: <<Id>> + _version: <<VersionNumber>> +} + +---- + + +[discrete] +[[client.watcher.executeWatch]] +== `client.watcher.executeWatch()` + +This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes. For testing and debugging purposes, you also have fine-grained control on how the watch runs. You can execute the watch without executing all of its actions or alternatively by simulating them. You can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after execution. 
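+
+A usage sketch, assuming an already-configured `client` and an existing watch with the illustrative ID `my_watch`; the condition is ignored and no watch record is persisted:
+
+[source,ts]
+----
+// Force a dry-run execution of a stored watch for debugging
+const response = await client.watcher.executeWatch({
+  id: 'my_watch',
+  ignore_condition: true,
+  record_execution: false
+})
+console.log(response.watch_record)
+----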
+ +{ref}/watcher-api-execute-watch.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: WatcherExecuteWatchRequest, options?: TransportRequestOptions) => Promise<WatcherExecuteWatchResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface WatcherExecuteWatchRequest extends <<RequestBase>> { + id?: <<Id>> + debug?: boolean + action_modes?: Record<string, <<WatcherActionExecutionMode>>> + alternative_input?: Record<string, any> + ignore_condition?: boolean + record_execution?: boolean + simulated_actions?: <<WatcherSimulatedActions>> + trigger_data?: <<WatcherScheduleTriggerEvent>> + watch?: <<WatcherWatch>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface WatcherExecuteWatchResponse { + _id: <<Id>> + watch_record: WatcherExecuteWatchWatchRecord +} + +---- + + +[discrete] +[[client.watcher.getSettings]] +== `client.watcher.getSettings()` + +Retrieve settings for the watcher system index + +{ref}/watcher-api-get-settings.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: WatcherGetSettingsRequest, options?: TransportRequestOptions) => Promise<WatcherGetSettingsResponse> +---- + +[discrete] +[[client.watcher.getWatch]] +== `client.watcher.getWatch()` + +Retrieves a watch by its ID. + +{ref}/watcher-api-get-watch.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: WatcherGetWatchRequest, options?: TransportRequestOptions) => Promise<WatcherGetWatchResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface WatcherGetWatchRequest extends <<RequestBase>> { + id: <<Name>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface WatcherGetWatchResponse { + found: boolean + _id: <<Id>> + status?: <<WatcherWatchStatus>> + watch?: <<WatcherWatch>> + _primary_term?: <<integer>> + _seq_no?: <<SequenceNumber>> + _version?: <<VersionNumber>> +} + +---- + + +[discrete] +[[client.watcher.putWatch]] +== `client.watcher.putWatch()` + +Creates a new watch, or updates an existing one. + +{ref}/watcher-api-put-watch.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: WatcherPutWatchRequest, options?: TransportRequestOptions) => Promise<WatcherPutWatchResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface WatcherPutWatchRequest extends <<RequestBase>> { + id: <<Id>> + active?: boolean + if_primary_term?: <<long>> + if_seq_no?: <<SequenceNumber>> + version?: <<VersionNumber>> + actions?: Record<string, <<WatcherAction>>> + condition?: <<WatcherConditionContainer>> + input?: <<WatcherInputContainer>> + metadata?: <<Metadata>> + throttle_period?: string + transform?: <<TransformContainer>> + trigger?: <<WatcherTriggerContainer>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface WatcherPutWatchResponse { + created: boolean + _id: <<Id>> + _primary_term: <<long>> + _seq_no: <<SequenceNumber>> + _version: <<VersionNumber>> +} + +---- + + +[discrete] +[[client.watcher.queryWatches]] +== `client.watcher.queryWatches()` + +Retrieves stored watches. 
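+
+A usage sketch, assuming an already-configured `client`; the paging values and query are illustrative:
+
+[source,ts]
+----
+// Page through the first 10 stored watches
+const response = await client.watcher.queryWatches({
+  from: 0,
+  size: 10,
+  query: { match_all: {} }
+})
+console.log(response.count, response.watches)
+----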
+ +{ref}/watcher-api-query-watches.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: WatcherQueryWatchesRequest, options?: TransportRequestOptions) => Promise<WatcherQueryWatchesResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface WatcherQueryWatchesRequest extends <<RequestBase>> { + from?: <<integer>> + size?: <<integer>> + query?: <<QueryDslQueryContainer>> + sort?: <<Sort>> + search_after?: <<SortResults>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface WatcherQueryWatchesResponse { + count: <<integer>> + watches: <<WatcherQueryWatch>>[] +} + +---- + + +[discrete] +[[client.watcher.start]] +== `client.watcher.start()` + +Starts Watcher if it is not already running. + +{ref}/watcher-api-start.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: WatcherStartRequest, options?: TransportRequestOptions) => Promise<WatcherStartResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface WatcherStartRequest extends <<RequestBase>> {} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type WatcherStartResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.watcher.stats]] +== `client.watcher.stats()` + +Retrieves the current Watcher metrics. + +{ref}/watcher-api-stats.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: WatcherStatsRequest, options?: TransportRequestOptions) => Promise<WatcherStatsResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface WatcherStatsRequest extends <<RequestBase>> { + metric?: WatcherStatsWatcherMetric | WatcherStatsWatcherMetric[] + emit_stacktraces?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface WatcherStatsResponse { + _nodes: <<NodeStatistics>> + cluster_name: <<Name>> + manually_stopped: boolean + stats: WatcherStatsWatcherNodeStats[] +} + +---- + + +[discrete] +[[client.watcher.stop]] +== `client.watcher.stop()` + +Stops Watcher if it is running. 
+ +{ref}/watcher-api-stop.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: WatcherStopRequest, options?: TransportRequestOptions) => Promise<WatcherStopResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface WatcherStopRequest extends <<RequestBase>> {} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +type WatcherStopResponse = <<AcknowledgedResponseBase>> + +---- + + +[discrete] +[[client.watcher.updateSettings]] +== `client.watcher.updateSettings()` + +Update settings for the watcher system index + +{ref}/watcher-api-update-settings.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: WatcherUpdateSettingsRequest, options?: TransportRequestOptions) => Promise<WatcherUpdateSettingsResponse> +---- + diff --git a/docs/reference/xpack.asciidoc b/docs/reference/xpack.asciidoc new file mode 100644 index 000000000..a7dba9954 --- /dev/null +++ b/docs/reference/xpack.asciidoc @@ -0,0 +1,147 @@ +[[reference-xpack]] +== client.xpack + +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version <version> || +|| || +|| || +|| || +=========================================================================================================================== +//////// +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ + + +[discrete] +[[client.xpack.info]] +== `client.xpack.info()` + +Provides general information about the installed X-Pack features. + +{ref}/info-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: XpackInfoRequest, options?: TransportRequestOptions) => Promise<XpackInfoResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface XpackInfoRequest extends <<RequestBase>> { + categories?: XpackInfoXPackCategory[] + accept_enterprise?: boolean + human?: boolean +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface XpackInfoResponse { + build: XpackInfoBuildInformation + features: XpackInfoFeatures + license: XpackInfoMinimalLicenseInformation + tagline: string +} + +---- + + +[discrete] +[[client.xpack.usage]] +== `client.xpack.usage()` + +This API provides information about which features are currently enabled and available under the current license and some usage statistics. 
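+
+A usage sketch, assuming an already-configured `client`:
+
+[source,ts]
+----
+// List which X-Pack features report usage information
+const usage = await client.xpack.usage()
+console.log(Object.keys(usage))
+----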
+ +{ref}/usage-api.html[{es} documentation] +[discrete] +=== Function signature + +[source,ts] +---- +(request: XpackUsageRequest, options?: TransportRequestOptions) => Promise<XpackUsageResponse> +---- + +[discrete] +=== Request + +[source,ts,subs=+macros] +---- +interface XpackUsageRequest extends <<RequestBase>> { + master_timeout?: <<Duration>> +} + +---- + + +[discrete] +=== Response + +[source,ts,subs=+macros] +---- +interface XpackUsageResponse { + aggregate_metric: XpackUsageBase + analytics: XpackUsageAnalytics + archive: XpackUsageArchive + watcher: XpackUsageWatcher + ccr: XpackUsageCcr + data_frame?: XpackUsageBase + data_science?: XpackUsageBase + data_streams?: XpackUsageDataStreams + data_tiers: XpackUsageDataTiers + enrich?: XpackUsageBase + eql: XpackUsageEql + flattened?: XpackUsageFlattened + frozen_indices: XpackUsageFrozenIndices + graph: XpackUsageBase + health_api?: XpackUsageHealthStatistics + ilm: XpackUsageIlm + logstash: XpackUsageBase + ml: XpackUsageMachineLearning + monitoring: XpackUsageMonitoring + rollup: XpackUsageBase + runtime_fields?: XpackUsageRuntimeFieldTypes + spatial: XpackUsageBase + searchable_snapshots: XpackUsageSearchableSnapshots + security: XpackUsageSecurity + slm: XpackUsageSlm + sql: XpackUsageSql + transform: XpackUsageBase + vectors?: XpackUsageVector + voting_only: XpackUsageBase +} + +---- + + diff --git a/package.json b/package.json index 68e183f6c..3552eaa0a 100644 --- a/package.json +++ b/package.json @@ -57,6 +57,8 @@ }, "devDependencies": { "@elastic/request-converter": "8.16.2", + "@microsoft/api-extractor": "^7.47.11", + "@microsoft/api-extractor-model": "^7.29.8", "@sinonjs/fake-timers": "github:sinonjs/fake-timers#0bfffc1", "@types/debug": "4.1.12", "@types/ms": "0.7.34", diff --git a/scripts/docgen.mjs b/scripts/docgen.mjs new file mode 100644 index 000000000..6817c4ca6 --- /dev/null +++ b/scripts/docgen.mjs @@ -0,0 +1,371 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +import path from 'path' +import fs from 'fs/promises' +import * as Extractor from '@microsoft/api-extractor-model' + +const header = `//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. 
|| +|| You should update the script that does the generation, which can be found in scripts/docgen.mjs. || +|| || +|| || +|| || +=========================================================================================================================== +//////// + +++++ +<style> +.lang-ts a.xref { + text-decoration: underline !important; +} +</style> +++++ +` + +const linkedRefs = new Set() +const documented = new Set() + +function nodesToText (nodes) { + let text = '' + for (const node of nodes) { + if (node.kind === 'Paragraph') { + for (const pNode of node.nodes) { + if (pNode.text) { + text += pNode.text + ' ' + } else if (pNode.kind === 'CodeSpan') { + text += '`' + pNode.code + '`' + } + } + } + } + text = text.replace(/\s+/g, ' ') + return text +} + +const skippableReferences = [ + 'Record', + 'URL', + 'Array', + 'Promise', + 'inspect.custom', + 'http.IncomingHttpHeaders', +] + +function generatePropertyType (tokens) { + let code = '' + tokens.forEach(token => { + if (token.kind === 'Reference' && !skippableReferences.includes(token.text)) { + let { text } = token + if (text.startsWith('T.')) { + text = text.split('.')[1] + } else if (text.startsWith('TB.')) { + text = text.split('.')[1] + '_2' + } + linkedRefs.add(text) + code += `<<${text}>>` + } else { + code += token.text.replace(/\n/g, '') + } + }) + return code.replace(/^export (declare )?/, '').replace(/\s+/g, ' ').trim() +} + +function generateDescription (comment) { + let code = '' + + if (comment == null) return code + + const { summarySection, customBlocks } = comment + + if (summarySection != null || customBlocks != null) { + if (summarySection != null) { + const summary = nodesToText(summarySection.nodes) + code += `${summary}\n\n` + } + + if (customBlocks != null) { + let defaultValue = '' + for (const block of customBlocks) { + if (block.blockTag.tagNameWithUpperCase === '@DEFAULTVALUE') { + defaultValue = nodesToText(block.content.nodes) + } + } + if (defaultValue.length > 0) { + code += `Default value: ${defaultValue}` + } + } + } + + return code.trim() +} + +function generateApiFunction (spec) { + let code = `[[${spec.displayName}_${spec.overloadIndex ?? 
''}]]\n`
+  code += '[source,ts,subs=+macros]\n'
+  code += '----\n'
+  code += generatePropertyType(spec.excerptTokens)
+  code += '\n'
+  code += '----\n'
+  return code
+}
+
+function generateInterface (spec) {
+  let code = `[[${spec.displayName}]]\n`
+  code += `== Interface ${spec.displayName}\n\n`
+  code += '[%autowidth]\n'
+  code += '|===\n'
+  code += '|Name |Type |Description\n\n'
+
+  for (const member of spec.members) {
+    if (member.propertyTypeExcerpt == null) continue
+    code += `|\`${member.displayName}\`\n`
+    code += `|${generatePropertyType(member.propertyTypeExcerpt.spannedTokens)}\n`
+    code += `|${generateDescription(member.tsdocComment, false)}\n`
+  }
+  code += '|===\n'
+
+  return code
+}
+
+function generateClass (spec) {
+  let code = `[[${spec.displayName}]]\n`
+  code += `== ${spec.displayName}\n`
+
+  code += '\n=== Constructor\n\n'
+  const cons = spec.members.filter(m => m.kind === 'Constructor')
+  for (const con of cons) {
+    code += '[source,ts,subs=+macros]\n'
+    code += '----\n'
+    code += generatePropertyType(con.excerptTokens).replace(/^constructor/, `new ${spec.displayName}`)
+    code += '\n'
+    code += '----\n'
+  }
+
+  // generate properties
+  const props = spec.members.filter(m => m.kind === 'Property')
+  if (props.length > 0) {
+    code += '\n=== Properties\n'
+    code += '[%autowidth]\n'
+    code += '|===\n'
+    code += '|Name |Type |Description\n\n'
+    for (const prop of props) {
+      if (prop.propertyTypeExcerpt == null) continue
+      if (prop.displayName.startsWith('[k')) continue
+
+      code += `|\`${prop.displayName}\`\n`
+      code += `|${generatePropertyType(prop.propertyTypeExcerpt.spannedTokens)}\n`
+      code += `|${generateDescription(prop.tsdocComment, false)}\n`
+    }
+    code += '|===\n'
+  }
+
+  // generate methods
+  const methods = spec.members.filter(m => m.kind === 'Method')
+  if (methods.length > 0) {
+    code += '\n=== Methods\n'
+    code += '[%autowidth]\n'
+    code += '|===\n'
+    code += '|Name |Signature |Description\n\n'
+    for (const method of methods) {
+      code += `|\`${method.displayName}\`\n`
+      code += `|\`${generatePropertyType(method.excerptTokens)}\`\n`
+      code += `|${generateDescription(method.tsdocComment, false)}\n`
+    }
+    code += '|===\n'
+  }
+
+  return code
+}
+
+function generateAlias (spec) {
+  let code = `[[${spec.displayName}]]\n`
+  code += '[discrete]\n'
+  code += `== \`${spec.displayName}\`\n`
+  code += '[source,ts,subs=+macros]\n'
+  code += '----\n'
+  code += `${generatePropertyType(spec.excerpt.tokens)}\n`
+  code += '----\n'
+  return code
+}
+
+/**
+ * Generates documentation for ClientOptions interface
+ * @param spec {Extractor.ApiItem}
+ * @returns {string} Asciidoc markup
+ */
+// function generateClientOptions (spec) {
+//   let code = `[reference-client-options-interface]\n\n== ClientOptions\n\n${header}\n\n`
+//   code += `[[${spec.displayName}]]\n`
+//   code += `=== ${spec.displayName}\n\n`
+//   code += generateInterface(spec)
+//   return code
+// }
+
+// const standardTypes = {
+//   'TlsConnectionOptions': 'https://nodejs.org/api/tls.html#tlsconnectoptions-callback[Node.js TLS connection options]',
+// }
+
+/**
+ * @param spec {Extractor.ApiItem}
+ * @param model {Extractor.ApiModel}
+ * @returns string
+ */
+// function generateClientOptionsReference (spec, model) {
+//   let code = `${header}\n\n`
+//   for (const member of spec.members) {
+//     for (const token of member.excerptTokens) {
+//       if (token.kind === 'Reference' && !documented.has(token.text)) {
+//         documented.add(token.text)
+//         code += `[discrete]\n`
+//         code += `[[${token.text}]]\n`
+//         code += `=== 
${token.text}\n\n` +// +// const item = model.packages[0].entryPoints[0].members.find(member => member.displayName === token.text) +// if (item != null) { +// code += generateDescription(item.tsdocComment, false) +// switch (item.kind) { +// case 'Interface': +// code += generateInterface(item) +// break; +// case 'TypeAlias': +// code += generateAlias(item) +// break +// case 'Class': +// console.log('Class', token.text) +// code += generateClass(item) +// break +// default: +// code += 'Undocumented type\n' +// break +// } +// } else if (standardTypes[token.text] != null) { +// code += `${standardTypes[token.text]}\n` +// } else { +// code += 'Unknown\n' +// } +// code += '\n' +// } +// } +// } +// return code +// } + +/** + * Generates documentation for the Client class + * @param spec {Extractor.ApiItem} + * @returns {string} Asciidoc markup + */ +// function generateClientDocs (spec) { +// let code = `[reference-client-class]\n\n== Client\n\n${header}\n\n` +// +// // generate constructor and client options +// code += '[discrete]\n' +// code += '=== Constructor\n\n' +// code += '[source,ts,subs=+macros]\n' +// code += '----\n' +// code += 'new Client(options: <<ClientOptions>>): Client\n' +// code += '----\n\n' +// +// // generate methods +// code += '[discrete]\n' +// code += '=== Methods\n\n' +// for (const method of spec.members.filter(m => m.kind === 'Method')) { +// code += `[[Client.${method.displayName}]]\n` +// code += '[discrete]\n' +// code += `==== Client.${method.displayName}\n\n` +// code += 'TODO\n\n' +// } +// +// // generate properties +// code += '[discrete]\n' +// code += '=== Properties\n\n' +// for (const prop of spec.members.filter(m => m.kind === 'Property')) { +// code += `[[Client.${prop.displayName}]]\n` +// code += '[discrete]\n' +// code += `==== Client.${prop.displayName}\n\n` +// code += 'TODO\n\n' +// } +// +// return code +// } + +async function write (name, code) { + const filePath = path.join(import.meta.dirname, '..', 'docs', 'reference2', `${name}.asciidoc`) + // console.log(`writing ${filePath}`) + await fs.writeFile(filePath, code, 'utf8') +} + +async function start () { + const model = new Extractor.ApiModel() + const pkg = model.loadPackage(path.join(import.meta.dirname, '..', 'api-extractor', 'elasticsearch.api.json')) + const entry = pkg.entryPoints[0] + + for (const member of entry.members) { + if (member.displayName.endsWith('_2')) continue + switch (member.kind) { + case 'Class': + await write(member.displayName, generateClass(member)) + break + case 'Interface': + await write(member.displayName, generateInterface(member)) + break + case 'TypeAlias': + await write(member.displayName, generateAlias(member)) + break + case 'Function': + if (member.fileUrlPath.startsWith('lib/api/api')) { + // if (member.displayName === 'CountApi') console.log(member) + // TODO: drop this: That stuff + // TODO: sub name with `client.foo.bar` + await write(`${member.displayName}_${member.overloadIndex ?? ''}`, generateApiFunction(member)) + // TODO: generate rollup page for each override + } + // console.log(member) + // process.exit(0) + break + case 'Namespace': + case 'Variable': + break + default: + console.log('unsupported type', member.kind, member.displayName) + break + } + // TODO: generate rollup page that includes a whole API namespace's functions, requests, responses + } +} + +start() + .then(() => process.exit(0)) + .catch(err => { + console.error(err) + process.exit(1) + })
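
For context on where the `api-extractor/elasticsearch.api.json` doc model consumed by `scripts/docgen.mjs` comes from: it is produced by API Extractor from the compiled type declarations. Below is a minimal sketch of a programmatic invocation using the `@microsoft/api-extractor` package added to the devDependencies above; the helper script name and the config path are assumptions for illustration, not part of this change (the CLI equivalent would be `npx api-extractor run --local`).

[source,js]
----
// build-api-model.mjs (hypothetical helper, not included in this change)
import path from 'path'
import { Extractor, ExtractorConfig } from '@microsoft/api-extractor'

// Load the API Extractor config (assumed to sit at the repository root and to
// point its doc model output at api-extractor/elasticsearch.api.json)
const config = ExtractorConfig.loadFileAndPrepare(path.resolve('api-extractor.json'))

// Run API Extractor against the compiled .d.ts files; with the doc model
// enabled this writes the .api.json file that docgen.mjs loads
const result = Extractor.invoke(config, {
  localBuild: true,
  showVerboseMessages: true
})

if (!result.succeeded) {
  console.error(`API Extractor failed: ${result.errorCount} error(s), ${result.warningCount} warning(s)`)
  process.exit(1)
}
----

Once the doc model exists, running `node scripts/docgen.mjs` walks the package entry point and writes one `.asciidoc` file per exported class, interface, type alias and API function into `docs/reference2/`.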